VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.c@ 19388

Last change on this file: revision 19388, checked in by vboxsync, 16 years ago:

vboxdrv, Solaris/VBoxNetFlt: RTStrDup and some fixes. (flt part untested still)

1/* $Revision: 19388 $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#define LOG_GROUP LOG_GROUP_SUP_DRV
35#include "SUPDrvInternal.h"
36#ifndef PAGE_SHIFT
37# include <iprt/param.h>
38#endif
39#include <iprt/alloc.h>
40#include <iprt/semaphore.h>
41#include <iprt/spinlock.h>
42#include <iprt/thread.h>
43#include <iprt/process.h>
44#include <iprt/mp.h>
45#include <iprt/power.h>
46#include <iprt/cpuset.h>
47#include <iprt/uuid.h>
48#include <VBox/param.h>
49#include <VBox/log.h>
50#include <VBox/err.h>
51#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
52# include <iprt/crc32.h>
53# include <iprt/net.h>
54# include <iprt/string.h>
55#endif
56/* VBox/x86.h not compatible with the Linux kernel sources */
57#ifdef RT_OS_LINUX
58# define X86_CPUID_VENDOR_AMD_EBX 0x68747541
59# define X86_CPUID_VENDOR_AMD_ECX 0x444d4163
60# define X86_CPUID_VENDOR_AMD_EDX 0x69746e65
61#else
62# include <VBox/x86.h>
63#endif
64
65/*
66 * Logging assignments:
67 * Log - useful stuff, like failures.
68 * LogFlow - program flow, except the really noisy bits.
69 * Log2 - Cleanup.
70 * Log3 - Loader flow noise.
71 * Log4 - Call VMMR0 flow noise.
72 * Log5 - Native yet-to-be-defined noise.
73 * Log6 - Native ioctl flow noise.
74 *
75 * Logging requires BUILD_TYPE=debug and possibly changes to the logger
76 * instantiation in log-vbox.c(pp).
77 */
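/*
 * Illustrative sketch (not part of this file; the function name and arguments
 * below are hypothetical): how the level assignments above translate into
 * actual logging calls.
 */
#if 0
static void supdrvExampleLogUsage(void *pvSession, int rc)
{
    LogFlow(("supdrvExampleLogUsage: pvSession=%p\n", pvSession)); /* program flow */
    if (RT_FAILURE(rc))
        Log(("supdrvExampleLogUsage: failed, rc=%d\n", rc));       /* useful stuff, like failures */
    Log2(("supdrvExampleLogUsage: cleaning up\n"));                /* cleanup noise */
    Log4(("supdrvExampleLogUsage: calling VMMR0\n"));              /* VMMR0 call flow noise */
}
#endif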
78
79
80/*******************************************************************************
81* Defined Constants And Macros *
82*******************************************************************************/
83/* from x86.h - clashes with linux thus this duplication */
84#undef X86_CR0_PG
85#define X86_CR0_PG RT_BIT(31)
86#undef X86_CR0_PE
87#define X86_CR0_PE RT_BIT(0)
88#undef X86_CPUID_AMD_FEATURE_EDX_NX
89#define X86_CPUID_AMD_FEATURE_EDX_NX RT_BIT(20)
90#undef MSR_K6_EFER
91#define MSR_K6_EFER 0xc0000080
92#undef MSR_K6_EFER_NXE
93#define MSR_K6_EFER_NXE RT_BIT(11)
94#undef MSR_K6_EFER_LMA
95#define MSR_K6_EFER_LMA RT_BIT(10)
96#undef X86_CR4_PGE
97#define X86_CR4_PGE RT_BIT(7)
98#undef X86_CR4_PAE
99#define X86_CR4_PAE RT_BIT(5)
100#undef X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
101#define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE RT_BIT(29)
102
103
104/** The frequency by which we recalculate the u32UpdateHz and
105 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
106#define GIP_UPDATEHZ_RECALC_FREQ 0x800
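/*
 * Illustrative sketch (an assumption, not lifted from this file): requiring a
 * power of 2 lets the recalculation point be detected with a cheap mask rather
 * than a modulo in a per-tick update routine (iTick is hypothetical here).
 */
#if 0
    if (!(iTick & (GIP_UPDATEHZ_RECALC_FREQ - 1)))
    {
        /* recalculate u32UpdateHz and u32UpdateIntervalNS here */
    }
#endif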
107
108/**
109 * Validates a session pointer.
110 *
111 * @returns true/false accordingly.
112 * @param pSession The session.
113 */
114#define SUP_IS_SESSION_VALID(pSession) \
115 ( VALID_PTR(pSession) \
116 && pSession->u32Cookie == BIRD_INV)
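/*
 * Illustrative sketch (hypothetical caller, not from this file): the macro is
 * intended as a cheap sanity check before dereferencing session data, e.g.:
 */
#if 0
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
#endif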
117
118/** @def VBOX_SVN_REV
119 * The makefile should define this if it can. */
120#ifndef VBOX_SVN_REV
121# define VBOX_SVN_REV 0
122#endif
123
124/*******************************************************************************
125* Internal Functions *
126*******************************************************************************/
127static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
128static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
129static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
130static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
131static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
132static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
133static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
134static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
135static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
136static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
137static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
138static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
139static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq);
140static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt);
141#ifdef RT_OS_WINDOWS
142static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
143static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3);
144#endif /* RT_OS_WINDOWS */
145static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
146static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
147static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
148static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
149static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
150
151#ifdef RT_WITH_W64_UNWIND_HACK
152DECLASM(int) supdrvNtWrapVMMR0EntryEx(PFNRT pfnVMMR0EntryEx, PVM pVM, unsigned idCpu, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession);
153DECLASM(int) supdrvNtWrapVMMR0EntryFast(PFNRT pfnVMMR0EntryFast, PVM pVM, unsigned idCpu, unsigned uOperation);
154DECLASM(void) supdrvNtWrapObjDestructor(PFNRT pfnDestruction, void *pvObj, void *pvUser1, void *pvUser2);
155DECLASM(void *) supdrvNtWrapQueryFactoryInterface(PFNRT pfnQueryFactoryInterface, struct SUPDRVFACTORY const *pSupDrvFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid);
156DECLASM(int) supdrvNtWrapModuleInit(PFNRT pfnModuleInit);
157DECLASM(void) supdrvNtWrapModuleTerm(PFNRT pfnModuleTerm);
158DECLASM(int) supdrvNtWrapServiceReqHandler(PFNRT pfnServiceReqHandler, PSUPDRVSESSION pSession, uint32_t uOperation, uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr);
159
160DECLASM(int) UNWIND_WRAP(SUPR0ComponentRegisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
161DECLASM(int) UNWIND_WRAP(SUPR0ComponentDeregisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
162DECLASM(int) UNWIND_WRAP(SUPR0ComponentQueryFactory)(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf);
163DECLASM(void *) UNWIND_WRAP(SUPR0ObjRegister)(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2);
164DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRef)(void *pvObj, PSUPDRVSESSION pSession);
165DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRefEx)(void *pvObj, PSUPDRVSESSION pSession, bool fNoPreempt);
166DECLASM(int) UNWIND_WRAP(SUPR0ObjRelease)(void *pvObj, PSUPDRVSESSION pSession);
167DECLASM(int) UNWIND_WRAP(SUPR0ObjVerifyAccess)(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName);
168DECLASM(int) UNWIND_WRAP(SUPR0LockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
169DECLASM(int) UNWIND_WRAP(SUPR0UnlockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
170DECLASM(int) UNWIND_WRAP(SUPR0ContAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys);
171DECLASM(int) UNWIND_WRAP(SUPR0ContFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
172DECLASM(int) UNWIND_WRAP(SUPR0LowAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages);
173DECLASM(int) UNWIND_WRAP(SUPR0LowFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
174DECLASM(int) UNWIND_WRAP(SUPR0MemAlloc)(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3);
175DECLASM(int) UNWIND_WRAP(SUPR0MemGetPhys)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages);
176DECLASM(int) UNWIND_WRAP(SUPR0MemFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
177DECLASM(int) UNWIND_WRAP(SUPR0PageAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages);
178DECLASM(int) UNWIND_WRAP(SUPR0PageFree)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
179//DECLASM(int) UNWIND_WRAP(SUPR0Printf)(const char *pszFormat, ...);
180DECLASM(SUPPAGINGMODE) UNWIND_WRAP(SUPR0GetPagingMode)(void);
181DECLASM(void *) UNWIND_WRAP(RTMemAlloc)(size_t cb) RT_NO_THROW;
182DECLASM(void *) UNWIND_WRAP(RTMemAllocZ)(size_t cb) RT_NO_THROW;
183DECLASM(void) UNWIND_WRAP(RTMemFree)(void *pv) RT_NO_THROW;
184DECLASM(void *) UNWIND_WRAP(RTMemDup)(const void *pvSrc, size_t cb) RT_NO_THROW;
185DECLASM(void *) UNWIND_WRAP(RTMemDupEx)(const void *pvSrc, size_t cbSrc, size_t cbExtra) RT_NO_THROW;
186DECLASM(void *) UNWIND_WRAP(RTMemRealloc)(void *pvOld, size_t cbNew) RT_NO_THROW;
187DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocLow)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
188DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPage)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
189DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhys)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
190DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhysNC)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
191DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocCont)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
192DECLASM(int) UNWIND_WRAP(RTR0MemObjEnterPhys)(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb);
193DECLASM(int) UNWIND_WRAP(RTR0MemObjLockUser)(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process);
194DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernel)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt);
195DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernelEx)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt, size_t offSub, size_t cbSub);
196DECLASM(int) UNWIND_WRAP(RTR0MemObjMapUser)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process);
197/*DECLASM(void *) UNWIND_WRAP(RTR0MemObjAddress)(RTR0MEMOBJ MemObj); - not necessary */
198/*DECLASM(RTR3PTR) UNWIND_WRAP(RTR0MemObjAddressR3)(RTR0MEMOBJ MemObj); - not necessary */
199/*DECLASM(size_t) UNWIND_WRAP(RTR0MemObjSize)(RTR0MEMOBJ MemObj); - not necessary */
200/*DECLASM(bool) UNWIND_WRAP(RTR0MemObjIsMapping)(RTR0MEMOBJ MemObj); - not necessary */
201/*DECLASM(RTHCPHYS) UNWIND_WRAP(RTR0MemObjGetPagePhysAddr)(RTR0MEMOBJ MemObj, size_t iPage); - not necessary */
202DECLASM(int) UNWIND_WRAP(RTR0MemObjFree)(RTR0MEMOBJ MemObj, bool fFreeMappings);
203/* RTProcSelf - not necessary */
204/* RTR0ProcHandleSelf - not necessary */
205DECLASM(int) UNWIND_WRAP(RTSemFastMutexCreate)(PRTSEMFASTMUTEX pMutexSem);
206DECLASM(int) UNWIND_WRAP(RTSemFastMutexDestroy)(RTSEMFASTMUTEX MutexSem);
207DECLASM(int) UNWIND_WRAP(RTSemFastMutexRequest)(RTSEMFASTMUTEX MutexSem);
208DECLASM(int) UNWIND_WRAP(RTSemFastMutexRelease)(RTSEMFASTMUTEX MutexSem);
209DECLASM(int) UNWIND_WRAP(RTSemEventCreate)(PRTSEMEVENT pEventSem);
210DECLASM(int) UNWIND_WRAP(RTSemEventSignal)(RTSEMEVENT EventSem);
211DECLASM(int) UNWIND_WRAP(RTSemEventWait)(RTSEMEVENT EventSem, unsigned cMillies);
212DECLASM(int) UNWIND_WRAP(RTSemEventWaitNoResume)(RTSEMEVENT EventSem, unsigned cMillies);
213DECLASM(int) UNWIND_WRAP(RTSemEventDestroy)(RTSEMEVENT EventSem);
214DECLASM(int) UNWIND_WRAP(RTSemEventMultiCreate)(PRTSEMEVENTMULTI pEventMultiSem);
215DECLASM(int) UNWIND_WRAP(RTSemEventMultiSignal)(RTSEMEVENTMULTI EventMultiSem);
216DECLASM(int) UNWIND_WRAP(RTSemEventMultiReset)(RTSEMEVENTMULTI EventMultiSem);
217DECLASM(int) UNWIND_WRAP(RTSemEventMultiWait)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
218DECLASM(int) UNWIND_WRAP(RTSemEventMultiWaitNoResume)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
219DECLASM(int) UNWIND_WRAP(RTSemEventMultiDestroy)(RTSEMEVENTMULTI EventMultiSem);
220DECLASM(int) UNWIND_WRAP(RTSpinlockCreate)(PRTSPINLOCK pSpinlock);
221DECLASM(int) UNWIND_WRAP(RTSpinlockDestroy)(RTSPINLOCK Spinlock);
222DECLASM(void) UNWIND_WRAP(RTSpinlockAcquire)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
223DECLASM(void) UNWIND_WRAP(RTSpinlockRelease)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
224DECLASM(void) UNWIND_WRAP(RTSpinlockAcquireNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
225DECLASM(void) UNWIND_WRAP(RTSpinlockReleaseNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
226/* RTTimeNanoTS - not necessary */
227/* RTTimeMilliTS - not necessary */
228/* RTTimeSystemNanoTS - not necessary */
229/* RTTimeSystemMilliTS - not necessary */
230/* RTThreadNativeSelf - not necessary */
231DECLASM(int) UNWIND_WRAP(RTThreadSleep)(unsigned cMillies);
232DECLASM(bool) UNWIND_WRAP(RTThreadYield)(void);
233#if 0
234/* RTThreadSelf - not necessary */
235DECLASM(int) UNWIND_WRAP(RTThreadCreate)(PRTTHREAD pThread, PFNRTTHREAD pfnThread, void *pvUser, size_t cbStack,
236 RTTHREADTYPE enmType, unsigned fFlags, const char *pszName);
237DECLASM(RTNATIVETHREAD) UNWIND_WRAP(RTThreadGetNative)(RTTHREAD Thread);
238DECLASM(int) UNWIND_WRAP(RTThreadWait)(RTTHREAD Thread, unsigned cMillies, int *prc);
239DECLASM(int) UNWIND_WRAP(RTThreadWaitNoResume)(RTTHREAD Thread, unsigned cMillies, int *prc);
240DECLASM(const char *) UNWIND_WRAP(RTThreadGetName)(RTTHREAD Thread);
241DECLASM(const char *) UNWIND_WRAP(RTThreadSelfName)(void);
242DECLASM(RTTHREADTYPE) UNWIND_WRAP(RTThreadGetType)(RTTHREAD Thread);
243DECLASM(int) UNWIND_WRAP(RTThreadUserSignal)(RTTHREAD Thread);
244DECLASM(int) UNWIND_WRAP(RTThreadUserReset)(RTTHREAD Thread);
245DECLASM(int) UNWIND_WRAP(RTThreadUserWait)(RTTHREAD Thread, unsigned cMillies);
246DECLASM(int) UNWIND_WRAP(RTThreadUserWaitNoResume)(RTTHREAD Thread, unsigned cMillies);
247#endif
248/* RTLogDefaultInstance - a bit of a gamble, but we do not want the overhead! */
249/* RTMpCpuId - not necessary */
250/* RTMpCpuIdFromSetIndex - not necessary */
251/* RTMpCpuIdToSetIndex - not necessary */
252/* RTMpIsCpuPossible - not necessary */
253/* RTMpGetCount - not necessary */
254/* RTMpGetMaxCpuId - not necessary */
255/* RTMpGetOnlineCount - not necessary */
256/* RTMpGetOnlineSet - not necessary */
257/* RTMpGetSet - not necessary */
258/* RTMpIsCpuOnline - not necessary */
259DECLASM(int) UNWIND_WRAP(RTMpOnAll)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
260DECLASM(int) UNWIND_WRAP(RTMpOnOthers)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
261DECLASM(int) UNWIND_WRAP(RTMpOnSpecific)(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
262DECLASM(int) UNWIND_WRAP(RTMpIsCpuWorkPending)(void);
263/* RTLogRelDefaultInstance - not necessary. */
264DECLASM(int) UNWIND_WRAP(RTLogSetDefaultInstanceThread)(PRTLOGGER pLogger, uintptr_t uKey);
265/* RTLogLogger - can't wrap this buster. */
266/* RTLogLoggerEx - can't wrap this buster. */
267DECLASM(void) UNWIND_WRAP(RTLogLoggerExV)(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args);
268/* RTLogPrintf - can't wrap this buster. */ /** @todo provide va_list log wrappers in RuntimeR0. */
269DECLASM(void) UNWIND_WRAP(RTLogPrintfV)(const char *pszFormat, va_list args);
270DECLASM(void) UNWIND_WRAP(AssertMsg1)(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction);
271/* AssertMsg2 - can't wrap this buster. */
272#endif /* RT_WITH_W64_UNWIND_HACK */
273
274
275/*******************************************************************************
276* Global Variables *
277*******************************************************************************/
278/**
279 * Array of the R0 SUP API.
280 */
281static SUPFUNC g_aFunctions[] =
282{
283 /* name function */
284 /* Entries with absolute addresses determined at runtime, fixup
285 code makes ugly ASSUMPTIONS about the order here: */
286 { "SUPR0AbsIs64bit", (void *)0 },
287 { "SUPR0Abs64bitKernelCS", (void *)0 },
288 { "SUPR0Abs64bitKernelSS", (void *)0 },
289 { "SUPR0Abs64bitKernelDS", (void *)0 },
290 { "SUPR0AbsKernelCS", (void *)0 },
291 { "SUPR0AbsKernelSS", (void *)0 },
292 { "SUPR0AbsKernelDS", (void *)0 },
293 { "SUPR0AbsKernelES", (void *)0 },
294 { "SUPR0AbsKernelFS", (void *)0 },
295 { "SUPR0AbsKernelGS", (void *)0 },
296 /* Normal function pointers: */
297 { "SUPR0ComponentRegisterFactory", (void *)UNWIND_WRAP(SUPR0ComponentRegisterFactory) },
298 { "SUPR0ComponentDeregisterFactory", (void *)UNWIND_WRAP(SUPR0ComponentDeregisterFactory) },
299 { "SUPR0ComponentQueryFactory", (void *)UNWIND_WRAP(SUPR0ComponentQueryFactory) },
300 { "SUPR0ObjRegister", (void *)UNWIND_WRAP(SUPR0ObjRegister) },
301 { "SUPR0ObjAddRef", (void *)UNWIND_WRAP(SUPR0ObjAddRef) },
302 { "SUPR0ObjAddRefEx", (void *)UNWIND_WRAP(SUPR0ObjAddRefEx) },
303 { "SUPR0ObjRelease", (void *)UNWIND_WRAP(SUPR0ObjRelease) },
304 { "SUPR0ObjVerifyAccess", (void *)UNWIND_WRAP(SUPR0ObjVerifyAccess) },
305 { "SUPR0LockMem", (void *)UNWIND_WRAP(SUPR0LockMem) },
306 { "SUPR0UnlockMem", (void *)UNWIND_WRAP(SUPR0UnlockMem) },
307 { "SUPR0ContAlloc", (void *)UNWIND_WRAP(SUPR0ContAlloc) },
308 { "SUPR0ContFree", (void *)UNWIND_WRAP(SUPR0ContFree) },
309 { "SUPR0LowAlloc", (void *)UNWIND_WRAP(SUPR0LowAlloc) },
310 { "SUPR0LowFree", (void *)UNWIND_WRAP(SUPR0LowFree) },
311 { "SUPR0MemAlloc", (void *)UNWIND_WRAP(SUPR0MemAlloc) },
312 { "SUPR0MemGetPhys", (void *)UNWIND_WRAP(SUPR0MemGetPhys) },
313 { "SUPR0MemFree", (void *)UNWIND_WRAP(SUPR0MemFree) },
314 { "SUPR0PageAlloc", (void *)UNWIND_WRAP(SUPR0PageAlloc) },
315 { "SUPR0PageFree", (void *)UNWIND_WRAP(SUPR0PageFree) },
316 { "SUPR0Printf", (void *)SUPR0Printf }, /** @todo needs wrapping? */
317 { "SUPR0GetPagingMode", (void *)UNWIND_WRAP(SUPR0GetPagingMode) },
318 { "SUPR0EnableVTx", (void *)SUPR0EnableVTx },
319 { "RTMemAlloc", (void *)UNWIND_WRAP(RTMemAlloc) },
320 { "RTMemAllocZ", (void *)UNWIND_WRAP(RTMemAllocZ) },
321 { "RTMemFree", (void *)UNWIND_WRAP(RTMemFree) },
322 /*{ "RTMemDup", (void *)UNWIND_WRAP(RTMemDup) },
323 { "RTMemDupEx", (void *)UNWIND_WRAP(RTMemDupEx) },*/
324 { "RTMemRealloc", (void *)UNWIND_WRAP(RTMemRealloc) },
325 { "RTR0MemObjAllocLow", (void *)UNWIND_WRAP(RTR0MemObjAllocLow) },
326 { "RTR0MemObjAllocPage", (void *)UNWIND_WRAP(RTR0MemObjAllocPage) },
327 { "RTR0MemObjAllocPhys", (void *)UNWIND_WRAP(RTR0MemObjAllocPhys) },
328 { "RTR0MemObjAllocPhysNC", (void *)UNWIND_WRAP(RTR0MemObjAllocPhysNC) },
329 { "RTR0MemObjAllocCont", (void *)UNWIND_WRAP(RTR0MemObjAllocCont) },
330 { "RTR0MemObjEnterPhys", (void *)UNWIND_WRAP(RTR0MemObjEnterPhys) },
331 { "RTR0MemObjLockUser", (void *)UNWIND_WRAP(RTR0MemObjLockUser) },
332 { "RTR0MemObjMapKernel", (void *)UNWIND_WRAP(RTR0MemObjMapKernel) },
333 { "RTR0MemObjMapKernelEx", (void *)UNWIND_WRAP(RTR0MemObjMapKernelEx) },
334 { "RTR0MemObjMapUser", (void *)UNWIND_WRAP(RTR0MemObjMapUser) },
335 { "RTR0MemObjAddress", (void *)RTR0MemObjAddress },
336 { "RTR0MemObjAddressR3", (void *)RTR0MemObjAddressR3 },
337 { "RTR0MemObjSize", (void *)RTR0MemObjSize },
338 { "RTR0MemObjIsMapping", (void *)RTR0MemObjIsMapping },
339 { "RTR0MemObjGetPagePhysAddr", (void *)RTR0MemObjGetPagePhysAddr },
340 { "RTR0MemObjFree", (void *)UNWIND_WRAP(RTR0MemObjFree) },
341/* These don't work yet on linux - use fast mutexes!
342 { "RTSemMutexCreate", (void *)RTSemMutexCreate },
343 { "RTSemMutexRequest", (void *)RTSemMutexRequest },
344 { "RTSemMutexRelease", (void *)RTSemMutexRelease },
345 { "RTSemMutexDestroy", (void *)RTSemMutexDestroy },
346*/
347 { "RTProcSelf", (void *)RTProcSelf },
348 { "RTR0ProcHandleSelf", (void *)RTR0ProcHandleSelf },
349 { "RTSemFastMutexCreate", (void *)UNWIND_WRAP(RTSemFastMutexCreate) },
350 { "RTSemFastMutexDestroy", (void *)UNWIND_WRAP(RTSemFastMutexDestroy) },
351 { "RTSemFastMutexRequest", (void *)UNWIND_WRAP(RTSemFastMutexRequest) },
352 { "RTSemFastMutexRelease", (void *)UNWIND_WRAP(RTSemFastMutexRelease) },
353 { "RTSemEventCreate", (void *)UNWIND_WRAP(RTSemEventCreate) },
354 { "RTSemEventSignal", (void *)UNWIND_WRAP(RTSemEventSignal) },
355 { "RTSemEventWait", (void *)UNWIND_WRAP(RTSemEventWait) },
356 { "RTSemEventWaitNoResume", (void *)UNWIND_WRAP(RTSemEventWaitNoResume) },
357 { "RTSemEventDestroy", (void *)UNWIND_WRAP(RTSemEventDestroy) },
358 { "RTSemEventMultiCreate", (void *)UNWIND_WRAP(RTSemEventMultiCreate) },
359 { "RTSemEventMultiSignal", (void *)UNWIND_WRAP(RTSemEventMultiSignal) },
360 { "RTSemEventMultiReset", (void *)UNWIND_WRAP(RTSemEventMultiReset) },
361 { "RTSemEventMultiWait", (void *)UNWIND_WRAP(RTSemEventMultiWait) },
362 { "RTSemEventMultiWaitNoResume", (void *)UNWIND_WRAP(RTSemEventMultiWaitNoResume) },
363 { "RTSemEventMultiDestroy", (void *)UNWIND_WRAP(RTSemEventMultiDestroy) },
364 { "RTSpinlockCreate", (void *)UNWIND_WRAP(RTSpinlockCreate) },
365 { "RTSpinlockDestroy", (void *)UNWIND_WRAP(RTSpinlockDestroy) },
366 { "RTSpinlockAcquire", (void *)UNWIND_WRAP(RTSpinlockAcquire) },
367 { "RTSpinlockRelease", (void *)UNWIND_WRAP(RTSpinlockRelease) },
368 { "RTSpinlockAcquireNoInts", (void *)UNWIND_WRAP(RTSpinlockAcquireNoInts) },
369 { "RTSpinlockReleaseNoInts", (void *)UNWIND_WRAP(RTSpinlockReleaseNoInts) },
370 { "RTTimeNanoTS", (void *)RTTimeNanoTS },
371 { "RTTimeMillieTS", (void *)RTTimeMilliTS },
372 { "RTTimeSystemNanoTS", (void *)RTTimeSystemNanoTS },
373 { "RTTimeSystemMillieTS", (void *)RTTimeSystemMilliTS },
374 { "RTThreadNativeSelf", (void *)RTThreadNativeSelf },
375 { "RTThreadSleep", (void *)UNWIND_WRAP(RTThreadSleep) },
376 { "RTThreadYield", (void *)UNWIND_WRAP(RTThreadYield) },
377#if 0 /* Thread APIs, Part 2. */
378 { "RTThreadSelf", (void *)UNWIND_WRAP(RTThreadSelf) },
379 { "RTThreadCreate", (void *)UNWIND_WRAP(RTThreadCreate) }, /** @todo need to wrap the callback */
380 { "RTThreadGetNative", (void *)UNWIND_WRAP(RTThreadGetNative) },
381 { "RTThreadWait", (void *)UNWIND_WRAP(RTThreadWait) },
382 { "RTThreadWaitNoResume", (void *)UNWIND_WRAP(RTThreadWaitNoResume) },
383 { "RTThreadGetName", (void *)UNWIND_WRAP(RTThreadGetName) },
384 { "RTThreadSelfName", (void *)UNWIND_WRAP(RTThreadSelfName) },
385 { "RTThreadGetType", (void *)UNWIND_WRAP(RTThreadGetType) },
386 { "RTThreadUserSignal", (void *)UNWIND_WRAP(RTThreadUserSignal) },
387 { "RTThreadUserReset", (void *)UNWIND_WRAP(RTThreadUserReset) },
388 { "RTThreadUserWait", (void *)UNWIND_WRAP(RTThreadUserWait) },
389 { "RTThreadUserWaitNoResume", (void *)UNWIND_WRAP(RTThreadUserWaitNoResume) },
390#endif
391 { "RTLogDefaultInstance", (void *)RTLogDefaultInstance },
392 { "RTMpCpuId", (void *)RTMpCpuId },
393 { "RTMpCpuIdFromSetIndex", (void *)RTMpCpuIdFromSetIndex },
394 { "RTMpCpuIdToSetIndex", (void *)RTMpCpuIdToSetIndex },
395 { "RTMpIsCpuPossible", (void *)RTMpIsCpuPossible },
396 { "RTMpGetCount", (void *)RTMpGetCount },
397 { "RTMpGetMaxCpuId", (void *)RTMpGetMaxCpuId },
398 { "RTMpGetOnlineCount", (void *)RTMpGetOnlineCount },
399 { "RTMpGetOnlineSet", (void *)RTMpGetOnlineSet },
400 { "RTMpGetSet", (void *)RTMpGetSet },
401 { "RTMpIsCpuOnline", (void *)RTMpIsCpuOnline },
402 { "RTMpIsCpuWorkPending", (void *)UNWIND_WRAP(RTMpIsCpuWorkPending) },
403 { "RTMpOnAll", (void *)UNWIND_WRAP(RTMpOnAll) },
404 { "RTMpOnOthers", (void *)UNWIND_WRAP(RTMpOnOthers) },
405 { "RTMpOnSpecific", (void *)UNWIND_WRAP(RTMpOnSpecific) },
406 { "RTPowerNotificationRegister", (void *)RTPowerNotificationRegister },
407 { "RTPowerNotificationDeregister", (void *)RTPowerNotificationDeregister },
408 { "RTLogRelDefaultInstance", (void *)RTLogRelDefaultInstance },
409 { "RTLogSetDefaultInstanceThread", (void *)UNWIND_WRAP(RTLogSetDefaultInstanceThread) },
410 { "RTLogLogger", (void *)RTLogLogger }, /** @todo remove this */
411 { "RTLogLoggerEx", (void *)RTLogLoggerEx }, /** @todo remove this */
412 { "RTLogLoggerExV", (void *)UNWIND_WRAP(RTLogLoggerExV) },
413 { "RTLogPrintf", (void *)RTLogPrintf }, /** @todo remove this */
414 { "RTLogPrintfV", (void *)UNWIND_WRAP(RTLogPrintfV) },
415 { "AssertMsg1", (void *)UNWIND_WRAP(AssertMsg1) },
416 { "AssertMsg2", (void *)AssertMsg2 }, /** @todo replace this by RTAssertMsg2V */
417#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
418 { "RTR0AssertPanicSystem", (void *)RTR0AssertPanicSystem },
419#endif
420#if defined(RT_OS_DARWIN)
421 { "RTAssertMsg1", (void *)RTAssertMsg1 },
422 { "RTAssertMsg2", (void *)RTAssertMsg2 },
423 { "RTAssertMsg2V", (void *)RTAssertMsg2V },
424#endif
425};
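/*
 * Illustrative sketch only (not the actual implementation; the szName/pfn
 * member names of SUPFUNC are assumed here): resolving an entry from the table
 * above amounts to a linear scan by symbol name.
 */
#if 0
static void *supdrvExampleResolve(const char *pszSymbol)
{
    unsigned i;
    for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
        if (!strcmp(g_aFunctions[i].szName, pszSymbol))
            return g_aFunctions[i].pfn;
    return NULL;
}
#endif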
426
427#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
428/**
429 * Drag in the rest of IPRT since we share it with the
430 * rest of the kernel modules on Darwin and Solaris.
431 */
432PFNRT g_apfnVBoxDrvIPRTDeps[] =
433{
434 (PFNRT)RTCrc32,
435 (PFNRT)RTErrConvertFromErrno,
436 (PFNRT)RTNetIPv4IsHdrValid,
437 (PFNRT)RTNetIPv4TCPChecksum,
438 (PFNRT)RTNetIPv4UDPChecksum,
439 (PFNRT)RTUuidCompare,
440 (PFNRT)RTUuidCompareStr,
441 (PFNRT)RTUuidFromStr,
442 (PFNRT)RTStrDup,
443 (PFNRT)RTStrFree,
444 NULL
445};
446#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
447
448
449/**
450 * Initializes the device extension structure.
451 *
452 * @returns IPRT status code.
453 * @param pDevExt The device extension to initialize.
454 */
455int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
456{
457 int rc;
458
459#ifdef SUPDRV_WITH_RELEASE_LOGGER
460 /*
461 * Create the release log.
462 */
463 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
464 PRTLOGGER pRelLogger;
465 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
466 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
467 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
468 if (RT_SUCCESS(rc))
469 RTLogRelSetDefaultInstance(pRelLogger);
470#endif
471
472 /*
473 * Initialize it.
474 */
475 memset(pDevExt, 0, sizeof(*pDevExt));
476 rc = RTSpinlockCreate(&pDevExt->Spinlock);
477 if (!rc)
478 {
479 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
480 if (!rc)
481 {
482 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
483 if (!rc)
484 {
485 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
486 if (!rc)
487 {
488 rc = supdrvGipCreate(pDevExt);
489 if (RT_SUCCESS(rc))
490 {
491 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
492
493 /*
494 * Fixup the absolute symbols.
495 *
496 * Because of the table indexing assumptions we'll have a little #ifdef orgy
497 * here rather than distributing this to OS specific files. At least for now.
498 */
499#ifdef RT_OS_DARWIN
500# if ARCH_BITS == 32
501 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
502 {
503 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
504 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
505 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
506 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
507 }
508 else
509 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
510 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
511 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
512 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
513 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
514 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
515 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
516# else /* 64-bit darwin: */
517 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
518 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
519 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
520 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
521 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
522 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
523 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
524 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
525 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
526 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
527
528# endif
529#else /* !RT_OS_DARWIN */
530# if ARCH_BITS == 64
531 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
532 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
533 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
534 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
535# else
536 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
537# endif
538 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
539 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
540 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
541 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
542 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
543 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
544#endif /* !RT_OS_DARWIN */
545 return VINF_SUCCESS;
546 }
547
548 RTSemFastMutexDestroy(pDevExt->mtxGip);
549 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
550 }
551 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
552 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
553 }
554 RTSemFastMutexDestroy(pDevExt->mtxLdr);
555 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
556 }
557 RTSpinlockDestroy(pDevExt->Spinlock);
558 pDevExt->Spinlock = NIL_RTSPINLOCK;
559 }
560#ifdef SUPDRV_WITH_RELEASE_LOGGER
561 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
562 RTLogDestroy(RTLogSetDefaultInstance(NULL));
563#endif
564
565 return rc;
566}
567
568
569/**
570 * Delete the device extension (i.e. clean up its members).
571 *
572 * @param pDevExt The device extension to delete.
573 */
574void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
575{
576 PSUPDRVOBJ pObj;
577 PSUPDRVUSAGE pUsage;
578
579 /*
580 * Kill mutexes and spinlocks.
581 */
582 RTSemFastMutexDestroy(pDevExt->mtxGip);
583 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
584 RTSemFastMutexDestroy(pDevExt->mtxLdr);
585 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
586 RTSpinlockDestroy(pDevExt->Spinlock);
587 pDevExt->Spinlock = NIL_RTSPINLOCK;
588 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
589 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
590
591 /*
592 * Free lists.
593 */
594 /* objects. */
595 pObj = pDevExt->pObjs;
596#if !defined(DEBUG_bird) || !defined(RT_OS_LINUX) /* breaks unloading, temporary, remove me! */
597 Assert(!pObj); /* (can trigger on forced unloads) */
598#endif
599 pDevExt->pObjs = NULL;
600 while (pObj)
601 {
602 void *pvFree = pObj;
603 pObj = pObj->pNext;
604 RTMemFree(pvFree);
605 }
606
607 /* usage records. */
608 pUsage = pDevExt->pUsageFree;
609 pDevExt->pUsageFree = NULL;
610 while (pUsage)
611 {
612 void *pvFree = pUsage;
613 pUsage = pUsage->pNext;
614 RTMemFree(pvFree);
615 }
616
617 /* kill the GIP. */
618 supdrvGipDestroy(pDevExt);
619
620#ifdef SUPDRV_WITH_RELEASE_LOGGER
621 /* destroy the loggers. */
622 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
623 RTLogDestroy(RTLogSetDefaultInstance(NULL));
624#endif
625}
626
627
628/**
629 * Create session.
630 *
631 * @returns IPRT status code.
632 * @param pDevExt Device extension.
633 * @param fUser Flag indicating whether this is a user or kernel session.
634 * @param ppSession Where to store the pointer to the session data.
635 */
636int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, PSUPDRVSESSION *ppSession)
637{
638 /*
639 * Allocate memory for the session data.
640 */
641 int rc = VERR_NO_MEMORY;
642 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
643 if (pSession)
644 {
645 /* Initialize session data. */
646 rc = RTSpinlockCreate(&pSession->Spinlock);
647 if (!rc)
648 {
649 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
650 pSession->pDevExt = pDevExt;
651 pSession->u32Cookie = BIRD_INV;
652 /*pSession->pLdrUsage = NULL;
653 pSession->pVM = NULL;
654 pSession->pUsage = NULL;
655 pSession->pGip = NULL;
656 pSession->fGipReferenced = false;
657 pSession->Bundle.cUsed = 0; */
658 pSession->Uid = NIL_RTUID;
659 pSession->Gid = NIL_RTGID;
660 if (fUser)
661 {
662 pSession->Process = RTProcSelf();
663 pSession->R0Process = RTR0ProcHandleSelf();
664 }
665 else
666 {
667 pSession->Process = NIL_RTPROCESS;
668 pSession->R0Process = NIL_RTR0PROCESS;
669 }
670
671 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
672 return VINF_SUCCESS;
673 }
674
675 RTMemFree(pSession);
676 *ppSession = NULL;
677 Log(("Failed to create spinlock, rc=%d!\n", rc));
678 }
679
680 return rc;
681}
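/*
 * Illustrative sketch (hypothetical OS glue code, not from this file; pDevExt
 * is assumed to come from the platform driver state): a platform open handler
 * typically pairs supdrvCreateSession with supdrvCloseSession on the matching
 * close.
 */
#if 0
    PSUPDRVSESSION pSession;
    int rc = supdrvCreateSession(pDevExt, true /* fUser */, &pSession);
    if (RT_SUCCESS(rc))
    {
        /* ... stash pSession in the file handle's private data ... */
        /* later, in the close handler: */
        supdrvCloseSession(pDevExt, pSession);
    }
#endif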
682
683
684/**
685 * Shared code for cleaning up a session.
686 *
687 * @param pDevExt Device extension.
688 * @param pSession Session data.
689 * This data will be freed by this routine.
690 */
691void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
692{
693 /*
694 * Cleanup the session first.
695 */
696 supdrvCleanupSession(pDevExt, pSession);
697
698 /*
699 * Free the rest of the session stuff.
700 */
701 RTSpinlockDestroy(pSession->Spinlock);
702 pSession->Spinlock = NIL_RTSPINLOCK;
703 pSession->pDevExt = NULL;
704 RTMemFree(pSession);
705 LogFlow(("supdrvCloseSession: returns\n"));
706}
707
708
709/**
710 * Shared code for cleaning up a session (but not quite freeing it).
711 *
712 * This is primarily intended for Mac OS X, where we have to clean up the memory
713 * stuff before the file handle is closed.
714 *
715 * @param pDevExt Device extension.
716 * @param pSession Session data.
717 * This data will be freed by this routine.
718 */
719void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
720{
721 PSUPDRVBUNDLE pBundle;
722 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
723
724 /*
725 * Remove logger instances related to this session.
726 */
727 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
728
729 /*
730 * Release object references made in this session.
731 * In theory there should be no one racing us in this session.
732 */
733 Log2(("release objects - start\n"));
734 if (pSession->pUsage)
735 {
736 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
737 PSUPDRVUSAGE pUsage;
738 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
739
740 while ((pUsage = pSession->pUsage) != NULL)
741 {
742 PSUPDRVOBJ pObj = pUsage->pObj;
743 pSession->pUsage = pUsage->pNext;
744
745 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
746 if (pUsage->cUsage < pObj->cUsage)
747 {
748 pObj->cUsage -= pUsage->cUsage;
749 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
750 }
751 else
752 {
753 /* Destroy the object and free the record. */
754 if (pDevExt->pObjs == pObj)
755 pDevExt->pObjs = pObj->pNext;
756 else
757 {
758 PSUPDRVOBJ pObjPrev;
759 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
760 if (pObjPrev->pNext == pObj)
761 {
762 pObjPrev->pNext = pObj->pNext;
763 break;
764 }
765 Assert(pObjPrev);
766 }
767 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
768
769 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
770 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
771 if (pObj->pfnDestructor)
772#ifdef RT_WITH_W64_UNWIND_HACK
773 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
774#else
775 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
776#endif
777 RTMemFree(pObj);
778 }
779
780 /* free it and continue. */
781 RTMemFree(pUsage);
782
783 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
784 }
785
786 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
787 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
788 }
789 Log2(("release objects - done\n"));
790
791 /*
792 * Release memory allocated in the session.
793 *
794 * We do not serialize this as we assume that the application will
795 * not allocate memory while closing the file handle object.
796 */
797 Log2(("freeing memory:\n"));
798 pBundle = &pSession->Bundle;
799 while (pBundle)
800 {
801 PSUPDRVBUNDLE pToFree;
802 unsigned i;
803
804 /*
805 * Check and unlock all entries in the bundle.
806 */
807 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
808 {
809 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
810 {
811 int rc;
812 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
813 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
814 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
815 {
816 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
817 AssertRC(rc); /** @todo figure out how to handle this. */
818 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
819 }
820 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
821 AssertRC(rc); /** @todo figure out how to handle this. */
822 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
823 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
824 }
825 }
826
827 /*
828 * Advance and free previous bundle.
829 */
830 pToFree = pBundle;
831 pBundle = pBundle->pNext;
832
833 pToFree->pNext = NULL;
834 pToFree->cUsed = 0;
835 if (pToFree != &pSession->Bundle)
836 RTMemFree(pToFree);
837 }
838 Log2(("freeing memory - done\n"));
839
840 /*
841 * Deregister component factories.
842 */
843 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
844 Log2(("deregistering component factories:\n"));
845 if (pDevExt->pComponentFactoryHead)
846 {
847 PSUPDRVFACTORYREG pPrev = NULL;
848 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
849 while (pCur)
850 {
851 if (pCur->pSession == pSession)
852 {
853 /* unlink it */
854 PSUPDRVFACTORYREG pNext = pCur->pNext;
855 if (pPrev)
856 pPrev->pNext = pNext;
857 else
858 pDevExt->pComponentFactoryHead = pNext;
859
860 /* free it */
861 pCur->pNext = NULL;
862 pCur->pSession = NULL;
863 pCur->pFactory = NULL;
864 RTMemFree(pCur);
865
866 /* next */
867 pCur = pNext;
868 }
869 else
870 {
871 /* next */
872 pPrev = pCur;
873 pCur = pCur->pNext;
874 }
875 }
876 }
877 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
878 Log2(("deregistering component factories - done\n"));
879
880 /*
881 * Loaded images need to be dereferenced and possibly freed up.
882 */
883 RTSemFastMutexRequest(pDevExt->mtxLdr);
884 Log2(("freeing images:\n"));
885 if (pSession->pLdrUsage)
886 {
887 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
888 pSession->pLdrUsage = NULL;
889 while (pUsage)
890 {
891 void *pvFree = pUsage;
892 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
893 if (pImage->cUsage > pUsage->cUsage)
894 pImage->cUsage -= pUsage->cUsage;
895 else
896 supdrvLdrFree(pDevExt, pImage);
897 pUsage->pImage = NULL;
898 pUsage = pUsage->pNext;
899 RTMemFree(pvFree);
900 }
901 }
902 RTSemFastMutexRelease(pDevExt->mtxLdr);
903 Log2(("freeing images - done\n"));
904
905 /*
906 * Unmap the GIP.
907 */
908 Log2(("unmapping GIP:\n"));
909 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
910 {
911 SUPR0GipUnmap(pSession);
912 pSession->fGipReferenced = 0;
913 }
914 Log2(("unmapping GIP - done\n"));
915}
916
917
918/**
919 * Fast path I/O Control worker.
920 *
921 * @returns VBox status code that should be passed down to ring-3 unchanged.
922 * @param uIOCtl Function number.
923 * @param idCpu VMCPU id.
924 * @param pDevExt Device extension.
925 * @param pSession Session data.
926 */
927int VBOXCALL supdrvIOCtlFast(uintptr_t uIOCtl, unsigned idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
928{
929 /*
930 * We check the two prereqs after doing this only to allow the compiler to optimize things better.
931 */
932 if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
933 {
934 switch (uIOCtl)
935 {
936 case SUP_IOCTL_FAST_DO_RAW_RUN:
937#ifdef RT_WITH_W64_UNWIND_HACK
938 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
939#else
940 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
941#endif
942 break;
943 case SUP_IOCTL_FAST_DO_HWACC_RUN:
944#ifdef RT_WITH_W64_UNWIND_HACK
945 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
946#else
947 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
948#endif
949 break;
950 case SUP_IOCTL_FAST_DO_NOP:
951#ifdef RT_WITH_W64_UNWIND_HACK
952 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
953#else
954 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
955#endif
956 break;
957 default:
958 return VERR_INTERNAL_ERROR;
959 }
960 return VINF_SUCCESS;
961 }
962 return VERR_INTERNAL_ERROR;
963}
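/*
 * Illustrative sketch (hypothetical platform ioctl entry point, not from this
 * file; uIOCtl, idCpu, pDevExt and pSession are assumed to come from that
 * entry point): the fast-path I/O controls are routed here before anything
 * falls back to the general supdrvIOCtl worker below.
 */
#if 0
    if (   uIOCtl == SUP_IOCTL_FAST_DO_RAW_RUN
        || uIOCtl == SUP_IOCTL_FAST_DO_HWACC_RUN
        || uIOCtl == SUP_IOCTL_FAST_DO_NOP)
        return supdrvIOCtlFast(uIOCtl, idCpu, pDevExt, pSession);
#endif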
964
965
966/**
967 * Helper for supdrvIOCtl. Check if pszStr contains any character of pszChars.
968 * We would use strpbrk here if it were included in the RedHat kABI whitelist;
969 * see http://www.kerneldrivers.org/RHEL5.
970 *
971 * @return 1 if pszStr does contain any character of pszChars, 0 otherwise.
972 * @param pszStr String to check
973 * @param pszChars Character set
974 */
975static int supdrvCheckInvalidChar(const char *pszStr, const char *pszChars)
976{
977 int chCur;
978 while ((chCur = *pszStr++) != '\0')
979 {
980 int ch;
981 const char *psz = pszChars;
982 while ((ch = *psz++) != '\0')
983 if (ch == chCur)
984 return 1;
985
986 }
987 return 0;
988}
989
990
991/**
992 * I/O Control worker.
993 *
994 * @returns 0 on success.
995 * @returns VERR_INVALID_PARAMETER if the request is invalid.
996 *
997 * @param uIOCtl Function number.
998 * @param pDevExt Device extension.
999 * @param pSession Session data.
1000 * @param pReqHdr The request header.
1001 */
1002int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1003{
1004 /*
1005 * Validate the request.
1006 */
1007 /* This first check could probably be omitted as it's also done by the OS-specific code... */
1008 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
1009 || pReqHdr->cbIn < sizeof(*pReqHdr)
1010 || pReqHdr->cbOut < sizeof(*pReqHdr)))
1011 {
1012 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
1013 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
1014 return VERR_INVALID_PARAMETER;
1015 }
1016 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
1017 {
1018 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
1019 {
1020 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
1021 return VERR_INVALID_PARAMETER;
1022 }
1023 }
1024 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
1025 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
1026 {
1027 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
1028 return VERR_INVALID_PARAMETER;
1029 }
1030
1031/*
1032 * Validation macros
1033 */
1034#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1035 do { \
1036 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1037 { \
1038 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1039 (long)pReq->Hdr.cbIn, (long)(cbInExpect), (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1040 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1041 } \
1042 } while (0)
1043
1044#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1045
1046#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1047 do { \
1048 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1049 { \
1050 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1051 (long)pReq->Hdr.cbIn, (long)(cbInExpect))); \
1052 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1053 } \
1054 } while (0)
1055
1056#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1057 do { \
1058 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1059 { \
1060 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1061 (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1062 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1063 } \
1064 } while (0)
1065
1066#define REQ_CHECK_EXPR(Name, expr) \
1067 do { \
1068 if (RT_UNLIKELY(!(expr))) \
1069 { \
1070 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1071 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1072 } \
1073 } while (0)
1074
1075#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1076 do { \
1077 if (RT_UNLIKELY(!(expr))) \
1078 { \
1079 OSDBGPRINT( fmt ); \
1080 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1081 } \
1082 } while (0)
1083
1084
1085 /*
1086 * The switch.
1087 */
1088 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1089 {
1090 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1091 {
1092 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1093 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1094 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1095 {
1096 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1097 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1098 return 0;
1099 }
1100
1101#if 0
1102 /*
1103 * Call out to the OS specific code and let it do permission checks on the
1104 * client process.
1105 */
1106 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1107 {
1108 pReq->u.Out.u32Cookie = 0xffffffff;
1109 pReq->u.Out.u32SessionCookie = 0xffffffff;
1110 pReq->u.Out.u32SessionVersion = 0xffffffff;
1111 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1112 pReq->u.Out.pSession = NULL;
1113 pReq->u.Out.cFunctions = 0;
1114 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1115 return 0;
1116 }
1117#endif
1118
1119 /*
1120 * Match the version.
1121 * The current logic is very simple: match the major interface version.
1122 */
1123 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1124 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1125 {
1126 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1127 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1128 pReq->u.Out.u32Cookie = 0xffffffff;
1129 pReq->u.Out.u32SessionCookie = 0xffffffff;
1130 pReq->u.Out.u32SessionVersion = 0xffffffff;
1131 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1132 pReq->u.Out.pSession = NULL;
1133 pReq->u.Out.cFunctions = 0;
1134 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1135 return 0;
1136 }
1137
1138 /*
1139 * Fill in return data and be gone.
1140 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1141 * u32SessionVersion <= u32ReqVersion!
1142 */
1143 /** @todo Somehow validate the client and negotiate a secure cookie... */
1144 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1145 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1146 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1147 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1148 pReq->u.Out.pSession = pSession;
1149 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1150 pReq->Hdr.rc = VINF_SUCCESS;
1151 return 0;
1152 }
1153
1154 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1155 {
1156 /* validate */
1157 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1158 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1159
1160 /* execute */
1161 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1162 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1163 pReq->Hdr.rc = VINF_SUCCESS;
1164 return 0;
1165 }
1166
1167 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_INSTALL):
1168 {
1169 /* validate */
1170 PSUPIDTINSTALL pReq = (PSUPIDTINSTALL)pReqHdr;
1171 REQ_CHECK_SIZES(SUP_IOCTL_IDT_INSTALL);
1172
1173 /* execute */
1174 pReq->u.Out.u8Idt = 3;
1175 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1176 return 0;
1177 }
1178
1179 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_REMOVE):
1180 {
1181 /* validate */
1182 PSUPIDTREMOVE pReq = (PSUPIDTREMOVE)pReqHdr;
1183 REQ_CHECK_SIZES(SUP_IOCTL_IDT_REMOVE);
1184
1185 /* execute */
1186 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1187 return 0;
1188 }
1189
1190 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1191 {
1192 /* validate */
1193 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1194 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1195 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1196 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1197 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1198
1199 /* execute */
1200 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1201 if (RT_FAILURE(pReq->Hdr.rc))
1202 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1203 return 0;
1204 }
1205
1206 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1207 {
1208 /* validate */
1209 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1210 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1211
1212 /* execute */
1213 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1214 return 0;
1215 }
1216
1217 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1218 {
1219 /* validate */
1220 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1221 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1222
1223 /* execute */
1224 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1225 if (RT_FAILURE(pReq->Hdr.rc))
1226 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1227 return 0;
1228 }
1229
1230 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1231 {
1232 /* validate */
1233 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1234 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1235
1236 /* execute */
1237 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1238 return 0;
1239 }
1240
1241 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1242 {
1243 /* validate */
1244 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1245 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1246 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage > 0);
1247 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage < _1M*16);
1248 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1249 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1250 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, !supdrvCheckInvalidChar(pReq->u.In.szName, ";:()[]{}/\\|&*%#@!~`\"'"));
1251
1252 /* execute */
1253 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1254 return 0;
1255 }
1256
1257 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1258 {
1259 /* validate */
1260 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1261 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= sizeof(*pReq));
1262 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImage), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1263 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
1264 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1265 || ( pReq->u.In.offSymbols < pReq->u.In.cbImage
1266 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImage),
1267 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImage=%#lx\n", (long)pReq->u.In.offSymbols,
1268 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImage));
1269 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1270 || ( pReq->u.In.offStrTab < pReq->u.In.cbImage
1271 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImage
1272 && pReq->u.In.cbStrTab <= pReq->u.In.cbImage),
1273 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImage=%#lx\n", (long)pReq->u.In.offStrTab,
1274 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImage));
1275
1276 if (pReq->u.In.cSymbols)
1277 {
1278 uint32_t i;
1279 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.achImage[pReq->u.In.offSymbols];
1280 for (i = 0; i < pReq->u.In.cSymbols; i++)
1281 {
1282 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImage,
1283 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImage));
1284 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1285 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
1286 REQ_CHECK_EXPR_FMT(memchr(&pReq->u.In.achImage[pReq->u.In.offStrTab + paSyms[i].offName], '\0', pReq->u.In.cbStrTab - paSyms[i].offName),
1287 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
1288 }
1289 }
1290
1291 /* execute */
1292 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1293 return 0;
1294 }
1295
1296 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1297 {
1298 /* validate */
1299 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1300 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1301
1302 /* execute */
1303 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1304 return 0;
1305 }
1306
1307 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1308 {
1309 /* validate */
1310 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1311 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1312 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, memchr(pReq->u.In.szSymbol, '\0', sizeof(pReq->u.In.szSymbol)));
1313
1314 /* execute */
1315 pReq->Hdr.rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
1316 return 0;
1317 }
1318
1319 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0(0)):
1320 {
1321 /* validate */
1322 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1323 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1324 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1325
1326 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1327 {
1328 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1329
1330 /* execute */
1331 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1332#ifdef RT_WITH_W64_UNWIND_HACK
1333 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1334#else
1335 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1336#endif
1337 else
1338 pReq->Hdr.rc = VERR_WRONG_ORDER;
1339 }
1340 else
1341 {
1342 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1343 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1344 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1345 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1346 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1347
1348 /* execute */
1349 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1350#ifdef RT_WITH_W64_UNWIND_HACK
1351 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1352#else
1353 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1354#endif
1355 else
1356 pReq->Hdr.rc = VERR_WRONG_ORDER;
1357 }
1358
1359 if ( RT_FAILURE(pReq->Hdr.rc)
1360 && pReq->Hdr.rc != VERR_INTERRUPTED
1361 && pReq->Hdr.rc != VERR_TIMEOUT)
1362 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1363 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1364 else
1365 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1366 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1367 return 0;
1368 }
1369
1370 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1371 {
1372 /* validate */
1373 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1374 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1375
1376 /* execute */
1377 pReq->Hdr.rc = VINF_SUCCESS;
1378 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1379 return 0;
1380 }
1381
1382 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1383 {
1384 /* validate */
1385 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1386 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1387 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1388
1389 /* execute */
1390 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1391 if (RT_FAILURE(pReq->Hdr.rc))
1392 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1393 return 0;
1394 }
1395
1396 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1397 {
1398 /* validate */
1399 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1400 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1401
1402 /* execute */
1403 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1404 return 0;
1405 }
1406
1407 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1408 {
1409 /* validate */
1410 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1411 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1412
1413 /* execute */
1414 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1415 if (RT_SUCCESS(pReq->Hdr.rc))
1416 pReq->u.Out.pGipR0 = pDevExt->pGip;
1417 return 0;
1418 }
1419
1420 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
1421 {
1422 /* validate */
1423 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
1424 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
1425
1426 /* execute */
1427 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
1428 return 0;
1429 }
1430
1431 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
1432 {
1433 /* validate */
1434 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
1435 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
1436 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
1437 || ( VALID_PTR(pReq->u.In.pVMR0)
1438 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
1439 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
1440 /* execute */
1441 pSession->pVM = pReq->u.In.pVMR0;
1442 pReq->Hdr.rc = VINF_SUCCESS;
1443 return 0;
1444 }
1445
1446 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC):
1447 {
1448 /* validate */
1449 PSUPPAGEALLOC pReq = (PSUPPAGEALLOC)pReqHdr;
1450 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_SIZE_IN);
1451 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC, SUP_IOCTL_PAGE_ALLOC_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1452
1453 /* execute */
1454 pReq->Hdr.rc = SUPR0PageAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1455 if (RT_FAILURE(pReq->Hdr.rc))
1456 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1457 return 0;
1458 }
1459
1460 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
1461 {
1462 /* validate */
1463 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
1464 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
1465 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
1466 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
1467 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
1468 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
1469 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
1470 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
1471 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
1472
1473 /* execute */
1474 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
1475 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
1476 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
1477 &pReq->u.Out.aPages[0]);
1478 if (RT_FAILURE(pReq->Hdr.rc))
1479 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1480 return 0;
1481 }
1482
1483 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
1484 {
1485 /* validate */
1486 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
1487 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
1488 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
1489 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
1490 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1491 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
1492
1493 /* execute */
1494 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
1495 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
1496 if (RT_FAILURE(pReq->Hdr.rc))
1497 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1498 return 0;
1499 }
1500
1501 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
1502 {
1503 /* validate */
1504 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
1505 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
1506
1507 /* execute */
1508 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
1509 return 0;
1510 }
1511
1512 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE(0)):
1513 {
1514 /* validate */
1515 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
1516 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1517 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1518
1519 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
1520 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
1521 else
1522 {
1523 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
1524 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
1525 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
1526 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
1527 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
1528 }
1529 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1530
1531 /* execute */
1532 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
1533 return 0;
1534 }
1535
1536 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS(0)):
1537 {
1538 /* validate */
1539 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
1540 size_t cbStrTab;
1541 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
1542 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
1543 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
1544 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
1545 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
1546 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
1547 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
1548 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
1549 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
1550 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
1551 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
1552
1553 /* execute */
1554 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pDevExt, pSession, pReq);
1555 return 0;
1556 }
1557
1558 default:
1559 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
1560 break;
1561 }
1562 return SUPDRV_ERR_GENERAL_FAILURE;
1563}
1564
1565
1566/**
1567 * Inter-Driver Communication (IDC) worker.
1568 *
1569 * @returns VBox status code.
1570 * @retval VINF_SUCCESS on success.
1571 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1572 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
1573 *
1574 * @param uReq The request (function) code.
1575 * @param   pDevExt     Device extension.
1576 * @param pSession Session data.
1577 * @param pReqHdr The request header.
1578 */
1579int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
1580{
1581 /*
1582 * The OS specific code has already validated the pSession
1583 * pointer, and the request size being greater or equal to
1584 * size of the header.
1585 *
1586 * So, just check that pSession is a kernel context session.
1587 */
1588 if (RT_UNLIKELY( pSession
1589 && pSession->R0Process != NIL_RTR0PROCESS))
1590 return VERR_INVALID_PARAMETER;
1591
1592/*
1593 * Validation macro.
1594 */
1595#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
1596 do { \
1597 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
1598 { \
1599 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
1600 (long)pReqHdr->cb, (long)(cbExpect))); \
1601 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1602 } \
1603 } while (0)
1604
1605 switch (uReq)
1606 {
1607 case SUPDRV_IDC_REQ_CONNECT:
1608 {
1609 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
1610 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
1611
1612 /*
1613 * Validate the cookie and other input.
1614 */
1615 if (pReq->Hdr.pSession != NULL)
1616 {
1617 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pReq->Hdr.pSession));
1618 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1619 }
1620 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
1621 {
1622 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1623 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
1624 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1625 }
1626 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
1627 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
1628 {
1629                OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uReqVersion=%#x doesn't match!\n",
1630 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1631 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1632 }
1633
1634 /*
1635 * Match the version.
1636             * The current logic is very simple: match the major interface version.
1637 */
1638 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
1639 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
1640 {
1641 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1642 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
1643 pReq->u.Out.pSession = NULL;
1644 pReq->u.Out.uSessionVersion = 0xffffffff;
1645 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1646 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1647 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1648 return VINF_SUCCESS;
1649 }
1650
1651 pReq->u.Out.pSession = NULL;
1652 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
1653 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1654 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1655
1656 /*
1657 * On NT we will already have a session associated with the
1658 * client, just like with the SUP_IOCTL_COOKIE request, while
1659             * the other platforms don't.
1660 */
1661#ifdef RT_OS_WINDOWS
1662 pReq->Hdr.rc = VINF_SUCCESS;
1663#else
1664 AssertReturn(!pSession, VERR_INTERNAL_ERROR);
1665 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, &pSession);
1666 if (RT_FAILURE(pReq->Hdr.rc))
1667 {
1668 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
1669 return VINF_SUCCESS;
1670 }
1671#endif
1672
1673 pReq->u.Out.pSession = pSession;
1674 pReq->Hdr.pSession = pSession;
1675
1676 return VINF_SUCCESS;
1677 }
1678
1679 case SUPDRV_IDC_REQ_DISCONNECT:
1680 {
1681 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
1682
1683#ifdef RT_OS_WINDOWS
1684 /* Windows will destroy the session when the file object is destroyed. */
1685#else
1686 supdrvCloseSession(pDevExt, pSession);
1687#endif
1688 return pReqHdr->rc = VINF_SUCCESS;
1689 }
1690
1691 case SUPDRV_IDC_REQ_GET_SYMBOL:
1692 {
1693 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
1694 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
1695
1696 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
1697 return VINF_SUCCESS;
1698 }
1699
1700 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
1701 {
1702 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
1703 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
1704
1705 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
1706 return VINF_SUCCESS;
1707 }
1708
1709 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
1710 {
1711 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
1712 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
1713
1714 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
1715 return VINF_SUCCESS;
1716 }
1717
1718 default:
1719 Log(("Unknown IDC %#lx\n", (long)uReq));
1720 break;
1721 }
1722
1723#undef REQ_CHECK_IDC_SIZE
1724 return VERR_NOT_SUPPORTED;
1725}
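
/*
 * Illustrative sketch (editor's addition, not part of the original source): how an
 * OS-specific IDC entry point might hand a CONNECT request to supdrvIDC(). The
 * pDevExt variable and the surrounding error handling are placeholders; the structure
 * fields and constants are the ones validated above. On Windows the session comes from
 * the file object glue instead, so NULL is only passed on the other platforms.
 *
 *      SUPDRVIDCREQCONNECT Req;
 *      PSUPDRVSESSION      pSession = NULL;
 *      int                 rc;
 *
 *      memset(&Req, 0, sizeof(Req));
 *      Req.Hdr.cb              = sizeof(Req);
 *      Req.Hdr.rc              = VERR_INTERNAL_ERROR;
 *      Req.Hdr.pSession        = NULL;                     // must be NULL on input
 *      Req.u.In.u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE;
 *      Req.u.In.uMinVersion    = SUPDRV_IDC_VERSION & UINT32_C(0xffff0000);
 *      Req.u.In.uReqVersion    = SUPDRV_IDC_VERSION;
 *
 *      rc = supdrvIDC(SUPDRV_IDC_REQ_CONNECT, pDevExt, NULL, &Req.Hdr);
 *      if (RT_SUCCESS(rc) && RT_SUCCESS(Req.Hdr.rc))
 *          pSession = Req.u.Out.pSession;                  // kernel session for later requests
 */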
1726
1727
1728/**
1729 * Register an object for reference counting.
1730 * The object is registered with one reference in the specified session.
1731 *
1732 * @returns Unique identifier on success (pointer).
1733 *          All future references must use this identifier.
1734 * @returns NULL on failure.
1735 * @param   pfnDestructor   The destructor function which will be called when the reference count reaches 0.
1736 * @param pvUser1 The first user argument.
1737 * @param pvUser2 The second user argument.
1738 */
1739SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1740{
1741 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1742 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1743 PSUPDRVOBJ pObj;
1744 PSUPDRVUSAGE pUsage;
1745
1746 /*
1747 * Validate the input.
1748 */
1749 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
1750 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
1751 AssertPtrReturn(pfnDestructor, NULL);
1752
1753 /*
1754 * Allocate and initialize the object.
1755 */
1756 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1757 if (!pObj)
1758 return NULL;
1759 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1760 pObj->enmType = enmType;
1761 pObj->pNext = NULL;
1762 pObj->cUsage = 1;
1763 pObj->pfnDestructor = pfnDestructor;
1764 pObj->pvUser1 = pvUser1;
1765 pObj->pvUser2 = pvUser2;
1766 pObj->CreatorUid = pSession->Uid;
1767 pObj->CreatorGid = pSession->Gid;
1768 pObj->CreatorProcess= pSession->Process;
1769 supdrvOSObjInitCreator(pObj, pSession);
1770
1771 /*
1772 * Allocate the usage record.
1773 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
1774 */
1775 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1776
1777 pUsage = pDevExt->pUsageFree;
1778 if (pUsage)
1779 pDevExt->pUsageFree = pUsage->pNext;
1780 else
1781 {
1782 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1783 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1784 if (!pUsage)
1785 {
1786 RTMemFree(pObj);
1787 return NULL;
1788 }
1789 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1790 }
1791
1792 /*
1793 * Insert the object and create the session usage record.
1794 */
1795 /* The object. */
1796 pObj->pNext = pDevExt->pObjs;
1797 pDevExt->pObjs = pObj;
1798
1799 /* The session record. */
1800 pUsage->cUsage = 1;
1801 pUsage->pObj = pObj;
1802 pUsage->pNext = pSession->pUsage;
1803 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
1804 pSession->pUsage = pUsage;
1805
1806 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1807
1808    Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser2=%p)\n", pObj, pvUser1, pvUser2));
1809 return pObj;
1810}
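
/*
 * Illustrative sketch (editor's addition): registering a reference counted object.
 * MyPayloadDestructor and pvPayload are hypothetical names, and SUPDRVOBJTYPE_VM is
 * assumed to be one of the valid SUPDRVOBJTYPE values; everything else is the API
 * implemented above.
 *
 *      static DECLCALLBACK(void) MyPayloadDestructor(void *pvObj, void *pvUser1, void *pvUser2)
 *      {
 *          RTMemFree(pvUser1);             // free the payload when the last reference is gone
 *          NOREF(pvObj); NOREF(pvUser2);
 *      }
 *
 *      void *pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, MyPayloadDestructor, pvPayload, NULL);
 *      if (!pvObj)
 *          return VERR_NO_MEMORY;
 *      // ... let other sessions pick up references via SUPR0ObjAddRef() ...
 *      SUPR0ObjRelease(pvObj, pSession);   // drops the reference taken by the registration
 */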
1811
1812
1813/**
1814 * Increment the reference counter for the object associating the reference
1815 * with the specified session.
1816 *
1817 * @returns IPRT status code.
1818 * @param pvObj The identifier returned by SUPR0ObjRegister().
1819 * @param pSession The session which is referencing the object.
1820 *
1821 * @remarks The caller should not own any spinlocks and must carefully protect
1822 * itself against potential race with the destructor so freed memory
1823 * isn't accessed here.
1824 */
1825SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1826{
1827 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
1828}
1829
1830
1831/**
1832 * Increment the reference counter for the object associating the reference
1833 * with the specified session.
1834 *
1835 * @returns IPRT status code.
1836 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
1837 * couldn't be allocated. (If you see this you're not doing the right
1838 * thing and it won't ever work reliably.)
1839 *
1840 * @param pvObj The identifier returned by SUPR0ObjRegister().
1841 * @param pSession The session which is referencing the object.
1842 * @param fNoBlocking Set if it's not OK to block. Never try to make the
1843 * first reference to an object in a session with this
1844 * argument set.
1845 *
1846 * @remarks The caller should not own any spinlocks and must carefully protect
1847 * itself against potential race with the destructor so freed memory
1848 * isn't accessed here.
1849 */
1850SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
1851{
1852 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1853 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1854 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1855 int rc = VINF_SUCCESS;
1856 PSUPDRVUSAGE pUsagePre;
1857 PSUPDRVUSAGE pUsage;
1858
1859 /*
1860 * Validate the input.
1861 * Be ready for the destruction race (someone might be stuck in the
1862 * destructor waiting a lock we own).
1863 */
1864 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1865 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
1866 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
1867 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
1868 VERR_INVALID_PARAMETER);
1869
1870 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1871
1872 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1873 {
1874 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1875
1876 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1877 return VERR_WRONG_ORDER;
1878 }
1879
1880 /*
1881 * Preallocate the usage record if we can.
1882 */
1883 pUsagePre = pDevExt->pUsageFree;
1884 if (pUsagePre)
1885 pDevExt->pUsageFree = pUsagePre->pNext;
1886 else if (!fNoBlocking)
1887 {
1888 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1889 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
1890 if (!pUsagePre)
1891 return VERR_NO_MEMORY;
1892
1893 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1894 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1895 {
1896 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1897
1898 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1899 return VERR_WRONG_ORDER;
1900 }
1901 }
1902
1903 /*
1904 * Reference the object.
1905 */
1906 pObj->cUsage++;
1907
1908 /*
1909 * Look for the session record.
1910 */
1911 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
1912 {
1913 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1914 if (pUsage->pObj == pObj)
1915 break;
1916 }
1917 if (pUsage)
1918 pUsage->cUsage++;
1919 else if (pUsagePre)
1920 {
1921 /* create a new session record. */
1922 pUsagePre->cUsage = 1;
1923 pUsagePre->pObj = pObj;
1924 pUsagePre->pNext = pSession->pUsage;
1925 pSession->pUsage = pUsagePre;
1926 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
1927
1928 pUsagePre = NULL;
1929 }
1930 else
1931 {
1932 pObj->cUsage--;
1933 rc = VERR_TRY_AGAIN;
1934 }
1935
1936 /*
1937     * Put any unused usage record into the free list.
1938 */
1939 if (pUsagePre)
1940 {
1941 pUsagePre->pNext = pDevExt->pUsageFree;
1942 pDevExt->pUsageFree = pUsagePre;
1943 }
1944
1945 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1946
1947 return rc;
1948}
1949
1950
1951/**
1952 * Decrement / destroy a reference counter record for an object.
1953 *
1954 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
1955 *
1956 * @returns IPRT status code.
1957 * @retval VINF_SUCCESS if not destroyed.
1958 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
1959 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
1960 *         strict builds.
1961 *
1962 * @param pvObj The identifier returned by SUPR0ObjRegister().
1963 * @param pSession The session which is referencing the object.
1964 */
1965SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
1966{
1967 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1968 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1969 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1970 int rc = VERR_INVALID_PARAMETER;
1971 PSUPDRVUSAGE pUsage;
1972 PSUPDRVUSAGE pUsagePrev;
1973
1974 /*
1975 * Validate the input.
1976 */
1977 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1978 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
1979                    ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
1980 VERR_INVALID_PARAMETER);
1981
1982 /*
1983 * Acquire the spinlock and look for the usage record.
1984 */
1985 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1986
1987 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
1988 pUsage;
1989 pUsagePrev = pUsage, pUsage = pUsage->pNext)
1990 {
1991 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1992 if (pUsage->pObj == pObj)
1993 {
1994 rc = VINF_SUCCESS;
1995 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
1996 if (pUsage->cUsage > 1)
1997 {
1998 pObj->cUsage--;
1999 pUsage->cUsage--;
2000 }
2001 else
2002 {
2003 /*
2004 * Free the session record.
2005 */
2006 if (pUsagePrev)
2007 pUsagePrev->pNext = pUsage->pNext;
2008 else
2009 pSession->pUsage = pUsage->pNext;
2010 pUsage->pNext = pDevExt->pUsageFree;
2011 pDevExt->pUsageFree = pUsage;
2012
2013 /* What about the object? */
2014 if (pObj->cUsage > 1)
2015 pObj->cUsage--;
2016 else
2017 {
2018 /*
2019 * Object is to be destroyed, unlink it.
2020 */
2021 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
2022 rc = VINF_OBJECT_DESTROYED;
2023 if (pDevExt->pObjs == pObj)
2024 pDevExt->pObjs = pObj->pNext;
2025 else
2026 {
2027 PSUPDRVOBJ pObjPrev;
2028 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
2029 if (pObjPrev->pNext == pObj)
2030 {
2031 pObjPrev->pNext = pObj->pNext;
2032 break;
2033 }
2034 Assert(pObjPrev);
2035 }
2036 }
2037 }
2038 break;
2039 }
2040 }
2041
2042 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2043
2044 /*
2045 * Call the destructor and free the object if required.
2046 */
2047 if (rc == VINF_OBJECT_DESTROYED)
2048 {
2049 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
2050 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
2051 if (pObj->pfnDestructor)
2052#ifdef RT_WITH_W64_UNWIND_HACK
2053 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
2054#else
2055 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
2056#endif
2057 RTMemFree(pObj);
2058 }
2059
2060 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
2061 return rc;
2062}
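
/*
 * Illustrative sketch (editor's addition): typical reference juggling between two
 * sessions. pSessionA owns the registration reference, pSessionB borrows a second
 * one; both session variables and pvObj are placeholders.
 *
 *      int rc = SUPR0ObjAddRef(pvObj, pSessionB);          // second reference, owned by pSessionB
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... use the object on behalf of pSessionB ...
 *          SUPR0ObjRelease(pvObj, pSessionB);              // VINF_SUCCESS, the object lives on
 *      }
 *      rc = SUPR0ObjRelease(pvObj, pSessionA);             // last reference: the destructor runs and
 *                                                          // VINF_OBJECT_DESTROYED is returned
 */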
2063
2064
2065/**
2066 * Verifies that the current process can access the specified object.
2067 *
2068 * @returns The following IPRT status code:
2069 * @retval VINF_SUCCESS if access was granted.
2070 * @retval VINF_SUCCESS if access was granted.
2071 * @retval VERR_PERMISSION_DENIED if access was denied.
2071 * @retval VERR_INVALID_PARAMETER if invalid parameter.
2072 *
2073 * @param pvObj The identifier returned by SUPR0ObjRegister().
2074 * @param pSession The session which wishes to access the object.
2075 * @param pszObjName Object string name. This is optional and depends on the object type.
2076 *
2077 * @remark The caller is responsible for making sure the object isn't removed while
2078 * we're inside this function. If uncertain about this, just call AddRef before calling us.
2079 */
2080SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
2081{
2082 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2083 int rc;
2084
2085 /*
2086 * Validate the input.
2087 */
2088 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2089 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2090                    ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2091 VERR_INVALID_PARAMETER);
2092
2093 /*
2094 * Check access. (returns true if a decision has been made.)
2095 */
2096 rc = VERR_INTERNAL_ERROR;
2097 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
2098 return rc;
2099
2100 /*
2101 * Default policy is to allow the user to access his own
2102 * stuff but nothing else.
2103 */
2104 if (pObj->CreatorUid == pSession->Uid)
2105 return VINF_SUCCESS;
2106 return VERR_PERMISSION_DENIED;
2107}
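
/*
 * Illustrative sketch (editor's addition): gating access to a named object before
 * handing it out to a session. The object name "MyNamedObject" is hypothetical.
 *
 *      int rc = SUPR0ObjAddRef(pvObj, pSession);           // keep it alive while checking
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = SUPR0ObjVerifyAccess(pvObj, pSession, "MyNamedObject");
 *          if (RT_FAILURE(rc))
 *              SUPR0ObjRelease(pvObj, pSession);           // access denied: drop the reference again
 *      }
 */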
2108
2109
2110/**
2111 * Lock pages.
2112 *
2113 * @returns IPRT status code.
2114 * @param pSession Session to which the locked memory should be associated.
2115 * @param pvR3 Start of the memory range to lock.
2116 * This must be page aligned.
2117 * @param cPages Number of pages to lock.
2118 * @param paPages Where to put the physical addresses of locked memory.
2119 */
2120SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
2121{
2122 int rc;
2123 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2124 const size_t cb = (size_t)cPages << PAGE_SHIFT;
2125 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
2126
2127 /*
2128 * Verify input.
2129 */
2130 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2131 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2132 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
2133 || !pvR3)
2134 {
2135 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
2136 return VERR_INVALID_PARAMETER;
2137 }
2138
2139#ifdef RT_OS_WINDOWS /* A temporary hack for windows, will be removed once all ring-3 code has been cleaned up. */
2140 /* First check if we allocated it using SUPPageAlloc; if so then we don't need to lock it again */
2141 rc = supdrvPageGetPhys(pSession, pvR3, cPages, paPages);
2142 if (RT_SUCCESS(rc))
2143 return rc;
2144#endif
2145
2146 /*
2147 * Let IPRT do the job.
2148 */
2149 Mem.eType = MEMREF_TYPE_LOCKED;
2150 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTR0ProcHandleSelf());
2151 if (RT_SUCCESS(rc))
2152 {
2153 uint32_t iPage = cPages;
2154 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
2155 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
2156
2157 while (iPage-- > 0)
2158 {
2159 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2160 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
2161 {
2162 AssertMsgFailed(("iPage=%d\n", iPage));
2163 rc = VERR_INTERNAL_ERROR;
2164 break;
2165 }
2166 }
2167 if (RT_SUCCESS(rc))
2168 rc = supdrvMemAdd(&Mem, pSession);
2169 if (RT_FAILURE(rc))
2170 {
2171 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
2172 AssertRC(rc2);
2173 }
2174 }
2175
2176 return rc;
2177}
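
/*
 * Illustrative sketch (editor's addition): locking a page aligned ring-3 buffer and
 * fetching the physical address of each page. pvR3Buf is a placeholder; the array
 * must provide one RTHCPHYS entry per locked page.
 *
 *      RTHCPHYS aPhys[16];
 *      int rc = SUPR0LockMem(pSession, pvR3Buf, RT_ELEMENTS(aPhys), &aPhys[0]);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... hand aPhys[] to whatever needs the page frame addresses ...
 *          rc = SUPR0UnlockMem(pSession, pvR3Buf);
 *      }
 */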
2178
2179
2180/**
2181 * Unlocks the memory pointed to by pv.
2182 *
2183 * @returns IPRT status code.
2184 * @param pSession Session to which the memory was locked.
2185 * @param pvR3 Memory to unlock.
2186 */
2187SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2188{
2189 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2190 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2191#ifdef RT_OS_WINDOWS
2192 /*
2193 * Temporary hack for windows - SUPR0PageFree will unlock SUPR0PageAlloc
2194 * allocations; ignore this call.
2195 */
2196 if (supdrvPageWasLockedByPageAlloc(pSession, pvR3))
2197 {
2198 LogFlow(("Page will be unlocked in SUPR0PageFree -> ignore\n"));
2199 return VINF_SUCCESS;
2200 }
2201#endif
2202 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
2203}
2204
2205
2206/**
2207 * Allocates a chunk of page aligned memory with contiguous and fixed physical
2208 * backing.
2209 *
2210 * @returns IPRT status code.
2211 * @param pSession Session data.
2212 * @param cPages Number of pages to allocate.
2213 * @param   ppvR0       Where to put the address of the Ring-0 mapping of the allocated memory.
2214 * @param   ppvR3       Where to put the address of the Ring-3 mapping of the allocated memory.
2215 * @param pHCPhys Where to put the physical address of allocated memory.
2216 */
2217SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
2218{
2219 int rc;
2220 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2221 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
2222
2223 /*
2224 * Validate input.
2225 */
2226 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2227 if (!ppvR3 || !ppvR0 || !pHCPhys)
2228 {
2229 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
2230 pSession, ppvR0, ppvR3, pHCPhys));
2231 return VERR_INVALID_PARAMETER;
2232
2233 }
2234 if (cPages < 1 || cPages >= 256)
2235 {
2236 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2237 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2238 }
2239
2240 /*
2241 * Let IPRT do the job.
2242 */
2243 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
2244 if (RT_SUCCESS(rc))
2245 {
2246 int rc2;
2247 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2248 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2249 if (RT_SUCCESS(rc))
2250 {
2251 Mem.eType = MEMREF_TYPE_CONT;
2252 rc = supdrvMemAdd(&Mem, pSession);
2253 if (!rc)
2254 {
2255 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2256 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2257 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
2258 return 0;
2259 }
2260
2261 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2262 AssertRC(rc2);
2263 }
2264 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2265 AssertRC(rc2);
2266 }
2267
2268 return rc;
2269}
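
/*
 * Illustrative sketch (editor's addition): allocating a physically contiguous buffer
 * with both mappings and freeing it again. The page count is arbitrary but must stay
 * below 256 as checked above.
 *
 *      RTR0PTR  pvR0;
 *      RTR3PTR  pvR3;
 *      RTHCPHYS HCPhys;
 *      int rc = SUPR0ContAlloc(pSession, 16, &pvR0, &pvR3, &HCPhys);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // HCPhys is the physical address of the first page; the rest follow contiguously.
 *          SUPR0ContFree(pSession, (RTHCUINTPTR)pvR0);     // the ring-3 address would work too
 *      }
 */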
2270
2271
2272/**
2273 * Frees memory allocated using SUPR0ContAlloc().
2274 *
2275 * @returns IPRT status code.
2276 * @param pSession The session to which the memory was allocated.
2277 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2278 */
2279SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2280{
2281 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2282 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2283 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
2284}
2285
2286
2287/**
2288 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
2289 *
2290 * The memory isn't zeroed.
2291 *
2292 * @returns IPRT status code.
2293 * @param pSession Session data.
2294 * @param cPages Number of pages to allocate.
2295 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
2296 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
2297 * @param paPages Where to put the physical addresses of allocated memory.
2298 */
2299SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2300{
2301 unsigned iPage;
2302 int rc;
2303 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2304 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
2305
2306 /*
2307 * Validate input.
2308 */
2309 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2310 if (!ppvR3 || !ppvR0 || !paPages)
2311 {
2312 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
2313 pSession, ppvR3, ppvR0, paPages));
2314 return VERR_INVALID_PARAMETER;
2315
2316 }
2317 if (cPages < 1 || cPages >= 256)
2318 {
2319 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2320 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2321 }
2322
2323 /*
2324 * Let IPRT do the work.
2325 */
2326 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
2327 if (RT_SUCCESS(rc))
2328 {
2329 int rc2;
2330 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2331 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2332 if (RT_SUCCESS(rc))
2333 {
2334 Mem.eType = MEMREF_TYPE_LOW;
2335 rc = supdrvMemAdd(&Mem, pSession);
2336 if (!rc)
2337 {
2338 for (iPage = 0; iPage < cPages; iPage++)
2339 {
2340 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2341                AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
2342 }
2343 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2344 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2345 return 0;
2346 }
2347
2348 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2349 AssertRC(rc2);
2350 }
2351
2352 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2353 AssertRC(rc2);
2354 }
2355
2356 return rc;
2357}
2358
2359
2360/**
2361 * Frees memory allocated using SUPR0LowAlloc().
2362 *
2363 * @returns IPRT status code.
2364 * @param pSession The session to which the memory was allocated.
2365 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2366 */
2367SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2368{
2369 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2370 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2371 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
2372}
2373
2374
2375
2376/**
2377 * Allocates a chunk of memory with both R0 and R3 mappings.
2378 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
2379 *
2380 * @returns IPRT status code.
2381 * @param   pSession    The session to associate the allocation with.
2382 * @param cb Number of bytes to allocate.
2383 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2384 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2385 */
2386SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
2387{
2388 int rc;
2389 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2390 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
2391
2392 /*
2393 * Validate input.
2394 */
2395 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2396 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
2397 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2398 if (cb < 1 || cb >= _4M)
2399 {
2400 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
2401 return VERR_INVALID_PARAMETER;
2402 }
2403
2404 /*
2405 * Let IPRT do the work.
2406 */
2407 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
2408 if (RT_SUCCESS(rc))
2409 {
2410 int rc2;
2411 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2412 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2413 if (RT_SUCCESS(rc))
2414 {
2415 Mem.eType = MEMREF_TYPE_MEM;
2416 rc = supdrvMemAdd(&Mem, pSession);
2417 if (!rc)
2418 {
2419 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2420 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2421 return VINF_SUCCESS;
2422 }
2423
2424 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2425 AssertRC(rc2);
2426 }
2427
2428 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2429 AssertRC(rc2);
2430 }
2431
2432 return rc;
2433}
2434
2435
2436/**
2437 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
2438 *
2439 * @returns IPRT status code.
2440 * @param pSession The session to which the memory was allocated.
2441 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2442 * @param paPages Where to store the physical addresses.
2443 */
2444SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
2445{
2446 PSUPDRVBUNDLE pBundle;
2447 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2448 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
2449
2450 /*
2451 * Validate input.
2452 */
2453 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2454 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
2455 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
2456
2457 /*
2458 * Search for the address.
2459 */
2460 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2461 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2462 {
2463 if (pBundle->cUsed > 0)
2464 {
2465 unsigned i;
2466 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2467 {
2468 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2469 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2470 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2471 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2472 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
2473 )
2474 )
2475 {
2476 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2477 size_t iPage;
2478 for (iPage = 0; iPage < cPages; iPage++)
2479 {
2480 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2481 paPages[iPage].uReserved = 0;
2482 }
2483 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2484 return VINF_SUCCESS;
2485 }
2486 }
2487 }
2488 }
2489 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2490 Log(("Failed to find %p!!!\n", (void *)uPtr));
2491 return VERR_INVALID_PARAMETER;
2492}
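
/*
 * Illustrative sketch (editor's addition): allocating a dual mapped chunk with
 * SUPR0MemAlloc() and querying its page addresses. The four page size is arbitrary;
 * the SUPPAGE array must cover cb >> PAGE_SHIFT entries.
 *
 *      RTR0PTR pvR0;
 *      RTR3PTR pvR3;
 *      int rc = SUPR0MemAlloc(pSession, 4 * PAGE_SIZE, &pvR0, &pvR3);
 *      if (RT_SUCCESS(rc))
 *      {
 *          SUPPAGE aPages[4];
 *          rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
 *          // on success aPages[i].Phys holds the physical address of page i
 *          SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
 *      }
 */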
2493
2494
2495/**
2496 * Free memory allocated by SUPR0MemAlloc().
2497 *
2498 * @returns IPRT status code.
2499 * @param pSession The session owning the allocation.
2500 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2501 */
2502SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2503{
2504 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2505 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2506 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
2507}
2508
2509
2510/**
2511 * Allocates a chunk of memory with only a ring-3 mapping.
2512 *
2513 * The memory is fixed and it's possible to query the physical addresses using
2514 * SUPR0MemGetPhys().
2515 *
2516 * @returns IPRT status code.
2517 * @param   pSession    The session to associate the allocation with.
2518 * @param cPages The number of pages to allocate.
2519 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2520 * @param paPages Where to store the addresses of the pages. Optional.
2521 */
2522SUPR0DECL(int) SUPR0PageAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2523{
2524 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2525 return SUPR0PageAllocEx(pSession, cPages, 0 /*fFlags*/, ppvR3, NULL, paPages);
2526}
2527
2528
2529/**
2530 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
2531 *
2532 * The memory is fixed and it's possible to query the physical addresses using
2533 * SUPR0MemGetPhys().
2534 *
2535 * @returns IPRT status code.
2536 * @param   pSession    The session to associate the allocation with.
2537 * @param cPages The number of pages to allocate.
2538 * @param fFlags Flags, reserved for the future. Must be zero.
2539 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2540 * NULL if no ring-3 mapping.
2541 * @param   ppvR0       Where to store the address of the Ring-0 mapping.
2542 * NULL if no ring-0 mapping.
2543 * @param paPages Where to store the addresses of the pages. Optional.
2544 */
2545SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
2546{
2547 int rc;
2548 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2549    LogFlow(("SUPR0PageAllocEx: pSession=%p cPages=%d ppvR3=%p\n", pSession, cPages, ppvR3));
2550
2551 /*
2552 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
2553 */
2554 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2555 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
2556 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2557 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
2558 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2559 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
2560 {
2561        Log(("SUPR0PageAllocEx: Illegal request cPages=%u; must be greater than 0 and not exceed VBOX_MAX_ALLOC_PAGE_COUNT.\n", cPages));
2562 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2563 }
2564
2565 /*
2566 * Let IPRT do the work.
2567 */
2568 if (ppvR0)
2569 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
2570 else
2571 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
2572 if (RT_SUCCESS(rc))
2573 {
2574 int rc2;
2575 if (ppvR3)
2576 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2577 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2578 else
2579 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
2580 if (RT_SUCCESS(rc))
2581 {
2582 Mem.eType = MEMREF_TYPE_PAGE;
2583 rc = supdrvMemAdd(&Mem, pSession);
2584 if (!rc)
2585 {
2586 if (ppvR3)
2587 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2588 if (ppvR0)
2589 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2590 if (paPages)
2591 {
2592 uint32_t iPage = cPages;
2593 while (iPage-- > 0)
2594 {
2595 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
2596 Assert(paPages[iPage] != NIL_RTHCPHYS);
2597 }
2598 }
2599 return VINF_SUCCESS;
2600 }
2601
2602 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2603 AssertRC(rc2);
2604 }
2605
2606 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2607 AssertRC(rc2);
2608 }
2609 return rc;
2610}
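
/*
 * Illustrative sketch (editor's addition): allocating pages with both a ring-0 and a
 * ring-3 mapping plus the per-page physical addresses. The page count is arbitrary.
 *
 *      RTR3PTR  pvR3;
 *      RTR0PTR  pvR0;
 *      RTHCPHYS aPages[8];
 *      int rc = SUPR0PageAllocEx(pSession, RT_ELEMENTS(aPages), 0, &pvR3, &pvR0, &aPages[0]);  // fFlags must be 0
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... share pvR3 with ring-3, use pvR0 from ring-0 ...
 *          SUPR0PageFree(pSession, pvR3);
 *      }
 */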
2611
2612
2613/**
2614 * Maps part of a page allocation that only has a ring-3 mapping into the kernel.
2615 *
2616 * The memory must have been allocated by SUPR0PageAllocEx() for this session
2617 * (without a ring-0 mapping) or locked down by SUPR0LockMem(); the requested
2618 * sub-range must be page aligned and fall inside that allocation. The mapping
2619 * is cleaned up automatically when the backing memory object is freed.
2620 *
2621 * @returns IPRT status code.
2622 * @param   pSession    The session the allocation belongs to.
2623 * @param   pvR3        The ring-3 address returned by SUPR0PageAllocEx().
2624 * @param   offSub      Byte offset of the first page to map. Must be page aligned.
2625 * @param   cbSub       Number of bytes to map. Must be non-zero and page aligned.
2626 * @param   fFlags      Flags, reserved for the future. Must be zero.
2627 * @param   ppvR0       Where to store the address of the ring-0 mapping.
2628 */
2629SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
2630 uint32_t fFlags, PRTR0PTR ppvR0)
2631{
2632 int rc;
2633 PSUPDRVBUNDLE pBundle;
2634 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2635 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
2636 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
2637
2638 /*
2639     * Validate input.
2640 */
2641 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2642 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2643 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2644 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2645 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2646 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
2647
2648 /*
2649 * Find the memory object.
2650 */
2651 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2652 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2653 {
2654 if (pBundle->cUsed > 0)
2655 {
2656 unsigned i;
2657 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2658 {
2659 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2660 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2661 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2662 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2663 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
2664 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2665 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
2666 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
2667 {
2668 hMemObj = pBundle->aMem[i].MemObj;
2669 break;
2670 }
2671 }
2672 }
2673 }
2674 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2675
2676 rc = VERR_INVALID_PARAMETER;
2677 if (hMemObj != NIL_RTR0MEMOBJ)
2678 {
2679 /*
2680         * Do some further input validations before calling IPRT.
2681 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
2682 */
2683 size_t cbMemObj = RTR0MemObjSize(hMemObj);
2684 if ( offSub < cbMemObj
2685 && cbSub <= cbMemObj
2686 && offSub + cbSub <= cbMemObj)
2687 {
2688 RTR0MEMOBJ hMapObj;
2689 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
2690 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
2691 if (RT_SUCCESS(rc))
2692 *ppvR0 = RTR0MemObjAddress(hMapObj);
2693 }
2694 else
2695 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
2696
2697 }
2698 return rc;
2699}
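
/*
 * Illustrative sketch (editor's addition): giving a ring-3 only SUPR0PageAllocEx()
 * allocation a kernel mapping of its first two pages. pvR3 is assumed to come from
 * an earlier SUPR0PageAllocEx() call for this session.
 *
 *      RTR0PTR pvR0;
 *      int rc = SUPR0PageMapKernel(pSession, pvR3, 0, 2 * PAGE_SIZE, 0, &pvR0);   // offSub=0, fFlags=0
 *      if (RT_SUCCESS(rc))
 *      {
 *          // pvR0 stays valid until the backing allocation is freed with SUPR0PageFree().
 *      }
 */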
2700
2701
2702
2703#ifdef RT_OS_WINDOWS
2704/**
2705 * Check if the pages were locked by SUPR0PageAlloc
2706 *
2707 * This function will be removed along with the lock/unlock hacks when
2708 * we've cleaned up the ring-3 code properly.
2709 *
2710 * @returns boolean
2711 * @param pSession The session to which the memory was allocated.
2712 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2713 */
2714static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2715{
2716 PSUPDRVBUNDLE pBundle;
2717 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2718    LogFlow(("supdrvPageWasLockedByPageAlloc: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2719
2720 /*
2721 * Search for the address.
2722 */
2723 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2724 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2725 {
2726 if (pBundle->cUsed > 0)
2727 {
2728 unsigned i;
2729 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2730 {
2731 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2732 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2733 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2734 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2735 {
2736 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2737 return true;
2738 }
2739 }
2740 }
2741 }
2742 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2743 return false;
2744}
2745
2746
2747/**
2748 * Get the physical addresses of memory allocated using SUPR0PageAllocEx().
2749 *
2750 * This function will be removed along with the lock/unlock hacks when
2751 * we've cleaned up the ring-3 code properly.
2752 *
2753 * @returns IPRT status code.
2754 * @param pSession The session to which the memory was allocated.
2755 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2756 * @param   cPages      Number of pages in paPages.
2757 * @param paPages Where to store the physical addresses.
2758 */
2759static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
2760{
2761 PSUPDRVBUNDLE pBundle;
2762 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2763 LogFlow(("supdrvPageGetPhys: pSession=%p pvR3=%p cPages=%#lx paPages=%p\n", pSession, (void *)pvR3, (long)cPages, paPages));
2764
2765 /*
2766 * Search for the address.
2767 */
2768 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2769 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2770 {
2771 if (pBundle->cUsed > 0)
2772 {
2773 unsigned i;
2774 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2775 {
2776 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2777 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2778 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2779 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2780 {
2781 uint32_t iPage;
2782 size_t cMaxPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2783 cPages = (uint32_t)RT_MIN(cMaxPages, cPages);
2784 for (iPage = 0; iPage < cPages; iPage++)
2785 paPages[iPage] = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2786 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2787 return VINF_SUCCESS;
2788 }
2789 }
2790 }
2791 }
2792 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2793 return VERR_INVALID_PARAMETER;
2794}
2795#endif /* RT_OS_WINDOWS */
2796
2797
2798/**
2799 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
2800 *
2801 * @returns IPRT status code.
2802 * @param pSession The session owning the allocation.
2803 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
2804 * SUPR0PageAllocEx().
2805 */
2806SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2807{
2808 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2809 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2810 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
2811}
2812
2813
2814/**
2815 * Maps the GIP into userspace and/or get the physical address of the GIP.
2816 *
2817 * @returns IPRT status code.
2818 * @param pSession Session to which the GIP mapping should belong.
2819 * @param ppGipR3 Where to store the address of the ring-3 mapping. (optional)
2820 * @param pHCPhysGip Where to store the physical address. (optional)
2821 *
2822 * @remark There is no reference counting on the mapping, so one call to this function
2823 *         counts globally as one reference. One call to SUPR0GipUnmap() will unmap the GIP
2824 * and remove the session as a GIP user.
2825 */
2826SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGip)
2827{
2828 int rc = VINF_SUCCESS;
2829 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2830 RTR3PTR pGip = NIL_RTR3PTR;
2831 RTHCPHYS HCPhys = NIL_RTHCPHYS;
2832 LogFlow(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGip=%p\n", pSession, ppGipR3, pHCPhysGip));
2833
2834 /*
2835 * Validate
2836 */
2837 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2838 AssertPtrNullReturn(ppGipR3, VERR_INVALID_POINTER);
2839 AssertPtrNullReturn(pHCPhysGip, VERR_INVALID_POINTER);
2840
2841 RTSemFastMutexRequest(pDevExt->mtxGip);
2842 if (pDevExt->pGip)
2843 {
2844 /*
2845 * Map it?
2846 */
2847 if (ppGipR3)
2848 {
2849 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
2850 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (RTR3PTR)-1, 0,
2851 RTMEM_PROT_READ, RTR0ProcHandleSelf());
2852 if (RT_SUCCESS(rc))
2853 {
2854 pGip = RTR0MemObjAddressR3(pSession->GipMapObjR3);
2855 rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
2856 }
2857 }
2858
2859 /*
2860 * Get physical address.
2861 */
2862 if (pHCPhysGip && !rc)
2863 HCPhys = pDevExt->HCPhysGip;
2864
2865 /*
2866 * Reference globally.
2867 */
2868 if (!pSession->fGipReferenced && !rc)
2869 {
2870 pSession->fGipReferenced = 1;
2871 pDevExt->cGipUsers++;
2872 if (pDevExt->cGipUsers == 1)
2873 {
2874 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2875 unsigned i;
2876
2877 LogFlow(("SUPR0GipMap: Resumes GIP updating\n"));
2878
2879 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
2880 ASMAtomicXchgU32(&pGip->aCPUs[i].u32TransactionId, pGip->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
2881 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, 0);
2882
2883 rc = RTTimerStart(pDevExt->pGipTimer, 0);
2884 AssertRC(rc); rc = VINF_SUCCESS;
2885 }
2886 }
2887 }
2888 else
2889 {
2890 rc = SUPDRV_ERR_GENERAL_FAILURE;
2891 Log(("SUPR0GipMap: GIP is not available!\n"));
2892 }
2893 RTSemFastMutexRelease(pDevExt->mtxGip);
2894
2895 /*
2896 * Write returns.
2897 */
2898 if (pHCPhysGip)
2899 *pHCPhysGip = HCPhys;
2900 if (ppGipR3)
2901 *ppGipR3 = pGip;
2902
2903#ifdef DEBUG_DARWIN_GIP
2904 OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGip=%lx pGip=%p\n", rc, (unsigned long)HCPhys, (void *)pGip));
2905#else
2906 LogFlow(( "SUPR0GipMap: returns %d *pHCPhysGip=%lx pGip=%p\n", rc, (unsigned long)HCPhys, (void *)pGip));
2907#endif
2908 return rc;
2909}
2910
2911
2912/**
2913 * Unmaps any user mapping of the GIP and terminates all GIP access
2914 * from this session.
2915 *
2916 * @returns IPRT status code.
2917 * @param pSession Session to which the GIP mapping should belong.
2918 */
2919SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
2920{
2921 int rc = VINF_SUCCESS;
2922 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2923#ifdef DEBUG_DARWIN_GIP
2924 OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
2925 pSession,
2926 pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
2927 pSession->GipMapObjR3));
2928#else
2929 LogFlow(("SUPR0GipUnmap: pSession=%p\n", pSession));
2930#endif
2931 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2932
2933 RTSemFastMutexRequest(pDevExt->mtxGip);
2934
2935 /*
2936 * Unmap anything?
2937 */
2938 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
2939 {
2940 rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
2941 AssertRC(rc);
2942 if (RT_SUCCESS(rc))
2943 pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
2944 }
2945
2946 /*
2947 * Dereference global GIP.
2948 */
2949 if (pSession->fGipReferenced && !rc)
2950 {
2951 pSession->fGipReferenced = 0;
2952 if ( pDevExt->cGipUsers > 0
2953 && !--pDevExt->cGipUsers)
2954 {
2955 LogFlow(("SUPR0GipUnmap: Suspends GIP updating\n"));
2956 rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = VINF_SUCCESS;
2957 }
2958 }
2959
2960 RTSemFastMutexRelease(pDevExt->mtxGip);
2961
2962 return rc;
2963}
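
/*
 * Illustrative sketch (editor's addition): mapping the GIP for a session and undoing
 * it again. The mapped page is a SUPGLOBALINFOPAGE; see VBox/sup.h for its layout.
 *
 *      RTR3PTR  pGipR3;
 *      RTHCPHYS HCPhysGip;
 *      int rc = SUPR0GipMap(pSession, &pGipR3, &HCPhysGip);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... return pGipR3 and HCPhysGip to the caller ...
 *          rc = SUPR0GipUnmap(pSession);   // also drops the session's GIP reference
 *      }
 */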
2964
2965
2966/**
2967 * Register a component factory with the support driver.
2968 *
2969 * This is currently restricted to kernel sessions only.
2970 *
2971 * @returns VBox status code.
2972 * @retval VINF_SUCCESS on success.
2973 * @retval VERR_NO_MEMORY if we're out of memory.
2974 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
2975 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
2976 * @retval VERR_INVALID_PARAMETER on invalid parameter.
2977 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
2978 *
2979 * @param pSession The SUPDRV session (must be a ring-0 session).
2980 * @param pFactory Pointer to the component factory registration structure.
2981 *
2982 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
2983 */
2984SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
2985{
2986 PSUPDRVFACTORYREG pNewReg;
2987 const char *psz;
2988 int rc;
2989
2990 /*
2991 * Validate parameters.
2992 */
2993 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2994 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
2995 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
2996 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
2997 psz = (const char *)memchr(pFactory->szName, '\0', sizeof(pFactory->szName));
2998 AssertReturn(psz, VERR_INVALID_PARAMETER);
2999
3000 /*
3001 * Allocate and initialize a new registration structure.
3002 */
3003 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
3004 if (pNewReg)
3005 {
3006 pNewReg->pNext = NULL;
3007 pNewReg->pFactory = pFactory;
3008 pNewReg->pSession = pSession;
3009 pNewReg->cchName = psz - &pFactory->szName[0];
3010
3011 /*
3012 * Add it to the tail of the list after checking for prior registration.
3013 */
3014 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3015 if (RT_SUCCESS(rc))
3016 {
3017 PSUPDRVFACTORYREG pPrev = NULL;
3018 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3019 while (pCur && pCur->pFactory != pFactory)
3020 {
3021 pPrev = pCur;
3022 pCur = pCur->pNext;
3023 }
3024 if (!pCur)
3025 {
3026 if (pPrev)
3027 pPrev->pNext = pNewReg;
3028 else
3029 pSession->pDevExt->pComponentFactoryHead = pNewReg;
3030 rc = VINF_SUCCESS;
3031 }
3032 else
3033 rc = VERR_ALREADY_EXISTS;
3034
3035 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3036 }
3037
3038 if (RT_FAILURE(rc))
3039 RTMemFree(pNewReg);
3040 }
3041 else
3042 rc = VERR_NO_MEMORY;
3043 return rc;
3044}
3045
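/*
 * Illustrative usage sketch (kept out of the build): how a kernel component
 * could register a factory with the interface validated above. The component
 * name, UUID string, MYCOMPONENTIF type and helper names are invented for the
 * example; only SUPDRVFACTORY, DECLCALLBACK and the SUPR0Component* calls are
 * taken from this file.
 */
#if 0 /* illustrative sketch, not built */
typedef struct MYCOMPONENTIF { uint32_t u32Version; } MYCOMPONENTIF;
static MYCOMPONENTIF g_MyInterface = { 1 };
static SUPDRVFACTORY g_MyFactory;

/** Factory callback: hand out the interface when the requested UUID matches. */
static DECLCALLBACK(void *) myQueryFactoryInterface(PCSUPDRVFACTORY pSupDrvFactory, PSUPDRVSESSION pSession,
                                                    const char *pszInterfaceUuid)
{
    if (!strcmp(pszInterfaceUuid, "12345678-1234-1234-1234-123456789abc" /* made-up UUID */))
        return (void *)&g_MyInterface;
    return NULL; /* -> SUPR0ComponentQueryFactory returns VERR_SUPDRV_INTERFACE_NOT_SUPPORTED */
}

/** Called from the component's init code with its ring-0 (IDC) session. */
static int myComponentRegister(PSUPDRVSESSION pSession)
{
    static const char s_szName[] = "MyComponent";
    memcpy(g_MyFactory.szName, s_szName, sizeof(s_szName));
    g_MyFactory.pfnQueryFactoryInterface = myQueryFactoryInterface;
    /* Pair with SUPR0ComponentDeregisterFactory(pSession, &g_MyFactory) on termination. */
    return SUPR0ComponentRegisterFactory(pSession, &g_MyFactory);
}
#endif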
3046
3047/**
3048 * Deregister a component factory.
3049 *
3050 * @returns VBox status code.
3051 * @retval VINF_SUCCESS on success.
3052 * @retval VERR_NOT_FOUND if the factory wasn't registered.
3053 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
3054 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3055 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3056 *
3057 * @param pSession The SUPDRV session (must be a ring-0 session).
3058 * @param pFactory Pointer to the component factory registration structure
3059 * previously passed to SUPR0ComponentRegisterFactory().
3060 *
3061 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
3062 */
3063SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
3064{
3065 int rc;
3066
3067 /*
3068 * Validate parameters.
3069 */
3070 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3071 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
3072 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
3073
3074 /*
3075 * Take the lock and look for the registration record.
3076 */
3077 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3078 if (RT_SUCCESS(rc))
3079 {
3080 PSUPDRVFACTORYREG pPrev = NULL;
3081 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3082 while (pCur && pCur->pFactory != pFactory)
3083 {
3084 pPrev = pCur;
3085 pCur = pCur->pNext;
3086 }
3087 if (pCur)
3088 {
3089 if (!pPrev)
3090 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
3091 else
3092 pPrev->pNext = pCur->pNext;
3093
3094 pCur->pNext = NULL;
3095 pCur->pFactory = NULL;
3096 pCur->pSession = NULL;
3097 rc = VINF_SUCCESS;
3098 }
3099 else
3100 rc = VERR_NOT_FOUND;
3101
3102 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3103
3104 RTMemFree(pCur);
3105 }
3106 return rc;
3107}
3108
3109
3110/**
3111 * Queries a component factory.
3112 *
3113 * @returns VBox status code.
3114 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3115 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3116 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
3117 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
3118 *
3119 * @param pSession The SUPDRV session.
3120 * @param pszName The name of the component factory.
3121 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
3122 * @param ppvFactoryIf Where to store the factory interface.
3123 */
3124SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
3125{
3126 const char *pszEnd;
3127 size_t cchName;
3128 int rc;
3129
3130 /*
3131 * Validate parameters.
3132 */
3133 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3134
3135 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
3136 pszEnd = memchr(pszName, '\0', RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
3137 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3138 cchName = pszEnd - pszName;
3139
3140 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
3141 pszEnd = memchr(pszInterfaceUuid, '\0', RTUUID_STR_LENGTH);
3142 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3143
3144 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
3145 *ppvFactoryIf = NULL;
3146
3147 /*
3148 * Take the lock and try all factories by this name.
3149 */
3150 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3151 if (RT_SUCCESS(rc))
3152 {
3153 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3154 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
3155 while (pCur)
3156 {
3157 if ( pCur->cchName == cchName
3158 && !memcmp(pCur->pFactory->szName, pszName, cchName))
3159 {
3160#ifdef RT_WITH_W64_UNWIND_HACK
3161 void *pvFactory = supdrvNtWrapQueryFactoryInterface((PFNRT)pCur->pFactory->pfnQueryFactoryInterface, pCur->pFactory, pSession, pszInterfaceUuid);
3162#else
3163 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
3164#endif
3165 if (pvFactory)
3166 {
3167 *ppvFactoryIf = pvFactory;
3168 rc = VINF_SUCCESS;
3169 break;
3170 }
3171 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
3172 }
3173
3174 /* next */
3175 pCur = pCur->pNext;
3176 }
3177
3178 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3179 }
3180 return rc;
3181}
3182
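/*
 * Illustrative lookup sketch for the consumer side of the factory contract
 * (kept out of the build): the component name, UUID string and MYCOMPONENTIF
 * type mirror the made-up registration example further up and do not exist in
 * this driver.
 */
#if 0 /* illustrative sketch, not built */
static int myResolveComponent(PSUPDRVSESSION pSession, struct MYCOMPONENTIF **ppIf)
{
    void *pvIf = NULL;
    int rc = SUPR0ComponentQueryFactory(pSession, "MyComponent",
                                        "12345678-1234-1234-1234-123456789abc" /* made-up UUID */,
                                        &pvIf);
    if (RT_SUCCESS(rc))
        *ppIf = (struct MYCOMPONENTIF *)pvIf; /* whatever the factory callback handed back */
    return rc;
}
#endif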
3183
3184/**
3185 * Adds a memory object to the session.
3186 *
3187 * @returns IPRT status code.
3188 * @param pMem Memory tracking structure containing the
3189 * information to track.
3190 * @param pSession The session.
3191 */
3192static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
3193{
3194 PSUPDRVBUNDLE pBundle;
3195 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3196
3197 /*
3198 * Find free entry and record the allocation.
3199 */
3200 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3201 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3202 {
3203 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
3204 {
3205 unsigned i;
3206 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3207 {
3208 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
3209 {
3210 pBundle->cUsed++;
3211 pBundle->aMem[i] = *pMem;
3212 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3213 return VINF_SUCCESS;
3214 }
3215 }
3216 AssertFailed(); /* !!this can't be happening!!! */
3217 }
3218 }
3219 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3220
3221 /*
3222 * Need to allocate a new bundle.
3223 * Insert into the last entry in the bundle.
3224 */
3225 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
3226 if (!pBundle)
3227 return VERR_NO_MEMORY;
3228
3229 /* take last entry. */
3230 pBundle->cUsed++;
3231 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
3232
3233 /* insert into list. */
3234 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3235 pBundle->pNext = pSession->Bundle.pNext;
3236 pSession->Bundle.pNext = pBundle;
3237 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3238
3239 return VINF_SUCCESS;
3240}
3241
3242
3243/**
3244 * Releases a memory object referenced by pointer and type.
3245 *
3246 * @returns IPRT status code.
3247 * @param pSession Session data.
3248 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
3249 * @param eType Memory type.
3250 */
3251static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
3252{
3253 PSUPDRVBUNDLE pBundle;
3254 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3255
3256 /*
3257 * Validate input.
3258 */
3259 if (!uPtr)
3260 {
3261 Log(("Illegal address %p\n", (void *)uPtr));
3262 return VERR_INVALID_PARAMETER;
3263 }
3264
3265 /*
3266 * Search for the address.
3267 */
3268 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3269 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3270 {
3271 if (pBundle->cUsed > 0)
3272 {
3273 unsigned i;
3274 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3275 {
3276 if ( pBundle->aMem[i].eType == eType
3277 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3278 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3279 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3280 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
3281 )
3282 {
3283 /* Make a copy of it and release it outside the spinlock. */
3284 SUPDRVMEMREF Mem = pBundle->aMem[i];
3285 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
3286 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
3287 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
3288 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3289
3290 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
3291 {
3292 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
3293 AssertRC(rc); /** @todo figure out how to handle this. */
3294 }
3295 if (Mem.MemObj != NIL_RTR0MEMOBJ)
3296 {
3297 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
3298 AssertRC(rc); /** @todo figure out how to handle this. */
3299 }
3300 return VINF_SUCCESS;
3301 }
3302 }
3303 }
3304 }
3305 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3306 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
3307 return VERR_INVALID_PARAMETER;
3308}
3309
3310
3311/**
3312 * Opens an image. If it's the first time it's opened the caller must upload
3313 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
3314 *
3315 * This is the 1st step of the loading.
3316 *
3317 * @returns IPRT status code.
3318 * @param pDevExt Device globals.
3319 * @param pSession Session data.
3320 * @param pReq The open request.
3321 */
3322static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
3323{
3324 PSUPDRVLDRIMAGE pImage;
3325 unsigned cb;
3326 void *pv;
3327 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
3328 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pReq->u.In.szName, pReq->u.In.cbImage));
3329
3330 /*
3331 * Check if we got an instance of the image already.
3332 */
3333 RTSemFastMutexRequest(pDevExt->mtxLdr);
3334 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3335 {
3336 if ( pImage->szName[cchName] == '\0'
3337 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
3338 {
3339 pImage->cUsage++;
3340 pReq->u.Out.pvImageBase = pImage->pvImage;
3341 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
3342 supdrvLdrAddUsage(pSession, pImage);
3343 RTSemFastMutexRelease(pDevExt->mtxLdr);
3344 return VINF_SUCCESS;
3345 }
3346 }
3347 /* (not found - add it!) */
3348
3349 /*
3350 * Allocate memory.
3351 */
3352 cb = pReq->u.In.cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
3353 pv = RTMemExecAlloc(cb);
3354 if (!pv)
3355 {
3356 RTSemFastMutexRelease(pDevExt->mtxLdr);
3357 Log(("supdrvIOCtl_LdrOpen: RTMemExecAlloc(%u) failed\n", cb));
3358 return VERR_NO_MEMORY;
3359 }
3360
3361 /*
3362 * Setup and link in the LDR stuff.
3363 */
3364 pImage = (PSUPDRVLDRIMAGE)pv;
3365 pImage->pvImage = RT_ALIGN_P(pImage + 1, 32);
3366 pImage->cbImage = pReq->u.In.cbImage;
3367 pImage->pfnModuleInit = NULL;
3368 pImage->pfnModuleTerm = NULL;
3369 pImage->pfnServiceReqHandler = NULL;
3370 pImage->uState = SUP_IOCTL_LDR_OPEN;
3371 pImage->cUsage = 1;
3372 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
3373
3374 pImage->pNext = pDevExt->pLdrImages;
3375 pDevExt->pLdrImages = pImage;
3376
3377 supdrvLdrAddUsage(pSession, pImage);
3378
3379 pReq->u.Out.pvImageBase = pImage->pvImage;
3380 pReq->u.Out.fNeedsLoading = true;
3381 RTSemFastMutexRelease(pDevExt->mtxLdr);
3382
3383#if defined(RT_OS_WINDOWS) && defined(DEBUG)
3384 SUPR0Printf("VBoxDrv: windbg> .reload /f %s=%#p\n", pImage->szName, pImage->pvImage);
3385#endif
3386 return VINF_SUCCESS;
3387}
3388
3389
3390/**
3391 * Loads the image bits.
3392 *
3393 * This is the 2nd step of the loading.
3394 *
3395 * @returns IPRT status code.
3396 * @param pDevExt Device globals.
3397 * @param pSession Session data.
3398 * @param pReq The request.
3399 */
3400static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
3401{
3402 PSUPDRVLDRUSAGE pUsage;
3403 PSUPDRVLDRIMAGE pImage;
3404 int rc;
3405 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImage));
3406
3407 /*
3408 * Find the ldr image.
3409 */
3410 RTSemFastMutexRequest(pDevExt->mtxLdr);
3411 pUsage = pSession->pLdrUsage;
3412 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3413 pUsage = pUsage->pNext;
3414 if (!pUsage)
3415 {
3416 RTSemFastMutexRelease(pDevExt->mtxLdr);
3417 Log(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
3418 return VERR_INVALID_HANDLE;
3419 }
3420 pImage = pUsage->pImage;
3421 if (pImage->cbImage != pReq->u.In.cbImage)
3422 {
3423 RTSemFastMutexRelease(pDevExt->mtxLdr);
3424 Log(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pReq->u.In.cbImage));
3425 return VERR_INVALID_HANDLE;
3426 }
3427 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
3428 {
3429 unsigned uState = pImage->uState;
3430 RTSemFastMutexRelease(pDevExt->mtxLdr);
3431 if (uState != SUP_IOCTL_LDR_LOAD)
3432 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
3433 return SUPDRV_ERR_ALREADY_LOADED;
3434 }
3435 switch (pReq->u.In.eEPType)
3436 {
3437 case SUPLDRLOADEP_NOTHING:
3438 break;
3439
3440 case SUPLDRLOADEP_VMMR0:
3441 if ( !pReq->u.In.EP.VMMR0.pvVMMR0
3442 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryInt
3443 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryFast
3444 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryEx)
3445 {
3446 RTSemFastMutexRelease(pDevExt->mtxLdr);
3447 Log(("NULL pointer: pvVMMR0=%p pvVMMR0EntryInt=%p pvVMMR0EntryFast=%p pvVMMR0EntryEx=%p!\n",
3448 pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3449 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3450 return VERR_INVALID_PARAMETER;
3451 }
3452 /** @todo validate pReq->u.In.EP.VMMR0.pvVMMR0 against pvImage! */
3453 if ( (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryInt - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3454 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryFast - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3455 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryEx - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3456 {
3457 RTSemFastMutexRelease(pDevExt->mtxLdr);
3458 Log(("Out of range (%p LB %#x): pvVMMR0EntryInt=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p is NULL!\n",
3459 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3460 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3461 return VERR_INVALID_PARAMETER;
3462 }
3463 break;
3464
3465 case SUPLDRLOADEP_SERVICE:
3466 if (!pReq->u.In.EP.Service.pfnServiceReq)
3467 {
3468 RTSemFastMutexRelease(pDevExt->mtxLdr);
3469 Log(("NULL pointer: pfnServiceReq=%p!\n", pReq->u.In.EP.Service.pfnServiceReq));
3470 return VERR_INVALID_PARAMETER;
3471 }
3472 if ((uintptr_t)pReq->u.In.EP.Service.pfnServiceReq - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3473 {
3474 RTSemFastMutexRelease(pDevExt->mtxLdr);
3475 Log(("Out of range (%p LB %#x): pfnServiceReq=%p!\n",
3476 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.Service.pfnServiceReq));
3477 return VERR_INVALID_PARAMETER;
3478 }
3479 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
3480 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
3481 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
3482 {
3483 RTSemFastMutexRelease(pDevExt->mtxLdr);
3484 Log(("Out of range (%p LB %#x): apvReserved={%p,%p,%p} MBZ!\n",
3485 pImage->pvImage, pReq->u.In.cbImage,
3486 pReq->u.In.EP.Service.apvReserved[0],
3487 pReq->u.In.EP.Service.apvReserved[1],
3488 pReq->u.In.EP.Service.apvReserved[2]));
3489 return VERR_INVALID_PARAMETER;
3490 }
3491 break;
3492
3493 default:
3494 RTSemFastMutexRelease(pDevExt->mtxLdr);
3495 Log(("Invalid eEPType=%d\n", pReq->u.In.eEPType));
3496 return VERR_INVALID_PARAMETER;
3497 }
3498 if ( pReq->u.In.pfnModuleInit
3499 && (uintptr_t)pReq->u.In.pfnModuleInit - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3500 {
3501 RTSemFastMutexRelease(pDevExt->mtxLdr);
3502 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
3503 pReq->u.In.pfnModuleInit, pImage->pvImage, pReq->u.In.cbImage));
3504 return VERR_INVALID_PARAMETER;
3505 }
3506 if ( pReq->u.In.pfnModuleTerm
3507 && (uintptr_t)pReq->u.In.pfnModuleTerm - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3508 {
3509 RTSemFastMutexRelease(pDevExt->mtxLdr);
3510 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
3511 pReq->u.In.pfnModuleTerm, pImage->pvImage, pReq->u.In.cbImage));
3512 return VERR_INVALID_PARAMETER;
3513 }
3514
3515 /*
3516 * Copy the memory.
3517 */
3518 /* no need to do try/except as this is a buffered request. */
3519 memcpy(pImage->pvImage, &pReq->u.In.achImage[0], pImage->cbImage);
3520 pImage->uState = SUP_IOCTL_LDR_LOAD;
3521 pImage->pfnModuleInit = pReq->u.In.pfnModuleInit;
3522 pImage->pfnModuleTerm = pReq->u.In.pfnModuleTerm;
3523 pImage->offSymbols = pReq->u.In.offSymbols;
3524 pImage->cSymbols = pReq->u.In.cSymbols;
3525 pImage->offStrTab = pReq->u.In.offStrTab;
3526 pImage->cbStrTab = pReq->u.In.cbStrTab;
3527
3528 /*
3529 * Update any entry points.
3530 */
3531 switch (pReq->u.In.eEPType)
3532 {
3533 default:
3534 case SUPLDRLOADEP_NOTHING:
3535 rc = VINF_SUCCESS;
3536 break;
3537 case SUPLDRLOADEP_VMMR0:
3538 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3539 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
3540 break;
3541 case SUPLDRLOADEP_SERVICE:
3542 pImage->pfnServiceReqHandler = pReq->u.In.EP.Service.pfnServiceReq;
3543 rc = VINF_SUCCESS;
3544 break;
3545 }
3546
3547 /*
3548 * On success call the module initialization.
3549 */
3550 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
3551 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
3552 {
3553 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
3554#ifdef RT_WITH_W64_UNWIND_HACK
3555 rc = supdrvNtWrapModuleInit((PFNRT)pImage->pfnModuleInit);
3556#else
3557 rc = pImage->pfnModuleInit();
3558#endif
3559 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
3560 supdrvLdrUnsetVMMR0EPs(pDevExt);
3561 }
3562
3563 if (rc)
3564 pImage->uState = SUP_IOCTL_LDR_OPEN;
3565
3566 RTSemFastMutexRelease(pDevExt->mtxLdr);
3567 return rc;
3568}
3569
3570
3571/**
3572 * Frees a previously loaded (prep'ed) image.
3573 *
3574 * @returns IPRT status code.
3575 * @param pDevExt Device globals.
3576 * @param pSession Session data.
3577 * @param pReq The request.
3578 */
3579static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
3580{
3581 int rc;
3582 PSUPDRVLDRUSAGE pUsagePrev;
3583 PSUPDRVLDRUSAGE pUsage;
3584 PSUPDRVLDRIMAGE pImage;
3585 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
3586
3587 /*
3588 * Find the ldr image.
3589 */
3590 RTSemFastMutexRequest(pDevExt->mtxLdr);
3591 pUsagePrev = NULL;
3592 pUsage = pSession->pLdrUsage;
3593 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3594 {
3595 pUsagePrev = pUsage;
3596 pUsage = pUsage->pNext;
3597 }
3598 if (!pUsage)
3599 {
3600 RTSemFastMutexRelease(pDevExt->mtxLdr);
3601 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
3602 return VERR_INVALID_HANDLE;
3603 }
3604
3605 /*
3606 * Check if we can remove anything.
3607 */
3608 rc = VINF_SUCCESS;
3609 pImage = pUsage->pImage;
3610 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
3611 {
3612 /*
3613 * Check if there are any objects with destructors in the image, if
3614 * so leave it for the session cleanup routine so we get a chance to
3615 * clean things up in the right order and not leave them all dangling.
3616 */
3617 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3618 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
3619 if (pImage->cUsage <= 1)
3620 {
3621 PSUPDRVOBJ pObj;
3622 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
3623 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3624 {
3625 rc = VERR_DANGLING_OBJECTS;
3626 break;
3627 }
3628 }
3629 else
3630 {
3631 PSUPDRVUSAGE pGenUsage;
3632 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
3633 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3634 {
3635 rc = VERR_DANGLING_OBJECTS;
3636 break;
3637 }
3638 }
3639 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
3640 if (rc == VINF_SUCCESS)
3641 {
3642 /* unlink it */
3643 if (pUsagePrev)
3644 pUsagePrev->pNext = pUsage->pNext;
3645 else
3646 pSession->pLdrUsage = pUsage->pNext;
3647
3648 /* free it */
3649 pUsage->pImage = NULL;
3650 pUsage->pNext = NULL;
3651 RTMemFree(pUsage);
3652
3653 /*
3654 * Dereference the image.
3655 */
3656 if (pImage->cUsage <= 1)
3657 supdrvLdrFree(pDevExt, pImage);
3658 else
3659 pImage->cUsage--;
3660 }
3661 else
3662 {
3663 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
3664 rc = VINF_SUCCESS; /** @todo BRANCH-2.1: remove this after branching. */
3665 }
3666 }
3667 else
3668 {
3669 /*
3670 * Dereference both image and usage.
3671 */
3672 pImage->cUsage--;
3673 pUsage->cUsage--;
3674 }
3675
3676 RTSemFastMutexRelease(pDevExt->mtxLdr);
3677 return rc;
3678}
3679
3680
3681/**
3682 * Gets the address of a symbol in an open image.
3683 *
3684 * @returns VINF_SUCCESS on success.
3685 * @returns VERR_* on failure.
3686 * @param pDevExt Device globals.
3687 * @param pSession Session data.
3688 * @param pReq The request buffer.
3689 */
3690static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
3691{
3692 PSUPDRVLDRIMAGE pImage;
3693 PSUPDRVLDRUSAGE pUsage;
3694 uint32_t i;
3695 PSUPLDRSYM paSyms;
3696 const char *pchStrings;
3697 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
3698 void *pvSymbol = NULL;
3699 int rc = VERR_GENERAL_FAILURE;
3700 Log3(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
3701
3702 /*
3703 * Find the ldr image.
3704 */
3705 RTSemFastMutexRequest(pDevExt->mtxLdr);
3706 pUsage = pSession->pLdrUsage;
3707 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3708 pUsage = pUsage->pNext;
3709 if (!pUsage)
3710 {
3711 RTSemFastMutexRelease(pDevExt->mtxLdr);
3712 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3713 return VERR_INVALID_HANDLE;
3714 }
3715 pImage = pUsage->pImage;
3716 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3717 {
3718 unsigned uState = pImage->uState;
3719 RTSemFastMutexRelease(pDevExt->mtxLdr);
3720 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3721 return VERR_ALREADY_LOADED;
3722 }
3723
3724 /*
3725 * Search the symbol strings.
3726 */
3727 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3728 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3729 for (i = 0; i < pImage->cSymbols; i++)
3730 {
3731 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3732 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3733 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
3734 {
3735 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3736 rc = VINF_SUCCESS;
3737 break;
3738 }
3739 }
3740 RTSemFastMutexRelease(pDevExt->mtxLdr);
3741 pReq->u.Out.pvSymbol = pvSymbol;
3742 return rc;
3743}
3744
3745
3746/**
3747 * Gets the address of a symbol in an open image or the support driver.
3748 *
3749 * @returns VINF_SUCCESS on success.
3750 * @returns VERR_* on failure.
3751 * @param pDevExt Device globals.
3752 * @param pSession Session data.
3753 * @param pReq The request buffer.
3754 */
3755static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
3756{
3757 int rc = VINF_SUCCESS;
3758 const char *pszSymbol = pReq->u.In.pszSymbol;
3759 const char *pszModule = pReq->u.In.pszModule;
3760 size_t cbSymbol;
3761 char const *pszEnd;
3762 uint32_t i;
3763
3764 /*
3765 * Input validation.
3766 */
3767 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
3768 pszEnd = (char *)memchr(pszSymbol, '\0', 512);
3769 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3770 cbSymbol = pszEnd - pszSymbol + 1;
3771
3772 if (pszModule)
3773 {
3774 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
3775 pszEnd = (char *)memchr(pszModule, '\0', 64);
3776 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3777 }
3778 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
3779
3780
3781 if ( !pszModule
3782 || !strcmp(pszModule, "SupDrv"))
3783 {
3784 /*
3785 * Search the support driver export table.
3786 */
3787 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
3788 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
3789 {
3790 pReq->u.Out.pfnSymbol = g_aFunctions[i].pfn;
3791 break;
3792 }
3793 }
3794 else
3795 {
3796 /*
3797 * Find the loader image.
3798 */
3799 PSUPDRVLDRIMAGE pImage;
3800
3801 RTSemFastMutexRequest(pDevExt->mtxLdr);
3802
3803 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3804 if (!strcmp(pImage->szName, pszModule))
3805 break;
3806 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
3807 {
3808 /*
3809 * Search the symbol strings.
3810 */
3811 const char *pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3812 PCSUPLDRSYM paSyms = (PCSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3813 for (i = 0; i < pImage->cSymbols; i++)
3814 {
3815 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3816 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3817 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
3818 {
3819 /*
3820 * Found it! Calc the symbol address and add a reference to the module.
3821 */
3822 pReq->u.Out.pfnSymbol = (PFNRT)((uint8_t *)pImage->pvImage + paSyms[i].offSymbol);
3823 rc = supdrvLdrAddUsage(pSession, pImage);
3824 break;
3825 }
3826 }
3827 }
3828 else
3829 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
3830
3831 RTSemFastMutexRelease(pDevExt->mtxLdr);
3832 }
3833 return rc;
3834}
3835
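/*
 * Illustrative sketch (kept out of the build) of what a SupDrv-export lookup
 * request carries when it reaches the function above. The symbol name and the
 * surrounding context (pDevExt, pSession, request header setup) are
 * assumptions; the In/Out fields match the code above.
 */
#if 0 /* illustrative sketch, not built */
SUPDRVIDCREQGETSYM Req;
PFNRT pfnSymbol = NULL;
/* (The request header/size fields are normally filled in by the IDC dispatch code.) */
Req.u.In.pszModule = NULL;          /* NULL or "SupDrv": search the driver's own export table. */
Req.u.In.pszSymbol = "SUPR0Printf"; /* assumed to be one of the g_aFunctions exports */
int rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, &Req);
if (RT_SUCCESS(rc))
    pfnSymbol = Req.u.Out.pfnSymbol;
#endif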
3836
3837/**
3838 * Updates the VMMR0 entry point pointers.
3839 *
3840 * @returns IPRT status code.
3841 * @param pDevExt Device globals.
3842 * @param pSession Session data.
3843 * @param pVMMR0 VMMR0 image handle.
3844 * @param pvVMMR0EntryInt VMMR0EntryInt address.
3845 * @param pvVMMR0EntryFast VMMR0EntryFast address.
3846 * @param pvVMMR0EntryEx VMMR0EntryEx address.
3847 * @remark Caller must own the loader mutex.
3848 */
3849static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
3850{
3851 int rc = VINF_SUCCESS;
3852 LogFlow(("supdrvLdrSetVMMR0EPs: pvVMMR0=%p pvVMMR0EntryInt=%p\n", pvVMMR0, pvVMMR0EntryInt));
3853
3854
3855 /*
3856 * Check if not yet set.
3857 */
3858 if (!pDevExt->pvVMMR0)
3859 {
3860 pDevExt->pvVMMR0 = pvVMMR0;
3861 pDevExt->pfnVMMR0EntryInt = pvVMMR0EntryInt;
3862 pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
3863 pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
3864 }
3865 else
3866 {
3867 /*
3868 * Return failure or success depending on whether the values match or not.
3869 */
3870 if ( pDevExt->pvVMMR0 != pvVMMR0
3871 || (void *)pDevExt->pfnVMMR0EntryInt != pvVMMR0EntryInt
3872 || (void *)pDevExt->pfnVMMR0EntryFast != pvVMMR0EntryFast
3873 || (void *)pDevExt->pfnVMMR0EntryEx != pvVMMR0EntryEx)
3874 {
3875 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
3876 rc = VERR_INVALID_PARAMETER;
3877 }
3878 }
3879 return rc;
3880}
3881
3882
3883/**
3884 * Unsets the VMMR0 entry points installed by supdrvLdrSetVMMR0EPs.
3885 *
3886 * @param pDevExt Device globals.
3887 */
3888static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
3889{
3890 pDevExt->pvVMMR0 = NULL;
3891 pDevExt->pfnVMMR0EntryInt = NULL;
3892 pDevExt->pfnVMMR0EntryFast = NULL;
3893 pDevExt->pfnVMMR0EntryEx = NULL;
3894}
3895
3896
3897/**
3898 * Adds a usage reference in the specified session of an image.
3899 *
3900 * Called while owning the loader semaphore.
3901 *
3902 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
3903 * @param pSession Session in question.
3904 * @param pImage Image which the session is using.
3905 */
3906static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
3907{
3908 PSUPDRVLDRUSAGE pUsage;
3909 LogFlow(("supdrvLdrAddUsage: pImage=%p\n", pImage));
3910
3911 /*
3912 * Referenced it already?
3913 */
3914 pUsage = pSession->pLdrUsage;
3915 while (pUsage)
3916 {
3917 if (pUsage->pImage == pImage)
3918 {
3919 pUsage->cUsage++;
3920 return VINF_SUCCESS;
3921 }
3922 pUsage = pUsage->pNext;
3923 }
3924
3925 /*
3926 * Allocate new usage record.
3927 */
3928 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
3929 AssertReturn(pUsage, VERR_NO_MEMORY);
3930 pUsage->cUsage = 1;
3931 pUsage->pImage = pImage;
3932 pUsage->pNext = pSession->pLdrUsage;
3933 pSession->pLdrUsage = pUsage;
3934 return VINF_SUCCESS;
3935}
3936
3937
3938/**
3939 * Frees a load image.
3940 *
3941 * @param pDevExt Pointer to device extension.
3942 * @param pImage Pointer to the image we're gonna free.
3943 * This image must exist!
3944 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
3945 */
3946static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
3947{
3948 PSUPDRVLDRIMAGE pImagePrev;
3949 LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));
3950
3951 /* find it - argh, should've used a doubly linked list. */
3952 Assert(pDevExt->pLdrImages);
3953 pImagePrev = NULL;
3954 if (pDevExt->pLdrImages != pImage)
3955 {
3956 pImagePrev = pDevExt->pLdrImages;
3957 while (pImagePrev->pNext != pImage)
3958 pImagePrev = pImagePrev->pNext;
3959 Assert(pImagePrev->pNext == pImage);
3960 }
3961
3962 /* unlink */
3963 if (pImagePrev)
3964 pImagePrev->pNext = pImage->pNext;
3965 else
3966 pDevExt->pLdrImages = pImage->pNext;
3967
3968 /* check if this is VMMR0.r0 and unset its entry point pointers if so. */
3969 if (pDevExt->pvVMMR0 == pImage->pvImage)
3970 supdrvLdrUnsetVMMR0EPs(pDevExt);
3971
3972 /* check for objects with destructors in this image. (Shouldn't happen.) */
3973 if (pDevExt->pObjs)
3974 {
3975 unsigned cObjs = 0;
3976 PSUPDRVOBJ pObj;
3977 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3978 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
3979 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
3980 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3981 {
3982 pObj->pfnDestructor = NULL;
3983 cObjs++;
3984 }
3985 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
3986 if (cObjs)
3987 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
3988 }
3989
3990 /* call termination function if fully loaded. */
3991 if ( pImage->pfnModuleTerm
3992 && pImage->uState == SUP_IOCTL_LDR_LOAD)
3993 {
3994 LogFlow(("supdrvLdrFree: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
3995#ifdef RT_WITH_W64_UNWIND_HACK
3996 supdrvNtWrapModuleTerm(pImage->pfnModuleTerm);
3997#else
3998 pImage->pfnModuleTerm();
3999#endif
4000 }
4001
4002 /* free the image */
4003 pImage->cUsage = 0;
4004 pImage->pNext = 0;
4005 pImage->uState = SUP_IOCTL_LDR_FREE;
4006 RTMemExecFree(pImage);
4007}
4008
4009
4010/**
4011 * Implements the service call request.
4012 *
4013 * @returns VBox status code.
4014 * @param pDevExt The device extension.
4015 * @param pSession The calling session.
4016 * @param pReq The request packet, valid.
4017 */
4018static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
4019{
4020#if !defined(RT_OS_WINDOWS) || defined(DEBUG)
4021 int rc;
4022
4023 /*
4024 * Find the module first in the module referenced by the calling session.
4025 */
4026 rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
4027 if (RT_SUCCESS(rc))
4028 {
4029 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
4030 PSUPDRVLDRUSAGE pUsage;
4031
4032 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
4033 if ( pUsage->pImage->pfnServiceReqHandler
4034 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
4035 {
4036 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
4037 break;
4038 }
4039 RTSemFastMutexRelease(pDevExt->mtxLdr);
4040
4041 if (pfnServiceReqHandler)
4042 {
4043 /*
4044 * Call it.
4045 */
4046 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
4047#ifdef RT_WITH_W64_UNWIND_HACK
4048 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
4049#else
4050 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
4051#endif
4052 else
4053#ifdef RT_WITH_W64_UNWIND_HACK
4054 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation,
4055 pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
4056#else
4057 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
4058#endif
4059 }
4060 else
4061 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
4062 }
4063
4064 /* log it */
4065 if ( RT_FAILURE(rc)
4066 && rc != VERR_INTERRUPTED
4067 && rc != VERR_TIMEOUT)
4068 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4069 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4070 else
4071 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4072 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4073 return rc;
4074#else /* RT_OS_WINDOWS && !DEBUG */
4075 return VERR_NOT_IMPLEMENTED;
4076#endif /* RT_OS_WINDOWS && !DEBUG */
4077}
4078
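/*
 * Illustrative sketch (kept out of the build) of a service request handler of
 * the kind invoked above; the handler name and operation numbers are invented,
 * and the parameter list simply follows the call made by
 * supdrvIOCtl_CallServiceModule.
 */
#if 0 /* illustrative sketch, not built */
static DECLCALLBACK(int) myServiceReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
                                             uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
{
    switch (uOperation)
    {
        case 0: /* made-up "ping" operation, no request packet expected. */
            return pReqHdr ? VERR_INVALID_PARAMETER : VINF_SUCCESS;

        default: /* unknown operations are rejected. */
            return VERR_NOT_SUPPORTED;
    }
}
#endif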
4079
4080/**
4081 * Implements the logger settings request.
4082 *
4083 * @returns VBox status code.
4084 * @param pDevExt The device extension.
4085 * @param pSession The caller's session.
4086 * @param pReq The request.
4087 */
4088static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq)
4089{
4090 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
4091 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
4092 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
4093 PRTLOGGER pLogger = NULL;
4094 int rc;
4095
4096 /*
4097 * Some further validation.
4098 */
4099 switch (pReq->u.In.fWhat)
4100 {
4101 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
4102 case SUPLOGGERSETTINGS_WHAT_CREATE:
4103 break;
4104
4105 case SUPLOGGERSETTINGS_WHAT_DESTROY:
4106 if (*pszGroup || *pszFlags || *pszDest)
4107 return VERR_INVALID_PARAMETER;
4108 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
4109 return VERR_ACCESS_DENIED;
4110 break;
4111
4112 default:
4113 return VERR_INTERNAL_ERROR;
4114 }
4115
4116 /*
4117 * Get the logger.
4118 */
4119 switch (pReq->u.In.fWhich)
4120 {
4121 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4122 pLogger = RTLogGetDefaultInstance();
4123 break;
4124
4125 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4126 pLogger = RTLogRelDefaultInstance();
4127 break;
4128
4129 default:
4130 return VERR_INTERNAL_ERROR;
4131 }
4132
4133 /*
4134 * Do the job.
4135 */
4136 switch (pReq->u.In.fWhat)
4137 {
4138 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
4139 if (pLogger)
4140 {
4141 rc = RTLogFlags(pLogger, pszFlags);
4142 if (RT_SUCCESS(rc))
4143 rc = RTLogGroupSettings(pLogger, pszGroup);
4144 NOREF(pszDest);
4145 }
4146 else
4147 rc = VERR_NOT_FOUND;
4148 break;
4149
4150 case SUPLOGGERSETTINGS_WHAT_CREATE:
4151 {
4152 if (pLogger)
4153 rc = VERR_ALREADY_EXISTS;
4154 else
4155 {
4156 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
4157
4158 rc = RTLogCreate(&pLogger,
4159 0 /* fFlags */,
4160 pszGroup,
4161 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
4162 ? "VBOX_LOG"
4163 : "VBOX_RELEASE_LOG",
4164 RT_ELEMENTS(s_apszGroups),
4165 s_apszGroups,
4166 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
4167 NULL);
4168 if (RT_SUCCESS(rc))
4169 {
4170 rc = RTLogFlags(pLogger, pszFlags);
4171 NOREF(pszDest);
4172 if (RT_SUCCESS(rc))
4173 {
4174 switch (pReq->u.In.fWhich)
4175 {
4176 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4177 pLogger = RTLogSetDefaultInstance(pLogger);
4178 break;
4179 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4180 pLogger = RTLogRelSetDefaultInstance(pLogger);
4181 break;
4182 }
4183 }
4184 RTLogDestroy(pLogger);
4185 }
4186 }
4187 break;
4188 }
4189
4190 case SUPLOGGERSETTINGS_WHAT_DESTROY:
4191 switch (pReq->u.In.fWhich)
4192 {
4193 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4194 pLogger = RTLogSetDefaultInstance(NULL);
4195 break;
4196 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4197 pLogger = RTLogRelSetDefaultInstance(NULL);
4198 break;
4199 }
4200 rc = RTLogDestroy(pLogger);
4201 break;
4202
4203 default:
4204 {
4205 rc = VERR_INTERNAL_ERROR;
4206 break;
4207 }
4208 }
4209
4210 return rc;
4211}
4212
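/*
 * What the SUPLOGGERSETTINGS_WHAT_SETTINGS case boils down to for the debug
 * logger, shown standalone (kept out of the build); the flag and group
 * strings here are examples only.
 */
#if 0 /* illustrative sketch, not built */
PRTLOGGER pLogger = RTLogGetDefaultInstance();
if (pLogger)
{
    int rc2 = RTLogFlags(pLogger, "enabled");        /* e.g. switch logging on */
    if (RT_SUCCESS(rc2))
        rc2 = RTLogGroupSettings(pLogger, "+all.e"); /* e.g. all groups, error level */
}
#endif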
4213
4214/**
4215 * Gets the paging mode of the current CPU.
4216 *
4217 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
4218 */
4219SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
4220{
4221 SUPPAGINGMODE enmMode;
4222
4223 RTR0UINTREG cr0 = ASMGetCR0();
4224 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
4225 enmMode = SUPPAGINGMODE_INVALID;
4226 else
4227 {
4228 RTR0UINTREG cr4 = ASMGetCR4();
4229 uint32_t fNXEPlusLMA = 0;
4230 if (cr4 & X86_CR4_PAE)
4231 {
4232 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
4233 if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
4234 {
4235 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
4236 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
4237 fNXEPlusLMA |= RT_BIT(0);
4238 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
4239 fNXEPlusLMA |= RT_BIT(1);
4240 }
4241 }
4242
4243 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
4244 {
4245 case 0:
4246 enmMode = SUPPAGINGMODE_32_BIT;
4247 break;
4248
4249 case X86_CR4_PGE:
4250 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
4251 break;
4252
4253 case X86_CR4_PAE:
4254 enmMode = SUPPAGINGMODE_PAE;
4255 break;
4256
4257 case X86_CR4_PAE | RT_BIT(0):
4258 enmMode = SUPPAGINGMODE_PAE_NX;
4259 break;
4260
4261 case X86_CR4_PAE | X86_CR4_PGE:
4262 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4263 break;
4264
4265 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4266 enmMode = SUPPAGINGMODE_PAE_GLOBAL_NX;
4267 break;
4268
4269 case RT_BIT(1) | X86_CR4_PAE:
4270 enmMode = SUPPAGINGMODE_AMD64;
4271 break;
4272
4273 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
4274 enmMode = SUPPAGINGMODE_AMD64_NX;
4275 break;
4276
4277 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
4278 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
4279 break;
4280
4281 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4282 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4283 break;
4284
4285 default:
4286 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4287 enmMode = SUPPAGINGMODE_INVALID;
4288 break;
4289 }
4290 }
4291 return enmMode;
4292}
4293
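/*
 * A small usage sketch (kept out of the build): deriving whether the NX bit is
 * active from the mode returned above; the fNxActive variable is illustrative.
 */
#if 0 /* illustrative sketch, not built */
SUPPAGINGMODE enmPagingMode = SUPR0GetPagingMode();
bool fNxActive =    enmPagingMode == SUPPAGINGMODE_PAE_NX
                 || enmPagingMode == SUPPAGINGMODE_PAE_GLOBAL_NX
                 || enmPagingMode == SUPPAGINGMODE_AMD64_NX
                 || enmPagingMode == SUPPAGINGMODE_AMD64_GLOBAL_NX;
/* SUPPAGINGMODE_INVALID means paging and/or protected mode is off. */
#endif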
4294
4295/**
4296 * Enables or disables hardware virtualization extensions using native OS APIs.
4297 *
4298 * @returns VBox status code.
4299 * @retval VINF_SUCCESS on success.
4300 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4301 *
4302 * @param fEnable Whether to enable or disable.
4303 */
4304SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4305{
4306#ifdef RT_OS_DARWIN
4307 return supdrvOSEnableVTx(fEnable);
4308#else
4309 return VERR_NOT_SUPPORTED;
4310#endif
4311}
4312
4313
4314/**
4315 * Creates the GIP.
4316 *
4317 * @returns VBox status code.
4318 * @param pDevExt Instance data. GIP stuff may be updated.
4319 */
4320static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
4321{
4322 PSUPGLOBALINFOPAGE pGip;
4323 RTHCPHYS HCPhysGip;
4324 uint32_t u32SystemResolution;
4325 uint32_t u32Interval;
4326 int rc;
4327
4328 LogFlow(("supdrvGipCreate:\n"));
4329
4330 /* assert order */
4331 Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
4332 Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
4333 Assert(!pDevExt->pGipTimer);
4334
4335 /*
4336 * Allocate a suitable page with a default kernel mapping.
4337 */
4338 rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
4339 if (RT_FAILURE(rc))
4340 {
4341 OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
4342 return rc;
4343 }
4344 pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
4345 HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);
4346
4347#if 0 /** @todo Disabled this as we didn't use to do it before and it causes unnecessary stress on laptops.
4348 * It only applies to Windows and should probably be revisited later, if possible made part of the
4349 * timer code (return min granularity in RTTimerGetSystemGranularity and set it in RTTimerStart). */
4350 /*
4351 * Try bump up the system timer resolution.
4352 * The more interrupts the better...
4353 */
4354 if ( RT_SUCCESS(RTTimerRequestSystemGranularity( 488281 /* 2048 HZ */, &u32SystemResolution))
4355 || RT_SUCCESS(RTTimerRequestSystemGranularity( 500000 /* 2000 HZ */, &u32SystemResolution))
4356 || RT_SUCCESS(RTTimerRequestSystemGranularity( 976563 /* 1024 HZ */, &u32SystemResolution))
4357 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
4358 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1953125 /* 512 HZ */, &u32SystemResolution))
4359 || RT_SUCCESS(RTTimerRequestSystemGranularity( 2000000 /* 500 HZ */, &u32SystemResolution))
4360 || RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /* 256 HZ */, &u32SystemResolution))
4361 || RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /* 250 HZ */, &u32SystemResolution))
4362 || RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /* 128 HZ */, &u32SystemResolution))
4363 || RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /* 100 HZ */, &u32SystemResolution))
4364 || RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /* 64 HZ */, &u32SystemResolution))
4365 || RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /* 32 HZ */, &u32SystemResolution))
4366 )
4367 {
4368 Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
4369 pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
4370 }
4371#endif
4372
4373 /*
4374 * Find a reasonable update interval and initialize the structure.
4375 */
4376 u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
4377 while (u32Interval < 10000000 /* 10 ms */)
4378 u32Interval += u32SystemResolution;
4379
4380 supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);
4381
4382 /*
4383 * Create the timer.
4384 * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
4385 */
4386 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4387 {
4388 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL, supdrvGipAsyncTimer, pDevExt);
4389 if (rc == VERR_NOT_SUPPORTED)
4390 {
4391 OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
4392 pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
4393 }
4394 }
4395 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4396 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipSyncTimer, pDevExt);
4397 if (RT_SUCCESS(rc))
4398 {
4399 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4400 rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
4401 if (RT_SUCCESS(rc))
4402 {
4403 /*
4404 * We're good.
4405 */
4406 dprintf(("supdrvGipCreate: %ld ns interval.\n", (long)u32Interval));
4407 return VINF_SUCCESS;
4408 }
4409
4410 OSDBGPRINT(("supdrvGipCreate: failed to register MP event notification. rc=%d\n", rc));
4411 }
4412 else
4413 {
4414 OSDBGPRINT(("supdrvGipCreate: failed to create GIP timer at %ld ns interval. rc=%d\n", (long)u32Interval, rc));
4415 Assert(!pDevExt->pGipTimer);
4416 }
4417 supdrvGipDestroy(pDevExt);
4418 return rc;
4419}
4420
4421
4422/**
4423 * Terminates the GIP.
4424 *
4425 * @param pDevExt Instance data. GIP stuff may be updated.
4426 */
4427static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
4428{
4429 int rc;
4430#ifdef DEBUG_DARWIN_GIP
4431 OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
4432 pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
4433 pDevExt->pGipTimer, pDevExt->GipMemObj));
4434#endif
4435
4436 /*
4437 * Invalidate the GIP data.
4438 */
4439 if (pDevExt->pGip)
4440 {
4441 supdrvGipTerm(pDevExt->pGip);
4442 pDevExt->pGip = NULL;
4443 }
4444
4445 /*
4446 * Destroy the timer and free the GIP memory object.
4447 */
4448 if (pDevExt->pGipTimer)
4449 {
4450 rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
4451 pDevExt->pGipTimer = NULL;
4452 }
4453
4454 if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
4455 {
4456 rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
4457 pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
4458 }
4459
4460 /*
4461 * Finally, release the system timer resolution request if one succeeded.
4462 */
4463 if (pDevExt->u32SystemTimerGranularityGrant)
4464 {
4465 rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
4466 pDevExt->u32SystemTimerGranularityGrant = 0;
4467 }
4468}
4469
4470
4471/**
4472 * Timer callback function sync GIP mode.
4473 * @param pTimer The timer.
4474 * @param pvUser The device extension.
4475 */
4476static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4477{
4478 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4479 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4480
4481 supdrvGipUpdate(pDevExt->pGip, RTTimeSystemNanoTS());
4482
4483 ASMSetFlags(fOldFlags);
4484}
4485
4486
4487/**
4488 * Timer callback function for async GIP mode.
4489 * @param pTimer The timer.
4490 * @param pvUser The device extension.
4491 */
4492static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4493{
4494 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4495 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4496 RTCPUID idCpu = RTMpCpuId();
4497 uint64_t NanoTS = RTTimeSystemNanoTS();
4498
4499 /** @todo reset the transaction number and whatnot when iTick == 1. */
4500 if (pDevExt->idGipMaster == idCpu)
4501 supdrvGipUpdate(pDevExt->pGip, NanoTS);
4502 else
4503 supdrvGipUpdatePerCpu(pDevExt->pGip, NanoTS, ASMGetApicId());
4504
4505 ASMSetFlags(fOldFlags);
4506}
4507
4508
4509/**
4510 * Multiprocessor event notification callback.
4511 *
4512 * This is used to make sure that the GIP master gets passed on to
4513 * another CPU.
4514 *
4515 * @param enmEvent The event.
4516 * @param idCpu The cpu it applies to.
4517 * @param pvUser Pointer to the device extension.
4518 */
4519static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
4520{
4521 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4522 if (enmEvent == RTMPEVENT_OFFLINE)
4523 {
4524 RTCPUID idGipMaster;
4525 ASMAtomicReadSize(&pDevExt->idGipMaster, &idGipMaster);
4526 if (idGipMaster == idCpu)
4527 {
4528 /*
4529 * Find a new GIP master.
4530 */
4531 bool fIgnored;
4532 unsigned i;
4533 RTCPUID idNewGipMaster = NIL_RTCPUID;
4534 RTCPUSET OnlineCpus;
4535 RTMpGetOnlineSet(&OnlineCpus);
4536
4537 for (i = 0; i < RTCPUSET_MAX_CPUS; i++)
4538 {
4539 RTCPUID idCurCpu = RTMpCpuIdFromSetIndex(i);
4540 if ( RTCpuSetIsMember(&OnlineCpus, idCurCpu)
4541 && idCurCpu != idGipMaster)
4542 {
4543 idNewGipMaster = idCurCpu;
4544 break;
4545 }
4546 }
4547
4548 dprintf(("supdrvGipMpEvent: Gip master %#lx -> %#lx\n", (long)idGipMaster, (long)idNewGipMaster));
4549 ASMAtomicCmpXchgSize(&pDevExt->idGipMaster, idNewGipMaster, idGipMaster, fIgnored);
4550 NOREF(fIgnored);
4551 }
4552 }
4553}
4554
4555
4556/**
4557 * Initializes the GIP data.
4558 *
4559 * @returns IPRT status code.
4560 * @param pDevExt Pointer to the device instance data.
4561 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4562 * @param HCPhys The physical address of the GIP.
4563 * @param u64NanoTS The current nanosecond timestamp.
4564 * @param uUpdateHz The update frequency.
4565 */
4566int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
4567{
4568 unsigned i;
4569#ifdef DEBUG_DARWIN_GIP
4570 OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4571#else
4572 LogFlow(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4573#endif
4574
4575 /*
4576 * Initialize the structure.
4577 */
4578 memset(pGip, 0, PAGE_SIZE);
4579 pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
4580 pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
4581 pGip->u32Mode = supdrvGipDeterminTscMode(pDevExt);
4582 pGip->u32UpdateHz = uUpdateHz;
4583 pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
4584 pGip->u64NanoTSLastUpdateHz = u64NanoTS;
4585
4586 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4587 {
4588 pGip->aCPUs[i].u32TransactionId = 2;
4589 pGip->aCPUs[i].u64NanoTS = u64NanoTS;
4590 pGip->aCPUs[i].u64TSC = ASMReadTSC();
4591
4592 /*
4593 * We don't know the following values until we've executed updates.
4594 * So, we'll just insert very high values.
4595 */
4596 pGip->aCPUs[i].u64CpuHz = _4G + 1;
4597 pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
4598 pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
4599 pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
4600 pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
4601 pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
4602 pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
4603 pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
4604 pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
4605 pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
4606 }
4607
4608 /*
4609 * Link it to the device extension.
4610 */
4611 pDevExt->pGip = pGip;
4612 pDevExt->HCPhysGip = HCPhys;
4613 pDevExt->cGipUsers = 0;
4614
4615 return VINF_SUCCESS;
4616}
4617
4618
4619/**
4620 * Callback used by supdrvDetermineAsyncTSC to read the TSC on a CPU.
4621 *
4622 * @param idCpu Ignored.
4623 * @param pvUser1 Where to put the TSC.
4624 * @param pvUser2 Ignored.
4625 */
4626static DECLCALLBACK(void) supdrvDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
4627{
4628#if 1
4629 ASMAtomicWriteU64((uint64_t volatile *)pvUser1, ASMReadTSC());
4630#else
4631 *(uint64_t *)pvUser1 = ASMReadTSC();
4632#endif
4633}
4634
4635
4636/**
4637 * Determine if Async GIP mode is required because of TSC drift.
4638 *
4639 * When using the default/normal timer code it is essential that the time stamp counter
4640 * (TSC) never runs backwards, that is, a read operation to the counter should return
4641 * a bigger value than any previous read operation. This is guaranteed by the latest
4642 * AMD CPUs and by newer Intel CPUs which never enter the C2 state (P4). In any other
4643 * case we have to choose the asynchronous timer mode.
4644 *
4645 * @param poffMin Pointer to the determined difference between different cores.
4646 * @return false if the time stamp counters appear to be synchronized, true otherwise.
4647 */
4648bool VBOXCALL supdrvDetermineAsyncTsc(uint64_t *poffMin)
4649{
4650 /*
4651 * Just iterate all the cpus 8 times and make sure that the TSC is
4652 * ever increasing. We don't bother taking TSC rollover into account.
4653 */
4654 RTCPUSET CpuSet;
4655 int iLastCpu = RTCpuLastIndex(RTMpGetSet(&CpuSet));
4656 int iCpu;
4657 int cLoops = 8;
4658 bool fAsync = false;
4659 int rc = VINF_SUCCESS;
4660 uint64_t offMax = 0;
4661 uint64_t offMin = ~(uint64_t)0;
4662 uint64_t PrevTsc = ASMReadTSC();
4663
4664 while (cLoops-- > 0)
4665 {
4666 for (iCpu = 0; iCpu <= iLastCpu; iCpu++)
4667 {
4668 uint64_t CurTsc;
4669 rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(iCpu), supdrvDetermineAsyncTscWorker, &CurTsc, NULL);
4670 if (RT_SUCCESS(rc))
4671 {
4672 if (CurTsc <= PrevTsc)
4673 {
4674 fAsync = true;
4675 offMin = offMax = PrevTsc - CurTsc;
4676 dprintf(("supdrvDetermineAsyncTsc: iCpu=%d cLoops=%d CurTsc=%llx PrevTsc=%llx\n",
4677 iCpu, cLoops, CurTsc, PrevTsc));
4678 break;
4679 }
4680
4681 /* Gather statistics (except the first time). */
4682 if (iCpu != 0 || cLoops != 7)
4683 {
4684 uint64_t off = CurTsc - PrevTsc;
4685 if (off < offMin)
4686 offMin = off;
4687 if (off > offMax)
4688 offMax = off;
4689 dprintf2(("%d/%d: off=%llx\n", cLoops, iCpu, off));
4690 }
4691
4692 /* Next */
4693 PrevTsc = CurTsc;
4694 }
4695 else if (rc == VERR_NOT_SUPPORTED)
4696 break;
4697 else
4698 AssertMsg(rc == VERR_CPU_NOT_FOUND || rc == VERR_CPU_OFFLINE, ("%d\n", rc));
4699 }
4700
4701 /* broke out of the loop. */
4702 if (iCpu <= iLastCpu)
4703 break;
4704 }
4705
4706 *poffMin = offMin; /* Almost RTMpOnSpecific profiling. */
4707 dprintf(("supdrvDetermineAsyncTsc: returns %d; iLastCpu=%d rc=%d offMin=%llx offMax=%llx\n",
4708 fAsync, iLastCpu, rc, offMin, offMax));
4709#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_OS2) && !defined(RT_OS_WINDOWS)
4710 OSDBGPRINT(("vboxdrv: fAsync=%d offMin=%#lx offMax=%#lx\n", fAsync, (long)offMin, (long)offMax));
4711#endif
4712 return fAsync;
4713}
4714
4715
4716/**
4717 * Determine the GIP TSC mode.
4718 *
4719 * @returns The most suitable TSC mode.
4720 * @param pDevExt Pointer to the device instance data.
4721 */
4722static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt)
4723{
4724 /*
4725 * On SMP we're faced with two problems:
4726 * (1) There might be a skew between the CPUs, so that cpu0
4727 * returns a TSC that is slightly different from cpu1.
4728 * (2) Power management (and other things) may cause the TSC
4729 * to run at a non-constant speed, and cause the speed
4730 * to be different on the cpus. This will result in (1).
4731 *
4732 * So, on SMP systems we'll have to select the ASYNC update method
4733 * if there are symptoms of these problems.
4734 */
4735 if (RTMpGetCount() > 1)
4736 {
4737 uint32_t uEAX, uEBX, uECX, uEDX;
4738 uint64_t u64DiffCoresIgnored;
4739
4740 /* Permit the user and/or the OS specific bits to force async mode. */
4741 if (supdrvOSGetForcedAsyncTscMode(pDevExt))
4742 return SUPGIPMODE_ASYNC_TSC;
4743
4744 /* Try check for current differences between the cpus. */
4745 if (supdrvDetermineAsyncTsc(&u64DiffCoresIgnored))
4746 return SUPGIPMODE_ASYNC_TSC;
4747
4748 /*
4749 * If the CPU supports power management and is an AMD one we
4750 * won't trust it unless it has the TscInvariant bit set.
4751 */
4752 /* Check for "AuthenticAMD" */
4753 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
4754 if ( uEAX >= 1
4755 && uEBX == X86_CPUID_VENDOR_AMD_EBX
4756 && uECX == X86_CPUID_VENDOR_AMD_ECX
4757 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
4758 {
4759 /* Check for APM support and that TscInvariant is cleared. */
4760 ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
4761 if (uEAX >= 0x80000007)
4762 {
4763 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
4764 if ( !(uEDX & RT_BIT(8))/* TscInvariant */
4765 && (uEDX & 0x3e)) /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
4766 return SUPGIPMODE_ASYNC_TSC;
4767 }
4768 }
4769 }
4770 return SUPGIPMODE_SYNC_TSC;
4771}
4772
4773
4774/**
4775 * Invalidates the GIP data upon termination.
4776 *
4777 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4778 */
4779void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
4780{
4781 unsigned i;
4782 pGip->u32Magic = 0;
4783 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4784 {
4785 pGip->aCPUs[i].u64NanoTS = 0;
4786 pGip->aCPUs[i].u64TSC = 0;
4787 pGip->aCPUs[i].iTSCHistoryHead = 0;
4788 }
4789}
4790
4791
4792/**
4793 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
4794 * updates all the per cpu data except the transaction id.
4795 *
4796 * @param pGip The GIP.
4797 * @param pGipCpu Pointer to the per cpu data.
4798 * @param u64NanoTS The current time stamp.
4799 */
4800static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS)
4801{
4802 uint64_t u64TSC;
4803 uint64_t u64TSCDelta;
4804 uint32_t u32UpdateIntervalTSC;
4805 uint32_t u32UpdateIntervalTSCSlack;
4806 unsigned iTSCHistoryHead;
4807 uint64_t u64CpuHz;
4808
4809 /*
4810 * Update the NanoTS.
4811 */
4812 ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);
4813
4814 /*
4815 * Calc TSC delta.
4816 */
4817 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
4818 u64TSC = ASMReadTSC();
4819 u64TSCDelta = u64TSC - pGipCpu->u64TSC;
4820 ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);
4821
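 /* A delta that doesn't fit in 32 bits means the TSC made a huge jump (or went
    backwards, which wraps to a huge unsigned value); in that case reuse the
    previous update interval and count it as an error. */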
4822 if (u64TSCDelta >> 32)
4823 {
4824 u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
4825 pGipCpu->cErrors++;
4826 }
4827
4828 /*
4829 * TSC History.
4830 */
4831 Assert(RT_ELEMENTS(pGipCpu->au32TSCHistory) == 8);
4832
4833 iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
4834 ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
4835 ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
4836
4837 /*
4838 * UpdateIntervalTSC = average of the last 8, 2 or 1 intervals depending on the update Hz.
4839 */
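 /* For >= 1000 Hz updates the code below effectively computes
        ( (h[0]+h[1]+h[2]+h[3])/4 + (h[4]+h[5]+h[6]+h[7])/4 ) / 2
    over the 8 entry history; the lower rates use the last 2 intervals or just
    the current one. */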
4840 if (pGip->u32UpdateHz >= 1000)
4841 {
4842 uint32_t u32;
4843 u32 = pGipCpu->au32TSCHistory[0];
4844 u32 += pGipCpu->au32TSCHistory[1];
4845 u32 += pGipCpu->au32TSCHistory[2];
4846 u32 += pGipCpu->au32TSCHistory[3];
4847 u32 >>= 2;
4848 u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
4849 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
4850 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
4851 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
4852 u32UpdateIntervalTSC >>= 2;
4853 u32UpdateIntervalTSC += u32;
4854 u32UpdateIntervalTSC >>= 1;
4855
4856 /* Value chosen for a 2GHz Athlon64 running Linux 2.6.10/11. */
4857 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
4858 }
4859 else if (pGip->u32UpdateHz >= 90)
4860 {
4861 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4862 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
4863 u32UpdateIntervalTSC >>= 1;
4864
4865 /* Value chosen on a 2GHz ThinkPad running Windows. */
4866 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
4867 }
4868 else
4869 {
4870 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4871
4872 /* This value hasn't been checked yet... waiting for OS/2 and 33Hz timers. :-) */
4873 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
4874 }
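 /* The slack added below is roughly 1/16384, 1/128 and 1/64 of the interval
    for the three cases above (about 0.006%, 0.8% and 1.6% respectively). */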
4875 ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
4876
4877 /*
4878 * CpuHz.
4879 */
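 /* TSC ticks per update interval multiplied by update intervals per second
    gives the estimated CPU frequency in Hz. */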
4880 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
4881 ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
4882}
4883
4884
4885/**
4886 * Updates the GIP.
4887 *
4888 * @param pGip Pointer to the GIP.
4889 * @param u64NanoTS The current nanosecond timestamp.
4890 */
4891void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS)
4892{
4893 /*
4894 * Determine the relevant CPU data.
4895 */
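 /* In SYNC TSC mode all CPUs share the first entry; in ASYNC mode each CPU
    updates its own entry, selected by APIC id. */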
4896 PSUPGIPCPU pGipCpu;
4897 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4898 pGipCpu = &pGip->aCPUs[0];
4899 else
4900 {
4901 unsigned iCpu = ASMGetApicId();
4902 if (RT_UNLIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
4903 return;
4904 pGipCpu = &pGip->aCPUs[iCpu];
4905 }
4906
4907 /*
4908 * Start update transaction.
4909 */
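 /* The transaction id works like a sequence lock: it is odd while an update
    is in progress and even when the data is stable.  A reader would typically
    do something along these lines (a sketch only, not the actual SUPLib code):

        do
        {
            u32TransactionId = pGipCpu->u32TransactionId;
            ASMCompilerBarrier();
            u64NanoTS = pGipCpu->u64NanoTS;
            u64TSC    = pGipCpu->u64TSC;
            ASMCompilerBarrier();
        } while (   pGipCpu->u32TransactionId != u32TransactionId
                 || (u32TransactionId & 1));
  */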
4910 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4911 {
4912 /* This can happen on win32 if we're taking too long and there are more CPUs around. Shouldn't happen though. */
4913 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4914 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4915 pGipCpu->cErrors++;
4916 return;
4917 }
4918
4919 /*
4920 * Recalc the update frequency every 0x800th time.
4921 */
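 /* The transaction id advances by two per update, so with
    GIP_UPDATEHZ_RECALC_FREQ = 0x800 the mask below (0xffe) matches once
    every 0x800 updates. */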
4922 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4923 {
4924 if (pGip->u64NanoTSLastUpdateHz)
4925 {
4926#ifdef RT_ARCH_AMD64 /** @todo fix 64-bit div here to work on x86 linux. */
4927 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4928 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
4929 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4930 {
4931 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4932 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4933 }
4934#endif
4935 }
4936 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4937 }
4938
4939 /*
4940 * Update the data.
4941 */
4942 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4943
4944 /*
4945 * Complete transaction.
4946 */
4947 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4948}
4949
4950
4951/**
4952 * Updates the per cpu GIP data for the calling cpu.
4953 *
4954 * @param pGip Pointer to the GIP.
4955 * @param u64NanoTS The current nanosecond timestamp.
4956 * @param iCpu The CPU index.
4957 */
4958void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, unsigned iCpu)
4959{
4960 PSUPGIPCPU pGipCpu;
4961
4962 if (RT_LIKELY(iCpu < RT_ELEMENTS(pGip->aCPUs)))
4963 {
4964 pGipCpu = &pGip->aCPUs[iCpu];
4965
4966 /*
4967 * Start update transaction.
4968 */
4969 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4970 {
4971 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4972 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4973 pGipCpu->cErrors++;
4974 return;
4975 }
4976
4977 /*
4978 * Update the data.
4979 */
4980 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4981
4982 /*
4983 * Complete transaction.
4984 */
4985 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4986 }
4987}
4988