VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.c@20554

Last change on this file since 20554 was 20528, checked in by vboxsync, 16 years ago

SUP: SUPR0PageProtect - for creating guard (hyper) heap/stack pages.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 211.2 KB
1/* $Revision: 20528 $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#define LOG_GROUP LOG_GROUP_SUP_DRV
35#include "SUPDrvInternal.h"
36#ifndef PAGE_SHIFT
37# include <iprt/param.h>
38#endif
39#include <iprt/alloc.h>
40#include <iprt/cpuset.h>
41#include <iprt/handletable.h>
42#include <iprt/mp.h>
43#include <iprt/power.h>
44#include <iprt/process.h>
45#include <iprt/semaphore.h>
46#include <iprt/spinlock.h>
47#include <iprt/thread.h>
48#include <iprt/uuid.h>
49#include <VBox/param.h>
50#include <VBox/log.h>
51#include <VBox/err.h>
52#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
53# include <iprt/crc32.h>
54# include <iprt/net.h>
55# include <iprt/string.h>
56#endif
57/* VBox/x86.h is not compatible with the Linux kernel sources */
58#ifdef RT_OS_LINUX
59# define X86_CPUID_VENDOR_AMD_EBX 0x68747541
60# define X86_CPUID_VENDOR_AMD_ECX 0x444d4163
61# define X86_CPUID_VENDOR_AMD_EDX 0x69746e65
62#else
63# include <VBox/x86.h>
64#endif
65
66/*
67 * Logging assignments:
68 * Log - useful stuff, like failures.
69 * LogFlow - program flow, except the really noisy bits.
70 * Log2 - Cleanup.
71 * Log3 - Loader flow noise.
72 * Log4 - Call VMMR0 flow noise.
73 * Log5 - Native yet-to-be-defined noise.
74 * Log6 - Native ioctl flow noise.
75 *
76 * Logging requires BUILD_TYPE=debug and possibly changes to the logger
77 * instantiation in log-vbox.c(pp).
78 */
79
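/*
 * Illustrative sketch (not part of the original file): how the level
 * assignments documented above would typically be applied. The helper name
 * and the messages are hypothetical; only the choice of Log macro per kind
 * of message follows the convention described in this file.
 */
#if 0 /* example only */
static int supdrvExampleLogLevels(PSUPDRVSESSION pSession)
{
    int rc = VINF_SUCCESS;
    LogFlow(("supdrvExampleLogLevels: pSession=%p\n", pSession)); /* program flow */
    Log4(("supdrvExampleLogLevels: calling into VMMR0\n"));       /* Call VMMR0 flow noise */
    if (RT_FAILURE(rc))
        Log(("supdrvExampleLogLevels: failed, rc=%d\n", rc));     /* useful stuff, like failures */
    Log2(("supdrvExampleLogLevels: cleaning up\n"));              /* cleanup */
    return rc;
}
#endif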
80
81/*******************************************************************************
82* Defined Constants And Macros *
83*******************************************************************************/
84/* from x86.h - clashes with linux thus this duplication */
85#undef X86_CR0_PG
86#define X86_CR0_PG RT_BIT(31)
87#undef X86_CR0_PE
88#define X86_CR0_PE RT_BIT(0)
89#undef X86_CPUID_AMD_FEATURE_EDX_NX
90#define X86_CPUID_AMD_FEATURE_EDX_NX RT_BIT(20)
91#undef MSR_K6_EFER
92#define MSR_K6_EFER 0xc0000080
93#undef MSR_K6_EFER_NXE
94#define MSR_K6_EFER_NXE RT_BIT(11)
95#undef MSR_K6_EFER_LMA
96#define MSR_K6_EFER_LMA RT_BIT(10)
97#undef X86_CR4_PGE
98#define X86_CR4_PGE RT_BIT(7)
99#undef X86_CR4_PAE
100#define X86_CR4_PAE RT_BIT(5)
101#undef X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
102#define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE RT_BIT(29)
103
104
105/** The frequency by which we recalculate the u32UpdateHz and
106 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
107#define GIP_UPDATEHZ_RECALC_FREQ 0x800
108
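/*
 * Illustrative note (not part of the original file): because the value above
 * is a power of two, "is this the Nth tick?" can be tested with a cheap mask
 * instead of a division, e.g.
 *      if (!(iTick & (GIP_UPDATEHZ_RECALC_FREQ - 1)))  // every 0x800th tick
 *          ...recalculate u32UpdateHz / u32UpdateIntervalNS...
 * where iTick stands for the tick counter handed to the GIP timer callbacks;
 * the actual recalculation lives in the GIP code elsewhere in this file.
 */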
109/**
110 * Validates a session pointer.
111 *
112 * @returns true/false accordingly.
113 * @param pSession The session.
114 */
115#define SUP_IS_SESSION_VALID(pSession) \
116 ( VALID_PTR(pSession) \
117 && pSession->u32Cookie == BIRD_INV)
118
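/*
 * Illustrative sketch (not part of the original file): how an entry point
 * would typically use SUP_IS_SESSION_VALID to reject bad callers. The
 * function below is hypothetical; AssertReturn and VERR_INVALID_PARAMETER
 * are the usual IPRT/VBox idioms for this kind of guard.
 */
#if 0 /* example only */
static int supdrvExampleValidateCaller(PSUPDRVSESSION pSession)
{
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    /* ... the real work would go here ... */
    return VINF_SUCCESS;
}
#endif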
119/** @def VBOX_SVN_REV
120 * The makefile should define this if it can. */
121#ifndef VBOX_SVN_REV
122# define VBOX_SVN_REV 0
123#endif
124
125/*******************************************************************************
126* Internal Functions *
127*******************************************************************************/
128static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
129static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
130static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
131static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
132static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
133static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
134static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
135static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
136static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
137static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
138static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
139static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
140static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
141static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
142static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq);
143static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt);
144#ifdef RT_OS_WINDOWS
145static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
146static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3);
147#endif /* RT_OS_WINDOWS */
148static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
149static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
150static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
151static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
152static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
153
154#ifdef RT_WITH_W64_UNWIND_HACK
155DECLASM(int) supdrvNtWrapVMMR0EntryEx(PFNRT pfnVMMR0EntryEx, PVM pVM, VMCPUID idCpu, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession);
156DECLASM(int) supdrvNtWrapVMMR0EntryFast(PFNRT pfnVMMR0EntryFast, PVM pVM, VMCPUID idCpu, unsigned uOperation);
157DECLASM(void) supdrvNtWrapObjDestructor(PFNRT pfnDestruction, void *pvObj, void *pvUser1, void *pvUser2);
158DECLASM(void *) supdrvNtWrapQueryFactoryInterface(PFNRT pfnQueryFactoryInterface, struct SUPDRVFACTORY const *pSupDrvFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid);
159DECLASM(int) supdrvNtWrapModuleInit(PFNRT pfnModuleInit);
160DECLASM(void) supdrvNtWrapModuleTerm(PFNRT pfnModuleTerm);
161DECLASM(int) supdrvNtWrapServiceReqHandler(PFNRT pfnServiceReqHandler, PSUPDRVSESSION pSession, uint32_t uOperation, uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr);
162
163DECLASM(int) UNWIND_WRAP(SUPR0ComponentRegisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
164DECLASM(int) UNWIND_WRAP(SUPR0ComponentDeregisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
165DECLASM(int) UNWIND_WRAP(SUPR0ComponentQueryFactory)(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf);
166DECLASM(void *) UNWIND_WRAP(SUPR0ObjRegister)(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2);
167DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRef)(void *pvObj, PSUPDRVSESSION pSession);
168DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRefEx)(void *pvObj, PSUPDRVSESSION pSession, bool fNoPreempt);
169DECLASM(int) UNWIND_WRAP(SUPR0ObjRelease)(void *pvObj, PSUPDRVSESSION pSession);
170DECLASM(int) UNWIND_WRAP(SUPR0ObjVerifyAccess)(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName);
171DECLASM(int) UNWIND_WRAP(SUPR0LockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
172DECLASM(int) UNWIND_WRAP(SUPR0UnlockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
173DECLASM(int) UNWIND_WRAP(SUPR0ContAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys);
174DECLASM(int) UNWIND_WRAP(SUPR0ContFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
175DECLASM(int) UNWIND_WRAP(SUPR0LowAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages);
176DECLASM(int) UNWIND_WRAP(SUPR0LowFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
177DECLASM(int) UNWIND_WRAP(SUPR0MemAlloc)(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3);
178DECLASM(int) UNWIND_WRAP(SUPR0MemGetPhys)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages);
179DECLASM(int) UNWIND_WRAP(SUPR0MemFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
180DECLASM(int) UNWIND_WRAP(SUPR0PageAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages);
181DECLASM(int) UNWIND_WRAP(SUPR0PageFree)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
182//DECLASM(int) UNWIND_WRAP(SUPR0Printf)(const char *pszFormat, ...);
183DECLASM(int) UNWIND_WRAP(SUPSemEventCreate)(PSUPDRVSESSION pSession, PSUPSEMEVENT phEvent);
184DECLASM(int) UNWIND_WRAP(SUPSemEventClose)(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent);
185DECLASM(int) UNWIND_WRAP(SUPSemEventSignal)(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent);
186DECLASM(int) UNWIND_WRAP(SUPSemEventWait)(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent, uint32_t cMillies);
187DECLASM(int) UNWIND_WRAP(SUPSemEventWaitNoResume)(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent, uint32_t cMillies);
188DECLASM(int) UNWIND_WRAP(SUPSemEventMultiCreate)(PSUPDRVSESSION pSession, PSUPSEMEVENTMULTI phEventMulti);
189DECLASM(int) UNWIND_WRAP(SUPSemEventMultiClose)(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti);
190DECLASM(int) UNWIND_WRAP(SUPSemEventMultiSignal)(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti);
191DECLASM(int) UNWIND_WRAP(SUPSemEventMultiReset)(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti);
192DECLASM(int) UNWIND_WRAP(SUPSemEventMultiWait)(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti, uint32_t cMillies);
193DECLASM(int) UNWIND_WRAP(SUPSemEventMultiWaitNoResume)(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti, uint32_t cMillies);
194DECLASM(SUPPAGINGMODE) UNWIND_WRAP(SUPR0GetPagingMode)(void);
195DECLASM(void *) UNWIND_WRAP(RTMemAlloc)(size_t cb) RT_NO_THROW;
196DECLASM(void *) UNWIND_WRAP(RTMemAllocZ)(size_t cb) RT_NO_THROW;
197DECLASM(void) UNWIND_WRAP(RTMemFree)(void *pv) RT_NO_THROW;
198DECLASM(void *) UNWIND_WRAP(RTMemDup)(const void *pvSrc, size_t cb) RT_NO_THROW;
199DECLASM(void *) UNWIND_WRAP(RTMemDupEx)(const void *pvSrc, size_t cbSrc, size_t cbExtra) RT_NO_THROW;
200DECLASM(void *) UNWIND_WRAP(RTMemRealloc)(void *pvOld, size_t cbNew) RT_NO_THROW;
201DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocLow)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
202DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPage)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
203DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhys)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
204DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhysNC)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
205DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocCont)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
206DECLASM(int) UNWIND_WRAP(RTR0MemObjEnterPhys)(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb);
207DECLASM(int) UNWIND_WRAP(RTR0MemObjLockUser)(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process);
208DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernel)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt);
209DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernelEx)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt, size_t offSub, size_t cbSub);
210DECLASM(int) UNWIND_WRAP(RTR0MemObjMapUser)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process);
211DECLASM(int) UNWIND_WRAP(RTR0MemObjProtect)(RTR0MEMOBJ hMemObj, size_t offsub, size_t cbSub, uint32_t fProt);
212/*DECLASM(void *) UNWIND_WRAP(RTR0MemObjAddress)(RTR0MEMOBJ MemObj); - not necessary */
213/*DECLASM(RTR3PTR) UNWIND_WRAP(RTR0MemObjAddressR3)(RTR0MEMOBJ MemObj); - not necessary */
214/*DECLASM(size_t) UNWIND_WRAP(RTR0MemObjSize)(RTR0MEMOBJ MemObj); - not necessary */
215/*DECLASM(bool) UNWIND_WRAP(RTR0MemObjIsMapping)(RTR0MEMOBJ MemObj); - not necessary */
216/*DECLASM(RTHCPHYS) UNWIND_WRAP(RTR0MemObjGetPagePhysAddr)(RTR0MEMOBJ MemObj, size_t iPage); - not necessary */
217DECLASM(int) UNWIND_WRAP(RTR0MemObjFree)(RTR0MEMOBJ MemObj, bool fFreeMappings);
218/* RTProcSelf - not necessary */
219/* RTR0ProcHandleSelf - not necessary */
220DECLASM(int) UNWIND_WRAP(RTSemFastMutexCreate)(PRTSEMFASTMUTEX pMutexSem);
221DECLASM(int) UNWIND_WRAP(RTSemFastMutexDestroy)(RTSEMFASTMUTEX MutexSem);
222DECLASM(int) UNWIND_WRAP(RTSemFastMutexRequest)(RTSEMFASTMUTEX MutexSem);
223DECLASM(int) UNWIND_WRAP(RTSemFastMutexRelease)(RTSEMFASTMUTEX MutexSem);
224DECLASM(int) UNWIND_WRAP(RTSemEventCreate)(PRTSEMEVENT pEventSem);
225DECLASM(int) UNWIND_WRAP(RTSemEventSignal)(RTSEMEVENT EventSem);
226DECLASM(int) UNWIND_WRAP(RTSemEventWait)(RTSEMEVENT EventSem, unsigned cMillies);
227DECLASM(int) UNWIND_WRAP(RTSemEventWaitNoResume)(RTSEMEVENT EventSem, unsigned cMillies);
228DECLASM(int) UNWIND_WRAP(RTSemEventDestroy)(RTSEMEVENT EventSem);
229DECLASM(int) UNWIND_WRAP(RTSemEventMultiCreate)(PRTSEMEVENTMULTI pEventMultiSem);
230DECLASM(int) UNWIND_WRAP(RTSemEventMultiSignal)(RTSEMEVENTMULTI EventMultiSem);
231DECLASM(int) UNWIND_WRAP(RTSemEventMultiReset)(RTSEMEVENTMULTI EventMultiSem);
232DECLASM(int) UNWIND_WRAP(RTSemEventMultiWait)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
233DECLASM(int) UNWIND_WRAP(RTSemEventMultiWaitNoResume)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
234DECLASM(int) UNWIND_WRAP(RTSemEventMultiDestroy)(RTSEMEVENTMULTI EventMultiSem);
235DECLASM(int) UNWIND_WRAP(RTSpinlockCreate)(PRTSPINLOCK pSpinlock);
236DECLASM(int) UNWIND_WRAP(RTSpinlockDestroy)(RTSPINLOCK Spinlock);
237DECLASM(void) UNWIND_WRAP(RTSpinlockAcquire)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
238DECLASM(void) UNWIND_WRAP(RTSpinlockRelease)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
239DECLASM(void) UNWIND_WRAP(RTSpinlockAcquireNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
240DECLASM(void) UNWIND_WRAP(RTSpinlockReleaseNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
241/* RTTimeNanoTS - not necessary */
242/* RTTimeMilliTS - not necessary */
243/* RTTimeSystemNanoTS - not necessary */
244/* RTTimeSystemMilliTS - not necessary */
245/* RTThreadNativeSelf - not necessary */
246DECLASM(int) UNWIND_WRAP(RTThreadSleep)(unsigned cMillies);
247DECLASM(bool) UNWIND_WRAP(RTThreadYield)(void);
248#if 0
249/* RTThreadSelf - not necessary */
250DECLASM(int) UNWIND_WRAP(RTThreadCreate)(PRTTHREAD pThread, PFNRTTHREAD pfnThread, void *pvUser, size_t cbStack,
251 RTTHREADTYPE enmType, unsigned fFlags, const char *pszName);
252DECLASM(RTNATIVETHREAD) UNWIND_WRAP(RTThreadGetNative)(RTTHREAD Thread);
253DECLASM(int) UNWIND_WRAP(RTThreadWait)(RTTHREAD Thread, unsigned cMillies, int *prc);
254DECLASM(int) UNWIND_WRAP(RTThreadWaitNoResume)(RTTHREAD Thread, unsigned cMillies, int *prc);
255DECLASM(const char *) UNWIND_WRAP(RTThreadGetName)(RTTHREAD Thread);
256DECLASM(const char *) UNWIND_WRAP(RTThreadSelfName)(void);
257DECLASM(RTTHREADTYPE) UNWIND_WRAP(RTThreadGetType)(RTTHREAD Thread);
258DECLASM(int) UNWIND_WRAP(RTThreadUserSignal)(RTTHREAD Thread);
259DECLASM(int) UNWIND_WRAP(RTThreadUserReset)(RTTHREAD Thread);
260DECLASM(int) UNWIND_WRAP(RTThreadUserWait)(RTTHREAD Thread, unsigned cMillies);
261DECLASM(int) UNWIND_WRAP(RTThreadUserWaitNoResume)(RTTHREAD Thread, unsigned cMillies);
262#endif
263/* RTThreadPreemptIsEnabled - not necessary */
264/* RTThreadPreemptIsPending - not necessary */
265/* RTThreadPreemptIsPendingTrusty - not necessary */
266/* RTThreadPreemptDisable - not necessary */
267DECLASM(void) UNWIND_WRAP(RTThreadPreemptRestore)(RTTHREADPREEMPTSTATE pState);
268/* RTLogDefaultInstance - a bit of a gamble, but we do not want the overhead! */
269/* RTMpCpuId - not necessary */
270/* RTMpCpuIdFromSetIndex - not necessary */
271/* RTMpCpuIdToSetIndex - not necessary */
272/* RTMpIsCpuPossible - not necessary */
273/* RTMpGetCount - not necessary */
274/* RTMpGetMaxCpuId - not necessary */
275/* RTMpGetOnlineCount - not necessary */
276/* RTMpGetOnlineSet - not necessary */
277/* RTMpGetSet - not necessary */
278/* RTMpIsCpuOnline - not necessary */
279DECLASM(int) UNWIND_WRAP(RTMpIsCpuWorkPending)(void);
280DECLASM(int) UNWIND_WRAP(RTMpOnAll)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
281DECLASM(int) UNWIND_WRAP(RTMpOnOthers)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
282DECLASM(int) UNWIND_WRAP(RTMpOnSpecific)(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
283DECLASM(int) UNWIND_WRAP(RTMpPokeCpu)(RTCPUID idCpu);
284/* RTLogRelDefaultInstance - not necessary. */
285DECLASM(int) UNWIND_WRAP(RTLogSetDefaultInstanceThread)(PRTLOGGER pLogger, uintptr_t uKey);
286/* RTLogLogger - can't wrap this buster. */
287/* RTLogLoggerEx - can't wrap this buster. */
288DECLASM(void) UNWIND_WRAP(RTLogLoggerExV)(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args);
289/* RTLogPrintf - can't wrap this buster. */ /** @todo provide va_list log wrappers in RuntimeR0. */
290DECLASM(void) UNWIND_WRAP(RTLogPrintfV)(const char *pszFormat, va_list args);
291DECLASM(void) UNWIND_WRAP(AssertMsg1)(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction);
292/* AssertMsg2 - can't wrap this buster. */
293#endif /* RT_WITH_W64_UNWIND_HACK */
294
295
296/*******************************************************************************
297* Global Variables *
298*******************************************************************************/
299/**
300 * Array of the R0 SUP API.
301 */
302static SUPFUNC g_aFunctions[] =
303{
304 /* name function */
305 /* Entries with absolute addresses determined at runtime, fixup
306 code makes ugly ASSUMPTIONS about the order here: */
307 { "SUPR0AbsIs64bit", (void *)0 },
308 { "SUPR0Abs64bitKernelCS", (void *)0 },
309 { "SUPR0Abs64bitKernelSS", (void *)0 },
310 { "SUPR0Abs64bitKernelDS", (void *)0 },
311 { "SUPR0AbsKernelCS", (void *)0 },
312 { "SUPR0AbsKernelSS", (void *)0 },
313 { "SUPR0AbsKernelDS", (void *)0 },
314 { "SUPR0AbsKernelES", (void *)0 },
315 { "SUPR0AbsKernelFS", (void *)0 },
316 { "SUPR0AbsKernelGS", (void *)0 },
317 /* Normal function pointers: */
318 { "SUPR0ComponentRegisterFactory", (void *)UNWIND_WRAP(SUPR0ComponentRegisterFactory) },
319 { "SUPR0ComponentDeregisterFactory", (void *)UNWIND_WRAP(SUPR0ComponentDeregisterFactory) },
320 { "SUPR0ComponentQueryFactory", (void *)UNWIND_WRAP(SUPR0ComponentQueryFactory) },
321 { "SUPR0ObjRegister", (void *)UNWIND_WRAP(SUPR0ObjRegister) },
322 { "SUPR0ObjAddRef", (void *)UNWIND_WRAP(SUPR0ObjAddRef) },
323 { "SUPR0ObjAddRefEx", (void *)UNWIND_WRAP(SUPR0ObjAddRefEx) },
324 { "SUPR0ObjRelease", (void *)UNWIND_WRAP(SUPR0ObjRelease) },
325 { "SUPR0ObjVerifyAccess", (void *)UNWIND_WRAP(SUPR0ObjVerifyAccess) },
326 { "SUPR0LockMem", (void *)UNWIND_WRAP(SUPR0LockMem) },
327 { "SUPR0UnlockMem", (void *)UNWIND_WRAP(SUPR0UnlockMem) },
328 { "SUPR0ContAlloc", (void *)UNWIND_WRAP(SUPR0ContAlloc) },
329 { "SUPR0ContFree", (void *)UNWIND_WRAP(SUPR0ContFree) },
330 { "SUPR0LowAlloc", (void *)UNWIND_WRAP(SUPR0LowAlloc) },
331 { "SUPR0LowFree", (void *)UNWIND_WRAP(SUPR0LowFree) },
332 { "SUPR0MemAlloc", (void *)UNWIND_WRAP(SUPR0MemAlloc) },
333 { "SUPR0MemGetPhys", (void *)UNWIND_WRAP(SUPR0MemGetPhys) },
334 { "SUPR0MemFree", (void *)UNWIND_WRAP(SUPR0MemFree) },
335 { "SUPR0PageAlloc", (void *)UNWIND_WRAP(SUPR0PageAlloc) },
336 { "SUPR0PageFree", (void *)UNWIND_WRAP(SUPR0PageFree) },
337 { "SUPR0Printf", (void *)SUPR0Printf }, /** @todo needs wrapping? */
338 { "SUPSemEventCreate", (void *)UNWIND_WRAP(SUPSemEventCreate) },
339 { "SUPSemEventClose", (void *)UNWIND_WRAP(SUPSemEventClose) },
340 { "SUPSemEventSignal", (void *)UNWIND_WRAP(SUPSemEventSignal) },
341 { "SUPSemEventWait", (void *)UNWIND_WRAP(SUPSemEventWait) },
342 { "SUPSemEventWaitNoResume", (void *)UNWIND_WRAP(SUPSemEventWaitNoResume) },
343 { "SUPSemEventMultiCreate", (void *)UNWIND_WRAP(SUPSemEventMultiCreate) },
344 { "SUPSemEventMultiClose", (void *)UNWIND_WRAP(SUPSemEventMultiClose) },
345 { "SUPSemEventMultiSignal", (void *)UNWIND_WRAP(SUPSemEventMultiSignal) },
346 { "SUPSemEventMultiReset", (void *)UNWIND_WRAP(SUPSemEventMultiReset) },
347 { "SUPSemEventMultiWait", (void *)UNWIND_WRAP(SUPSemEventMultiWait) },
348 { "SUPSemEventMultiWaitNoResume", (void *)UNWIND_WRAP(SUPSemEventMultiWaitNoResume) },
349 { "SUPR0GetPagingMode", (void *)UNWIND_WRAP(SUPR0GetPagingMode) },
350 { "SUPR0EnableVTx", (void *)SUPR0EnableVTx },
351 { "RTMemAlloc", (void *)UNWIND_WRAP(RTMemAlloc) },
352 { "RTMemAllocZ", (void *)UNWIND_WRAP(RTMemAllocZ) },
353 { "RTMemFree", (void *)UNWIND_WRAP(RTMemFree) },
354 /*{ "RTMemDup", (void *)UNWIND_WRAP(RTMemDup) },
355 { "RTMemDupEx", (void *)UNWIND_WRAP(RTMemDupEx) },*/
356 { "RTMemRealloc", (void *)UNWIND_WRAP(RTMemRealloc) },
357 { "RTR0MemObjAllocLow", (void *)UNWIND_WRAP(RTR0MemObjAllocLow) },
358 { "RTR0MemObjAllocPage", (void *)UNWIND_WRAP(RTR0MemObjAllocPage) },
359 { "RTR0MemObjAllocPhys", (void *)UNWIND_WRAP(RTR0MemObjAllocPhys) },
360 { "RTR0MemObjAllocPhysNC", (void *)UNWIND_WRAP(RTR0MemObjAllocPhysNC) },
361 { "RTR0MemObjAllocCont", (void *)UNWIND_WRAP(RTR0MemObjAllocCont) },
362 { "RTR0MemObjEnterPhys", (void *)UNWIND_WRAP(RTR0MemObjEnterPhys) },
363 { "RTR0MemObjLockUser", (void *)UNWIND_WRAP(RTR0MemObjLockUser) },
364 { "RTR0MemObjMapKernel", (void *)UNWIND_WRAP(RTR0MemObjMapKernel) },
365 { "RTR0MemObjMapKernelEx", (void *)UNWIND_WRAP(RTR0MemObjMapKernelEx) },
366 { "RTR0MemObjMapUser", (void *)UNWIND_WRAP(RTR0MemObjMapUser) },
367 { "RTR0MemObjProtect", (void *)UNWIND_WRAP(RTR0MemObjProtect) },
368 { "RTR0MemObjAddress", (void *)RTR0MemObjAddress },
369 { "RTR0MemObjAddressR3", (void *)RTR0MemObjAddressR3 },
370 { "RTR0MemObjSize", (void *)RTR0MemObjSize },
371 { "RTR0MemObjIsMapping", (void *)RTR0MemObjIsMapping },
372 { "RTR0MemObjGetPagePhysAddr", (void *)RTR0MemObjGetPagePhysAddr },
373 { "RTR0MemObjFree", (void *)UNWIND_WRAP(RTR0MemObjFree) },
374/* These don't work yet on linux - use fast mutexes!
375 { "RTSemMutexCreate", (void *)RTSemMutexCreate },
376 { "RTSemMutexRequest", (void *)RTSemMutexRequest },
377 { "RTSemMutexRelease", (void *)RTSemMutexRelease },
378 { "RTSemMutexDestroy", (void *)RTSemMutexDestroy },
379*/
380 { "RTProcSelf", (void *)RTProcSelf },
381 { "RTR0ProcHandleSelf", (void *)RTR0ProcHandleSelf },
382 { "RTSemFastMutexCreate", (void *)UNWIND_WRAP(RTSemFastMutexCreate) },
383 { "RTSemFastMutexDestroy", (void *)UNWIND_WRAP(RTSemFastMutexDestroy) },
384 { "RTSemFastMutexRequest", (void *)UNWIND_WRAP(RTSemFastMutexRequest) },
385 { "RTSemFastMutexRelease", (void *)UNWIND_WRAP(RTSemFastMutexRelease) },
386 { "RTSemEventCreate", (void *)UNWIND_WRAP(RTSemEventCreate) },
387 { "RTSemEventSignal", (void *)UNWIND_WRAP(RTSemEventSignal) },
388 { "RTSemEventWait", (void *)UNWIND_WRAP(RTSemEventWait) },
389 { "RTSemEventWaitNoResume", (void *)UNWIND_WRAP(RTSemEventWaitNoResume) },
390 { "RTSemEventDestroy", (void *)UNWIND_WRAP(RTSemEventDestroy) },
391 { "RTSemEventMultiCreate", (void *)UNWIND_WRAP(RTSemEventMultiCreate) },
392 { "RTSemEventMultiSignal", (void *)UNWIND_WRAP(RTSemEventMultiSignal) },
393 { "RTSemEventMultiReset", (void *)UNWIND_WRAP(RTSemEventMultiReset) },
394 { "RTSemEventMultiWait", (void *)UNWIND_WRAP(RTSemEventMultiWait) },
395 { "RTSemEventMultiWaitNoResume", (void *)UNWIND_WRAP(RTSemEventMultiWaitNoResume) },
396 { "RTSemEventMultiDestroy", (void *)UNWIND_WRAP(RTSemEventMultiDestroy) },
397 { "RTSpinlockCreate", (void *)UNWIND_WRAP(RTSpinlockCreate) },
398 { "RTSpinlockDestroy", (void *)UNWIND_WRAP(RTSpinlockDestroy) },
399 { "RTSpinlockAcquire", (void *)UNWIND_WRAP(RTSpinlockAcquire) },
400 { "RTSpinlockRelease", (void *)UNWIND_WRAP(RTSpinlockRelease) },
401 { "RTSpinlockAcquireNoInts", (void *)UNWIND_WRAP(RTSpinlockAcquireNoInts) },
402 { "RTSpinlockReleaseNoInts", (void *)UNWIND_WRAP(RTSpinlockReleaseNoInts) },
403 { "RTTimeNanoTS", (void *)RTTimeNanoTS },
404 { "RTTimeMilliTS", (void *)RTTimeMilliTS },
405 { "RTTimeSystemNanoTS", (void *)RTTimeSystemNanoTS },
406 { "RTTimeSystemMilliTS", (void *)RTTimeSystemMilliTS },
407 { "RTThreadNativeSelf", (void *)RTThreadNativeSelf },
408 { "RTThreadSleep", (void *)UNWIND_WRAP(RTThreadSleep) },
409 { "RTThreadYield", (void *)UNWIND_WRAP(RTThreadYield) },
410#if 0 /* Thread APIs, Part 2. */
411 { "RTThreadSelf", (void *)UNWIND_WRAP(RTThreadSelf) },
412 { "RTThreadCreate", (void *)UNWIND_WRAP(RTThreadCreate) }, /** @todo need to wrap the callback */
413 { "RTThreadGetNative", (void *)UNWIND_WRAP(RTThreadGetNative) },
414 { "RTThreadWait", (void *)UNWIND_WRAP(RTThreadWait) },
415 { "RTThreadWaitNoResume", (void *)UNWIND_WRAP(RTThreadWaitNoResume) },
416 { "RTThreadGetName", (void *)UNWIND_WRAP(RTThreadGetName) },
417 { "RTThreadSelfName", (void *)UNWIND_WRAP(RTThreadSelfName) },
418 { "RTThreadGetType", (void *)UNWIND_WRAP(RTThreadGetType) },
419 { "RTThreadUserSignal", (void *)UNWIND_WRAP(RTThreadUserSignal) },
420 { "RTThreadUserReset", (void *)UNWIND_WRAP(RTThreadUserReset) },
421 { "RTThreadUserWait", (void *)UNWIND_WRAP(RTThreadUserWait) },
422 { "RTThreadUserWaitNoResume", (void *)UNWIND_WRAP(RTThreadUserWaitNoResume) },
423#endif
424 { "RTThreadPreemptIsEnabled", (void *)RTThreadPreemptIsEnabled },
425 { "RTThreadPreemptIsPending", (void *)RTThreadPreemptIsPending },
426 { "RTThreadPreemptIsPendingTrusty", (void *)RTThreadPreemptIsPendingTrusty },
427 { "RTThreadPreemptDisable", (void *)RTThreadPreemptDisable },
428 { "RTThreadPreemptRestore", (void *)UNWIND_WRAP(RTThreadPreemptRestore) },
429
430 { "RTLogDefaultInstance", (void *)RTLogDefaultInstance },
431 { "RTMpCpuId", (void *)RTMpCpuId },
432 { "RTMpCpuIdFromSetIndex", (void *)RTMpCpuIdFromSetIndex },
433 { "RTMpCpuIdToSetIndex", (void *)RTMpCpuIdToSetIndex },
434 { "RTMpIsCpuPossible", (void *)RTMpIsCpuPossible },
435 { "RTMpGetCount", (void *)RTMpGetCount },
436 { "RTMpGetMaxCpuId", (void *)RTMpGetMaxCpuId },
437 { "RTMpGetOnlineCount", (void *)RTMpGetOnlineCount },
438 { "RTMpGetOnlineSet", (void *)RTMpGetOnlineSet },
439 { "RTMpGetSet", (void *)RTMpGetSet },
440 { "RTMpIsCpuOnline", (void *)RTMpIsCpuOnline },
441 { "RTMpIsCpuWorkPending", (void *)UNWIND_WRAP(RTMpIsCpuWorkPending) },
442 { "RTMpOnAll", (void *)UNWIND_WRAP(RTMpOnAll) },
443 { "RTMpOnOthers", (void *)UNWIND_WRAP(RTMpOnOthers) },
444 { "RTMpOnSpecific", (void *)UNWIND_WRAP(RTMpOnSpecific) },
445 { "RTMpPokeCpu", (void *)UNWIND_WRAP(RTMpPokeCpu) },
446 { "RTPowerNotificationRegister", (void *)RTPowerNotificationRegister },
447 { "RTPowerNotificationDeregister", (void *)RTPowerNotificationDeregister },
448 { "RTLogRelDefaultInstance", (void *)RTLogRelDefaultInstance },
449 { "RTLogSetDefaultInstanceThread", (void *)UNWIND_WRAP(RTLogSetDefaultInstanceThread) },
450 { "RTLogLogger", (void *)RTLogLogger }, /** @todo remove this */
451 { "RTLogLoggerEx", (void *)RTLogLoggerEx }, /** @todo remove this */
452 { "RTLogLoggerExV", (void *)UNWIND_WRAP(RTLogLoggerExV) },
453 { "RTLogPrintf", (void *)RTLogPrintf }, /** @todo remove this */
454 { "RTLogPrintfV", (void *)UNWIND_WRAP(RTLogPrintfV) },
455 { "AssertMsg1", (void *)UNWIND_WRAP(AssertMsg1) },
456 { "AssertMsg2", (void *)AssertMsg2 }, /** @todo replace this by RTAssertMsg2V */
457#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
458 { "RTR0AssertPanicSystem", (void *)RTR0AssertPanicSystem },
459#endif
460#if defined(RT_OS_DARWIN)
461 { "RTAssertMsg1", (void *)RTAssertMsg1 },
462 { "RTAssertMsg2", (void *)RTAssertMsg2 },
463 { "RTAssertMsg2V", (void *)RTAssertMsg2V },
464#endif
465};
466
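/*
 * Illustrative sketch (not part of the original file): how a symbol name
 * requested by a loaded module could be resolved against g_aFunctions above.
 * The helper is hypothetical; the szName member name and the availability of
 * strcmp in this context are assumptions (only .pfn is visible in this file),
 * and the real resolution is done by the loader ioctl code further down.
 */
#if 0 /* example only */
static void *supdrvExampleLookupFunction(const char *pszSymbol)
{
    unsigned i;
    for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
        if (!strcmp(g_aFunctions[i].szName, pszSymbol))
            return (void *)g_aFunctions[i].pfn;
    return NULL;
}
#endif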
467#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
468/**
469 * Drag in the rest of IPRT since we share it with the
470 * rest of the kernel modules on Darwin.
471 */
472PFNRT g_apfnVBoxDrvIPRTDeps[] =
473{
474 (PFNRT)RTCrc32,
475 (PFNRT)RTErrConvertFromErrno,
476 (PFNRT)RTNetIPv4IsHdrValid,
477 (PFNRT)RTNetIPv4TCPChecksum,
478 (PFNRT)RTNetIPv4UDPChecksum,
479 (PFNRT)RTUuidCompare,
480 (PFNRT)RTUuidCompareStr,
481 (PFNRT)RTUuidFromStr,
482 (PFNRT)RTStrDup,
483 (PFNRT)RTStrFree,
484 NULL
485};
486#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
487
488
489/**
490 * Initializes the device extension structure.
491 *
492 * @returns IPRT status code.
493 * @param pDevExt The device extension to initialize.
494 */
495int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
496{
497 int rc;
498
499#ifdef SUPDRV_WITH_RELEASE_LOGGER
500 /*
501 * Create the release log.
502 */
503 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
504 PRTLOGGER pRelLogger;
505 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
506 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
507 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
508 if (RT_SUCCESS(rc))
509 RTLogRelSetDefaultInstance(pRelLogger);
510#endif
511
512 /*
513 * Initialize it.
514 */
515 memset(pDevExt, 0, sizeof(*pDevExt));
516 rc = RTSpinlockCreate(&pDevExt->Spinlock);
517 if (!rc)
518 {
519 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
520 if (!rc)
521 {
522 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
523 if (!rc)
524 {
525 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
526 if (!rc)
527 {
528 rc = supdrvGipCreate(pDevExt);
529 if (RT_SUCCESS(rc))
530 {
531 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
532
533 /*
534 * Fixup the absolute symbols.
535 *
536 * Because of the table indexing assumptions we'll have a little #ifdef orgy
537 * here rather than distributing this to OS specific files. At least for now.
538 */
539#ifdef RT_OS_DARWIN
540# if ARCH_BITS == 32
541 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
542 {
543 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
544 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
545 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
546 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
547 }
548 else
549 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
550 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
551 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
552 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
553 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
554 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
555 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
556# else /* 64-bit darwin: */
557 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
558 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
559 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
560 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
561 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
562 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
563 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
564 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
565 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
566 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
567
568# endif
569#else /* !RT_OS_DARWIN */
570# if ARCH_BITS == 64
571 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
572 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
573 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
574 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
575# else
576 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
577# endif
578 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
579 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
580 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
581 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
582 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
583 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
584#endif /* !RT_OS_DARWIN */
585 return VINF_SUCCESS;
586 }
587
588 RTSemFastMutexDestroy(pDevExt->mtxGip);
589 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
590 }
591 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
592 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
593 }
594 RTSemFastMutexDestroy(pDevExt->mtxLdr);
595 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
596 }
597 RTSpinlockDestroy(pDevExt->Spinlock);
598 pDevExt->Spinlock = NIL_RTSPINLOCK;
599 }
600#ifdef SUPDRV_WITH_RELEASE_LOGGER
601 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
602 RTLogDestroy(RTLogSetDefaultInstance(NULL));
603#endif
604
605 return rc;
606}
607
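/*
 * Illustrative sketch (not part of the original file): the OS specific glue
 * is expected to pair supdrvInitDevExt with supdrvDeleteDevExt roughly like
 * this. The g_DevExt variable and the module init/term function names are
 * hypothetical and only show the calling pattern, not any particular port.
 */
#if 0 /* example only */
static SUPDRVDEVEXT g_DevExt; /* normally owned by the OS specific code */

static int supdrvExampleModuleInit(void)
{
    int rc = supdrvInitDevExt(&g_DevExt);
    if (RT_FAILURE(rc))
        return rc;
    /* ... register the device node / character device with the OS here ... */
    return VINF_SUCCESS;
}

static void supdrvExampleModuleTerm(void)
{
    /* ... unregister the device node first ... */
    supdrvDeleteDevExt(&g_DevExt);
}
#endif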
608
609/**
610 * Delete the device extension (i.e. clean up its members).
611 *
612 * @param pDevExt The device extension to delete.
613 */
614void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
615{
616 PSUPDRVOBJ pObj;
617 PSUPDRVUSAGE pUsage;
618
619 /*
620 * Kill mutexes and spinlocks.
621 */
622 RTSemFastMutexDestroy(pDevExt->mtxGip);
623 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
624 RTSemFastMutexDestroy(pDevExt->mtxLdr);
625 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
626 RTSpinlockDestroy(pDevExt->Spinlock);
627 pDevExt->Spinlock = NIL_RTSPINLOCK;
628 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
629 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
630
631 /*
632 * Free lists.
633 */
634 /* objects. */
635 pObj = pDevExt->pObjs;
636#if !defined(DEBUG_bird) || !defined(RT_OS_LINUX) /* breaks unloading, temporary, remove me! */
637 Assert(!pObj); /* (can trigger on forced unloads) */
638#endif
639 pDevExt->pObjs = NULL;
640 while (pObj)
641 {
642 void *pvFree = pObj;
643 pObj = pObj->pNext;
644 RTMemFree(pvFree);
645 }
646
647 /* usage records. */
648 pUsage = pDevExt->pUsageFree;
649 pDevExt->pUsageFree = NULL;
650 while (pUsage)
651 {
652 void *pvFree = pUsage;
653 pUsage = pUsage->pNext;
654 RTMemFree(pvFree);
655 }
656
657 /* kill the GIP. */
658 supdrvGipDestroy(pDevExt);
659
660#ifdef SUPDRV_WITH_RELEASE_LOGGER
661 /* destroy the loggers. */
662 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
663 RTLogDestroy(RTLogSetDefaultInstance(NULL));
664#endif
665}
666
667
668/**
669 * Create session.
670 *
671 * @returns IPRT status code.
672 * @param pDevExt Device extension.
673 * @param fUser Flag indicating whether this is a user or kernel session.
674 * @param ppSession Where to store the pointer to the session data.
675 */
676int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, PSUPDRVSESSION *ppSession)
677{
678 /*
679 * Allocate memory for the session data.
680 */
681 int rc = VERR_NO_MEMORY;
682 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
683 if (pSession)
684 {
685 /* Initialize session data. */
686 rc = RTSpinlockCreate(&pSession->Spinlock);
687 if (!rc)
688 {
689 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
690 RTHANDLETABLE_FLAGS_LOCKED | RTHANDLETABLE_FLAGS_CONTEXT,
691 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
692 if (RT_SUCCESS(rc))
693 {
694 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
695 pSession->pDevExt = pDevExt;
696 pSession->u32Cookie = BIRD_INV;
697 /*pSession->pLdrUsage = NULL;
698 pSession->pVM = NULL;
699 pSession->pUsage = NULL;
700 pSession->pGip = NULL;
701 pSession->fGipReferenced = false;
702 pSession->Bundle.cUsed = 0; */
703 pSession->Uid = NIL_RTUID;
704 pSession->Gid = NIL_RTGID;
705 if (fUser)
706 {
707 pSession->Process = RTProcSelf();
708 pSession->R0Process = RTR0ProcHandleSelf();
709 }
710 else
711 {
712 pSession->Process = NIL_RTPROCESS;
713 pSession->R0Process = NIL_RTR0PROCESS;
714 }
715
716 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
717 return VINF_SUCCESS;
718 }
719
720 RTSpinlockDestroy(pSession->Spinlock);
721 }
722 RTMemFree(pSession);
723 *ppSession = NULL;
724 Log(("Failed to create spinlock, rc=%d!\n", rc));
725 }
726
727 return rc;
728}
729
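/*
 * Illustrative sketch (not part of the original file): how the OS specific
 * open and close handlers would typically drive supdrvCreateSession and
 * supdrvCloseSession. The wrapper names are hypothetical; fUser=true is what
 * a ring-3 client open would pass so Process/R0Process get filled in.
 */
#if 0 /* example only */
static int supdrvExampleDeviceOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION *ppSession)
{
    return supdrvCreateSession(pDevExt, true /* fUser */, ppSession);
}

static void supdrvExampleDeviceClose(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    supdrvCloseSession(pDevExt, pSession); /* cleans up and frees pSession */
}
#endif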
730
731/**
732 * Shared code for cleaning up a session.
733 *
734 * @param pDevExt Device extension.
735 * @param pSession Session data.
736 * This data will be freed by this routine.
737 */
738void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
739{
740 /*
741 * Cleanup the session first.
742 */
743 supdrvCleanupSession(pDevExt, pSession);
744
745 /*
746 * Free the rest of the session stuff.
747 */
748 RTSpinlockDestroy(pSession->Spinlock);
749 pSession->Spinlock = NIL_RTSPINLOCK;
750 pSession->pDevExt = NULL;
751 RTMemFree(pSession);
752 LogFlow(("supdrvCloseSession: returns\n"));
753}
754
755
756/**
757 * Shared code for cleaning up a session (but not quite freeing it).
758 *
759 * This is primarily intended for Mac OS X where we have to clean up the memory
760 * stuff before the file handle is closed.
761 *
762 * @param pDevExt Device extension.
763 * @param pSession Session data.
764 * This data will be freed by this routine.
765 */
766void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
767{
768 int rc;
769 PSUPDRVBUNDLE pBundle;
770 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
771
772 /*
773 * Remove logger instances related to this session.
774 */
775 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
776
777 /*
778 * Destroy the handle table.
779 */
780 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
781 AssertRC(rc);
782 pSession->hHandleTable = NIL_RTHANDLETABLE;
783
784 /*
785 * Release object references made in this session.
786 * In theory there should be no one racing us in this session.
787 */
788 Log2(("release objects - start\n"));
789 if (pSession->pUsage)
790 {
791 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
792 PSUPDRVUSAGE pUsage;
793 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
794
795 while ((pUsage = pSession->pUsage) != NULL)
796 {
797 PSUPDRVOBJ pObj = pUsage->pObj;
798 pSession->pUsage = pUsage->pNext;
799
800 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
801 if (pUsage->cUsage < pObj->cUsage)
802 {
803 pObj->cUsage -= pUsage->cUsage;
804 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
805 }
806 else
807 {
808 /* Destroy the object and free the record. */
809 if (pDevExt->pObjs == pObj)
810 pDevExt->pObjs = pObj->pNext;
811 else
812 {
813 PSUPDRVOBJ pObjPrev;
814 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
815 if (pObjPrev->pNext == pObj)
816 {
817 pObjPrev->pNext = pObj->pNext;
818 break;
819 }
820 Assert(pObjPrev);
821 }
822 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
823
824 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
825 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
826 if (pObj->pfnDestructor)
827#ifdef RT_WITH_W64_UNWIND_HACK
828 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
829#else
830 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
831#endif
832 RTMemFree(pObj);
833 }
834
835 /* free it and continue. */
836 RTMemFree(pUsage);
837
838 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
839 }
840
841 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
842 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
843 }
844 Log2(("release objects - done\n"));
845
846 /*
847 * Release memory allocated in the session.
848 *
849 * We do not serialize this as we assume that the application will
850 * not allocate memory while closing the file handle object.
851 */
852 Log2(("freeing memory:\n"));
853 pBundle = &pSession->Bundle;
854 while (pBundle)
855 {
856 PSUPDRVBUNDLE pToFree;
857 unsigned i;
858
859 /*
860 * Check and unlock all entries in the bundle.
861 */
862 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
863 {
864 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
865 {
866 int rc;
867 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
868 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
869 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
870 {
871 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
872 AssertRC(rc); /** @todo figure out how to handle this. */
873 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
874 }
875 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
876 AssertRC(rc); /** @todo figure out how to handle this. */
877 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
878 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
879 }
880 }
881
882 /*
883 * Advance and free previous bundle.
884 */
885 pToFree = pBundle;
886 pBundle = pBundle->pNext;
887
888 pToFree->pNext = NULL;
889 pToFree->cUsed = 0;
890 if (pToFree != &pSession->Bundle)
891 RTMemFree(pToFree);
892 }
893 Log2(("freeing memory - done\n"));
894
895 /*
896 * Deregister component factories.
897 */
898 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
899 Log2(("deregistering component factories:\n"));
900 if (pDevExt->pComponentFactoryHead)
901 {
902 PSUPDRVFACTORYREG pPrev = NULL;
903 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
904 while (pCur)
905 {
906 if (pCur->pSession == pSession)
907 {
908 /* unlink it */
909 PSUPDRVFACTORYREG pNext = pCur->pNext;
910 if (pPrev)
911 pPrev->pNext = pNext;
912 else
913 pDevExt->pComponentFactoryHead = pNext;
914
915 /* free it */
916 pCur->pNext = NULL;
917 pCur->pSession = NULL;
918 pCur->pFactory = NULL;
919 RTMemFree(pCur);
920
921 /* next */
922 pCur = pNext;
923 }
924 else
925 {
926 /* next */
927 pPrev = pCur;
928 pCur = pCur->pNext;
929 }
930 }
931 }
932 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
933 Log2(("deregistering component factories - done\n"));
934
935 /*
936 * Loaded images need to be dereferenced and possibly freed up.
937 */
938 RTSemFastMutexRequest(pDevExt->mtxLdr);
939 Log2(("freeing images:\n"));
940 if (pSession->pLdrUsage)
941 {
942 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
943 pSession->pLdrUsage = NULL;
944 while (pUsage)
945 {
946 void *pvFree = pUsage;
947 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
948 if (pImage->cUsage > pUsage->cUsage)
949 pImage->cUsage -= pUsage->cUsage;
950 else
951 supdrvLdrFree(pDevExt, pImage);
952 pUsage->pImage = NULL;
953 pUsage = pUsage->pNext;
954 RTMemFree(pvFree);
955 }
956 }
957 RTSemFastMutexRelease(pDevExt->mtxLdr);
958 Log2(("freeing images - done\n"));
959
960 /*
961 * Unmap the GIP.
962 */
963 Log2(("unmapping GIP:\n"));
964 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
965 {
966 SUPR0GipUnmap(pSession);
967 pSession->fGipReferenced = 0;
968 }
969 Log2(("unmapping GIP - done\n"));
970}
971
972
973/**
974 * RTHandleTableDestroy callback used by supdrvCleanupSession.
975 *
976 * @returns IPRT status code, see SUPR0ObjAddRef.
977 * @param hHandleTable The handle table handle. Ignored.
978 * @param pvObj The object pointer.
979 * @param pvCtx Context, the handle type. Ignored.
980 * @param pvUser Session pointer.
981 */
982static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
983{
984 NOREF(pvCtx);
985 NOREF(hHandleTable);
986 return SUPR0ObjAddRef(pvObj, (PSUPDRVSESSION)pvUser);
987}
988
989
990/**
991 * RTHandleTableDestroy callback used by supdrvCleanupSession.
992 *
993 * @param hHandleTable The handle table handle. Ignored.
994 * @param h The handle value. Ignored.
995 * @param pvObj The object pointer.
996 * @param pvCtx Context, the handle type. Ignored.
997 * @param pvUser Session pointer.
998 */
999static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
1000{
1001 NOREF(pvCtx);
1002 NOREF(h);
1003 NOREF(hHandleTable);
1004 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
1005}
1006
1007
1008/**
1009 * Fast path I/O Control worker.
1010 *
1011 * @returns VBox status code that should be passed down to ring-3 unchanged.
1012 * @param uIOCtl Function number.
1013 * @param idCpu VMCPU id.
1014 * @param pDevExt Device extension.
1015 * @param pSession Session data.
1016 */
1017int VBOXCALL supdrvIOCtlFast(uintptr_t uIOCtl, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1018{
1019 /*
1020 * We check the two prereqs after doing this only to allow the compiler to optimize things better.
1021 */
1022 if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
1023 {
1024 switch (uIOCtl)
1025 {
1026 case SUP_IOCTL_FAST_DO_RAW_RUN:
1027#ifdef RT_WITH_W64_UNWIND_HACK
1028 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
1029#else
1030 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
1031#endif
1032 break;
1033 case SUP_IOCTL_FAST_DO_HWACC_RUN:
1034#ifdef RT_WITH_W64_UNWIND_HACK
1035 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
1036#else
1037 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
1038#endif
1039 break;
1040 case SUP_IOCTL_FAST_DO_NOP:
1041#ifdef RT_WITH_W64_UNWIND_HACK
1042 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
1043#else
1044 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
1045#endif
1046 break;
1047 default:
1048 return VERR_INTERNAL_ERROR;
1049 }
1050 return VINF_SUCCESS;
1051 }
1052 return VERR_INTERNAL_ERROR;
1053}
1054
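/*
 * Illustrative sketch (not part of the original file): the OS specific ioctl
 * entry point is expected to route the three fast codes to supdrvIOCtlFast
 * and everything else to supdrvIOCtl, roughly as below. The function name and
 * the way idCpu/pReqHdr are obtained are hypothetical.
 */
#if 0 /* example only */
static int supdrvExampleOsIOCtl(uintptr_t uIOCtl, VMCPUID idCpu,
                                PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
{
    if (   uIOCtl == SUP_IOCTL_FAST_DO_RAW_RUN
        || uIOCtl == SUP_IOCTL_FAST_DO_HWACC_RUN
        || uIOCtl == SUP_IOCTL_FAST_DO_NOP)
        return supdrvIOCtlFast(uIOCtl, idCpu, pDevExt, pSession);
    return supdrvIOCtl(uIOCtl, pDevExt, pSession, pReqHdr);
}
#endif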
1055
1056/**
1057 * Helper for supdrvIOCtl. Check if pszStr contains any character of pszChars.
1058 * We would use strpbrk here if that function were included in the RedHat kABI
1059 * whitelist, see http://www.kerneldrivers.org/RHEL5.
1060 *
1061 * @return 1 if pszStr does contain any character of pszChars, 0 otherwise.
1062 * @param pszStr String to check
1063 * @param pszChars Character set
1064 */
1065static int supdrvCheckInvalidChar(const char *pszStr, const char *pszChars)
1066{
1067 int chCur;
1068 while ((chCur = *pszStr++) != '\0')
1069 {
1070 int ch;
1071 const char *psz = pszChars;
1072 while ((ch = *psz++) != '\0')
1073 if (ch == chCur)
1074 return 1;
1075
1076 }
1077 return 0;
1078}
1079
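/*
 * Illustrative usage (not part of the original file): SUP_IOCTL_LDR_OPEN below
 * uses this helper to reject module names containing metacharacters, e.g.
 *      supdrvCheckInvalidChar("VMMR0.r0", ";:()[]{}/\\|&*%#@!~`\"'") -> 0  (acceptable)
 *      supdrvCheckInvalidChar("evil;mod", ";:()[]{}/\\|&*%#@!~`\"'") -> 1  (contains ';')
 * The two module names are made-up inputs; the character set is the one passed
 * by the SUP_IOCTL_LDR_OPEN validation further down.
 */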
1080
1081/**
1082 * I/O Control worker.
1083 *
1084 * @returns 0 on success.
1085 * @returns VERR_INVALID_PARAMETER if the request is invalid.
1086 *
1087 * @param uIOCtl Function number.
1088 * @param pDevExt Device extension.
1089 * @param pSession Session data.
1090 * @param pReqHdr The request header.
1091 */
1092int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1093{
1094 /*
1095 * Validate the request.
1096 */
1097 /* this first check could probably be omitted as it's also done by the OS specific code... */
1098 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
1099 || pReqHdr->cbIn < sizeof(*pReqHdr)
1100 || pReqHdr->cbOut < sizeof(*pReqHdr)))
1101 {
1102 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
1103 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
1104 return VERR_INVALID_PARAMETER;
1105 }
1106 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
1107 {
1108 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
1109 {
1110 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
1111 return VERR_INVALID_PARAMETER;
1112 }
1113 }
1114 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
1115 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
1116 {
1117 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
1118 return VERR_INVALID_PARAMETER;
1119 }
1120
1121/*
1122 * Validation macros
1123 */
1124#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1125 do { \
1126 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1127 { \
1128 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1129 (long)pReq->Hdr.cbIn, (long)(cbInExpect), (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1130 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1131 } \
1132 } while (0)
1133
1134#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1135
1136#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1137 do { \
1138 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1139 { \
1140 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1141 (long)pReq->Hdr.cbIn, (long)(cbInExpect))); \
1142 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1143 } \
1144 } while (0)
1145
1146#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1147 do { \
1148 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1149 { \
1150 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1151 (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1152 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1153 } \
1154 } while (0)
1155
1156#define REQ_CHECK_EXPR(Name, expr) \
1157 do { \
1158 if (RT_UNLIKELY(!(expr))) \
1159 { \
1160 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1161 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1162 } \
1163 } while (0)
1164
1165#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1166 do { \
1167 if (RT_UNLIKELY(!(expr))) \
1168 { \
1169 OSDBGPRINT( fmt ); \
1170 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1171 } \
1172 } while (0)
1173
1174
1175 /*
1176 * The switch.
1177 */
1178 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1179 {
1180 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1181 {
1182 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1183 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1184 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1185 {
1186 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1187 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1188 return 0;
1189 }
1190
1191#if 0
1192 /*
1193 * Call out to the OS specific code and let it do permission checks on the
1194 * client process.
1195 */
1196 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1197 {
1198 pReq->u.Out.u32Cookie = 0xffffffff;
1199 pReq->u.Out.u32SessionCookie = 0xffffffff;
1200 pReq->u.Out.u32SessionVersion = 0xffffffff;
1201 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1202 pReq->u.Out.pSession = NULL;
1203 pReq->u.Out.cFunctions = 0;
1204 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1205 return 0;
1206 }
1207#endif
1208
1209 /*
1210 * Match the version.
1211 * The current logic is very simple, match the major interface version.
1212 */
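 /*
  * Illustrative note (not part of the original file): with hypothetical
  * numbers, if SUPDRV_IOC_VERSION were 0x00090003, a client asking for
  * u32MinVersion = 0x00090001 would pass (same 0x0009 upper half and
  * min <= current), while 0x00080000 or 0x000a0000 would be rejected by
  * the checks below.
  */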
1213 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1214 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1215 {
1216 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1217 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1218 pReq->u.Out.u32Cookie = 0xffffffff;
1219 pReq->u.Out.u32SessionCookie = 0xffffffff;
1220 pReq->u.Out.u32SessionVersion = 0xffffffff;
1221 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1222 pReq->u.Out.pSession = NULL;
1223 pReq->u.Out.cFunctions = 0;
1224 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1225 return 0;
1226 }
1227
1228 /*
1229 * Fill in return data and be gone.
1230 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1231 * u32SessionVersion <= u32ReqVersion!
1232 */
1233 /** @todo Somehow validate the client and negotiate a secure cookie... */
1234 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1235 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1236 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1237 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1238 pReq->u.Out.pSession = pSession;
1239 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1240 pReq->Hdr.rc = VINF_SUCCESS;
1241 return 0;
1242 }
1243
1244 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1245 {
1246 /* validate */
1247 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1248 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1249
1250 /* execute */
1251 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1252 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1253 pReq->Hdr.rc = VINF_SUCCESS;
1254 return 0;
1255 }
1256
1257 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_INSTALL):
1258 {
1259 /* validate */
1260 PSUPIDTINSTALL pReq = (PSUPIDTINSTALL)pReqHdr;
1261 REQ_CHECK_SIZES(SUP_IOCTL_IDT_INSTALL);
1262
1263 /* execute */
1264 pReq->u.Out.u8Idt = 3;
1265 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1266 return 0;
1267 }
1268
1269 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_REMOVE):
1270 {
1271 /* validate */
1272 PSUPIDTREMOVE pReq = (PSUPIDTREMOVE)pReqHdr;
1273 REQ_CHECK_SIZES(SUP_IOCTL_IDT_REMOVE);
1274
1275 /* execute */
1276 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1277 return 0;
1278 }
1279
1280 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1281 {
1282 /* validate */
1283 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1284 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1285 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1286 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1287 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1288
1289 /* execute */
1290 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1291 if (RT_FAILURE(pReq->Hdr.rc))
1292 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1293 return 0;
1294 }
1295
1296 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1297 {
1298 /* validate */
1299 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1300 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1301
1302 /* execute */
1303 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1304 return 0;
1305 }
1306
1307 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1308 {
1309 /* validate */
1310 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1311 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1312
1313 /* execute */
1314 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1315 if (RT_FAILURE(pReq->Hdr.rc))
1316 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1317 return 0;
1318 }
1319
1320 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1321 {
1322 /* validate */
1323 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1324 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1325
1326 /* execute */
1327 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1328 return 0;
1329 }
1330
1331 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1332 {
1333 /* validate */
1334 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1335 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1336 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage > 0);
1337 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage < _1M*16);
1338 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1339 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1340 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, !supdrvCheckInvalidChar(pReq->u.In.szName, ";:()[]{}/\\|&*%#@!~`\"'"));
1341
1342 /* execute */
1343 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1344 return 0;
1345 }
1346
1347 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1348 {
1349 /* validate */
1350 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1351            REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= sizeof(*pReq));
1352 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImage), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1353 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
1354 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1355 || ( pReq->u.In.offSymbols < pReq->u.In.cbImage
1356 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImage),
1357 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImage=%#lx\n", (long)pReq->u.In.offSymbols,
1358 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImage));
1359 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1360 || ( pReq->u.In.offStrTab < pReq->u.In.cbImage
1361 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImage
1362 && pReq->u.In.cbStrTab <= pReq->u.In.cbImage),
1363 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImage=%#lx\n", (long)pReq->u.In.offStrTab,
1364 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImage));
1365
1366 if (pReq->u.In.cSymbols)
1367 {
1368 uint32_t i;
1369 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.achImage[pReq->u.In.offSymbols];
1370 for (i = 0; i < pReq->u.In.cSymbols; i++)
1371 {
1372 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImage,
1373 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImage));
1374 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1375                                    ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbStrTab));
1376 REQ_CHECK_EXPR_FMT(memchr(&pReq->u.In.achImage[pReq->u.In.offStrTab + paSyms[i].offName], '\0', pReq->u.In.cbStrTab - paSyms[i].offName),
1377                                    ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbStrTab));
1378 }
1379 }
1380
1381 /* execute */
1382 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1383 return 0;
1384 }
1385
1386 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1387 {
1388 /* validate */
1389 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1390 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1391
1392 /* execute */
1393 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1394 return 0;
1395 }
1396
1397 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1398 {
1399 /* validate */
1400 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1401 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1402 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, memchr(pReq->u.In.szSymbol, '\0', sizeof(pReq->u.In.szSymbol)));
1403
1404 /* execute */
1405 pReq->Hdr.rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
1406 return 0;
1407 }
1408
1409 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0(0)):
1410 {
1411 /* validate */
1412 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1413 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1414 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1415
1416 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1417 {
1418 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1419
1420 /* execute */
1421 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1422#ifdef RT_WITH_W64_UNWIND_HACK
1423 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1424#else
1425 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1426#endif
1427 else
1428 pReq->Hdr.rc = VERR_WRONG_ORDER;
1429 }
1430 else
1431 {
1432 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1433 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1434 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1435 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1436 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1437
1438 /* execute */
1439 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1440#ifdef RT_WITH_W64_UNWIND_HACK
1441 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1442#else
1443 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1444#endif
1445 else
1446 pReq->Hdr.rc = VERR_WRONG_ORDER;
1447 }
1448
1449 if ( RT_FAILURE(pReq->Hdr.rc)
1450 && pReq->Hdr.rc != VERR_INTERRUPTED
1451 && pReq->Hdr.rc != VERR_TIMEOUT)
1452 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1453 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1454 else
1455 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1456 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1457 return 0;
1458 }
1459
1460 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1461 {
1462 /* validate */
1463 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1464 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1465
1466 /* execute */
1467 pReq->Hdr.rc = VINF_SUCCESS;
1468 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1469 return 0;
1470 }
1471
1472 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1473 {
1474 /* validate */
1475 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1476 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1477 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1478
1479 /* execute */
1480 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1481 if (RT_FAILURE(pReq->Hdr.rc))
1482 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1483 return 0;
1484 }
1485
1486 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1487 {
1488 /* validate */
1489 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1490 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1491
1492 /* execute */
1493 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1494 return 0;
1495 }
1496
1497 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1498 {
1499 /* validate */
1500 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1501 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1502
1503 /* execute */
1504 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1505 if (RT_SUCCESS(pReq->Hdr.rc))
1506 pReq->u.Out.pGipR0 = pDevExt->pGip;
1507 return 0;
1508 }
1509
1510 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
1511 {
1512 /* validate */
1513 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
1514 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
1515
1516 /* execute */
1517 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
1518 return 0;
1519 }
1520
1521 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
1522 {
1523 /* validate */
1524 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
1525 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
1526 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
1527 || ( VALID_PTR(pReq->u.In.pVMR0)
1528 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
1529 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
1530 /* execute */
1531 pSession->pVM = pReq->u.In.pVMR0;
1532 pReq->Hdr.rc = VINF_SUCCESS;
1533 return 0;
1534 }
1535
1536 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC):
1537 {
1538 /* validate */
1539 PSUPPAGEALLOC pReq = (PSUPPAGEALLOC)pReqHdr;
1540 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_SIZE_IN);
1541 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC, SUP_IOCTL_PAGE_ALLOC_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1542
1543 /* execute */
1544 pReq->Hdr.rc = SUPR0PageAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1545 if (RT_FAILURE(pReq->Hdr.rc))
1546 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1547 return 0;
1548 }
1549
1550 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
1551 {
1552 /* validate */
1553 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
1554 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
1555 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
1556 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
1557 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
1558 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
1559 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
1560 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
1561 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
1562
1563 /* execute */
1564 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
1565 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
1566 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
1567 &pReq->u.Out.aPages[0]);
1568 if (RT_FAILURE(pReq->Hdr.rc))
1569 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1570 return 0;
1571 }
1572
1573 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
1574 {
1575 /* validate */
1576 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
1577 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
1578 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
1579 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
1580 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1581 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
1582
1583 /* execute */
1584 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
1585 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
1586 if (RT_FAILURE(pReq->Hdr.rc))
1587 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1588 return 0;
1589 }
1590
1591 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
1592 {
1593 /* validate */
1594 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
1595 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
1596 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
1597 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
1598 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
1599 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1600 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
1601
1602 /* execute */
1603 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
1604 return 0;
1605 }
1606
1607 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
1608 {
1609 /* validate */
1610 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
1611 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
1612
1613 /* execute */
1614 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
1615 return 0;
1616 }
1617
1618 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE(0)):
1619 {
1620 /* validate */
1621 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
1622 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1623 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1624
1625 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
1626 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
1627 else
1628 {
1629 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
1630 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
1631 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
1632 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
1633 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
1634 }
1635 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1636
1637 /* execute */
1638 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
1639 return 0;
1640 }
1641
1642 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS(0)):
1643 {
1644 /* validate */
1645 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
1646 size_t cbStrTab;
1647 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
1648 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
1649 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
1650 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
1651 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
1652 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
1653 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
1654 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
1655 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
1656 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
1657 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
1658
1659 /* execute */
1660 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pDevExt, pSession, pReq);
1661 return 0;
1662 }
1663
1664 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_CREATE):
1665 {
1666 /* validate */
1667 PSUPSEMCREATE pReq = (PSUPSEMCREATE)pReqHdr;
1668 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_CREATE, SUP_IOCTL_SEM_CREATE_SIZE_IN, SUP_IOCTL_SEM_CREATE_SIZE_OUT);
1669
1670 /* execute */
1671 switch (pReq->u.In.uType)
1672 {
1673 case SUP_SEM_TYPE_EVENT:
1674 {
1675 SUPSEMEVENT hEvent;
1676 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
1677 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
1678 break;
1679 }
1680
1681 case SUP_SEM_TYPE_EVENT_MULTI:
1682 {
1683 SUPSEMEVENTMULTI hEventMulti;
1684 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
1685 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
1686 break;
1687 }
1688
1689 default:
1690 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
1691 break;
1692 }
1693 return 0;
1694 }
1695
1696 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP):
1697 {
1698 /* validate */
1699 PSUPSEMOP pReq = (PSUPSEMOP)pReqHdr;
1700 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP, SUP_IOCTL_SEM_OP_SIZE_IN, SUP_IOCTL_SEM_OP_SIZE_OUT);
1701
1702 /* execute */
1703 switch (pReq->u.In.uType)
1704 {
1705 case SUP_SEM_TYPE_EVENT:
1706 {
1707 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
1708 switch (pReq->u.In.uOp)
1709 {
1710 case SUPSEMOP_WAIT:
1711 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.cMillies);
1712 break;
1713 case SUPSEMOP_SIGNAL:
1714 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
1715 break;
1716 case SUPSEMOP_CLOSE:
1717 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
1718 break;
1719 case SUPSEMOP_RESET:
1720 default:
1721 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
1722 break;
1723 }
1724 break;
1725 }
1726
1727 case SUP_SEM_TYPE_EVENT_MULTI:
1728 {
1729 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
1730 switch (pReq->u.In.uOp)
1731 {
1732 case SUPSEMOP_WAIT:
1733 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.cMillies);
1734 break;
1735 case SUPSEMOP_SIGNAL:
1736 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
1737 break;
1738 case SUPSEMOP_CLOSE:
1739 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
1740 break;
1741 case SUPSEMOP_RESET:
1742 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
1743 break;
1744 default:
1745 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
1746 break;
1747 }
1748 break;
1749 }
1750
1751 default:
1752 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
1753 break;
1754 }
1755 return 0;
1756 }
1757
1758 default:
1759 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
1760 break;
1761 }
1762 return SUPDRV_ERR_GENERAL_FAILURE;
1763}
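
/*
 * A rough usage sketch of how a ring-3 caller is expected to size a
 * SUP_IOCTL_CALL_VMMR0 request that carries a request packet, derived from
 * the checks above. pvBuf, cbPayload, pVMR0, idCpu and uOperation are
 * hypothetical caller-supplied values; cookies, flags and the actual ioctl
 * submission are omitted.
 *
 *      PSUPCALLVMMR0   pReq    = (PSUPCALLVMMR0)pvBuf;
 *      PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
 *      pVMMReq->u32Magic     = SUPVMMR0REQHDR_MAGIC;
 *      pVMMReq->cbReq        = sizeof(SUPVMMR0REQHDR) + cbPayload;
 *      pReq->Hdr.cbIn        = SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq);
 *      pReq->Hdr.cbOut       = SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq);
 *      pReq->u.In.pVMR0      = pVMR0;
 *      pReq->u.In.idCpu      = idCpu;
 *      pReq->u.In.uOperation = uOperation;
 *      pReq->u.In.u64Arg     = 0;
 */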
1764
1765
1766/**
1767 * Inter-Driver Communication (IDC) worker.
1768 *
1769 * @returns VBox status code.
1770 * @retval VINF_SUCCESS on success.
1771 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1772 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
1773 *
1774 * @param uReq The request (function) code.
1775 * @param   pDevExt     Device extension.
1776 * @param pSession Session data.
1777 * @param pReqHdr The request header.
1778 */
1779int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
1780{
1781 /*
1782 * The OS specific code has already validated the pSession
1783     * pointer and checked that the request size is greater than
1784     * or equal to the size of the header.
1785 *
1786 * So, just check that pSession is a kernel context session.
1787 */
1788 if (RT_UNLIKELY( pSession
1789 && pSession->R0Process != NIL_RTR0PROCESS))
1790 return VERR_INVALID_PARAMETER;
1791
1792/*
1793 * Validation macro.
1794 */
1795#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
1796 do { \
1797 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
1798 { \
1799 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
1800 (long)pReqHdr->cb, (long)(cbExpect))); \
1801 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1802 } \
1803 } while (0)
1804
1805 switch (uReq)
1806 {
1807 case SUPDRV_IDC_REQ_CONNECT:
1808 {
1809 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
1810 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
1811
1812 /*
1813 * Validate the cookie and other input.
1814 */
1815 if (pReq->Hdr.pSession != NULL)
1816 {
1817 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pReq->Hdr.pSession));
1818 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1819 }
1820 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
1821 {
1822 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1823 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
1824 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1825 }
1826 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
1827 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
1828 {
1829                 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x and uReqVersion=%#x don't match!\n",
1830 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1831 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1832 }
1833
1834 /*
1835 * Match the version.
1836             * The current logic is very simple: match the major interface version.
1837 */
1838 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
1839 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
1840 {
1841 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1842 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
1843 pReq->u.Out.pSession = NULL;
1844 pReq->u.Out.uSessionVersion = 0xffffffff;
1845 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1846 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1847 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1848 return VINF_SUCCESS;
1849 }
1850
1851 pReq->u.Out.pSession = NULL;
1852 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
1853 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1854 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1855
1856 /*
1857 * On NT we will already have a session associated with the
1858 * client, just like with the SUP_IOCTL_COOKIE request, while
1859             * the other platforms don't.
1860 */
1861#ifdef RT_OS_WINDOWS
1862 pReq->Hdr.rc = VINF_SUCCESS;
1863#else
1864 AssertReturn(!pSession, VERR_INTERNAL_ERROR);
1865 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, &pSession);
1866 if (RT_FAILURE(pReq->Hdr.rc))
1867 {
1868 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
1869 return VINF_SUCCESS;
1870 }
1871#endif
1872
1873 pReq->u.Out.pSession = pSession;
1874 pReq->Hdr.pSession = pSession;
1875
1876 return VINF_SUCCESS;
1877 }
1878
1879 case SUPDRV_IDC_REQ_DISCONNECT:
1880 {
1881 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
1882
1883#ifdef RT_OS_WINDOWS
1884 /* Windows will destroy the session when the file object is destroyed. */
1885#else
1886 supdrvCloseSession(pDevExt, pSession);
1887#endif
1888 return pReqHdr->rc = VINF_SUCCESS;
1889 }
1890
1891 case SUPDRV_IDC_REQ_GET_SYMBOL:
1892 {
1893 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
1894 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
1895
1896 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
1897 return VINF_SUCCESS;
1898 }
1899
1900 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
1901 {
1902 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
1903 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
1904
1905 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
1906 return VINF_SUCCESS;
1907 }
1908
1909 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
1910 {
1911 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
1912 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
1913
1914 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
1915 return VINF_SUCCESS;
1916 }
1917
1918 default:
1919 Log(("Unknown IDC %#lx\n", (long)uReq));
1920 break;
1921 }
1922
1923#undef REQ_CHECK_IDC_SIZE
1924 return VERR_NOT_SUPPORTED;
1925}
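
/*
 * A minimal sketch, assuming a kernel IDC client, of the connect request the
 * OS specific glue eventually hands to supdrvIDC(). Only the fields checked
 * above are shown; the version choice is merely one that passes those checks,
 * and the submission path and error handling are left out.
 *
 *      SUPDRVIDCREQCONNECT Req;
 *      memset(&Req, 0, sizeof(Req));
 *      Req.Hdr.cb              = sizeof(Req);
 *      Req.Hdr.pSession        = NULL;             // must be NULL on connect
 *      Req.u.In.u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE;
 *      Req.u.In.uMinVersion    = SUPDRV_IDC_VERSION & UINT32_C(0xffff0000);
 *      Req.u.In.uReqVersion    = SUPDRV_IDC_VERSION;
 *      // ... submit; on success Req.Hdr.pSession / Req.u.Out.pSession
 *      //     identify the new kernel session ...
 */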
1926
1927
1928/**
1929 * Register an object for reference counting.
1930 * The object is registered with one reference in the specified session.
1931 *
1932 * @returns Unique identifier on success (pointer).
1933 *          All future references must use this identifier.
1934 * @returns NULL on failure.
1935 * @param   pfnDestructor   The destructor function which will be called when the reference count reaches 0.
1936 * @param pvUser1 The first user argument.
1937 * @param pvUser2 The second user argument.
1938 */
1939SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1940{
1941 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1942 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1943 PSUPDRVOBJ pObj;
1944 PSUPDRVUSAGE pUsage;
1945
1946 /*
1947 * Validate the input.
1948 */
1949 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
1950 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
1951 AssertPtrReturn(pfnDestructor, NULL);
1952
1953 /*
1954 * Allocate and initialize the object.
1955 */
1956 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1957 if (!pObj)
1958 return NULL;
1959 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1960 pObj->enmType = enmType;
1961 pObj->pNext = NULL;
1962 pObj->cUsage = 1;
1963 pObj->pfnDestructor = pfnDestructor;
1964 pObj->pvUser1 = pvUser1;
1965 pObj->pvUser2 = pvUser2;
1966 pObj->CreatorUid = pSession->Uid;
1967 pObj->CreatorGid = pSession->Gid;
1968 pObj->CreatorProcess= pSession->Process;
1969 supdrvOSObjInitCreator(pObj, pSession);
1970
1971 /*
1972 * Allocate the usage record.
1973 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
1974 */
1975 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1976
1977 pUsage = pDevExt->pUsageFree;
1978 if (pUsage)
1979 pDevExt->pUsageFree = pUsage->pNext;
1980 else
1981 {
1982 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1983 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1984 if (!pUsage)
1985 {
1986 RTMemFree(pObj);
1987 return NULL;
1988 }
1989 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1990 }
1991
1992 /*
1993 * Insert the object and create the session usage record.
1994 */
1995 /* The object. */
1996 pObj->pNext = pDevExt->pObjs;
1997 pDevExt->pObjs = pObj;
1998
1999 /* The session record. */
2000 pUsage->cUsage = 1;
2001 pUsage->pObj = pObj;
2002 pUsage->pNext = pSession->pUsage;
2003 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
2004 pSession->pUsage = pUsage;
2005
2006 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2007
2008    Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser2=%p)\n", pObj, pvUser1, pvUser2));
2009 return pObj;
2010}
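
/*
 * An illustrative lifecycle sketch; the destructor, object type and user data
 * below are made-up examples: a component registers an object with a
 * destructor, other sessions reference it with SUPR0ObjAddRef(), and the
 * destructor runs when the last reference is released.
 *
 *      static DECLCALLBACK(void) exampleDestructor(void *pvObj, void *pvUser1, void *pvUser2)
 *      {
 *          RTMemFree(pvUser1);     // tear down whatever state the object wrapped
 *      }
 *      ...
 *      void *pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, exampleDestructor, pMyState, NULL);
 *      if (pvObj)
 *      {
 *          // other sessions: SUPR0ObjAddRef(pvObj, pOtherSession) ... SUPR0ObjRelease(pvObj, pOtherSession)
 *          SUPR0ObjRelease(pvObj, pSession);   // drops the creator's reference
 *      }
 */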
2011
2012
2013/**
2014 * Increment the reference counter for the object associating the reference
2015 * with the specified session.
2016 *
2017 * @returns IPRT status code.
2018 * @param pvObj The identifier returned by SUPR0ObjRegister().
2019 * @param pSession The session which is referencing the object.
2020 *
2021 * @remarks The caller should not own any spinlocks and must carefully protect
2022 * itself against potential race with the destructor so freed memory
2023 * isn't accessed here.
2024 */
2025SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
2026{
2027 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
2028}
2029
2030
2031/**
2032 * Increment the reference counter for the object associating the reference
2033 * with the specified session.
2034 *
2035 * @returns IPRT status code.
2036 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
2037 * couldn't be allocated. (If you see this you're not doing the right
2038 * thing and it won't ever work reliably.)
2039 *
2040 * @param pvObj The identifier returned by SUPR0ObjRegister().
2041 * @param pSession The session which is referencing the object.
2042 * @param fNoBlocking Set if it's not OK to block. Never try to make the
2043 * first reference to an object in a session with this
2044 * argument set.
2045 *
2046 * @remarks The caller should not own any spinlocks and must carefully protect
2047 * itself against potential race with the destructor so freed memory
2048 * isn't accessed here.
2049 */
2050SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
2051{
2052 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2053 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2054 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2055 int rc = VINF_SUCCESS;
2056 PSUPDRVUSAGE pUsagePre;
2057 PSUPDRVUSAGE pUsage;
2058
2059 /*
2060 * Validate the input.
2061 * Be ready for the destruction race (someone might be stuck in the
2062     * destructor waiting on a lock we own).
2063 */
2064 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2065 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
2066 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
2067 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
2068 VERR_INVALID_PARAMETER);
2069
2070 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
2071
2072 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2073 {
2074 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2075
2076 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2077 return VERR_WRONG_ORDER;
2078 }
2079
2080 /*
2081 * Preallocate the usage record if we can.
2082 */
2083 pUsagePre = pDevExt->pUsageFree;
2084 if (pUsagePre)
2085 pDevExt->pUsageFree = pUsagePre->pNext;
2086 else if (!fNoBlocking)
2087 {
2088 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2089 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2090 if (!pUsagePre)
2091 return VERR_NO_MEMORY;
2092
2093 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
2094 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2095 {
2096 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2097
2098 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2099 return VERR_WRONG_ORDER;
2100 }
2101 }
2102
2103 /*
2104 * Reference the object.
2105 */
2106 pObj->cUsage++;
2107
2108 /*
2109 * Look for the session record.
2110 */
2111 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
2112 {
2113 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
2114 if (pUsage->pObj == pObj)
2115 break;
2116 }
2117 if (pUsage)
2118 pUsage->cUsage++;
2119 else if (pUsagePre)
2120 {
2121 /* create a new session record. */
2122 pUsagePre->cUsage = 1;
2123 pUsagePre->pObj = pObj;
2124 pUsagePre->pNext = pSession->pUsage;
2125 pSession->pUsage = pUsagePre;
2126 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
2127
2128 pUsagePre = NULL;
2129 }
2130 else
2131 {
2132 pObj->cUsage--;
2133 rc = VERR_TRY_AGAIN;
2134 }
2135
2136 /*
2137     * Put any unused usage record into the free list.
2138 */
2139 if (pUsagePre)
2140 {
2141 pUsagePre->pNext = pDevExt->pUsageFree;
2142 pDevExt->pUsageFree = pUsagePre;
2143 }
2144
2145 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2146
2147 return rc;
2148}
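
/*
 * A small sketch of the fNoBlocking contract, assuming the session already
 * holds a reference to pvObj (as required above); the retry policy shown is
 * just one option and is up to the caller.
 *
 *      int rc = SUPR0ObjAddRefEx(pvObj, pSession, true /* fNoBlocking */);
 *      if (rc == VERR_TRY_AGAIN)
 *      {
 *          // No usage record could be allocated without blocking; retry from
 *          // a context that may block, or keep an extra reference around.
 *          rc = SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
 *      }
 */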
2149
2150
2151/**
2152 * Decrement / destroy a reference counter record for an object.
2153 *
2154 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
2155 *
2156 * @returns IPRT status code.
2157 * @retval VINF_SUCCESS if not destroyed.
2158 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
2159 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
2160 *          strict builds.
2161 *
2162 * @param pvObj The identifier returned by SUPR0ObjRegister().
2163 * @param pSession The session which is referencing the object.
2164 */
2165SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
2166{
2167 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2168 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2169 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2170 int rc = VERR_INVALID_PARAMETER;
2171 PSUPDRVUSAGE pUsage;
2172 PSUPDRVUSAGE pUsagePrev;
2173
2174 /*
2175 * Validate the input.
2176 */
2177 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2178 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2179                     ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2180 VERR_INVALID_PARAMETER);
2181
2182 /*
2183 * Acquire the spinlock and look for the usage record.
2184 */
2185 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
2186
2187 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
2188 pUsage;
2189 pUsagePrev = pUsage, pUsage = pUsage->pNext)
2190 {
2191 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
2192 if (pUsage->pObj == pObj)
2193 {
2194 rc = VINF_SUCCESS;
2195 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
2196 if (pUsage->cUsage > 1)
2197 {
2198 pObj->cUsage--;
2199 pUsage->cUsage--;
2200 }
2201 else
2202 {
2203 /*
2204 * Free the session record.
2205 */
2206 if (pUsagePrev)
2207 pUsagePrev->pNext = pUsage->pNext;
2208 else
2209 pSession->pUsage = pUsage->pNext;
2210 pUsage->pNext = pDevExt->pUsageFree;
2211 pDevExt->pUsageFree = pUsage;
2212
2213 /* What about the object? */
2214 if (pObj->cUsage > 1)
2215 pObj->cUsage--;
2216 else
2217 {
2218 /*
2219 * Object is to be destroyed, unlink it.
2220 */
2221 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
2222 rc = VINF_OBJECT_DESTROYED;
2223 if (pDevExt->pObjs == pObj)
2224 pDevExt->pObjs = pObj->pNext;
2225 else
2226 {
2227 PSUPDRVOBJ pObjPrev;
2228 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
2229 if (pObjPrev->pNext == pObj)
2230 {
2231 pObjPrev->pNext = pObj->pNext;
2232 break;
2233 }
2234 Assert(pObjPrev);
2235 }
2236 }
2237 }
2238 break;
2239 }
2240 }
2241
2242 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2243
2244 /*
2245 * Call the destructor and free the object if required.
2246 */
2247 if (rc == VINF_OBJECT_DESTROYED)
2248 {
2249 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
2250 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
2251 if (pObj->pfnDestructor)
2252#ifdef RT_WITH_W64_UNWIND_HACK
2253 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
2254#else
2255 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
2256#endif
2257 RTMemFree(pObj);
2258 }
2259
2260 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
2261 return rc;
2262}
2263
2264
2265/**
2266 * Verifies that the current process can access the specified object.
2267 *
2268 * @returns The following IPRT status code:
2269 * @retval VINF_SUCCESS if access was granted.
2270 * @retval VERR_PERMISSION_DENIED if denied access.
2271 * @retval VERR_INVALID_PARAMETER if invalid parameter.
2272 *
2273 * @param pvObj The identifier returned by SUPR0ObjRegister().
2274 * @param pSession The session which wishes to access the object.
2275 * @param pszObjName Object string name. This is optional and depends on the object type.
2276 *
2277 * @remark The caller is responsible for making sure the object isn't removed while
2278 * we're inside this function. If uncertain about this, just call AddRef before calling us.
2279 */
2280SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
2281{
2282 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2283 int rc;
2284
2285 /*
2286 * Validate the input.
2287 */
2288 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2289 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2290                     ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2291 VERR_INVALID_PARAMETER);
2292
2293 /*
2294 * Check access. (returns true if a decision has been made.)
2295 */
2296 rc = VERR_INTERNAL_ERROR;
2297 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
2298 return rc;
2299
2300 /*
2301 * Default policy is to allow the user to access his own
2302 * stuff but nothing else.
2303 */
2304 if (pObj->CreatorUid == pSession->Uid)
2305 return VINF_SUCCESS;
2306 return VERR_PERMISSION_DENIED;
2307}
2308
2309
2310/**
2311 * Lock pages.
2312 *
2313 * @returns IPRT status code.
2314 * @param pSession Session to which the locked memory should be associated.
2315 * @param pvR3 Start of the memory range to lock.
2316 * This must be page aligned.
2317 * @param cPages Number of pages to lock.
2318 * @param paPages Where to put the physical addresses of locked memory.
2319 */
2320SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
2321{
2322 int rc;
2323 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2324 const size_t cb = (size_t)cPages << PAGE_SHIFT;
2325 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
2326
2327 /*
2328 * Verify input.
2329 */
2330 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2331 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2332 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
2333 || !pvR3)
2334 {
2335 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
2336 return VERR_INVALID_PARAMETER;
2337 }
2338
2339#ifdef RT_OS_WINDOWS /* A temporary hack for windows, will be removed once all ring-3 code has been cleaned up. */
2340 /* First check if we allocated it using SUPPageAlloc; if so then we don't need to lock it again */
2341 rc = supdrvPageGetPhys(pSession, pvR3, cPages, paPages);
2342 if (RT_SUCCESS(rc))
2343 return rc;
2344#endif
2345
2346 /*
2347 * Let IPRT do the job.
2348 */
2349 Mem.eType = MEMREF_TYPE_LOCKED;
2350 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTR0ProcHandleSelf());
2351 if (RT_SUCCESS(rc))
2352 {
2353 uint32_t iPage = cPages;
2354 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
2355 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
2356
2357 while (iPage-- > 0)
2358 {
2359 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2360 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
2361 {
2362 AssertMsgFailed(("iPage=%d\n", iPage));
2363 rc = VERR_INTERNAL_ERROR;
2364 break;
2365 }
2366 }
2367 if (RT_SUCCESS(rc))
2368 rc = supdrvMemAdd(&Mem, pSession);
2369 if (RT_FAILURE(rc))
2370 {
2371 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
2372 AssertRC(rc2);
2373 }
2374 }
2375
2376 return rc;
2377}
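
/*
 * A usage sketch, assuming a page aligned ring-3 buffer spanning four pages;
 * the page count and buffer are illustrative only.
 *
 *      RTHCPHYS aPhys[4];                          // one entry per page
 *      int rc = SUPR0LockMem(pSession, pvR3, RT_ELEMENTS(aPhys), &aPhys[0]);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... hand aPhys[] to whoever needs the physical addresses ...
 *          rc = SUPR0UnlockMem(pSession, pvR3);
 *      }
 */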
2378
2379
2380/**
2381 * Unlocks the memory pointed to by pv.
2382 *
2383 * @returns IPRT status code.
2384 * @param pSession Session to which the memory was locked.
2385 * @param pvR3 Memory to unlock.
2386 */
2387SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2388{
2389 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2390 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2391#ifdef RT_OS_WINDOWS
2392 /*
2393 * Temporary hack for windows - SUPR0PageFree will unlock SUPR0PageAlloc
2394 * allocations; ignore this call.
2395 */
2396 if (supdrvPageWasLockedByPageAlloc(pSession, pvR3))
2397 {
2398 LogFlow(("Page will be unlocked in SUPR0PageFree -> ignore\n"));
2399 return VINF_SUCCESS;
2400 }
2401#endif
2402 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
2403}
2404
2405
2406/**
2407 * Allocates a chunk of page aligned memory with contiguous and fixed physical
2408 * backing.
2409 *
2410 * @returns IPRT status code.
2411 * @param pSession Session data.
2412 * @param cPages Number of pages to allocate.
2413 * @param   ppvR0       Where to put the address of the Ring-0 mapping of the allocated memory.
2414 * @param   ppvR3       Where to put the address of the Ring-3 mapping of the allocated memory.
2415 * @param pHCPhys Where to put the physical address of allocated memory.
2416 */
2417SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
2418{
2419 int rc;
2420 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2421 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
2422
2423 /*
2424 * Validate input.
2425 */
2426 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2427 if (!ppvR3 || !ppvR0 || !pHCPhys)
2428 {
2429 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
2430 pSession, ppvR0, ppvR3, pHCPhys));
2431 return VERR_INVALID_PARAMETER;
2432
2433 }
2434 if (cPages < 1 || cPages >= 256)
2435 {
2436 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2437 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2438 }
2439
2440 /*
2441 * Let IPRT do the job.
2442 */
2443 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
2444 if (RT_SUCCESS(rc))
2445 {
2446 int rc2;
2447 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2448 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2449 if (RT_SUCCESS(rc))
2450 {
2451 Mem.eType = MEMREF_TYPE_CONT;
2452 rc = supdrvMemAdd(&Mem, pSession);
2453 if (!rc)
2454 {
2455 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2456 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2457 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
2458 return 0;
2459 }
2460
2461 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2462 AssertRC(rc2);
2463 }
2464 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2465 AssertRC(rc2);
2466 }
2467
2468 return rc;
2469}
2470
2471
2472/**
2473 * Frees memory allocated using SUPR0ContAlloc().
2474 *
2475 * @returns IPRT status code.
2476 * @param pSession The session to which the memory was allocated.
2477 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2478 */
2479SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2480{
2481 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2482 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2483 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
2484}
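
/*
 * A sketch of the contiguous allocation round trip; the page count is an
 * arbitrary example and error handling is trimmed.
 *
 *      RTR0PTR  pvR0;
 *      RTR3PTR  pvR3;
 *      RTHCPHYS HCPhys;
 *      int rc = SUPR0ContAlloc(pSession, 16 /* cPages */, &pvR0, &pvR3, &HCPhys);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // HCPhys is the physical address of the first page; the rest follow contiguously.
 *          rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pvR3);
 *      }
 */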
2485
2486
2487/**
2488 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
2489 *
2490 * The memory isn't zeroed.
2491 *
2492 * @returns IPRT status code.
2493 * @param pSession Session data.
2494 * @param cPages Number of pages to allocate.
2495 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
2496 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
2497 * @param paPages Where to put the physical addresses of allocated memory.
2498 */
2499SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2500{
2501 unsigned iPage;
2502 int rc;
2503 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2504 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
2505
2506 /*
2507 * Validate input.
2508 */
2509 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2510 if (!ppvR3 || !ppvR0 || !paPages)
2511 {
2512 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
2513 pSession, ppvR3, ppvR0, paPages));
2514 return VERR_INVALID_PARAMETER;
2515
2516 }
2517 if (cPages < 1 || cPages >= 256)
2518 {
2519 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2520 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2521 }
2522
2523 /*
2524 * Let IPRT do the work.
2525 */
2526 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
2527 if (RT_SUCCESS(rc))
2528 {
2529 int rc2;
2530 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2531 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2532 if (RT_SUCCESS(rc))
2533 {
2534 Mem.eType = MEMREF_TYPE_LOW;
2535 rc = supdrvMemAdd(&Mem, pSession);
2536 if (!rc)
2537 {
2538 for (iPage = 0; iPage < cPages; iPage++)
2539 {
2540 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2541                    AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
2542 }
2543 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2544 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2545 return 0;
2546 }
2547
2548 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2549 AssertRC(rc2);
2550 }
2551
2552 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2553 AssertRC(rc2);
2554 }
2555
2556 return rc;
2557}
2558
2559
2560/**
2561 * Frees memory allocated using SUPR0LowAlloc().
2562 *
2563 * @returns IPRT status code.
2564 * @param pSession The session to which the memory was allocated.
2565 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2566 */
2567SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2568{
2569 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2570 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2571 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
2572}
2573
2574
2575
2576/**
2577 * Allocates a chunk of memory with both R0 and R3 mappings.
2578 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
2579 *
2580 * @returns IPRT status code.
2581 * @param   pSession    The session to associate the allocation with.
2582 * @param cb Number of bytes to allocate.
2583 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2584 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2585 */
2586SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
2587{
2588 int rc;
2589 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2590 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
2591
2592 /*
2593 * Validate input.
2594 */
2595 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2596 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
2597 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2598 if (cb < 1 || cb >= _4M)
2599 {
2600 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
2601 return VERR_INVALID_PARAMETER;
2602 }
2603
2604 /*
2605 * Let IPRT do the work.
2606 */
2607 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
2608 if (RT_SUCCESS(rc))
2609 {
2610 int rc2;
2611 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2612 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2613 if (RT_SUCCESS(rc))
2614 {
2615 Mem.eType = MEMREF_TYPE_MEM;
2616 rc = supdrvMemAdd(&Mem, pSession);
2617 if (!rc)
2618 {
2619 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2620 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2621 return VINF_SUCCESS;
2622 }
2623
2624 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2625 AssertRC(rc2);
2626 }
2627
2628 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2629 AssertRC(rc2);
2630 }
2631
2632 return rc;
2633}
2634
2635
2636/**
2637 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
2638 *
2639 * @returns IPRT status code.
2640 * @param pSession The session to which the memory was allocated.
2641 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2642 * @param paPages Where to store the physical addresses.
2643 */
2644SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
2645{
2646 PSUPDRVBUNDLE pBundle;
2647 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2648 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
2649
2650 /*
2651 * Validate input.
2652 */
2653 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2654 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
2655 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
2656
2657 /*
2658 * Search for the address.
2659 */
2660 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2661 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2662 {
2663 if (pBundle->cUsed > 0)
2664 {
2665 unsigned i;
2666 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2667 {
2668 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2669 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2670 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2671 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2672 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
2673 )
2674 )
2675 {
2676 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2677 size_t iPage;
2678 for (iPage = 0; iPage < cPages; iPage++)
2679 {
2680 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2681 paPages[iPage].uReserved = 0;
2682 }
2683 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2684 return VINF_SUCCESS;
2685 }
2686 }
2687 }
2688 }
2689 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2690 Log(("Failed to find %p!!!\n", (void *)uPtr));
2691 return VERR_INVALID_PARAMETER;
2692}
2693
2694
2695/**
2696 * Free memory allocated by SUPR0MemAlloc().
2697 *
2698 * @returns IPRT status code.
2699 * @param pSession The session owning the allocation.
2700 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2701 */
2702SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2703{
2704 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2705 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2706 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
2707}
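
/*
 * A sketch tying SUPR0MemAlloc(), SUPR0MemGetPhys() and SUPR0MemFree()
 * together; the two page size is an assumption made for the example.
 *
 *      RTR0PTR pvR0;
 *      RTR3PTR pvR3;
 *      int rc = SUPR0MemAlloc(pSession, 2 * PAGE_SIZE, &pvR0, &pvR3);
 *      if (RT_SUCCESS(rc))
 *      {
 *          SUPPAGE aPages[2];                      // one SUPPAGE per allocated page
 *          rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
 *          // ... aPages[i].Phys now holds the physical address of page i ...
 *          rc = SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
 *      }
 */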
2708
2709
2710/**
2711 * Allocates a chunk of memory with only a ring-3 mapping.
2712 *
2713 * The memory is fixed and it's possible to query the physical addresses using
2714 * SUPR0MemGetPhys().
2715 *
2716 * @returns IPRT status code.
2717 * @param   pSession    The session to associate the allocation with.
2718 * @param cPages The number of pages to allocate.
2719 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2720 * @param paPages Where to store the addresses of the pages. Optional.
2721 */
2722SUPR0DECL(int) SUPR0PageAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2723{
2724 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2725 return SUPR0PageAllocEx(pSession, cPages, 0 /*fFlags*/, ppvR3, NULL, paPages);
2726}
2727
2728
2729/**
2730 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
2731 *
2732 * The memory is fixed and it's possible to query the physical addresses using
2733 * SUPR0MemGetPhys().
2734 *
2735 * @returns IPRT status code.
2736 * @param   pSession    The session to associate the allocation with.
2737 * @param cPages The number of pages to allocate.
2738 * @param fFlags Flags, reserved for the future. Must be zero.
2739 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2740 * NULL if no ring-3 mapping.
2741 * @param   ppvR0       Where to store the address of the Ring-0 mapping.
2742 * NULL if no ring-0 mapping.
2743 * @param paPages Where to store the addresses of the pages. Optional.
2744 */
2745SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
2746{
2747 int rc;
2748 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2749     LogFlow(("SUPR0PageAllocEx: pSession=%p cPages=%d ppvR3=%p ppvR0=%p\n", pSession, cPages, ppvR3, ppvR0));
2750
2751 /*
2752 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
2753 */
2754 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2755 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
2756 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2757 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
2758 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2759 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
2760 {
2761         Log(("SUPR0PageAllocEx: Illegal request cPages=%u; must be greater than 0 and not exceed VBOX_MAX_ALLOC_PAGE_COUNT (128MB).\n", cPages));
2762 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2763 }
2764
2765 /*
2766 * Let IPRT do the work.
2767 */
2768 if (ppvR0)
2769 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
2770 else
2771 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
2772 if (RT_SUCCESS(rc))
2773 {
2774 int rc2;
2775 if (ppvR3)
2776 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2777 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2778 else
2779 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
2780 if (RT_SUCCESS(rc))
2781 {
2782 Mem.eType = MEMREF_TYPE_PAGE;
2783 rc = supdrvMemAdd(&Mem, pSession);
2784 if (!rc)
2785 {
2786 if (ppvR3)
2787 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2788 if (ppvR0)
2789 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2790 if (paPages)
2791 {
2792 uint32_t iPage = cPages;
2793 while (iPage-- > 0)
2794 {
2795 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
2796 Assert(paPages[iPage] != NIL_RTHCPHYS);
2797 }
2798 }
2799 return VINF_SUCCESS;
2800 }
2801
2802 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2803 AssertRC(rc2);
2804 }
2805
2806 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2807 AssertRC(rc2);
2808 }
2809 return rc;
2810}
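
/*
 * A sketch requesting both mappings plus the page list; the page count is an
 * arbitrary example and the array must be sized to match it.
 *
 *      RTR3PTR  pvR3;
 *      RTR0PTR  pvR0;
 *      RTHCPHYS aPages[8];
 *      int rc = SUPR0PageAllocEx(pSession, 8 /* cPages */, 0 /* fFlags */, &pvR3, &pvR0, &aPages[0]);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // pvR3 and pvR0 map the same pages; both go away with SUPR0PageFree(pSession, pvR3).
 *      }
 */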
2811
2812
2813/**
2814 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
2815 * space.
2816 *
2817 * @returns IPRT status code.
2818 * @param   pSession    The session the allocation is associated with.
2819 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
2820 * @param offSub Where to start mapping. Must be page aligned.
2821 * @param cbSub How much to map. Must be page aligned.
2822 * @param fFlags Flags, MBZ.
2823 * @param   ppvR0       Where to return the address of the ring-0 mapping on
2824 * success.
2825 */
2826SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
2827 uint32_t fFlags, PRTR0PTR ppvR0)
2828{
2829 int rc;
2830 PSUPDRVBUNDLE pBundle;
2831 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2832 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
2833 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
2834
2835 /*
2836     * Validate input.
2837 */
2838 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2839 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2840 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2841 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2842 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2843 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
2844
2845 /*
2846 * Find the memory object.
2847 */
2848 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2849 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2850 {
2851 if (pBundle->cUsed > 0)
2852 {
2853 unsigned i;
2854 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2855 {
2856 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2857 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2858 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2859 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2860 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
2861 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2862 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
2863 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
2864 {
2865 hMemObj = pBundle->aMem[i].MemObj;
2866 break;
2867 }
2868 }
2869 }
2870 }
2871 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2872
2873 rc = VERR_INVALID_PARAMETER;
2874 if (hMemObj != NIL_RTR0MEMOBJ)
2875 {
2876 /*
2877         * Do some further input validations before calling IPRT.
2878 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
2879 */
2880 size_t cbMemObj = RTR0MemObjSize(hMemObj);
2881 if ( offSub < cbMemObj
2882 && cbSub <= cbMemObj
2883 && offSub + cbSub <= cbMemObj)
2884 {
2885 RTR0MEMOBJ hMapObj;
2886 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
2887 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
2888 if (RT_SUCCESS(rc))
2889 *ppvR0 = RTR0MemObjAddress(hMapObj);
2890 }
2891 else
2892 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
2893
2894 }
2895 return rc;
2896}
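
/*
 * A sketch mapping one page of a previous SUPR0PageAllocEx() allocation into
 * kernel space, assuming the allocation is at least three pages long; the
 * offset is an arbitrary example.
 *
 *      RTR0PTR pvR0;
 *      int rc = SUPR0PageMapKernel(pSession, pvR3, 2 * PAGE_SIZE /* offSub */, PAGE_SIZE /* cbSub */,
 *                                  0 /* fFlags */, &pvR0);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // pvR0 now addresses the third page of the ring-3 allocation; the mapping is
 *          // torn down together with the allocation (RTR0MemObjFree includes mappings).
 *      }
 */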
2897
2898
2899/**
2900 * Changes the page level protection of one or more pages previously allocated
2901 * by SUPR0PageAllocEx.
2902 *
2903 * @returns IPRT status code.
2904 * @param   pSession    The session the allocation is associated with.
2905 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
2906 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
2907 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
2908 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
2909 * @param offSub Where to start changing. Must be page aligned.
2910 * @param cbSub How much to change. Must be page aligned.
2911 * @param fProt The new page level protection, see RTMEM_PROT_*.
2912 */
2913SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
2914{
2915 int rc;
2916 PSUPDRVBUNDLE pBundle;
2917 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2918 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
2919 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
2920     LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt=%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
2921
2922 /*
2923     * Validate input.
2924 */
2925 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2926 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
2927 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2928 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2929 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
2930
2931 /*
2932 * Find the memory object.
2933 */
2934 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2935 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2936 {
2937 if (pBundle->cUsed > 0)
2938 {
2939 unsigned i;
2940 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2941 {
2942 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2943 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2944 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2945 || pvR3 == NIL_RTR3PTR)
2946                    && (   pvR0 == NIL_RTR0PTR
2947                        || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
2948                    && (   pvR3 == NIL_RTR3PTR
2949                        || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
2950 {
2951 if (pvR0 != NIL_RTR0PTR)
2952 hMemObjR0 = pBundle->aMem[i].MemObj;
2953 if (pvR3 != NIL_RTR3PTR)
2954 hMemObjR3 = pBundle->aMem[i].MapObjR3;
2955 break;
2956 }
2957 }
2958 }
2959 }
2960 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2961
2962 rc = VERR_INVALID_PARAMETER;
2963 if ( hMemObjR0 != NIL_RTR0MEMOBJ
2964 || hMemObjR3 != NIL_RTR0MEMOBJ)
2965 {
2966 /*
2967         * Do some further input validation before calling IPRT.
2968 */
2969        size_t cbMemObj = hMemObjR0 != NIL_RTR0MEMOBJ ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
2970 if ( offSub < cbMemObj
2971 && cbSub <= cbMemObj
2972 && offSub + cbSub <= cbMemObj)
2973 {
2974 rc = VINF_SUCCESS;
2975            if (hMemObjR3 != NIL_RTR0MEMOBJ)
2976                rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
2977            if (hMemObjR0 != NIL_RTR0MEMOBJ && RT_SUCCESS(rc))
2978 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
2979 }
2980 else
2981            SUPR0Printf("SUPR0PageProtect: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
2982
2983 }
2984 return rc;
2985
2986}
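
/*
 * Editor's sketch (not part of the original source): a minimal example of how
 * SUPR0PageProtect could be used to turn one page of a SUPR0PageAllocEx
 * allocation into a no-access guard page.  The helper name is made up and the
 * caller is assumed to already have the ring-3/ring-0 addresses returned by
 * SUPR0PageAllocEx; kept under #if 0 so it is never built.
 */
#if 0
static int supdrvExampleMakeGuardPage(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offGuard)
{
    /* Revoke all access to a single page so stray accesses fault immediately. */
    return SUPR0PageProtect(pSession, pvR3, pvR0, offGuard, PAGE_SIZE, RTMEM_PROT_NONE);
}
#endif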
2987
2988
2989
2990#ifdef RT_OS_WINDOWS
2991/**
2992 * Check if the pages were locked by SUPR0PageAlloc
2993 *
2994 * This function will be removed along with the lock/unlock hacks when
2995 * we've cleaned up the ring-3 code properly.
2996 *
2997 * @returns boolean
2998 * @param pSession The session to which the memory was allocated.
2999 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
3000 */
3001static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3002{
3003 PSUPDRVBUNDLE pBundle;
3004 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3005    LogFlow(("supdrvPageWasLockedByPageAlloc: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3006
3007 /*
3008 * Search for the address.
3009 */
3010 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3011 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3012 {
3013 if (pBundle->cUsed > 0)
3014 {
3015 unsigned i;
3016 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3017 {
3018 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3019 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3020 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3021 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
3022 {
3023 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3024 return true;
3025 }
3026 }
3027 }
3028 }
3029 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3030 return false;
3031}
3032
3033
3034/**
3035 * Get the physical addresses of memory allocated using SUPR0PageAllocEx().
3036 *
3037 * This function will be removed along with the lock/unlock hacks when
3038 * we've cleaned up the ring-3 code properly.
3039 *
3040 * @returns IPRT status code.
3041 * @param pSession The session to which the memory was allocated.
3042 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
3043 * @param cPages Number of pages in paPages
3044 * @param paPages Where to store the physical addresses.
3045 */
3046static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
3047{
3048 PSUPDRVBUNDLE pBundle;
3049 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3050 LogFlow(("supdrvPageGetPhys: pSession=%p pvR3=%p cPages=%#lx paPages=%p\n", pSession, (void *)pvR3, (long)cPages, paPages));
3051
3052 /*
3053 * Search for the address.
3054 */
3055 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3056 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3057 {
3058 if (pBundle->cUsed > 0)
3059 {
3060 unsigned i;
3061 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3062 {
3063 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3064 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3065 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3066 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
3067 {
3068 uint32_t iPage;
3069 size_t cMaxPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
3070 cPages = (uint32_t)RT_MIN(cMaxPages, cPages);
3071 for (iPage = 0; iPage < cPages; iPage++)
3072 paPages[iPage] = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
3073 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3074 return VINF_SUCCESS;
3075 }
3076 }
3077 }
3078 }
3079 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3080 return VERR_INVALID_PARAMETER;
3081}
3082#endif /* RT_OS_WINDOWS */
3083
3084
3085/**
3086 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
3087 *
3088 * @returns IPRT status code.
3089 * @param pSession The session owning the allocation.
3090 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
3091 * SUPR0PageAllocEx().
3092 */
3093SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3094{
3095 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3096 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3097 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
3098}
3099
3100
3101/**
3102 * Maps the GIP into userspace and/or gets the physical address of the GIP.
3103 *
3104 * @returns IPRT status code.
3105 * @param pSession Session to which the GIP mapping should belong.
3106 * @param ppGipR3 Where to store the address of the ring-3 mapping. (optional)
3107 * @param pHCPhysGip Where to store the physical address. (optional)
3108 *
3109 * @remark  There is no reference counting on the mapping, so any number of calls to this
3110 *          function counts globally as a single reference. One call to SUPR0GipUnmap() will
3111 *          unmap the GIP and remove the session as a GIP user.
3112 */
3113SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGip)
3114{
3115 int rc = VINF_SUCCESS;
3116 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3117 RTR3PTR pGip = NIL_RTR3PTR;
3118 RTHCPHYS HCPhys = NIL_RTHCPHYS;
3119 LogFlow(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGip=%p\n", pSession, ppGipR3, pHCPhysGip));
3120
3121 /*
3122 * Validate
3123 */
3124 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3125 AssertPtrNullReturn(ppGipR3, VERR_INVALID_POINTER);
3126 AssertPtrNullReturn(pHCPhysGip, VERR_INVALID_POINTER);
3127
3128 RTSemFastMutexRequest(pDevExt->mtxGip);
3129 if (pDevExt->pGip)
3130 {
3131 /*
3132 * Map it?
3133 */
3134 if (ppGipR3)
3135 {
3136 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
3137 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (RTR3PTR)-1, 0,
3138 RTMEM_PROT_READ, RTR0ProcHandleSelf());
3139 if (RT_SUCCESS(rc))
3140 {
3141 pGip = RTR0MemObjAddressR3(pSession->GipMapObjR3);
3142 rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
3143 }
3144 }
3145
3146 /*
3147 * Get physical address.
3148 */
3149 if (pHCPhysGip && !rc)
3150 HCPhys = pDevExt->HCPhysGip;
3151
3152 /*
3153 * Reference globally.
3154 */
3155 if (!pSession->fGipReferenced && !rc)
3156 {
3157 pSession->fGipReferenced = 1;
3158 pDevExt->cGipUsers++;
3159 if (pDevExt->cGipUsers == 1)
3160 {
3161                PSUPGLOBALINFOPAGE pGipR0 = pDevExt->pGip; /* (named differently from the ring-3 pGip above to avoid shadowing) */
3162                unsigned i;
3163
3164                LogFlow(("SUPR0GipMap: Resumes GIP updating\n"));
3165
3166                for (i = 0; i < RT_ELEMENTS(pGipR0->aCPUs); i++)
3167                    ASMAtomicXchgU32(&pGipR0->aCPUs[i].u32TransactionId, pGipR0->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
3168                ASMAtomicXchgU64(&pGipR0->u64NanoTSLastUpdateHz, 0);
3169
3170 rc = RTTimerStart(pDevExt->pGipTimer, 0);
3171 AssertRC(rc); rc = VINF_SUCCESS;
3172 }
3173 }
3174 }
3175 else
3176 {
3177 rc = SUPDRV_ERR_GENERAL_FAILURE;
3178 Log(("SUPR0GipMap: GIP is not available!\n"));
3179 }
3180 RTSemFastMutexRelease(pDevExt->mtxGip);
3181
3182 /*
3183 * Write returns.
3184 */
3185 if (pHCPhysGip)
3186 *pHCPhysGip = HCPhys;
3187 if (ppGipR3)
3188 *ppGipR3 = pGip;
3189
3190#ifdef DEBUG_DARWIN_GIP
3191 OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGip=%lx pGip=%p\n", rc, (unsigned long)HCPhys, (void *)pGip));
3192#else
3193 LogFlow(( "SUPR0GipMap: returns %d *pHCPhysGip=%lx pGip=%p\n", rc, (unsigned long)HCPhys, (void *)pGip));
3194#endif
3195 return rc;
3196}
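
/*
 * Editor's sketch (not part of the original source): typical use of
 * SUPR0GipMap / SUPR0GipUnmap from ring-0 code holding a valid session.  The
 * helper name is made up; kept under #if 0 so it is never built.
 */
#if 0
static int supdrvExampleUseGip(PSUPDRVSESSION pSession)
{
    RTR3PTR  GipR3  = NIL_RTR3PTR;
    RTHCPHYS HCPhys = NIL_RTHCPHYS;
    int rc = SUPR0GipMap(pSession, &GipR3, &HCPhys);
    if (RT_SUCCESS(rc))
    {
        /* ... hand GipR3 / HCPhys to the interested party ... */

        /* Drop the mapping and the session's GIP reference again. */
        rc = SUPR0GipUnmap(pSession);
    }
    return rc;
}
#endif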
3197
3198
3199/**
3200 * Unmaps any user mapping of the GIP and terminates all GIP access
3201 * from this session.
3202 *
3203 * @returns IPRT status code.
3204 * @param pSession Session to which the GIP mapping should belong.
3205 */
3206SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
3207{
3208 int rc = VINF_SUCCESS;
3209 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3210#ifdef DEBUG_DARWIN_GIP
3211 OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
3212 pSession,
3213 pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
3214 pSession->GipMapObjR3));
3215#else
3216 LogFlow(("SUPR0GipUnmap: pSession=%p\n", pSession));
3217#endif
3218 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3219
3220 RTSemFastMutexRequest(pDevExt->mtxGip);
3221
3222 /*
3223 * Unmap anything?
3224 */
3225 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
3226 {
3227 rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
3228 AssertRC(rc);
3229 if (RT_SUCCESS(rc))
3230 pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
3231 }
3232
3233 /*
3234 * Dereference global GIP.
3235 */
3236 if (pSession->fGipReferenced && !rc)
3237 {
3238 pSession->fGipReferenced = 0;
3239 if ( pDevExt->cGipUsers > 0
3240 && !--pDevExt->cGipUsers)
3241 {
3242 LogFlow(("SUPR0GipUnmap: Suspends GIP updating\n"));
3243 rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = VINF_SUCCESS;
3244 }
3245 }
3246
3247 RTSemFastMutexRelease(pDevExt->mtxGip);
3248
3249 return rc;
3250}
3251
3252
3253/**
3254 * Register a component factory with the support driver.
3255 *
3256 * This is currently restricted to kernel sessions only.
3257 *
3258 * @returns VBox status code.
3259 * @retval VINF_SUCCESS on success.
3260 * @retval VERR_NO_MEMORY if we're out of memory.
3261 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
3262 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
3263 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3264 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3265 *
3266 * @param pSession The SUPDRV session (must be a ring-0 session).
3267 * @param pFactory Pointer to the component factory registration structure.
3268 *
3269 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
3270 */
3271SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
3272{
3273 PSUPDRVFACTORYREG pNewReg;
3274 const char *psz;
3275 int rc;
3276
3277 /*
3278 * Validate parameters.
3279 */
3280 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3281 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
3282 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
3283 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
3284 psz = (const char *)memchr(pFactory->szName, '\0', sizeof(pFactory->szName));
3285 AssertReturn(psz, VERR_INVALID_PARAMETER);
3286
3287 /*
3288 * Allocate and initialize a new registration structure.
3289 */
3290 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
3291 if (pNewReg)
3292 {
3293 pNewReg->pNext = NULL;
3294 pNewReg->pFactory = pFactory;
3295 pNewReg->pSession = pSession;
3296 pNewReg->cchName = psz - &pFactory->szName[0];
3297
3298 /*
3299 * Add it to the tail of the list after checking for prior registration.
3300 */
3301 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3302 if (RT_SUCCESS(rc))
3303 {
3304 PSUPDRVFACTORYREG pPrev = NULL;
3305 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3306 while (pCur && pCur->pFactory != pFactory)
3307 {
3308 pPrev = pCur;
3309 pCur = pCur->pNext;
3310 }
3311 if (!pCur)
3312 {
3313 if (pPrev)
3314 pPrev->pNext = pNewReg;
3315 else
3316 pSession->pDevExt->pComponentFactoryHead = pNewReg;
3317 rc = VINF_SUCCESS;
3318 }
3319 else
3320 rc = VERR_ALREADY_EXISTS;
3321
3322 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3323 }
3324
3325 if (RT_FAILURE(rc))
3326 RTMemFree(pNewReg);
3327 }
3328 else
3329 rc = VERR_NO_MEMORY;
3330 return rc;
3331}
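
/*
 * Editor's sketch (not part of the original source): registering a component
 * factory from a kernel (ring-0) session.  The factory/helper names are made
 * up, the query callback prototype is inferred from the call site in
 * SUPR0ComponentQueryFactory below, and the member order in the initializer
 * is an assumption; kept under #if 0 so it is never built.
 */
#if 0
static DECLCALLBACK(void *) supdrvExampleQueryFactoryInterface(PCSUPDRVFACTORY pSupDrvFactory,
                                                               PSUPDRVSESSION pSession,
                                                               const char *pszInterfaceUuid)
{
    NOREF(pSupDrvFactory); NOREF(pSession); NOREF(pszInterfaceUuid);
    return NULL; /* this sketch exports no interfaces */
}

static SUPDRVFACTORY g_ExampleFactory =
{
    /* .szName = */                   "ExampleComponent",
    /* .pfnQueryFactoryInterface = */ supdrvExampleQueryFactoryInterface
};

static int supdrvExampleRegisterFactory(PSUPDRVSESSION pSession)
{
    /* Must be a kernel session, see the VERR_ACCESS_DENIED check above. */
    return SUPR0ComponentRegisterFactory(pSession, &g_ExampleFactory);
}
#endif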
3332
3333
3334/**
3335 * Deregister a component factory.
3336 *
3337 * @returns VBox status code.
3338 * @retval VINF_SUCCESS on success.
3339 * @retval VERR_NOT_FOUND if the factory wasn't registered.
3340 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
3341 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3342 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3343 *
3344 * @param pSession The SUPDRV session (must be a ring-0 session).
3345 * @param pFactory Pointer to the component factory registration structure
3346 * previously passed SUPR0ComponentRegisterFactory().
3347 *
3348 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
3349 */
3350SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
3351{
3352 int rc;
3353
3354 /*
3355 * Validate parameters.
3356 */
3357 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3358 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
3359 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
3360
3361 /*
3362 * Take the lock and look for the registration record.
3363 */
3364 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3365 if (RT_SUCCESS(rc))
3366 {
3367 PSUPDRVFACTORYREG pPrev = NULL;
3368 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3369 while (pCur && pCur->pFactory != pFactory)
3370 {
3371 pPrev = pCur;
3372 pCur = pCur->pNext;
3373 }
3374 if (pCur)
3375 {
3376 if (!pPrev)
3377 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
3378 else
3379 pPrev->pNext = pCur->pNext;
3380
3381 pCur->pNext = NULL;
3382 pCur->pFactory = NULL;
3383 pCur->pSession = NULL;
3384 rc = VINF_SUCCESS;
3385 }
3386 else
3387 rc = VERR_NOT_FOUND;
3388
3389 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3390
3391 RTMemFree(pCur);
3392 }
3393 return rc;
3394}
3395
3396
3397/**
3398 * Queries a component factory.
3399 *
3400 * @returns VBox status code.
3401 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3402 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3403 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
3404 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
3405 *
3406 * @param pSession The SUPDRV session.
3407 * @param pszName The name of the component factory.
3408 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
3409 * @param ppvFactoryIf Where to store the factory interface.
3410 */
3411SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
3412{
3413 const char *pszEnd;
3414 size_t cchName;
3415 int rc;
3416
3417 /*
3418 * Validate parameters.
3419 */
3420 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3421
3422 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
3423 pszEnd = memchr(pszName, '\0', RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
3424 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3425 cchName = pszEnd - pszName;
3426
3427 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
3428 pszEnd = memchr(pszInterfaceUuid, '\0', RTUUID_STR_LENGTH);
3429 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3430
3431 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
3432 *ppvFactoryIf = NULL;
3433
3434 /*
3435 * Take the lock and try all factories by this name.
3436 */
3437 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3438 if (RT_SUCCESS(rc))
3439 {
3440 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3441 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
3442 while (pCur)
3443 {
3444 if ( pCur->cchName == cchName
3445 && !memcmp(pCur->pFactory->szName, pszName, cchName))
3446 {
3447#ifdef RT_WITH_W64_UNWIND_HACK
3448 void *pvFactory = supdrvNtWrapQueryFactoryInterface((PFNRT)pCur->pFactory->pfnQueryFactoryInterface, pCur->pFactory, pSession, pszInterfaceUuid);
3449#else
3450 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
3451#endif
3452 if (pvFactory)
3453 {
3454 *ppvFactoryIf = pvFactory;
3455 rc = VINF_SUCCESS;
3456 break;
3457 }
3458 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
3459 }
3460
3461 /* next */
3462 pCur = pCur->pNext;
3463 }
3464
3465 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3466 }
3467 return rc;
3468}
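
/*
 * Editor's sketch (not part of the original source): looking up a factory
 * interface by component name and interface UUID string.  Both strings and
 * the helper name are made up for illustration; kept under #if 0 so it is
 * never built.
 */
#if 0
static int supdrvExampleQueryFactory(PSUPDRVSESSION pSession, void **ppvIf)
{
    return SUPR0ComponentQueryFactory(pSession, "ExampleComponent",
                                      "8d558e45-0f0a-4c5a-9e3b-0123456789ab" /* made-up UUID */,
                                      ppvIf);
}
#endif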
3469
3470
3471/**
3472 * Destructor for objects created by SUPSemEventCreate.
3473 *
3474 * @param pvObj The object handle.
3475 * @param pvUser1 The IPRT event handle.
3476 * @param pvUser2 NULL.
3477 */
3478static DECLCALLBACK(void) supR0SemEventDestructor(void *pvObj, void *pvUser1, void *pvUser2)
3479{
3480 Assert(pvUser2 == NULL);
3481 NOREF(pvObj);
3482 RTSemEventDestroy((RTSEMEVENT)pvUser1);
3483}
3484
3485
3486SUPDECL(int) SUPSemEventCreate(PSUPDRVSESSION pSession, PSUPSEMEVENT phEvent)
3487{
3488 int rc;
3489 RTSEMEVENT hEventReal;
3490
3491 /*
3492 * Input validation.
3493 */
3494 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3495 AssertPtrReturn(phEvent, VERR_INVALID_POINTER);
3496
3497 /*
3498 * Create the event semaphore object.
3499 */
3500 rc = RTSemEventCreate(&hEventReal);
3501 if (RT_SUCCESS(rc))
3502 {
3503 void *pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_SEM_EVENT, supR0SemEventDestructor, hEventReal, NULL);
3504 if (pvObj)
3505 {
3506 uint32_t h32;
3507 rc = RTHandleTableAllocWithCtx(pSession->hHandleTable, pvObj, SUPDRV_HANDLE_CTX_EVENT, &h32);
3508 if (RT_SUCCESS(rc))
3509 {
3510 *phEvent = (SUPSEMEVENT)(uintptr_t)h32;
3511 return VINF_SUCCESS;
3512 }
3513 SUPR0ObjRelease(pvObj, pSession);
3514 }
3515 else
3516 RTSemEventDestroy(hEventReal);
3517 }
3518 return rc;
3519}
3520
3521
3522SUPDECL(int) SUPSemEventClose(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent)
3523{
3524 uint32_t h32;
3525 PSUPDRVOBJ pObj;
3526
3527 /*
3528 * Input validation.
3529 */
3530 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3531 if (hEvent == NIL_SUPSEMEVENT)
3532 return VINF_SUCCESS;
3533 h32 = (uint32_t)(uintptr_t)hEvent;
3534 if (h32 != (uintptr_t)hEvent)
3535 return VERR_INVALID_HANDLE;
3536
3537 /*
3538 * Do the job.
3539 */
3540 pObj = (PSUPDRVOBJ)RTHandleTableFreeWithCtx(pSession->hHandleTable, h32, SUPDRV_HANDLE_CTX_EVENT);
3541 if (!pObj)
3542 return VERR_INVALID_HANDLE;
3543
3544 Assert(pObj->cUsage >= 2);
3545 SUPR0ObjRelease(pObj, pSession); /* The free call above. */
3546 return SUPR0ObjRelease(pObj, pSession); /* The handle table reference. */
3547}
3548
3549
3550SUPDECL(int) SUPSemEventSignal(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent)
3551{
3552 int rc;
3553 uint32_t h32;
3554 PSUPDRVOBJ pObj;
3555
3556 /*
3557 * Input validation.
3558 */
3559 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3560 h32 = (uint32_t)(uintptr_t)hEvent;
3561 if (h32 != (uintptr_t)hEvent)
3562 return VERR_INVALID_HANDLE;
3563 pObj = (PSUPDRVOBJ)RTHandleTableLookupWithCtx(pSession->hHandleTable, h32, SUPDRV_HANDLE_CTX_EVENT);
3564 if (!pObj)
3565 return VERR_INVALID_HANDLE;
3566
3567 /*
3568 * Do the job.
3569 */
3570 rc = RTSemEventSignal((RTSEMEVENT)pObj->pvUser1);
3571
3572 SUPR0ObjRelease(pObj, pSession);
3573 return rc;
3574}
3575
3576
3577SUPDECL(int) SUPSemEventWait(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent, uint32_t cMillies)
3578{
3579 int rc;
3580 uint32_t h32;
3581 PSUPDRVOBJ pObj;
3582
3583 /*
3584 * Input validation.
3585 */
3586 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3587 h32 = (uint32_t)(uintptr_t)hEvent;
3588 if (h32 != (uintptr_t)hEvent)
3589 return VERR_INVALID_HANDLE;
3590 pObj = (PSUPDRVOBJ)RTHandleTableLookupWithCtx(pSession->hHandleTable, h32, SUPDRV_HANDLE_CTX_EVENT);
3591 if (!pObj)
3592 return VERR_INVALID_HANDLE;
3593
3594 /*
3595 * Do the job.
3596 */
3597 rc = RTSemEventWait((RTSEMEVENT)pObj->pvUser1, cMillies);
3598
3599 SUPR0ObjRelease(pObj, pSession);
3600 return rc;
3601}
3602
3603
3604SUPDECL(int) SUPSemEventWaitNoResume(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent, uint32_t cMillies)
3605{
3606 int rc;
3607 uint32_t h32;
3608 PSUPDRVOBJ pObj;
3609
3610 /*
3611 * Input validation.
3612 */
3613 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3614 h32 = (uint32_t)(uintptr_t)hEvent;
3615 if (h32 != (uintptr_t)hEvent)
3616 return VERR_INVALID_HANDLE;
3617 pObj = (PSUPDRVOBJ)RTHandleTableLookupWithCtx(pSession->hHandleTable, h32, SUPDRV_HANDLE_CTX_EVENT);
3618 if (!pObj)
3619 return VERR_INVALID_HANDLE;
3620
3621 /*
3622 * Do the job.
3623 */
3624 rc = RTSemEventWaitNoResume((RTSEMEVENT)pObj->pvUser1, cMillies);
3625
3626 SUPR0ObjRelease(pObj, pSession);
3627 return rc;
3628}
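
/*
 * Editor's sketch (not part of the original source): the life cycle of a
 * single-release event semaphore as exposed by the SUPSemEvent* APIs above.
 * The helper name is made up; kept under #if 0 so it is never built.
 */
#if 0
static int supdrvExampleSemEvent(PSUPDRVSESSION pSession)
{
    SUPSEMEVENT hEvent = NIL_SUPSEMEVENT;
    int rc = SUPSemEventCreate(pSession, &hEvent);
    if (RT_SUCCESS(rc))
    {
        rc = SUPSemEventSignal(pSession, hEvent);
        if (RT_SUCCESS(rc))
            rc = SUPSemEventWait(pSession, hEvent, 100 /* ms */);
        SUPSemEventClose(pSession, hEvent); /* drops the handle table entry and the object reference */
    }
    return rc;
}
#endif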
3629
3630
3631/**
3632 * Destructor for objects created by SUPSemEventMultiCreate.
3633 *
3634 * @param pvObj The object handle.
3635 * @param pvUser1 The IPRT event handle.
3636 * @param pvUser2 NULL.
3637 */
3638static DECLCALLBACK(void) supR0SemEventMultiDestructor(void *pvObj, void *pvUser1, void *pvUser2)
3639{
3640 Assert(pvUser2 == NULL);
3641 NOREF(pvObj);
3642 RTSemEventMultiDestroy((RTSEMEVENTMULTI)pvUser1);
3643}
3644
3645
3646SUPDECL(int) SUPSemEventMultiCreate(PSUPDRVSESSION pSession, PSUPSEMEVENTMULTI phEventMulti)
3647{
3648 int rc;
3649 RTSEMEVENTMULTI hEventMultReal;
3650
3651 /*
3652 * Input validation.
3653 */
3654 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3655 AssertPtrReturn(phEventMulti, VERR_INVALID_POINTER);
3656
3657 /*
3658 * Create the event semaphore object.
3659 */
3660 rc = RTSemEventMultiCreate(&hEventMultReal);
3661 if (RT_SUCCESS(rc))
3662 {
3663 void *pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_SEM_EVENT_MULTI, supR0SemEventMultiDestructor, hEventMultReal, NULL);
3664 if (pvObj)
3665 {
3666 uint32_t h32;
3667 rc = RTHandleTableAllocWithCtx(pSession->hHandleTable, pvObj, SUPDRV_HANDLE_CTX_EVENT_MULTI, &h32);
3668 if (RT_SUCCESS(rc))
3669 {
3670 *phEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)h32;
3671 return VINF_SUCCESS;
3672 }
3673 SUPR0ObjRelease(pvObj, pSession);
3674 }
3675 else
3676 RTSemEventMultiDestroy(hEventMultReal);
3677 }
3678 return rc;
3679}
3680
3681
3682SUPDECL(int) SUPSemEventMultiClose(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti)
3683{
3684 uint32_t h32;
3685 PSUPDRVOBJ pObj;
3686
3687 /*
3688 * Input validation.
3689 */
3690 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3691 if (hEventMulti == NIL_SUPSEMEVENTMULTI)
3692 return VINF_SUCCESS;
3693 h32 = (uint32_t)(uintptr_t)hEventMulti;
3694 if (h32 != (uintptr_t)hEventMulti)
3695 return VERR_INVALID_HANDLE;
3696
3697 /*
3698 * Do the job.
3699 */
3700 pObj = (PSUPDRVOBJ)RTHandleTableFreeWithCtx(pSession->hHandleTable, h32, SUPDRV_HANDLE_CTX_EVENT_MULTI);
3701 if (!pObj)
3702 return VERR_INVALID_HANDLE;
3703
3704 Assert(pObj->cUsage >= 2);
3705 SUPR0ObjRelease(pObj, pSession); /* The free call above. */
3706 return SUPR0ObjRelease(pObj, pSession); /* The handle table reference. */
3707}
3708
3709
3710SUPDECL(int) SUPSemEventMultiSignal(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti)
3711{
3712 int rc;
3713 uint32_t h32;
3714 PSUPDRVOBJ pObj;
3715
3716 /*
3717 * Input validation.
3718 */
3719 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3720 h32 = (uint32_t)(uintptr_t)hEventMulti;
3721 if (h32 != (uintptr_t)hEventMulti)
3722 return VERR_INVALID_HANDLE;
3723 pObj = (PSUPDRVOBJ)RTHandleTableLookupWithCtx(pSession->hHandleTable, h32, SUPDRV_HANDLE_CTX_EVENT_MULTI);
3724 if (!pObj)
3725 return VERR_INVALID_HANDLE;
3726
3727 /*
3728 * Do the job.
3729 */
3730 rc = RTSemEventMultiSignal((RTSEMEVENTMULTI)pObj->pvUser1);
3731
3732 SUPR0ObjRelease(pObj, pSession);
3733 return rc;
3734}
3735
3736
3737SUPDECL(int) SUPSemEventMultiReset(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti)
3738{
3739 int rc;
3740 uint32_t h32;
3741 PSUPDRVOBJ pObj;
3742
3743 /*
3744 * Input validation.
3745 */
3746 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3747 h32 = (uint32_t)(uintptr_t)hEventMulti;
3748 if (h32 != (uintptr_t)hEventMulti)
3749 return VERR_INVALID_HANDLE;
3750 pObj = (PSUPDRVOBJ)RTHandleTableLookupWithCtx(pSession->hHandleTable, h32, SUPDRV_HANDLE_CTX_EVENT_MULTI);
3751 if (!pObj)
3752 return VERR_INVALID_HANDLE;
3753
3754 /*
3755 * Do the job.
3756 */
3757 rc = RTSemEventMultiReset((RTSEMEVENTMULTI)pObj->pvUser1);
3758
3759 SUPR0ObjRelease(pObj, pSession);
3760 return rc;
3761}
3762
3763
3764SUPDECL(int) SUPSemEventMultiWait(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti, uint32_t cMillies)
3765{
3766 int rc;
3767 uint32_t h32;
3768 PSUPDRVOBJ pObj;
3769
3770 /*
3771 * Input validation.
3772 */
3773 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3774 h32 = (uint32_t)(uintptr_t)hEventMulti;
3775 if (h32 != (uintptr_t)hEventMulti)
3776 return VERR_INVALID_HANDLE;
3777 pObj = (PSUPDRVOBJ)RTHandleTableLookupWithCtx(pSession->hHandleTable, h32, SUPDRV_HANDLE_CTX_EVENT_MULTI);
3778 if (!pObj)
3779 return VERR_INVALID_HANDLE;
3780
3781 /*
3782 * Do the job.
3783 */
3784 rc = RTSemEventMultiWait((RTSEMEVENTMULTI)pObj->pvUser1, cMillies);
3785
3786 SUPR0ObjRelease(pObj, pSession);
3787 return rc;
3788}
3789
3790
3791SUPDECL(int) SUPSemEventMultiWaitNoResume(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti, uint32_t cMillies)
3792{
3793 int rc;
3794 uint32_t h32;
3795 PSUPDRVOBJ pObj;
3796
3797 /*
3798 * Input validation.
3799 */
3800 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3801 h32 = (uint32_t)(uintptr_t)hEventMulti;
3802 if (h32 != (uintptr_t)hEventMulti)
3803 return VERR_INVALID_HANDLE;
3804 pObj = (PSUPDRVOBJ)RTHandleTableLookupWithCtx(pSession->hHandleTable, h32, SUPDRV_HANDLE_CTX_EVENT_MULTI);
3805 if (!pObj)
3806 return VERR_INVALID_HANDLE;
3807
3808 /*
3809 * Do the job.
3810 */
3811 rc = RTSemEventMultiWaitNoResume((RTSEMEVENTMULTI)pObj->pvUser1, cMillies);
3812
3813 SUPR0ObjRelease(pObj, pSession);
3814 return rc;
3815}
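
/*
 * Editor's sketch (not part of the original source): the corresponding
 * multiple-release event semaphore life cycle.  The helper name is made up;
 * kept under #if 0 so it is never built.
 */
#if 0
static int supdrvExampleSemEventMulti(PSUPDRVSESSION pSession)
{
    SUPSEMEVENTMULTI hEventMulti = NIL_SUPSEMEVENTMULTI;
    int rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
    if (RT_SUCCESS(rc))
    {
        rc = SUPSemEventMultiSignal(pSession, hEventMulti);      /* wakes all current and future waiters */
        if (RT_SUCCESS(rc))
            rc = SUPSemEventMultiWait(pSession, hEventMulti, 0); /* returns at once while signalled */
        if (RT_SUCCESS(rc))
            rc = SUPSemEventMultiReset(pSession, hEventMulti);   /* arm it again */
        SUPSemEventMultiClose(pSession, hEventMulti);
    }
    return rc;
}
#endif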
3816
3817
3818/**
3819 * Adds a memory object to the session.
3820 *
3821 * @returns IPRT status code.
3822 * @param pMem Memory tracking structure containing the
3823 * information to track.
3824 * @param pSession The session.
3825 */
3826static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
3827{
3828 PSUPDRVBUNDLE pBundle;
3829 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3830
3831 /*
3832 * Find free entry and record the allocation.
3833 */
3834 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3835 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3836 {
3837 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
3838 {
3839 unsigned i;
3840 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3841 {
3842 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
3843 {
3844 pBundle->cUsed++;
3845 pBundle->aMem[i] = *pMem;
3846 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3847 return VINF_SUCCESS;
3848 }
3849 }
3850 AssertFailed(); /* !!this can't be happening!!! */
3851 }
3852 }
3853 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3854
3855 /*
3856 * Need to allocate a new bundle.
3857 * Insert into the last entry in the bundle.
3858 */
3859 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
3860 if (!pBundle)
3861 return VERR_NO_MEMORY;
3862
3863 /* take last entry. */
3864 pBundle->cUsed++;
3865 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
3866
3867 /* insert into list. */
3868 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3869 pBundle->pNext = pSession->Bundle.pNext;
3870 pSession->Bundle.pNext = pBundle;
3871 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3872
3873 return VINF_SUCCESS;
3874}
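
/*
 * Editor's sketch (not part of the original source): how an allocation routine
 * in this file hands a new ring-0 memory object over to the per-session
 * tracking above.  Only the SUPDRVMEMREF fields referenced elsewhere in this
 * file are filled in, and the helper name is made up; kept under #if 0 so it
 * is never built.
 */
#if 0
static int supdrvExampleTrackPageAlloc(PSUPDRVSESSION pSession, RTR0MEMOBJ hMemObj, RTR0MEMOBJ hMapObjR3)
{
    SUPDRVMEMREF Mem;
    Mem.MemObj   = hMemObj;
    Mem.MapObjR3 = hMapObjR3;
    Mem.eType    = MEMREF_TYPE_PAGE;
    return supdrvMemAdd(&Mem, pSession);
}
#endif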
3875
3876
3877/**
3878 * Releases a memory object referenced by pointer and type.
3879 *
3880 * @returns IPRT status code.
3881 * @param pSession Session data.
3882 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
3883 * @param eType Memory type.
3884 */
3885static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
3886{
3887 PSUPDRVBUNDLE pBundle;
3888 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3889
3890 /*
3891 * Validate input.
3892 */
3893 if (!uPtr)
3894 {
3895 Log(("Illegal address %p\n", (void *)uPtr));
3896 return VERR_INVALID_PARAMETER;
3897 }
3898
3899 /*
3900 * Search for the address.
3901 */
3902 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3903 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3904 {
3905 if (pBundle->cUsed > 0)
3906 {
3907 unsigned i;
3908 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3909 {
3910 if ( pBundle->aMem[i].eType == eType
3911 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3912 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3913 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3914 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
3915 )
3916 {
3917 /* Make a copy of it and release it outside the spinlock. */
3918 SUPDRVMEMREF Mem = pBundle->aMem[i];
3919 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
3920 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
3921 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
3922 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3923
3924 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
3925 {
3926 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
3927 AssertRC(rc); /** @todo figure out how to handle this. */
3928 }
3929 if (Mem.MemObj != NIL_RTR0MEMOBJ)
3930 {
3931 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
3932 AssertRC(rc); /** @todo figure out how to handle this. */
3933 }
3934 return VINF_SUCCESS;
3935 }
3936 }
3937 }
3938 }
3939 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3940 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
3941 return VERR_INVALID_PARAMETER;
3942}
3943
3944
3945/**
3946 * Opens an image. If it's the first time the image is opened, the caller must upload
3947 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
3948 *
3949 * This is the 1st step of the loading.
3950 *
3951 * @returns IPRT status code.
3952 * @param pDevExt Device globals.
3953 * @param pSession Session data.
3954 * @param pReq The open request.
3955 */
3956static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
3957{
3958 PSUPDRVLDRIMAGE pImage;
3959 unsigned cb;
3960 void *pv;
3961 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
3962 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pReq->u.In.szName, pReq->u.In.cbImage));
3963
3964 /*
3965 * Check if we got an instance of the image already.
3966 */
3967 RTSemFastMutexRequest(pDevExt->mtxLdr);
3968 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3969 {
3970 if ( pImage->szName[cchName] == '\0'
3971 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
3972 {
3973 pImage->cUsage++;
3974 pReq->u.Out.pvImageBase = pImage->pvImage;
3975 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
3976 supdrvLdrAddUsage(pSession, pImage);
3977 RTSemFastMutexRelease(pDevExt->mtxLdr);
3978 return VINF_SUCCESS;
3979 }
3980 }
3981 /* (not found - add it!) */
3982
3983 /*
3984 * Allocate memory.
3985 */
3986 cb = pReq->u.In.cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
3987 pv = RTMemExecAlloc(cb);
3988 if (!pv)
3989 {
3990 RTSemFastMutexRelease(pDevExt->mtxLdr);
3991 Log(("supdrvIOCtl_LdrOpen: RTMemExecAlloc(%u) failed\n", cb));
3992 return VERR_NO_MEMORY;
3993 }
3994
3995 /*
3996 * Setup and link in the LDR stuff.
3997 */
3998 pImage = (PSUPDRVLDRIMAGE)pv;
3999 pImage->pvImage = RT_ALIGN_P(pImage + 1, 32);
4000 pImage->cbImage = pReq->u.In.cbImage;
4001 pImage->pfnModuleInit = NULL;
4002 pImage->pfnModuleTerm = NULL;
4003 pImage->pfnServiceReqHandler = NULL;
4004 pImage->uState = SUP_IOCTL_LDR_OPEN;
4005 pImage->cUsage = 1;
4006 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
4007
4008 pImage->pNext = pDevExt->pLdrImages;
4009 pDevExt->pLdrImages = pImage;
4010
4011 supdrvLdrAddUsage(pSession, pImage);
4012
4013 pReq->u.Out.pvImageBase = pImage->pvImage;
4014 pReq->u.Out.fNeedsLoading = true;
4015 RTSemFastMutexRelease(pDevExt->mtxLdr);
4016
4017#if defined(RT_OS_WINDOWS) && defined(DEBUG)
4018 SUPR0Printf("VBoxDrv: windbg> .reload /f %s=%#p\n", pImage->szName, pImage->pvImage);
4019#endif
4020 return VINF_SUCCESS;
4021}
4022
4023
4024/**
4025 * Loads the image bits.
4026 *
4027 * This is the 2nd step of the loading.
4028 *
4029 * @returns IPRT status code.
4030 * @param pDevExt Device globals.
4031 * @param pSession Session data.
4032 * @param pReq The request.
4033 */
4034static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
4035{
4036 PSUPDRVLDRUSAGE pUsage;
4037 PSUPDRVLDRIMAGE pImage;
4038 int rc;
4039 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImage));
4040
4041 /*
4042 * Find the ldr image.
4043 */
4044 RTSemFastMutexRequest(pDevExt->mtxLdr);
4045 pUsage = pSession->pLdrUsage;
4046 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
4047 pUsage = pUsage->pNext;
4048 if (!pUsage)
4049 {
4050 RTSemFastMutexRelease(pDevExt->mtxLdr);
4051 Log(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
4052 return VERR_INVALID_HANDLE;
4053 }
4054 pImage = pUsage->pImage;
4055 if (pImage->cbImage != pReq->u.In.cbImage)
4056 {
4057 RTSemFastMutexRelease(pDevExt->mtxLdr);
4058 Log(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pReq->u.In.cbImage));
4059 return VERR_INVALID_HANDLE;
4060 }
4061 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
4062 {
4063 unsigned uState = pImage->uState;
4064 RTSemFastMutexRelease(pDevExt->mtxLdr);
4065 if (uState != SUP_IOCTL_LDR_LOAD)
4066 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
4067 return SUPDRV_ERR_ALREADY_LOADED;
4068 }
4069 switch (pReq->u.In.eEPType)
4070 {
4071 case SUPLDRLOADEP_NOTHING:
4072 break;
4073
4074 case SUPLDRLOADEP_VMMR0:
4075 if ( !pReq->u.In.EP.VMMR0.pvVMMR0
4076 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryInt
4077 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryFast
4078 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryEx)
4079 {
4080 RTSemFastMutexRelease(pDevExt->mtxLdr);
4081 Log(("NULL pointer: pvVMMR0=%p pvVMMR0EntryInt=%p pvVMMR0EntryFast=%p pvVMMR0EntryEx=%p!\n",
4082 pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
4083 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
4084 return VERR_INVALID_PARAMETER;
4085 }
4086 /** @todo validate pReq->u.In.EP.VMMR0.pvVMMR0 against pvImage! */
4087 if ( (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryInt - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
4088 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryFast - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
4089 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryEx - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
4090 {
4091 RTSemFastMutexRelease(pDevExt->mtxLdr);
4092            Log(("Out of range (%p LB %#x): pvVMMR0EntryInt=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p!\n",
4093 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
4094 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
4095 return VERR_INVALID_PARAMETER;
4096 }
4097 break;
4098
4099 case SUPLDRLOADEP_SERVICE:
4100 if (!pReq->u.In.EP.Service.pfnServiceReq)
4101 {
4102 RTSemFastMutexRelease(pDevExt->mtxLdr);
4103 Log(("NULL pointer: pfnServiceReq=%p!\n", pReq->u.In.EP.Service.pfnServiceReq));
4104 return VERR_INVALID_PARAMETER;
4105 }
4106 if ((uintptr_t)pReq->u.In.EP.Service.pfnServiceReq - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
4107 {
4108 RTSemFastMutexRelease(pDevExt->mtxLdr);
4109                Log(("Out of range (%p LB %#x): pfnServiceReq=%p!\n",
4110                     pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.Service.pfnServiceReq));
4111 return VERR_INVALID_PARAMETER;
4112 }
4113 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
4114 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
4115 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
4116 {
4117 RTSemFastMutexRelease(pDevExt->mtxLdr);
4118 Log(("Out of range (%p LB %#x): apvReserved={%p,%p,%p} MBZ!\n",
4119 pImage->pvImage, pReq->u.In.cbImage,
4120 pReq->u.In.EP.Service.apvReserved[0],
4121 pReq->u.In.EP.Service.apvReserved[1],
4122 pReq->u.In.EP.Service.apvReserved[2]));
4123 return VERR_INVALID_PARAMETER;
4124 }
4125 break;
4126
4127 default:
4128 RTSemFastMutexRelease(pDevExt->mtxLdr);
4129 Log(("Invalid eEPType=%d\n", pReq->u.In.eEPType));
4130 return VERR_INVALID_PARAMETER;
4131 }
4132 if ( pReq->u.In.pfnModuleInit
4133 && (uintptr_t)pReq->u.In.pfnModuleInit - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
4134 {
4135 RTSemFastMutexRelease(pDevExt->mtxLdr);
4136 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
4137 pReq->u.In.pfnModuleInit, pImage->pvImage, pReq->u.In.cbImage));
4138 return VERR_INVALID_PARAMETER;
4139 }
4140 if ( pReq->u.In.pfnModuleTerm
4141 && (uintptr_t)pReq->u.In.pfnModuleTerm - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
4142 {
4143 RTSemFastMutexRelease(pDevExt->mtxLdr);
4144 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
4145 pReq->u.In.pfnModuleTerm, pImage->pvImage, pReq->u.In.cbImage));
4146 return VERR_INVALID_PARAMETER;
4147 }
4148
4149 /*
4150 * Copy the memory.
4151 */
4152 /* no need to do try/except as this is a buffered request. */
4153 memcpy(pImage->pvImage, &pReq->u.In.achImage[0], pImage->cbImage);
4154 pImage->uState = SUP_IOCTL_LDR_LOAD;
4155 pImage->pfnModuleInit = pReq->u.In.pfnModuleInit;
4156 pImage->pfnModuleTerm = pReq->u.In.pfnModuleTerm;
4157 pImage->offSymbols = pReq->u.In.offSymbols;
4158 pImage->cSymbols = pReq->u.In.cSymbols;
4159 pImage->offStrTab = pReq->u.In.offStrTab;
4160 pImage->cbStrTab = pReq->u.In.cbStrTab;
4161
4162 /*
4163 * Update any entry points.
4164 */
4165 switch (pReq->u.In.eEPType)
4166 {
4167 default:
4168 case SUPLDRLOADEP_NOTHING:
4169 rc = VINF_SUCCESS;
4170 break;
4171 case SUPLDRLOADEP_VMMR0:
4172 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
4173 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
4174 break;
4175 case SUPLDRLOADEP_SERVICE:
4176 pImage->pfnServiceReqHandler = pReq->u.In.EP.Service.pfnServiceReq;
4177 rc = VINF_SUCCESS;
4178 break;
4179 }
4180
4181 /*
4182 * On success call the module initialization.
4183 */
4184 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
4185 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
4186 {
4187 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
4188#ifdef RT_WITH_W64_UNWIND_HACK
4189 rc = supdrvNtWrapModuleInit((PFNRT)pImage->pfnModuleInit);
4190#else
4191 rc = pImage->pfnModuleInit();
4192#endif
4193 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
4194 supdrvLdrUnsetVMMR0EPs(pDevExt);
4195 }
4196
4197 if (rc)
4198 pImage->uState = SUP_IOCTL_LDR_OPEN;
4199
4200 RTSemFastMutexRelease(pDevExt->mtxLdr);
4201 return rc;
4202}
4203
4204
4205/**
4206 * Frees a previously loaded (prep'ed) image.
4207 *
4208 * @returns IPRT status code.
4209 * @param pDevExt Device globals.
4210 * @param pSession Session data.
4211 * @param pReq The request.
4212 */
4213static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
4214{
4215 int rc;
4216 PSUPDRVLDRUSAGE pUsagePrev;
4217 PSUPDRVLDRUSAGE pUsage;
4218 PSUPDRVLDRIMAGE pImage;
4219 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
4220
4221 /*
4222 * Find the ldr image.
4223 */
4224 RTSemFastMutexRequest(pDevExt->mtxLdr);
4225 pUsagePrev = NULL;
4226 pUsage = pSession->pLdrUsage;
4227 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
4228 {
4229 pUsagePrev = pUsage;
4230 pUsage = pUsage->pNext;
4231 }
4232 if (!pUsage)
4233 {
4234 RTSemFastMutexRelease(pDevExt->mtxLdr);
4235 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
4236 return VERR_INVALID_HANDLE;
4237 }
4238
4239 /*
4240 * Check if we can remove anything.
4241 */
4242 rc = VINF_SUCCESS;
4243 pImage = pUsage->pImage;
4244 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
4245 {
4246 /*
4247 * Check if there are any objects with destructors in the image, if
4248 * so leave it for the session cleanup routine so we get a chance to
4249 * clean things up in the right order and not leave them all dangling.
4250 */
4251 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
4252 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
4253 if (pImage->cUsage <= 1)
4254 {
4255 PSUPDRVOBJ pObj;
4256 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
4257 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
4258 {
4259 rc = VERR_DANGLING_OBJECTS;
4260 break;
4261 }
4262 }
4263 else
4264 {
4265 PSUPDRVUSAGE pGenUsage;
4266 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
4267 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
4268 {
4269 rc = VERR_DANGLING_OBJECTS;
4270 break;
4271 }
4272 }
4273 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
4274 if (rc == VINF_SUCCESS)
4275 {
4276 /* unlink it */
4277 if (pUsagePrev)
4278 pUsagePrev->pNext = pUsage->pNext;
4279 else
4280 pSession->pLdrUsage = pUsage->pNext;
4281
4282 /* free it */
4283 pUsage->pImage = NULL;
4284 pUsage->pNext = NULL;
4285 RTMemFree(pUsage);
4286
4287 /*
4288             * Dereference the image.
4289 */
4290 if (pImage->cUsage <= 1)
4291 supdrvLdrFree(pDevExt, pImage);
4292 else
4293 pImage->cUsage--;
4294 }
4295 else
4296 {
4297 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
4298 rc = VINF_SUCCESS; /** @todo BRANCH-2.1: remove this after branching. */
4299 }
4300 }
4301 else
4302 {
4303 /*
4304 * Dereference both image and usage.
4305 */
4306 pImage->cUsage--;
4307 pUsage->cUsage--;
4308 }
4309
4310 RTSemFastMutexRelease(pDevExt->mtxLdr);
4311 return rc;
4312}
4313
4314
4315/**
4316 * Gets the address of a symbol in an open image.
4317 *
4318 * @returns IPRT status code.
4320 * @param pDevExt Device globals.
4321 * @param pSession Session data.
4322 * @param pReq The request buffer.
4323 */
4324static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
4325{
4326 PSUPDRVLDRIMAGE pImage;
4327 PSUPDRVLDRUSAGE pUsage;
4328 uint32_t i;
4329 PSUPLDRSYM paSyms;
4330 const char *pchStrings;
4331 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
4332 void *pvSymbol = NULL;
4333 int rc = VERR_GENERAL_FAILURE;
4334 Log3(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
4335
4336 /*
4337 * Find the ldr image.
4338 */
4339 RTSemFastMutexRequest(pDevExt->mtxLdr);
4340 pUsage = pSession->pLdrUsage;
4341 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
4342 pUsage = pUsage->pNext;
4343 if (!pUsage)
4344 {
4345 RTSemFastMutexRelease(pDevExt->mtxLdr);
4346 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
4347 return VERR_INVALID_HANDLE;
4348 }
4349 pImage = pUsage->pImage;
4350 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
4351 {
4352 unsigned uState = pImage->uState;
4353 RTSemFastMutexRelease(pDevExt->mtxLdr);
4354 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
4355 return VERR_ALREADY_LOADED;
4356 }
4357
4358 /*
4359 * Search the symbol strings.
4360 */
4361 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
4362 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
4363 for (i = 0; i < pImage->cSymbols; i++)
4364 {
4365 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
4366 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
4367 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
4368 {
4369 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
4370 rc = VINF_SUCCESS;
4371 break;
4372 }
4373 }
4374 RTSemFastMutexRelease(pDevExt->mtxLdr);
4375 pReq->u.Out.pvSymbol = pvSymbol;
4376 return rc;
4377}
4378
4379
4380/**
4381 * Gets the address of a symbol in an open image or the support driver.
4382 *
4383 * @returns VINF_SUCCESS on success.
4384 * @returns VERR_* on failure.
4385 * @param pDevExt Device globals.
4386 * @param pSession Session data.
4387 * @param pReq The request buffer.
4388 */
4389static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
4390{
4391 int rc = VINF_SUCCESS;
4392 const char *pszSymbol = pReq->u.In.pszSymbol;
4393 const char *pszModule = pReq->u.In.pszModule;
4394 size_t cbSymbol;
4395 char const *pszEnd;
4396 uint32_t i;
4397
4398 /*
4399 * Input validation.
4400 */
4401 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
4402 pszEnd = (char *)memchr(pszSymbol, '\0', 512);
4403 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4404 cbSymbol = pszEnd - pszSymbol + 1;
4405
4406 if (pszModule)
4407 {
4408 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
4409 pszEnd = (char *)memchr(pszModule, '\0', 64);
4410 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4411 }
4412 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
4413
4414
4415 if ( !pszModule
4416 || !strcmp(pszModule, "SupDrv"))
4417 {
4418 /*
4419 * Search the support driver export table.
4420 */
4421 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
4422 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
4423 {
4424 pReq->u.Out.pfnSymbol = g_aFunctions[i].pfn;
4425 break;
4426 }
4427 }
4428 else
4429 {
4430 /*
4431 * Find the loader image.
4432 */
4433 PSUPDRVLDRIMAGE pImage;
4434
4435 RTSemFastMutexRequest(pDevExt->mtxLdr);
4436
4437 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
4438 if (!strcmp(pImage->szName, pszModule))
4439 break;
4440 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
4441 {
4442 /*
4443 * Search the symbol strings.
4444 */
4445 const char *pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
4446 PCSUPLDRSYM paSyms = (PCSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
4447 for (i = 0; i < pImage->cSymbols; i++)
4448 {
4449 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
4450 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
4451 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
4452 {
4453 /*
4454 * Found it! Calc the symbol address and add a reference to the module.
4455 */
4456 pReq->u.Out.pfnSymbol = (PFNRT)((uint8_t *)pImage->pvImage + paSyms[i].offSymbol);
4457 rc = supdrvLdrAddUsage(pSession, pImage);
4458 break;
4459 }
4460 }
4461 }
4462 else
4463 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
4464
4465 RTSemFastMutexRelease(pDevExt->mtxLdr);
4466 }
4467 return rc;
4468}
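
/*
 * Editor's sketch (not part of the original source): how an IDC client could
 * fill in a SUPDRVIDCREQGETSYM request to resolve a symbol from the support
 * driver's own export table.  Only the fields consumed by
 * supdrvIDC_LdrGetSymbol above are initialized; a real client would also set
 * up the request header.  The helper name is made up and "SUPR0Printf" is
 * assumed to be listed in g_aFunctions; kept under #if 0 so it is never built.
 */
#if 0
static int supdrvExampleIdcGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PFNRT *ppfn)
{
    SUPDRVIDCREQGETSYM Req;
    int rc;
    Req.u.In.pszModule = NULL;           /* NULL or "SupDrv" selects the driver's export table */
    Req.u.In.pszSymbol = "SUPR0Printf";
    rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, &Req);
    if (RT_SUCCESS(rc))
        *ppfn = Req.u.Out.pfnSymbol;
    return rc;
}
#endif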
4469
4470
4471/**
4472 * Updates the VMMR0 entry point pointers.
4473 *
4474 * @returns IPRT status code.
4475 * @param pDevExt Device globals.
4476 * @param pSession Session data.
4477 * @param   pvVMMR0         VMMR0 image handle.
4479 * @param pvVMMR0EntryFast VMMR0EntryFast address.
4480 * @param pvVMMR0EntryEx VMMR0EntryEx address.
4481 * @remark Caller must own the loader mutex.
4482 */
4483static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
4484{
4485 int rc = VINF_SUCCESS;
4486    LogFlow(("supdrvLdrSetVMMR0EPs: pvVMMR0=%p pvVMMR0EntryInt=%p\n", pvVMMR0, pvVMMR0EntryInt));
4487
4488
4489 /*
4490 * Check if not yet set.
4491 */
4492 if (!pDevExt->pvVMMR0)
4493 {
4494 pDevExt->pvVMMR0 = pvVMMR0;
4495 pDevExt->pfnVMMR0EntryInt = pvVMMR0EntryInt;
4496 pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
4497 pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
4498 }
4499 else
4500 {
4501 /*
4502 * Return failure or success depending on whether the values match or not.
4503 */
4504 if ( pDevExt->pvVMMR0 != pvVMMR0
4505 || (void *)pDevExt->pfnVMMR0EntryInt != pvVMMR0EntryInt
4506 || (void *)pDevExt->pfnVMMR0EntryFast != pvVMMR0EntryFast
4507 || (void *)pDevExt->pfnVMMR0EntryEx != pvVMMR0EntryEx)
4508 {
4509 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
4510 rc = VERR_INVALID_PARAMETER;
4511 }
4512 }
4513 return rc;
4514}
4515
4516
4517/**
4518 * Unsets the VMMR0 entry points installed by supdrvLdrSetVMMR0EPs.
4519 *
4520 * @param pDevExt Device globals.
4521 */
4522static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
4523{
4524 pDevExt->pvVMMR0 = NULL;
4525 pDevExt->pfnVMMR0EntryInt = NULL;
4526 pDevExt->pfnVMMR0EntryFast = NULL;
4527 pDevExt->pfnVMMR0EntryEx = NULL;
4528}
4529
4530
4531/**
4532 * Adds a usage reference in the specified session of an image.
4533 *
4534 * Called while owning the loader semaphore.
4535 *
4536 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
4537 * @param pSession Session in question.
4538 * @param pImage Image which the session is using.
4539 */
4540static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
4541{
4542 PSUPDRVLDRUSAGE pUsage;
4543 LogFlow(("supdrvLdrAddUsage: pImage=%p\n", pImage));
4544
4545 /*
4546 * Referenced it already?
4547 */
4548 pUsage = pSession->pLdrUsage;
4549 while (pUsage)
4550 {
4551 if (pUsage->pImage == pImage)
4552 {
4553 pUsage->cUsage++;
4554 return VINF_SUCCESS;
4555 }
4556 pUsage = pUsage->pNext;
4557 }
4558
4559 /*
4560 * Allocate new usage record.
4561 */
4562 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
4563 AssertReturn(pUsage, VERR_NO_MEMORY);
4564 pUsage->cUsage = 1;
4565 pUsage->pImage = pImage;
4566 pUsage->pNext = pSession->pLdrUsage;
4567 pSession->pLdrUsage = pUsage;
4568 return VINF_SUCCESS;
4569}
4570
4571
4572/**
4573 * Frees a load image.
4574 *
4575 * @param pDevExt Pointer to device extension.
4576 * @param   pImage      Pointer to the image we're about to free.
4577 *                      This image must exist!
4578 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
4579 */
4580static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
4581{
4582 PSUPDRVLDRIMAGE pImagePrev;
4583 LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));
4584
4585    /* find it - argh, should've used a doubly linked list. */
4586 Assert(pDevExt->pLdrImages);
4587 pImagePrev = NULL;
4588 if (pDevExt->pLdrImages != pImage)
4589 {
4590 pImagePrev = pDevExt->pLdrImages;
4591 while (pImagePrev->pNext != pImage)
4592 pImagePrev = pImagePrev->pNext;
4593 Assert(pImagePrev->pNext == pImage);
4594 }
4595
4596 /* unlink */
4597 if (pImagePrev)
4598 pImagePrev->pNext = pImage->pNext;
4599 else
4600 pDevExt->pLdrImages = pImage->pNext;
4601
4602    /* if this is VMMR0.r0, unset its entry point pointers. */
4603 if (pDevExt->pvVMMR0 == pImage->pvImage)
4604 supdrvLdrUnsetVMMR0EPs(pDevExt);
4605
4606 /* check for objects with destructors in this image. (Shouldn't happen.) */
4607 if (pDevExt->pObjs)
4608 {
4609 unsigned cObjs = 0;
4610 PSUPDRVOBJ pObj;
4611 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
4612 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
4613 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
4614 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
4615 {
4616 pObj->pfnDestructor = NULL;
4617 cObjs++;
4618 }
4619 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
4620 if (cObjs)
4621 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
4622 }
4623
4624 /* call termination function if fully loaded. */
4625 if ( pImage->pfnModuleTerm
4626 && pImage->uState == SUP_IOCTL_LDR_LOAD)
4627 {
4628        LogFlow(("supdrvLdrFree: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
4629#ifdef RT_WITH_W64_UNWIND_HACK
4630 supdrvNtWrapModuleTerm(pImage->pfnModuleTerm);
4631#else
4632 pImage->pfnModuleTerm();
4633#endif
4634 }
4635
4636 /* free the image */
4637 pImage->cUsage = 0;
4638    pImage->pNext  = NULL;
4639 pImage->uState = SUP_IOCTL_LDR_FREE;
4640 RTMemExecFree(pImage);
4641}
4642
4643
4644/**
4645 * Implements the service call request.
4646 *
4647 * @returns VBox status code.
4648 * @param pDevExt The device extension.
4649 * @param pSession The calling session.
4650 * @param pReq The request packet, valid.
4651 */
4652static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
4653{
4654#if !defined(RT_OS_WINDOWS) || defined(DEBUG)
4655 int rc;
4656
4657 /*
4658 * Find the module first in the module referenced by the calling session.
4659 */
4660 rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
4661 if (RT_SUCCESS(rc))
4662 {
4663 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
4664 PSUPDRVLDRUSAGE pUsage;
4665
4666 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
4667 if ( pUsage->pImage->pfnServiceReqHandler
4668 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
4669 {
4670 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
4671 break;
4672 }
4673 RTSemFastMutexRelease(pDevExt->mtxLdr);
4674
4675 if (pfnServiceReqHandler)
4676 {
4677 /*
4678 * Call it.
4679 */
4680 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
4681#ifdef RT_WITH_W64_UNWIND_HACK
4682 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
4683#else
4684 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
4685#endif
4686 else
4687#ifdef RT_WITH_W64_UNWIND_HACK
4688 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation,
4689 pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
4690#else
4691 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
4692#endif
4693 }
4694 else
4695 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
4696 }
4697
4698 /* log it */
4699 if ( RT_FAILURE(rc)
4700 && rc != VERR_INTERRUPTED
4701 && rc != VERR_TIMEOUT)
4702 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4703 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4704 else
4705 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4706 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4707 return rc;
4708#else /* RT_OS_WINDOWS && !DEBUG */
4709 return VERR_NOT_IMPLEMENTED;
4710#endif /* RT_OS_WINDOWS && !DEBUG */
4711}
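
/*
 * Editor's sketch (not part of the original source): the shape of the service
 * request handler a loadable module exports when it registers a
 * SUPLDRLOADEP_SERVICE entry point (see supdrvIOCtl_LdrLoad).  The parameter
 * list is inferred from the call sites above and the operation numbers are
 * made up; kept under #if 0 so it is never built.
 */
#if 0
static DECLCALLBACK(int) supdrvExampleServiceReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
                                                        uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
{
    NOREF(pSession); NOREF(u64Arg); NOREF(pReqHdr);
    switch (uOperation)
    {
        case 0: /* hypothetical no-op operation */
            return VINF_SUCCESS;
        default:
            return VERR_NOT_SUPPORTED;
    }
}
#endif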
4712
4713
4714/**
4715 * Implements the logger settings request.
4716 *
4717 * @returns VBox status code.
4718 * @param pDevExt The device extension.
4719 * @param pSession The caller's session.
4720 * @param pReq The request.
4721 */
4722static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq)
4723{
4724 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
4725 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
4726 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
4727 PRTLOGGER pLogger = NULL;
4728 int rc;
4729
4730 /*
4731 * Some further validation.
4732 */
4733 switch (pReq->u.In.fWhat)
4734 {
4735 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
4736 case SUPLOGGERSETTINGS_WHAT_CREATE:
4737 break;
4738
4739 case SUPLOGGERSETTINGS_WHAT_DESTROY:
4740 if (*pszGroup || *pszFlags || *pszDest)
4741 return VERR_INVALID_PARAMETER;
4742 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
4743 return VERR_ACCESS_DENIED;
4744 break;
4745
4746 default:
4747 return VERR_INTERNAL_ERROR;
4748 }
4749
4750 /*
4751 * Get the logger.
4752 */
4753 switch (pReq->u.In.fWhich)
4754 {
4755 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4756 pLogger = RTLogGetDefaultInstance();
4757 break;
4758
4759 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4760 pLogger = RTLogRelDefaultInstance();
4761 break;
4762
4763 default:
4764 return VERR_INTERNAL_ERROR;
4765 }
4766
4767 /*
4768 * Do the job.
4769 */
4770 switch (pReq->u.In.fWhat)
4771 {
4772 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
4773 if (pLogger)
4774 {
4775 rc = RTLogFlags(pLogger, pszFlags);
4776 if (RT_SUCCESS(rc))
4777 rc = RTLogGroupSettings(pLogger, pszGroup);
4778 NOREF(pszDest);
4779 }
4780 else
4781 rc = VERR_NOT_FOUND;
4782 break;
4783
4784 case SUPLOGGERSETTINGS_WHAT_CREATE:
4785 {
4786 if (pLogger)
4787 rc = VERR_ALREADY_EXISTS;
4788 else
4789 {
4790 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
4791
4792 rc = RTLogCreate(&pLogger,
4793 0 /* fFlags */,
4794 pszGroup,
4795 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
4796 ? "VBOX_LOG"
4797 : "VBOX_RELEASE_LOG",
4798 RT_ELEMENTS(s_apszGroups),
4799 s_apszGroups,
4800 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
4801 NULL);
4802 if (RT_SUCCESS(rc))
4803 {
4804 rc = RTLogFlags(pLogger, pszFlags);
4805 NOREF(pszDest);
4806 if (RT_SUCCESS(rc))
4807 {
4808 switch (pReq->u.In.fWhich)
4809 {
4810 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4811 pLogger = RTLogSetDefaultInstance(pLogger);
4812 break;
4813 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4814 pLogger = RTLogRelSetDefaultInstance(pLogger);
4815 break;
4816 }
4817 }
4818 RTLogDestroy(pLogger);
4819 }
4820 }
4821 break;
4822 }
4823
4824 case SUPLOGGERSETTINGS_WHAT_DESTROY:
4825 switch (pReq->u.In.fWhich)
4826 {
4827 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4828 pLogger = RTLogSetDefaultInstance(NULL);
4829 break;
4830 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4831 pLogger = RTLogRelSetDefaultInstance(NULL);
4832 break;
4833 }
4834 rc = RTLogDestroy(pLogger);
4835 break;
4836
4837 default:
4838 {
4839 rc = VERR_INTERNAL_ERROR;
4840 break;
4841 }
4842 }
4843
4844 return rc;
4845}
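
/*
 * Illustrative only: the three strings of a SUPLOGGERSETTINGS request are packed
 * into szStrings and addressed via offsets, e.g. (hypothetical values):
 *
 * @code
 *  //  szStrings      = "all\0" "enabled msecs\0" "\0"
 *  //  offGroups      = 0   -> "all"
 *  //  offFlags       = 4   -> "enabled msecs"
 *  //  offDestination = 18  -> ""   (currently unused by the handler above)
 * @endcode
 *
 * The handler above only resolves these offsets and feeds the strings to
 * RTLogGroupSettings / RTLogFlags.
 */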
4846
4847
4848/**
4849 * Gets the paging mode of the current CPU.
4850 *
4851 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
4852 */
4853SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
4854{
4855 SUPPAGINGMODE enmMode;
4856
4857 RTR0UINTREG cr0 = ASMGetCR0();
4858 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
4859 enmMode = SUPPAGINGMODE_INVALID;
4860 else
4861 {
4862 RTR0UINTREG cr4 = ASMGetCR4();
4863 uint32_t fNXEPlusLMA = 0;
4864 if (cr4 & X86_CR4_PAE)
4865 {
4866 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
4867 if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
4868 {
4869 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
4870 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
4871 fNXEPlusLMA |= RT_BIT(0);
4872 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
4873 fNXEPlusLMA |= RT_BIT(1);
4874 }
4875 }
4876
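        /* fNXEPlusLMA: bit 0 is set when EFER.NXE is active, bit 1 when EFER.LMA
           (long mode) is active; together with CR4.PAE/PGE this picks the mode below. */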
4877 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
4878 {
4879 case 0:
4880 enmMode = SUPPAGINGMODE_32_BIT;
4881 break;
4882
4883 case X86_CR4_PGE:
4884 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
4885 break;
4886
4887 case X86_CR4_PAE:
4888 enmMode = SUPPAGINGMODE_PAE;
4889 break;
4890
4891 case X86_CR4_PAE | RT_BIT(0):
4892 enmMode = SUPPAGINGMODE_PAE_NX;
4893 break;
4894
4895 case X86_CR4_PAE | X86_CR4_PGE:
4896 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4897 break;
4898
4899 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4900 enmMode = SUPPAGINGMODE_PAE_GLOBAL_NX;
4901 break;
4902
4903 case RT_BIT(1) | X86_CR4_PAE:
4904 enmMode = SUPPAGINGMODE_AMD64;
4905 break;
4906
4907 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
4908 enmMode = SUPPAGINGMODE_AMD64_NX;
4909 break;
4910
4911 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
4912 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
4913 break;
4914
4915 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4916 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4917 break;
4918
4919 default:
4920 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4921 enmMode = SUPPAGINGMODE_INVALID;
4922 break;
4923 }
4924 }
4925 return enmMode;
4926}
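
/*
 * Minimal usage sketch (illustrative only): callers typically fetch the mode once
 * and branch on it, for instance to tell whether no-execute is in effect:
 *
 * @code
 *  SUPPAGINGMODE enmMode = SUPR0GetPagingMode();
 *  bool fNX = enmMode == SUPPAGINGMODE_PAE_NX
 *          || enmMode == SUPPAGINGMODE_PAE_GLOBAL_NX
 *          || enmMode == SUPPAGINGMODE_AMD64_NX
 *          || enmMode == SUPPAGINGMODE_AMD64_GLOBAL_NX;
 * @endcode
 */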
4927
4928
4929/**
4930 * Enables or disables hardware virtualization extensions using native OS APIs.
4931 *
4932 * @returns VBox status code.
4933 * @retval VINF_SUCCESS on success.
4934 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4935 *
4936 * @param fEnable Whether to enable or disable.
4937 */
4938SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4939{
4940#ifdef RT_OS_DARWIN
4941 return supdrvOSEnableVTx(fEnable);
4942#else
4943 return VERR_NOT_SUPPORTED;
4944#endif
4945}
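
/*
 * Illustrative only: a typical caller tries the native API first and falls back
 * to handling VT-x itself when the host OS doesn't arbitrate it:
 *
 * @code
 *  int rc = SUPR0EnableVTx(true);
 *  if (rc == VERR_NOT_SUPPORTED)
 *  {
 *      // The OS doesn't manage VT-x; enable it ourselves (not shown).
 *  }
 *  else if (RT_FAILURE(rc))
 *      return rc; // The OS manages VT-x but refused to enable it.
 * @endcode
 */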
4946
4947
4948/**
4949 * Creates the GIP.
4950 *
4951 * @returns VBox status code.
4952 * @param pDevExt Instance data. GIP stuff may be updated.
4953 */
4954static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
4955{
4956 PSUPGLOBALINFOPAGE pGip;
4957 RTHCPHYS HCPhysGip;
4958 uint32_t u32SystemResolution;
4959 uint32_t u32Interval;
4960 int rc;
4961
4962 LogFlow(("supdrvGipCreate:\n"));
4963
4964 /* assert order */
4965 Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
4966 Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
4967 Assert(!pDevExt->pGipTimer);
4968
4969 /*
4970 * Allocate a suitable page with a default kernel mapping.
4971 */
4972 rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
4973 if (RT_FAILURE(rc))
4974 {
4975 OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
4976 return rc;
4977 }
4978 pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
4979 HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);
4980
4981#if 0 /** @todo Disabled this as we didn't use to do it before and it causes unnecessary stress on laptops.
4982 * It only applies to Windows and should probably be revisited later, if possible made part of the
4983 * timer code (return min granularity in RTTimerGetSystemGranularity and set it in RTTimerStart). */
4984 /*
4985 * Try bump up the system timer resolution.
4986 * The more interrupts the better...
4987 */
4988 if ( RT_SUCCESS(RTTimerRequestSystemGranularity( 488281 /* 2048 HZ */, &u32SystemResolution))
4989 || RT_SUCCESS(RTTimerRequestSystemGranularity( 500000 /* 2000 HZ */, &u32SystemResolution))
4990 || RT_SUCCESS(RTTimerRequestSystemGranularity( 976563 /* 1024 HZ */, &u32SystemResolution))
4991 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
4992 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1953125 /* 512 HZ */, &u32SystemResolution))
4993 || RT_SUCCESS(RTTimerRequestSystemGranularity( 2000000 /* 500 HZ */, &u32SystemResolution))
4994 || RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /* 256 HZ */, &u32SystemResolution))
4995 || RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /* 250 HZ */, &u32SystemResolution))
4996 || RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /* 128 HZ */, &u32SystemResolution))
4997 || RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /* 100 HZ */, &u32SystemResolution))
4998 || RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /* 64 HZ */, &u32SystemResolution))
4999 || RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /* 32 HZ */, &u32SystemResolution))
5000 )
5001 {
5002 Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
5003 pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
5004 }
5005#endif
5006
5007 /*
5008 * Find a reasonable update interval and initialize the structure.
5009 */
5010 u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
5011 while (u32Interval < 10000000 /* 10 ms */)
5012 u32Interval += u32SystemResolution;
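    /* E.g. with a 1 ms (1,000,000 ns) granularity the loop stops at exactly 10 ms,
       i.e. a 100 Hz update rate; a coarser 15.625 ms granularity gives 64 Hz. */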
5013
5014 supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);
5015
5016 /*
5017 * Create the timer.
5018 * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
5019 */
5020 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
5021 {
5022 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL, supdrvGipAsyncTimer, pDevExt);
5023 if (rc == VERR_NOT_SUPPORTED)
5024 {
5025 OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
5026 pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
5027 }
5028 }
5029 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
5030 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipSyncTimer, pDevExt);
5031 if (RT_SUCCESS(rc))
5032 {
5033 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
5034 rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
5035 if (RT_SUCCESS(rc))
5036 {
5037 /*
5038 * We're good.
5039 */
5040 dprintf(("supdrvGipCreate: %ld ns interval.\n", (long)u32Interval));
5041 return VINF_SUCCESS;
5042 }
5043
5044 OSDBGPRINT(("supdrvGipCreate: failed to register MP event notification. rc=%d\n", rc));
5045 }
5046 else
5047 {
5048 OSDBGPRINT(("supdrvGipCreate: failed to create GIP timer at %ld ns interval. rc=%d\n", (long)u32Interval, rc));
5049 Assert(!pDevExt->pGipTimer);
5050 }
5051 supdrvGipDestroy(pDevExt);
5052 return rc;
5053}
5054
5055
5056/**
5057 * Terminates the GIP.
5058 *
5059 * @param pDevExt Instance data. GIP stuff may be updated.
5060 */
5061static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
5062{
5063 int rc;
5064#ifdef DEBUG_DARWIN_GIP
5065 OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
5066 pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
5067 pDevExt->pGipTimer, pDevExt->GipMemObj));
5068#endif
5069
5070 /*
5071 * Invalidate the GIP data.
5072 */
5073 if (pDevExt->pGip)
5074 {
5075 supdrvGipTerm(pDevExt->pGip);
5076 pDevExt->pGip = NULL;
5077 }
5078
5079 /*
5080 * Destroy the timer and free the GIP memory object.
5081 */
5082 if (pDevExt->pGipTimer)
5083 {
5084 rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
5085 pDevExt->pGipTimer = NULL;
5086 }
5087
5088 if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
5089 {
5090 rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
5091 pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
5092 }
5093
5094 /*
5095 * Finally, release the system timer resolution request if one succeeded.
5096 */
5097 if (pDevExt->u32SystemTimerGranularityGrant)
5098 {
5099 rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
5100 pDevExt->u32SystemTimerGranularityGrant = 0;
5101 }
5102}
5103
5104
5105/**
5106 * Timer callback function for sync GIP mode.
5107 * @param pTimer The timer.
5108 * @param pvUser The device extension.
5109 */
5110static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
5111{
5112 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
5113 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
5114
5115 supdrvGipUpdate(pDevExt->pGip, RTTimeSystemNanoTS());
5116
5117 ASMSetFlags(fOldFlags);
5118}
5119
5120
5121/**
5122 * Timer callback function for async GIP mode.
5123 * @param pTimer The timer.
5124 * @param pvUser The device extension.
5125 */
5126static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
5127{
5128 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
5129 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
5130 RTCPUID idCpu = RTMpCpuId();
5131 uint64_t NanoTS = RTTimeSystemNanoTS();
5132
5133 /** @todo reset the transaction number and whatnot when iTick == 1. */
5134 if (pDevExt->idGipMaster == idCpu)
5135 supdrvGipUpdate(pDevExt->pGip, NanoTS);
5136 else
5137 supdrvGipUpdatePerCpu(pDevExt->pGip, NanoTS, ASMGetApicId());
5138
5139 ASMSetFlags(fOldFlags);
5140}
5141
5142
5143/**
5144 * Multiprocessor event notification callback.
5145 *
5146 * This is used to make sure that the GIP master gets passed on to
5147 * another CPU.
5148 *
5149 * @param enmEvent The event.
5150 * @param idCpu The cpu it applies to.
5151 * @param pvUser Pointer to the device extension.
5152 */
5153static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
5154{
5155 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
5156 if (enmEvent == RTMPEVENT_OFFLINE)
5157 {
5158 RTCPUID idGipMaster;
5159 ASMAtomicReadSize(&pDevExt->idGipMaster, &idGipMaster);
5160 if (idGipMaster == idCpu)
5161 {
5162 /*
5163 * Find a new GIP master.
5164 */
5165 bool fIgnored;
5166 unsigned i;
5167 RTCPUID idNewGipMaster = NIL_RTCPUID;
5168 RTCPUSET OnlineCpus;
5169 RTMpGetOnlineSet(&OnlineCpus);
5170
5171 for (i = 0; i < RTCPUSET_MAX_CPUS; i++)
5172 {
5173 RTCPUID idCurCpu = RTMpCpuIdFromSetIndex(i);
5174 if ( RTCpuSetIsMember(&OnlineCpus, idCurCpu)
5175 && idCurCpu != idGipMaster)
5176 {
5177 idNewGipMaster = idCurCpu;
5178 break;
5179 }
5180 }
5181
5182 dprintf(("supdrvGipMpEvent: Gip master %#lx -> %#lx\n", (long)idGipMaster, (long)idNewGipMaster));
5183 ASMAtomicCmpXchgSize(&pDevExt->idGipMaster, idNewGipMaster, idGipMaster, fIgnored);
5184 NOREF(fIgnored);
5185 }
5186 }
5187}
5188
5189
5190/**
5191 * Initializes the GIP data.
5192 *
5193 * @returns IPRT status code.
5194 * @param pDevExt Pointer to the device instance data.
5195 * @param pGip Pointer to the read-write kernel mapping of the GIP.
5196 * @param HCPhys The physical address of the GIP.
5197 * @param u64NanoTS The current nanosecond timestamp.
5198 * @param uUpdateHz The update frequency.
5199 */
5200int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
5201{
5202 unsigned i;
5203#ifdef DEBUG_DARWIN_GIP
5204 OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
5205#else
5206 LogFlow(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
5207#endif
5208
5209 /*
5210 * Initialize the structure.
5211 */
5212 memset(pGip, 0, PAGE_SIZE);
5213 pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
5214 pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
5215 pGip->u32Mode = supdrvGipDeterminTscMode(pDevExt);
5216 pGip->u32UpdateHz = uUpdateHz;
5217 pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
5218 pGip->u64NanoTSLastUpdateHz = u64NanoTS;
5219
5220 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
5221 {
5222 pGip->aCPUs[i].u32TransactionId = 2;
5223 pGip->aCPUs[i].u64NanoTS = u64NanoTS;
5224 pGip->aCPUs[i].u64TSC = ASMReadTSC();
5225
5226 /*
5227 * We don't know the following values until we've executed updates.
5228 * So, we'll just insert very high values.
5229 */
5230 pGip->aCPUs[i].u64CpuHz = _4G + 1;
5231 pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
5232 pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
5233 pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
5234 pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
5235 pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
5236 pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
5237 pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
5238 pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
5239 pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
5240 }
5241
5242 /*
5243 * Link it to the device extension.
5244 */
5245 pDevExt->pGip = pGip;
5246 pDevExt->HCPhysGip = HCPhys;
5247 pDevExt->cGipUsers = 0;
5248
5249 return VINF_SUCCESS;
5250}
5251
5252
5253/**
5254 * Callback used by supdrvDetermineAsyncTsc to read the TSC on a CPU.
5255 *
5256 * @param idCpu Ignored.
5257 * @param pvUser1 Where to put the TSC.
5258 * @param pvUser2 Ignored.
5259 */
5260static DECLCALLBACK(void) supdrvDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
5261{
5262#if 1
5263 ASMAtomicWriteU64((uint64_t volatile *)pvUser1, ASMReadTSC());
5264#else
5265 *(uint64_t *)pvUser1 = ASMReadTSC();
5266#endif
5267}
5268
5269
5270/**
5271 * Determine if Async GIP mode is required because of TSC drift.
5272 *
5273 * When using the default/normal timer code it is essential that the time stamp counter
5274 * (TSC) never runs backwards, that is, a read operation of the counter should return
5275 * a bigger value than any previous read operation. This is guaranteed by the latest
5276 * AMD CPUs and by newer Intel CPUs which never enter the C2 state (P4). In any other
5277 * case we have to choose the asynchronous timer mode.
5278 *
5279 * @param poffMin Pointer to the determined difference between different cores.
5280 * @return false if the time stamp counters appear to be in sync, true otherwise.
5281 */
5282bool VBOXCALL supdrvDetermineAsyncTsc(uint64_t *poffMin)
5283{
5284 /*
5285 * Just iterate all the cpus 8 times and make sure that the TSC is
5286 * ever increasing. We don't bother taking TSC rollover into account.
5287 */
5288 RTCPUSET CpuSet;
5289 int iLastCpu = RTCpuLastIndex(RTMpGetSet(&CpuSet));
5290 int iCpu;
5291 int cLoops = 8;
5292 bool fAsync = false;
5293 int rc = VINF_SUCCESS;
5294 uint64_t offMax = 0;
5295 uint64_t offMin = ~(uint64_t)0;
5296 uint64_t PrevTsc = ASMReadTSC();
5297
5298 while (cLoops-- > 0)
5299 {
5300 for (iCpu = 0; iCpu <= iLastCpu; iCpu++)
5301 {
5302 uint64_t CurTsc;
5303 rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(iCpu), supdrvDetermineAsyncTscWorker, &CurTsc, NULL);
5304 if (RT_SUCCESS(rc))
5305 {
5306 if (CurTsc <= PrevTsc)
5307 {
5308 fAsync = true;
5309 offMin = offMax = PrevTsc - CurTsc;
5310 dprintf(("supdrvDetermineAsyncTsc: iCpu=%d cLoops=%d CurTsc=%llx PrevTsc=%llx\n",
5311 iCpu, cLoops, CurTsc, PrevTsc));
5312 break;
5313 }
5314
5315 /* Gather statistics (except the first time). */
5316 if (iCpu != 0 || cLoops != 7)
5317 {
5318 uint64_t off = CurTsc - PrevTsc;
5319 if (off < offMin)
5320 offMin = off;
5321 if (off > offMax)
5322 offMax = off;
5323 dprintf2(("%d/%d: off=%llx\n", cLoops, iCpu, off));
5324 }
5325
5326 /* Next */
5327 PrevTsc = CurTsc;
5328 }
5329 else if (rc == VERR_NOT_SUPPORTED)
5330 break;
5331 else
5332 AssertMsg(rc == VERR_CPU_NOT_FOUND || rc == VERR_CPU_OFFLINE, ("%d\n", rc));
5333 }
5334
5335 /* If the inner loop broke out early (async detected or RTMpOnSpecific unsupported), stop. */
5336 if (iCpu <= iLastCpu)
5337 break;
5338 }
5339
5340 *poffMin = offMin; /* Almost RTMpOnSpecific profiling. */
5341 dprintf(("supdrvDetermineAsyncTsc: returns %d; iLastCpu=%d rc=%d offMin=%llx offMax=%llx\n",
5342 fAsync, iLastCpu, rc, offMin, offMax));
5343#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_OS2) && !defined(RT_OS_WINDOWS)
5344 OSDBGPRINT(("vboxdrv: fAsync=%d offMin=%#lx offMax=%#lx\n", fAsync, (long)offMin, (long)offMax));
5345#endif
5346 return fAsync;
5347}
5348
5349
5350/**
5351 * Determine the GIP TSC mode.
5352 *
5353 * @returns The most suitable TSC mode.
5354 * @param pDevExt Pointer to the device instance data.
5355 */
5356static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt)
5357{
5358 /*
5359 * On SMP we're faced with two problems:
5360 * (1) There might be a skew between the CPUs, so that cpu0
5361 * returns a TSC that is slightly different from cpu1.
5362 * (2) Power management (and other things) may cause the TSC
5363 * to run at a non-constant speed, and cause the speed
5364 * to be different on the cpus. This will result in (1).
5365 *
5366 * So, on SMP systems we'll have to select the ASYNC update method
5367 * if there are symptoms of these problems.
5368 */
5369 if (RTMpGetCount() > 1)
5370 {
5371 uint32_t uEAX, uEBX, uECX, uEDX;
5372 uint64_t u64DiffCoresIgnored;
5373
5374 /* Permit the user and/or the OS specific bits to force async mode. */
5375 if (supdrvOSGetForcedAsyncTscMode(pDevExt))
5376 return SUPGIPMODE_ASYNC_TSC;
5377
5378 /* Check for current differences between the TSCs of the cpus. */
5379 if (supdrvDetermineAsyncTsc(&u64DiffCoresIgnored))
5380 return SUPGIPMODE_ASYNC_TSC;
5381
5382 /*
5383 * If the CPU supports power management and is an AMD one we
5384 * won't trust it unless the TscInvariant bit is set.
5385 */
5386 /* Check for "AuthenticAMD" */
5387 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
5388 if ( uEAX >= 1
5389 && uEBX == X86_CPUID_VENDOR_AMD_EBX
5390 && uECX == X86_CPUID_VENDOR_AMD_ECX
5391 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
5392 {
5393 /* Check for APM support and that TscInvariant is cleared. */
5394 ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
5395 if (uEAX >= 0x80000007)
5396 {
5397 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
5398 if ( !(uEDX & RT_BIT(8))/* TscInvariant */
5399 && (uEDX & 0x3e)) /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
5400 return SUPGIPMODE_ASYNC_TSC;
5401 }
5402 }
5403 }
5404 return SUPGIPMODE_SYNC_TSC;
5405}
5406
5407
5408/**
5409 * Invalidates the GIP data upon termination.
5410 *
5411 * @param pGip Pointer to the read-write kernel mapping of the GIP.
5412 */
5413void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
5414{
5415 unsigned i;
5416 pGip->u32Magic = 0;
5417 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
5418 {
5419 pGip->aCPUs[i].u64NanoTS = 0;
5420 pGip->aCPUs[i].u64TSC = 0;
5421 pGip->aCPUs[i].iTSCHistoryHead = 0;
5422 }
5423}
5424
5425
5426/**
5427 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
5428 * updates all the per cpu data except the transaction id.
5429 *
5430 * @param pGip The GIP.
5431 * @param pGipCpu Pointer to the per cpu data.
5432 * @param u64NanoTS The current time stamp.
5433 */
5434static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS)
5435{
5436 uint64_t u64TSC;
5437 uint64_t u64TSCDelta;
5438 uint32_t u32UpdateIntervalTSC;
5439 uint32_t u32UpdateIntervalTSCSlack;
5440 unsigned iTSCHistoryHead;
5441 uint64_t u64CpuHz;
5442
5443 /*
5444 * Update the NanoTS.
5445 */
5446 ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);
5447
5448 /*
5449 * Calc TSC delta.
5450 */
5451 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
5452 u64TSC = ASMReadTSC();
5453 u64TSCDelta = u64TSC - pGipCpu->u64TSC;
5454 ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);
5455
5456 if (u64TSCDelta >> 32)
5457 {
5458 u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
5459 pGipCpu->cErrors++;
5460 }
5461
5462 /*
5463 * TSC History.
5464 */
5465 Assert(RT_ELEMENTS(pGipCpu->au32TSCHistory) == 8);
5466
5467 iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
5468 ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
5469 ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
5470
5471 /*
5472 * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
5473 */
5474 if (pGip->u32UpdateHz >= 1000)
5475 {
5476 uint32_t u32;
5477 u32 = pGipCpu->au32TSCHistory[0];
5478 u32 += pGipCpu->au32TSCHistory[1];
5479 u32 += pGipCpu->au32TSCHistory[2];
5480 u32 += pGipCpu->au32TSCHistory[3];
5481 u32 >>= 2;
5482 u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
5483 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
5484 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
5485 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
5486 u32UpdateIntervalTSC >>= 2;
5487 u32UpdateIntervalTSC += u32;
5488 u32UpdateIntervalTSC >>= 1;
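        /* u32UpdateIntervalTSC is now the mean of all eight history entries,
           computed as the average of two four-entry averages. */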
5489
5490 /* Value chosen for a 2GHz Athlon64 running Linux 2.6.10/11. */
5491 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
5492 }
5493 else if (pGip->u32UpdateHz >= 90)
5494 {
5495 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
5496 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
5497 u32UpdateIntervalTSC >>= 1;
5498
5499 /* Value chosen on a 2GHz ThinkPad running Windows. */
5500 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
5501 }
5502 else
5503 {
5504 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
5505
5506 /* This value hasn't been checked yet... waiting for OS/2 and 33Hz timers. :-) */
5507 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
5508 }
5509 ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
5510
5511 /*
5512 * CpuHz.
5513 */
5514 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
5515 ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
5516}
5517
5518
5519/**
5520 * Updates the GIP.
5521 *
5522 * @param pGip Pointer to the GIP.
5523 * @param u64NanoTS The current nanosecond timestamp.
5524 */
5525void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS)
5526{
5527 /*
5528 * Determine the relevant CPU data.
5529 */
5530 PSUPGIPCPU pGipCpu;
5531 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
5532 pGipCpu = &pGip->aCPUs[0];
5533 else
5534 {
5535 unsigned iCpu = ASMGetApicId();
5536 if (RT_UNLIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
5537 return;
5538 pGipCpu = &pGip->aCPUs[iCpu];
5539 }
5540
5541 /*
5542 * Start update transaction.
5543 */
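    /* The transaction id works like a sequence lock: it is odd while an update is
       in progress and even when the per-cpu data is consistent, letting readers
       detect concurrent updates and retry. */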
5544 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
5545 {
5546 /* this can happen on win32 if we're taking too long and there are more CPUs around. shouldn't happen though. */
5547 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
5548 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
5549 pGipCpu->cErrors++;
5550 return;
5551 }
5552
5553 /*
5554 * Recalc the update frequency every 0x800th time.
5555 */
5556 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
5557 {
5558 if (pGip->u64NanoTSLastUpdateHz)
5559 {
5560#ifdef RT_ARCH_AMD64 /** @todo fix 64-bit div here to work on x86 linux. */
5561 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
5562 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
5563 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
5564 {
5565 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
5566 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
5567 }
5568#endif
5569 }
5570 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
5571 }
5572
5573 /*
5574 * Update the data.
5575 */
5576 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
5577
5578 /*
5579 * Complete transaction.
5580 */
5581 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
5582}
5583
5584
5585/**
5586 * Updates the per cpu GIP data for the calling cpu.
5587 *
5588 * @param pGip Pointer to the GIP.
5589 * @param u64NanoTS The current nanosecond timestamp.
5590 * @param iCpu The CPU index.
5591 */
5592void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, unsigned iCpu)
5593{
5594 PSUPGIPCPU pGipCpu;
5595
5596 if (RT_LIKELY(iCpu < RT_ELEMENTS(pGip->aCPUs)))
5597 {
5598 pGipCpu = &pGip->aCPUs[iCpu];
5599
5600 /*
5601 * Start update transaction.
5602 */
5603 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
5604 {
5605 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
5606 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
5607 pGipCpu->cErrors++;
5608 return;
5609 }
5610
5611 /*
5612 * Update the data.
5613 */
5614 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
5615
5616 /*
5617 * Complete transaction.
5618 */
5619 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
5620 }
5621}
5622