VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.c@14914

Last change on this file since 14914 was 14901, checked in by vboxsync, 16 years ago

SUPDrv,SUPLib: SUPR0NativeEnableHwVirtExt -> SUPR0EnableVTx and darwin implementation - version changed again.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 178.5 KB
1/* $Revision: 14901 $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#define LOG_GROUP LOG_GROUP_SUP_DRV
35#include "SUPDrvInternal.h"
36#ifndef PAGE_SHIFT
37# include <iprt/param.h>
38#endif
39#include <iprt/alloc.h>
40#include <iprt/semaphore.h>
41#include <iprt/spinlock.h>
42#include <iprt/thread.h>
43#include <iprt/process.h>
44#include <iprt/mp.h>
45#include <iprt/power.h>
46#include <iprt/cpuset.h>
47#include <iprt/uuid.h>
48#include <VBox/param.h>
49#include <VBox/log.h>
50#include <VBox/err.h>
51#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
52# include <iprt/crc32.h>
53# include <iprt/net.h>
54#endif
55/* VBox/x86.h not compatible with the Linux kernel sources */
56#ifdef RT_OS_LINUX
57# define X86_CPUID_VENDOR_AMD_EBX 0x68747541
58# define X86_CPUID_VENDOR_AMD_ECX 0x444d4163
59# define X86_CPUID_VENDOR_AMD_EDX 0x69746e65
60#else
61# include <VBox/x86.h>
62#endif
63
64/*
65 * Logging assignments:
66 * Log - useful stuff, like failures.
67 * LogFlow - program flow, except the really noisy bits.
68 * Log2 - Cleanup.
69 * Log3 - Loader flow noise.
70 * Log4 - Call VMMR0 flow noise.
71 * Log5 - Native yet-to-be-defined noise.
72 * Log6 - Native ioctl flow noise.
73 *
74 * Logging requires BUILD_TYPE=debug and possibly changes to the logger
76 * instantiation in log-vbox.c(pp).
76 */
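/* Editor's note - an illustrative sketch, not part of the original file: the
 * level assignments above are used with the usual double-parenthesis calling
 * convention seen throughout this file, e.g.
 *
 *     Log(("supdrvFoo: allocation failed, rc=%d\n", rc));       (failures)
 *     LogFlow(("supdrvFoo: pSession=%p\n", pSession));          (program flow)
 *     Log4(("supdrvFoo: calling VMMR0, op=%u\n", uOperation));  (VMMR0 noise)
 *
 * supdrvFoo, rc, pSession and uOperation are made-up names for illustration.
 */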
77
78
79/*******************************************************************************
80* Defined Constants And Macros *
81*******************************************************************************/
82/* from x86.h - clashes with linux thus this duplication */
83#undef X86_CR0_PG
84#define X86_CR0_PG RT_BIT(31)
85#undef X86_CR0_PE
86#define X86_CR0_PE RT_BIT(0)
87#undef X86_CPUID_AMD_FEATURE_EDX_NX
88#define X86_CPUID_AMD_FEATURE_EDX_NX RT_BIT(20)
89#undef MSR_K6_EFER
90#define MSR_K6_EFER 0xc0000080
91#undef MSR_K6_EFER_NXE
92#define MSR_K6_EFER_NXE RT_BIT(11)
93#undef MSR_K6_EFER_LMA
94#define MSR_K6_EFER_LMA RT_BIT(10)
95#undef X86_CR4_PGE
96#define X86_CR4_PGE RT_BIT(7)
97#undef X86_CR4_PAE
98#define X86_CR4_PAE RT_BIT(5)
99#undef X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
100#define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE RT_BIT(29)
101
102
103/** The frequency at which we recalculate the u32UpdateHz and
104 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
105#define GIP_UPDATEHZ_RECALC_FREQ 0x800
106
107/**
108 * Validates a session pointer.
109 *
110 * @returns true/false accordingly.
111 * @param pSession The session.
112 */
113#define SUP_IS_SESSION_VALID(pSession) \
114 ( VALID_PTR(pSession) \
115 && pSession->u32Cookie == BIRD_INV)
116
117/** @def VBOX_SVN_REV
118 * The makefile should define this if it can. */
119#ifndef VBOX_SVN_REV
120# define VBOX_SVN_REV 0
121#endif
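/* Editor's note - an illustrative sketch, not part of the original file: the
 * SUP_IS_SESSION_VALID() macro above is meant to guard SUPR0 API entry points
 * before any session state is touched, along these lines (SUPR0SomeApi is a
 * hypothetical function name):
 *
 *     int SUPR0SomeApi(PSUPDRVSESSION pSession)
 *     {
 *         if (!SUP_IS_SESSION_VALID(pSession))
 *             return VERR_INVALID_PARAMETER;
 *         ...
 *     }
 */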
122
123/*******************************************************************************
124* Internal Functions *
125*******************************************************************************/
126static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
127static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
128static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
129static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
130static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
131static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
132static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
133static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
134static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
135static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
136static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
137static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
138static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt);
139#ifdef RT_OS_WINDOWS
140static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
141static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3);
142#endif /* RT_OS_WINDOWS */
143static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
144static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
145static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
146static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
147static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
148
149#ifdef RT_WITH_W64_UNWIND_HACK
150DECLASM(int) supdrvNtWrapVMMR0EntryEx(PFNRT pfnVMMR0EntryEx, PVM pVM, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession);
151DECLASM(int) supdrvNtWrapVMMR0EntryFast(PFNRT pfnVMMR0EntryFast, PVM pVM, unsigned idCpu, unsigned uOperation);
152DECLASM(void) supdrvNtWrapObjDestructor(PFNRT pfnDestruction, void *pvObj, void *pvUser1, void *pvUser2);
153DECLASM(void *) supdrvNtWrapQueryFactoryInterface(PFNRT pfnQueryFactoryInterface, struct SUPDRVFACTORY const *pSupDrvFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid);
154DECLASM(int) supdrvNtWrapModuleInit(PFNRT pfnModuleInit);
155DECLASM(void) supdrvNtWrapModuleTerm(PFNRT pfnModuleTerm);
156DECLASM(int) supdrvNtWrapServiceReqHandler(PFNRT pfnServiceReqHandler, PSUPDRVSESSION pSession, uint32_t uOperation, uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr);
157
158DECLASM(int) UNWIND_WRAP(SUPR0ComponentRegisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
159DECLASM(int) UNWIND_WRAP(SUPR0ComponentDeregisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
160DECLASM(int) UNWIND_WRAP(SUPR0ComponentQueryFactory)(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf);
161DECLASM(void *) UNWIND_WRAP(SUPR0ObjRegister)(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2);
162DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRef)(void *pvObj, PSUPDRVSESSION pSession);
163DECLASM(int) UNWIND_WRAP(SUPR0ObjRelease)(void *pvObj, PSUPDRVSESSION pSession);
164DECLASM(int) UNWIND_WRAP(SUPR0ObjVerifyAccess)(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName);
165DECLASM(int) UNWIND_WRAP(SUPR0LockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
166DECLASM(int) UNWIND_WRAP(SUPR0UnlockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
167DECLASM(int) UNWIND_WRAP(SUPR0ContAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys);
168DECLASM(int) UNWIND_WRAP(SUPR0ContFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
169DECLASM(int) UNWIND_WRAP(SUPR0LowAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages);
170DECLASM(int) UNWIND_WRAP(SUPR0LowFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
171DECLASM(int) UNWIND_WRAP(SUPR0MemAlloc)(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3);
172DECLASM(int) UNWIND_WRAP(SUPR0MemGetPhys)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages);
173DECLASM(int) UNWIND_WRAP(SUPR0MemFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
174DECLASM(int) UNWIND_WRAP(SUPR0PageAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages);
175DECLASM(int) UNWIND_WRAP(SUPR0PageFree)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
176//DECLASM(int) UNWIND_WRAP(SUPR0Printf)(const char *pszFormat, ...);
177DECLASM(SUPPAGINGMODE) UNWIND_WRAP(SUPR0GetPagingMode)(void);
178DECLASM(void *) UNWIND_WRAP(RTMemAlloc)(size_t cb) RT_NO_THROW;
179DECLASM(void *) UNWIND_WRAP(RTMemAllocZ)(size_t cb) RT_NO_THROW;
180DECLASM(void) UNWIND_WRAP(RTMemFree)(void *pv) RT_NO_THROW;
181DECLASM(void *) UNWIND_WRAP(RTMemDup)(const void *pvSrc, size_t cb) RT_NO_THROW;
182DECLASM(void *) UNWIND_WRAP(RTMemDupEx)(const void *pvSrc, size_t cbSrc, size_t cbExtra) RT_NO_THROW;
183DECLASM(void *) UNWIND_WRAP(RTMemRealloc)(void *pvOld, size_t cbNew) RT_NO_THROW;
184DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocLow)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
185DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPage)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
186DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhys)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
187DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhysNC)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
188DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocCont)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
189DECLASM(int) UNWIND_WRAP(RTR0MemObjEnterPhys)(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb);
190DECLASM(int) UNWIND_WRAP(RTR0MemObjLockUser)(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process);
191DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernel)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt);
192DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernelEx)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt, size_t offSub, size_t cbSub);
193DECLASM(int) UNWIND_WRAP(RTR0MemObjMapUser)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process);
194/*DECLASM(void *) UNWIND_WRAP(RTR0MemObjAddress)(RTR0MEMOBJ MemObj); - not necessary */
195/*DECLASM(RTR3PTR) UNWIND_WRAP(RTR0MemObjAddressR3)(RTR0MEMOBJ MemObj); - not necessary */
196/*DECLASM(size_t) UNWIND_WRAP(RTR0MemObjSize)(RTR0MEMOBJ MemObj); - not necessary */
197/*DECLASM(bool) UNWIND_WRAP(RTR0MemObjIsMapping)(RTR0MEMOBJ MemObj); - not necessary */
198/*DECLASM(RTHCPHYS) UNWIND_WRAP(RTR0MemObjGetPagePhysAddr)(RTR0MEMOBJ MemObj, size_t iPage); - not necessary */
199DECLASM(int) UNWIND_WRAP(RTR0MemObjFree)(RTR0MEMOBJ MemObj, bool fFreeMappings);
200/* RTProcSelf - not necessary */
201/* RTR0ProcHandleSelf - not necessary */
202DECLASM(int) UNWIND_WRAP(RTSemFastMutexCreate)(PRTSEMFASTMUTEX pMutexSem);
203DECLASM(int) UNWIND_WRAP(RTSemFastMutexDestroy)(RTSEMFASTMUTEX MutexSem);
204DECLASM(int) UNWIND_WRAP(RTSemFastMutexRequest)(RTSEMFASTMUTEX MutexSem);
205DECLASM(int) UNWIND_WRAP(RTSemFastMutexRelease)(RTSEMFASTMUTEX MutexSem);
206DECLASM(int) UNWIND_WRAP(RTSemEventCreate)(PRTSEMEVENT pEventSem);
207DECLASM(int) UNWIND_WRAP(RTSemEventSignal)(RTSEMEVENT EventSem);
208DECLASM(int) UNWIND_WRAP(RTSemEventWait)(RTSEMEVENT EventSem, unsigned cMillies);
209DECLASM(int) UNWIND_WRAP(RTSemEventWaitNoResume)(RTSEMEVENT EventSem, unsigned cMillies);
210DECLASM(int) UNWIND_WRAP(RTSemEventDestroy)(RTSEMEVENT EventSem);
211DECLASM(int) UNWIND_WRAP(RTSemEventMultiCreate)(PRTSEMEVENTMULTI pEventMultiSem);
212DECLASM(int) UNWIND_WRAP(RTSemEventMultiSignal)(RTSEMEVENTMULTI EventMultiSem);
213DECLASM(int) UNWIND_WRAP(RTSemEventMultiReset)(RTSEMEVENTMULTI EventMultiSem);
214DECLASM(int) UNWIND_WRAP(RTSemEventMultiWait)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
215DECLASM(int) UNWIND_WRAP(RTSemEventMultiWaitNoResume)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
216DECLASM(int) UNWIND_WRAP(RTSemEventMultiDestroy)(RTSEMEVENTMULTI EventMultiSem);
217DECLASM(int) UNWIND_WRAP(RTSpinlockCreate)(PRTSPINLOCK pSpinlock);
218DECLASM(int) UNWIND_WRAP(RTSpinlockDestroy)(RTSPINLOCK Spinlock);
219DECLASM(void) UNWIND_WRAP(RTSpinlockAcquire)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
220DECLASM(void) UNWIND_WRAP(RTSpinlockRelease)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
221DECLASM(void) UNWIND_WRAP(RTSpinlockAcquireNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
222DECLASM(void) UNWIND_WRAP(RTSpinlockReleaseNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
223/* RTTimeNanoTS - not necessary */
224/* RTTimeMilliTS - not necessary */
225/* RTTimeSystemNanoTS - not necessary */
226/* RTTimeSystemMilliTS - not necessary */
227/* RTThreadNativeSelf - not necessary */
228DECLASM(int) UNWIND_WRAP(RTThreadSleep)(unsigned cMillies);
229DECLASM(bool) UNWIND_WRAP(RTThreadYield)(void);
230#if 0
231/* RTThreadSelf - not necessary */
232DECLASM(int) UNWIND_WRAP(RTThreadCreate)(PRTTHREAD pThread, PFNRTTHREAD pfnThread, void *pvUser, size_t cbStack,
233 RTTHREADTYPE enmType, unsigned fFlags, const char *pszName);
234DECLASM(RTNATIVETHREAD) UNWIND_WRAP(RTThreadGetNative)(RTTHREAD Thread);
235DECLASM(int) UNWIND_WRAP(RTThreadWait)(RTTHREAD Thread, unsigned cMillies, int *prc);
236DECLASM(int) UNWIND_WRAP(RTThreadWaitNoResume)(RTTHREAD Thread, unsigned cMillies, int *prc);
237DECLASM(const char *) UNWIND_WRAP(RTThreadGetName)(RTTHREAD Thread);
238DECLASM(const char *) UNWIND_WRAP(RTThreadSelfName)(void);
239DECLASM(RTTHREADTYPE) UNWIND_WRAP(RTThreadGetType)(RTTHREAD Thread);
240DECLASM(int) UNWIND_WRAP(RTThreadUserSignal)(RTTHREAD Thread);
241DECLASM(int) UNWIND_WRAP(RTThreadUserReset)(RTTHREAD Thread);
242DECLASM(int) UNWIND_WRAP(RTThreadUserWait)(RTTHREAD Thread, unsigned cMillies);
243DECLASM(int) UNWIND_WRAP(RTThreadUserWaitNoResume)(RTTHREAD Thread, unsigned cMillies);
244#endif
245/* RTLogDefaultInstance - a bit of a gamble, but we do not want the overhead! */
246/* RTMpCpuId - not necessary */
247/* RTMpCpuIdFromSetIndex - not necessary */
248/* RTMpCpuIdToSetIndex - not necessary */
249/* RTMpIsCpuPossible - not necessary */
250/* RTMpGetCount - not necessary */
251/* RTMpGetMaxCpuId - not necessary */
252/* RTMpGetOnlineCount - not necessary */
253/* RTMpGetOnlineSet - not necessary */
254/* RTMpGetSet - not necessary */
255/* RTMpIsCpuOnline - not necessary */
256DECLASM(int) UNWIND_WRAP(RTMpOnAll)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
257DECLASM(int) UNWIND_WRAP(RTMpOnOthers)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
258DECLASM(int) UNWIND_WRAP(RTMpOnSpecific)(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
259/* RTLogRelDefaultInstance - not necessary. */
260DECLASM(int) UNWIND_WRAP(RTLogSetDefaultInstanceThread)(PRTLOGGER pLogger, uintptr_t uKey);
261/* RTLogLogger - can't wrap this buster. */
262/* RTLogLoggerEx - can't wrap this buster. */
263DECLASM(void) UNWIND_WRAP(RTLogLoggerExV)(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args);
264/* RTLogPrintf - can't wrap this buster. */ /** @todo provide va_list log wrappers in RuntimeR0. */
265DECLASM(void) UNWIND_WRAP(RTLogPrintfV)(const char *pszFormat, va_list args);
266DECLASM(void) UNWIND_WRAP(AssertMsg1)(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction);
267/* AssertMsg2 - can't wrap this buster. */
268#endif /* RT_WITH_W64_UNWIND_HACK */
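/* Editor's note, not part of the original file: UNWIND_WRAP() comes from
 * SUPDrvInternal.h (included above). With RT_WITH_W64_UNWIND_HACK it maps
 * each name to one of the assembly wrappers declared above, which provide the
 * Win64 stack unwind information around the call to the real function;
 * without the hack the macro simply resolves to the plain function name, so
 * the g_aFunctions table below binds the real IPRT/SUPR0 entry points
 * directly.
 */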
269
270
271/*******************************************************************************
272* Global Variables *
273*******************************************************************************/
274/**
275 * Array of the R0 SUP API.
276 */
277static SUPFUNC g_aFunctions[] =
278{
279 /* name function */
280 /* Entries with absolute addresses determined at runtime; the fixup
281 code makes ugly ASSUMPTIONS about the order here: */
282 { "SUPR0AbsIs64bit", (void *)0 },
283 { "SUPR0Abs64bitKernelCS", (void *)0 },
284 { "SUPR0Abs64bitKernelSS", (void *)0 },
285 { "SUPR0Abs64bitKernelDS", (void *)0 },
286 { "SUPR0AbsKernelCS", (void *)0 },
287 { "SUPR0AbsKernelSS", (void *)0 },
288 { "SUPR0AbsKernelDS", (void *)0 },
289 { "SUPR0AbsKernelES", (void *)0 },
290 { "SUPR0AbsKernelFS", (void *)0 },
291 { "SUPR0AbsKernelGS", (void *)0 },
292 /* Normal function pointers: */
293 { "SUPR0ComponentRegisterFactory", (void *)UNWIND_WRAP(SUPR0ComponentRegisterFactory) },
294 { "SUPR0ComponentDeregisterFactory", (void *)UNWIND_WRAP(SUPR0ComponentDeregisterFactory) },
295 { "SUPR0ComponentQueryFactory", (void *)UNWIND_WRAP(SUPR0ComponentQueryFactory) },
296 { "SUPR0ObjRegister", (void *)UNWIND_WRAP(SUPR0ObjRegister) },
297 { "SUPR0ObjAddRef", (void *)UNWIND_WRAP(SUPR0ObjAddRef) },
298 { "SUPR0ObjRelease", (void *)UNWIND_WRAP(SUPR0ObjRelease) },
299 { "SUPR0ObjVerifyAccess", (void *)UNWIND_WRAP(SUPR0ObjVerifyAccess) },
300 { "SUPR0LockMem", (void *)UNWIND_WRAP(SUPR0LockMem) },
301 { "SUPR0UnlockMem", (void *)UNWIND_WRAP(SUPR0UnlockMem) },
302 { "SUPR0ContAlloc", (void *)UNWIND_WRAP(SUPR0ContAlloc) },
303 { "SUPR0ContFree", (void *)UNWIND_WRAP(SUPR0ContFree) },
304 { "SUPR0LowAlloc", (void *)UNWIND_WRAP(SUPR0LowAlloc) },
305 { "SUPR0LowFree", (void *)UNWIND_WRAP(SUPR0LowFree) },
306 { "SUPR0MemAlloc", (void *)UNWIND_WRAP(SUPR0MemAlloc) },
307 { "SUPR0MemGetPhys", (void *)UNWIND_WRAP(SUPR0MemGetPhys) },
308 { "SUPR0MemFree", (void *)UNWIND_WRAP(SUPR0MemFree) },
309 { "SUPR0PageAlloc", (void *)UNWIND_WRAP(SUPR0PageAlloc) },
310 { "SUPR0PageFree", (void *)UNWIND_WRAP(SUPR0PageFree) },
311 { "SUPR0Printf", (void *)SUPR0Printf }, /** @todo needs wrapping? */
312 { "SUPR0GetPagingMode", (void *)UNWIND_WRAP(SUPR0GetPagingMode) },
313 { "SUPR0EnableVTx", (void *)SUPR0EnableVTx },
314 { "RTMemAlloc", (void *)UNWIND_WRAP(RTMemAlloc) },
315 { "RTMemAllocZ", (void *)UNWIND_WRAP(RTMemAllocZ) },
316 { "RTMemFree", (void *)UNWIND_WRAP(RTMemFree) },
317 /*{ "RTMemDup", (void *)UNWIND_WRAP(RTMemDup) },
318 { "RTMemDupEx", (void *)UNWIND_WRAP(RTMemDupEx) },*/
319 { "RTMemRealloc", (void *)UNWIND_WRAP(RTMemRealloc) },
320 { "RTR0MemObjAllocLow", (void *)UNWIND_WRAP(RTR0MemObjAllocLow) },
321 { "RTR0MemObjAllocPage", (void *)UNWIND_WRAP(RTR0MemObjAllocPage) },
322 { "RTR0MemObjAllocPhys", (void *)UNWIND_WRAP(RTR0MemObjAllocPhys) },
323 { "RTR0MemObjAllocPhysNC", (void *)UNWIND_WRAP(RTR0MemObjAllocPhysNC) },
324 { "RTR0MemObjAllocCont", (void *)UNWIND_WRAP(RTR0MemObjAllocCont) },
325 { "RTR0MemObjEnterPhys", (void *)UNWIND_WRAP(RTR0MemObjEnterPhys) },
326 { "RTR0MemObjLockUser", (void *)UNWIND_WRAP(RTR0MemObjLockUser) },
327 { "RTR0MemObjMapKernel", (void *)UNWIND_WRAP(RTR0MemObjMapKernel) },
328 { "RTR0MemObjMapKernelEx", (void *)UNWIND_WRAP(RTR0MemObjMapKernelEx) },
329 { "RTR0MemObjMapUser", (void *)UNWIND_WRAP(RTR0MemObjMapUser) },
330 { "RTR0MemObjAddress", (void *)RTR0MemObjAddress },
331 { "RTR0MemObjAddressR3", (void *)RTR0MemObjAddressR3 },
332 { "RTR0MemObjSize", (void *)RTR0MemObjSize },
333 { "RTR0MemObjIsMapping", (void *)RTR0MemObjIsMapping },
334 { "RTR0MemObjGetPagePhysAddr", (void *)RTR0MemObjGetPagePhysAddr },
335 { "RTR0MemObjFree", (void *)UNWIND_WRAP(RTR0MemObjFree) },
336/* These don't work yet on linux - use fast mutexes!
337 { "RTSemMutexCreate", (void *)RTSemMutexCreate },
338 { "RTSemMutexRequest", (void *)RTSemMutexRequest },
339 { "RTSemMutexRelease", (void *)RTSemMutexRelease },
340 { "RTSemMutexDestroy", (void *)RTSemMutexDestroy },
341*/
342 { "RTProcSelf", (void *)RTProcSelf },
343 { "RTR0ProcHandleSelf", (void *)RTR0ProcHandleSelf },
344 { "RTSemFastMutexCreate", (void *)UNWIND_WRAP(RTSemFastMutexCreate) },
345 { "RTSemFastMutexDestroy", (void *)UNWIND_WRAP(RTSemFastMutexDestroy) },
346 { "RTSemFastMutexRequest", (void *)UNWIND_WRAP(RTSemFastMutexRequest) },
347 { "RTSemFastMutexRelease", (void *)UNWIND_WRAP(RTSemFastMutexRelease) },
348 { "RTSemEventCreate", (void *)UNWIND_WRAP(RTSemEventCreate) },
349 { "RTSemEventSignal", (void *)UNWIND_WRAP(RTSemEventSignal) },
350 { "RTSemEventWait", (void *)UNWIND_WRAP(RTSemEventWait) },
351 { "RTSemEventWaitNoResume", (void *)UNWIND_WRAP(RTSemEventWaitNoResume) },
352 { "RTSemEventDestroy", (void *)UNWIND_WRAP(RTSemEventDestroy) },
353 { "RTSemEventMultiCreate", (void *)UNWIND_WRAP(RTSemEventMultiCreate) },
354 { "RTSemEventMultiSignal", (void *)UNWIND_WRAP(RTSemEventMultiSignal) },
355 { "RTSemEventMultiReset", (void *)UNWIND_WRAP(RTSemEventMultiReset) },
356 { "RTSemEventMultiWait", (void *)UNWIND_WRAP(RTSemEventMultiWait) },
357 { "RTSemEventMultiWaitNoResume", (void *)UNWIND_WRAP(RTSemEventMultiWaitNoResume) },
358 { "RTSemEventMultiDestroy", (void *)UNWIND_WRAP(RTSemEventMultiDestroy) },
359 { "RTSpinlockCreate", (void *)UNWIND_WRAP(RTSpinlockCreate) },
360 { "RTSpinlockDestroy", (void *)UNWIND_WRAP(RTSpinlockDestroy) },
361 { "RTSpinlockAcquire", (void *)UNWIND_WRAP(RTSpinlockAcquire) },
362 { "RTSpinlockRelease", (void *)UNWIND_WRAP(RTSpinlockRelease) },
363 { "RTSpinlockAcquireNoInts", (void *)UNWIND_WRAP(RTSpinlockAcquireNoInts) },
364 { "RTSpinlockReleaseNoInts", (void *)UNWIND_WRAP(RTSpinlockReleaseNoInts) },
365 { "RTTimeNanoTS", (void *)RTTimeNanoTS },
366 { "RTTimeMillieTS", (void *)RTTimeMilliTS },
367 { "RTTimeSystemNanoTS", (void *)RTTimeSystemNanoTS },
368 { "RTTimeSystemMillieTS", (void *)RTTimeSystemMilliTS },
369 { "RTThreadNativeSelf", (void *)RTThreadNativeSelf },
370 { "RTThreadSleep", (void *)UNWIND_WRAP(RTThreadSleep) },
371 { "RTThreadYield", (void *)UNWIND_WRAP(RTThreadYield) },
372#if 0 /* Thread APIs, Part 2. */
373 { "RTThreadSelf", (void *)UNWIND_WRAP(RTThreadSelf) },
374 { "RTThreadCreate", (void *)UNWIND_WRAP(RTThreadCreate) }, /** @todo need to wrap the callback */
375 { "RTThreadGetNative", (void *)UNWIND_WRAP(RTThreadGetNative) },
376 { "RTThreadWait", (void *)UNWIND_WRAP(RTThreadWait) },
377 { "RTThreadWaitNoResume", (void *)UNWIND_WRAP(RTThreadWaitNoResume) },
378 { "RTThreadGetName", (void *)UNWIND_WRAP(RTThreadGetName) },
379 { "RTThreadSelfName", (void *)UNWIND_WRAP(RTThreadSelfName) },
380 { "RTThreadGetType", (void *)UNWIND_WRAP(RTThreadGetType) },
381 { "RTThreadUserSignal", (void *)UNWIND_WRAP(RTThreadUserSignal) },
382 { "RTThreadUserReset", (void *)UNWIND_WRAP(RTThreadUserReset) },
383 { "RTThreadUserWait", (void *)UNWIND_WRAP(RTThreadUserWait) },
384 { "RTThreadUserWaitNoResume", (void *)UNWIND_WRAP(RTThreadUserWaitNoResume) },
385#endif
386 { "RTLogDefaultInstance", (void *)RTLogDefaultInstance },
387 { "RTMpCpuId", (void *)RTMpCpuId },
388 { "RTMpCpuIdFromSetIndex", (void *)RTMpCpuIdFromSetIndex },
389 { "RTMpCpuIdToSetIndex", (void *)RTMpCpuIdToSetIndex },
390 { "RTMpIsCpuPossible", (void *)RTMpIsCpuPossible },
391 { "RTMpGetCount", (void *)RTMpGetCount },
392 { "RTMpGetMaxCpuId", (void *)RTMpGetMaxCpuId },
393 { "RTMpGetOnlineCount", (void *)RTMpGetOnlineCount },
394 { "RTMpGetOnlineSet", (void *)RTMpGetOnlineSet },
395 { "RTMpGetSet", (void *)RTMpGetSet },
396 { "RTMpIsCpuOnline", (void *)RTMpIsCpuOnline },
397 { "RTMpOnAll", (void *)UNWIND_WRAP(RTMpOnAll) },
398 { "RTMpOnOthers", (void *)UNWIND_WRAP(RTMpOnOthers) },
399 { "RTMpOnSpecific", (void *)UNWIND_WRAP(RTMpOnSpecific) },
400 { "RTPowerNotificationRegister", (void *)RTPowerNotificationRegister },
401 { "RTPowerNotificationDeregister", (void *)RTPowerNotificationDeregister },
402 { "RTLogRelDefaultInstance", (void *)RTLogRelDefaultInstance },
403 { "RTLogSetDefaultInstanceThread", (void *)UNWIND_WRAP(RTLogSetDefaultInstanceThread) },
404 { "RTLogLogger", (void *)RTLogLogger }, /** @todo remove this */
405 { "RTLogLoggerEx", (void *)RTLogLoggerEx }, /** @todo remove this */
406 { "RTLogLoggerExV", (void *)UNWIND_WRAP(RTLogLoggerExV) },
407 { "RTLogPrintf", (void *)RTLogPrintf }, /** @todo remove this */
408 { "RTLogPrintfV", (void *)UNWIND_WRAP(RTLogPrintfV) },
409 { "AssertMsg1", (void *)UNWIND_WRAP(AssertMsg1) },
410 { "AssertMsg2", (void *)AssertMsg2 }, /** @todo replace this by RTAssertMsg2V */
411#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
412 { "RTR0AssertPanicSystem", (void *)RTR0AssertPanicSystem },
413#endif
414#if defined(RT_OS_DARWIN)
415 { "RTAssertMsg1", (void *)RTAssertMsg1 },
416 { "RTAssertMsg2", (void *)RTAssertMsg2 },
417 { "RTAssertMsg2V", (void *)RTAssertMsg2V },
418#endif
419};
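/* Editor's note - an illustrative sketch, not part of the original file: this
 * table is copied out to ring-3 via SUP_IOCTL_QUERY_FUNCS (handled further
 * down) and module imports are then resolved against it by name, conceptually
 * a plain linear search along these lines (supdrvExampleLookupSymbol and the
 * szName field spelling are assumptions for illustration; .pfn is used later
 * in this file):
 *
 *     static void *supdrvExampleLookupSymbol(const char *pszSymbol)
 *     {
 *         unsigned i;
 *         for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
 *             if (!strcmp(g_aFunctions[i].szName, pszSymbol))
 *                 return g_aFunctions[i].pfn;
 *         return NULL;
 *     }
 */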
420
421#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
422/**
423 * Drag in the rest of IPRT since we share it with the
424 * rest of the kernel modules on darwin.
425 */
426PFNRT g_apfnVBoxDrvIPRTDeps[] =
427{
428 (PFNRT)RTCrc32,
429 (PFNRT)RTErrConvertFromErrno,
430 (PFNRT)RTNetIPv4IsHdrValid,
431 (PFNRT)RTNetIPv4TCPChecksum,
432 (PFNRT)RTNetIPv4UDPChecksum,
433 (PFNRT)RTUuidCompare,
434 (PFNRT)RTUuidCompareStr,
435 (PFNRT)RTUuidFromStr,
436 NULL
437};
438#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
439
440
441/**
442 * Initializes the device extension structure.
443 *
444 * @returns IPRT status code.
445 * @param pDevExt The device extension to initialize.
446 */
447int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
448{
449 int rc;
450
451#ifdef SUPDRV_WITH_RELEASE_LOGGER
452 /*
453 * Create the release log.
454 */
455 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
456 PRTLOGGER pRelLogger;
457 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
458 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
459 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
460 if (RT_SUCCESS(rc))
461 RTLogRelSetDefaultInstance(pRelLogger);
462#endif
463
464 /*
465 * Initialize it.
466 */
467 memset(pDevExt, 0, sizeof(*pDevExt));
468 rc = RTSpinlockCreate(&pDevExt->Spinlock);
469 if (!rc)
470 {
471 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
472 if (!rc)
473 {
474 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
475 if (!rc)
476 {
477 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
478 if (!rc)
479 {
480 rc = supdrvGipCreate(pDevExt);
481 if (RT_SUCCESS(rc))
482 {
483 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
484
485 /*
486 * Fixup the absolute symbols.
487 *
488 * Because of the table indexing assumptions we'll do #ifdef orgy here rather
489 * than distributing this to OS specific files. At least for now.
490 */
491#ifdef RT_OS_DARWIN
492 g_aFunctions[0].pfn = (void *)(SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64); /* SUPR0AbsIs64bit */
493 g_aFunctions[1].pfn = (void *)0x80; /* KERNEL64_CS, seg.h */
494 g_aFunctions[2].pfn = (void *)0x88; /* KERNEL64_SS, seg.h */
495 g_aFunctions[3].pfn = (void *)0x88; /* KERNEL64_SS, seg.h */
496#elif ARCH_BITS == 64
497 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
498 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
499 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
500 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
501#elif ARCH_BITS == 32
502 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
503#endif
504 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
505 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
506 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
507 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
508 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
509 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
510 return VINF_SUCCESS;
511 }
512
513 RTSemFastMutexDestroy(pDevExt->mtxGip);
514 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
515 }
516 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
517 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
518 }
519 RTSemFastMutexDestroy(pDevExt->mtxLdr);
520 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
521 }
522 RTSpinlockDestroy(pDevExt->Spinlock);
523 pDevExt->Spinlock = NIL_RTSPINLOCK;
524 }
525#ifdef SUPDRV_WITH_RELEASE_LOGGER
526 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
527 RTLogDestroy(RTLogSetDefaultInstance(NULL));
528#endif
529
530 return rc;
531}
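/* Editor's note - an illustrative sketch, not part of the original file: the
 * OS specific glue pairs supdrvInitDevExt/supdrvDeleteDevExt roughly like
 * this (g_DevExt is a made-up name for the per-driver storage):
 *
 *     static SUPDRVDEVEXT g_DevExt;
 *
 *     int rc = supdrvInitDevExt(&g_DevExt);
 *     if (RT_SUCCESS(rc))
 *     {
 *         ... create device nodes, accept sessions ...
 *         supdrvDeleteDevExt(&g_DevExt);
 *     }
 */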
532
533
534/**
535 * Delete the device extension (i.e. clean up its members).
536 *
537 * @param pDevExt The device extension to delete.
538 */
539void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
540{
541 PSUPDRVOBJ pObj;
542 PSUPDRVUSAGE pUsage;
543
544 /*
545 * Kill mutexes and spinlocks.
546 */
547 RTSemFastMutexDestroy(pDevExt->mtxGip);
548 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
549 RTSemFastMutexDestroy(pDevExt->mtxLdr);
550 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
551 RTSpinlockDestroy(pDevExt->Spinlock);
552 pDevExt->Spinlock = NIL_RTSPINLOCK;
553 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
554 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
555
556 /*
557 * Free lists.
558 */
559 /* objects. */
560 pObj = pDevExt->pObjs;
561#if !defined(DEBUG_bird) || !defined(RT_OS_LINUX) /* breaks unloading, temporary, remove me! */
562 Assert(!pObj); /* (can trigger on forced unloads) */
563#endif
564 pDevExt->pObjs = NULL;
565 while (pObj)
566 {
567 void *pvFree = pObj;
568 pObj = pObj->pNext;
569 RTMemFree(pvFree);
570 }
571
572 /* usage records. */
573 pUsage = pDevExt->pUsageFree;
574 pDevExt->pUsageFree = NULL;
575 while (pUsage)
576 {
577 void *pvFree = pUsage;
578 pUsage = pUsage->pNext;
579 RTMemFree(pvFree);
580 }
581
582 /* kill the GIP. */
583 supdrvGipDestroy(pDevExt);
584
585#ifdef SUPDRV_WITH_RELEASE_LOGGER
586 /* destroy the loggers. */
587 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
588 RTLogDestroy(RTLogSetDefaultInstance(NULL));
589#endif
590}
591
592
593/**
594 * Create session.
595 *
596 * @returns IPRT status code.
597 * @param pDevExt Device extension.
598 * @param fUser Flag indicating whether this is a user or kernel session.
599 * @param ppSession Where to store the pointer to the session data.
600 */
601int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, PSUPDRVSESSION *ppSession)
602{
603 /*
604 * Allocate memory for the session data.
605 */
606 int rc = VERR_NO_MEMORY;
607 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
608 if (pSession)
609 {
610 /* Initialize session data. */
611 rc = RTSpinlockCreate(&pSession->Spinlock);
612 if (!rc)
613 {
614 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
615 pSession->pDevExt = pDevExt;
616 pSession->u32Cookie = BIRD_INV;
617 /*pSession->pLdrUsage = NULL;
618 pSession->pVM = NULL;
619 pSession->pUsage = NULL;
620 pSession->pGip = NULL;
621 pSession->fGipReferenced = false;
622 pSession->Bundle.cUsed = 0; */
623 pSession->Uid = NIL_RTUID;
624 pSession->Gid = NIL_RTGID;
625 if (fUser)
626 {
627 pSession->Process = RTProcSelf();
628 pSession->R0Process = RTR0ProcHandleSelf();
629 }
630 else
631 {
632 pSession->Process = NIL_RTPROCESS;
633 pSession->R0Process = NIL_RTR0PROCESS;
634 }
635
636 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
637 return VINF_SUCCESS;
638 }
639
640 RTMemFree(pSession);
641 *ppSession = NULL;
642 Log(("Failed to create spinlock, rc=%d!\n", rc));
643 }
644
645 return rc;
646}
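/* Editor's note - an illustrative sketch, not part of the original file: a
 * device-open handler in the OS specific code pairs these calls roughly as
 * follows (error handling trimmed, g_DevExt is a made-up name):
 *
 *     PSUPDRVSESSION pSession;
 *     int rc = supdrvCreateSession(&g_DevExt, true, &pSession);   (fUser = true)
 *     if (RT_SUCCESS(rc))
 *     {
 *         ... hand out the handle, serve ioctls ...
 *         supdrvCloseSession(&g_DevExt, pSession);
 *     }
 */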
647
648
649/**
650 * Shared code for cleaning up a session.
651 *
652 * @param pDevExt Device extension.
653 * @param pSession Session data.
654 * This data will be freed by this routine.
655 */
656void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
657{
658 /*
659 * Cleanup the session first.
660 */
661 supdrvCleanupSession(pDevExt, pSession);
662
663 /*
664 * Free the rest of the session stuff.
665 */
666 RTSpinlockDestroy(pSession->Spinlock);
667 pSession->Spinlock = NIL_RTSPINLOCK;
668 pSession->pDevExt = NULL;
669 RTMemFree(pSession);
670 LogFlow(("supdrvCloseSession: returns\n"));
671}
672
673
674/**
675 * Shared code for cleaning up a session (but not quite freeing it).
676 *
677 * This is primarily intended for Mac OS X where we have to clean up the memory
678 * stuff before the file handle is closed.
679 *
680 * @param pDevExt Device extension.
681 * @param pSession Session data.
682 * The session data is cleaned up but not freed by this routine.
683 */
684void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
685{
686 PSUPDRVBUNDLE pBundle;
687 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
688
689 /*
690 * Remove logger instances related to this session.
691 */
692 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
693
694 /*
695 * Release object references made in this session.
696 * In theory there should be no one racing us in this session.
697 */
698 Log2(("release objects - start\n"));
699 if (pSession->pUsage)
700 {
701 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
702 PSUPDRVUSAGE pUsage;
703 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
704
705 while ((pUsage = pSession->pUsage) != NULL)
706 {
707 PSUPDRVOBJ pObj = pUsage->pObj;
708 pSession->pUsage = pUsage->pNext;
709
710 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
711 if (pUsage->cUsage < pObj->cUsage)
712 {
713 pObj->cUsage -= pUsage->cUsage;
714 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
715 }
716 else
717 {
718 /* Destroy the object and free the record. */
719 if (pDevExt->pObjs == pObj)
720 pDevExt->pObjs = pObj->pNext;
721 else
722 {
723 PSUPDRVOBJ pObjPrev;
724 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
725 if (pObjPrev->pNext == pObj)
726 {
727 pObjPrev->pNext = pObj->pNext;
728 break;
729 }
730 Assert(pObjPrev);
731 }
732 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
733
734 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
735 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
736 if (pObj->pfnDestructor)
737#ifdef RT_WITH_W64_UNWIND_HACK
738 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
739#else
740 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
741#endif
742 RTMemFree(pObj);
743 }
744
745 /* free it and continue. */
746 RTMemFree(pUsage);
747
748 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
749 }
750
751 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
752 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
753 }
754 Log2(("release objects - done\n"));
755
756 /*
757 * Release memory allocated in the session.
758 *
759 * We do not serialize this as we assume that the application will
760 * not allocate memory while closing the file handle object.
761 */
762 Log2(("freeing memory:\n"));
763 pBundle = &pSession->Bundle;
764 while (pBundle)
765 {
766 PSUPDRVBUNDLE pToFree;
767 unsigned i;
768
769 /*
770 * Check and unlock all entries in the bundle.
771 */
772 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
773 {
774 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
775 {
776 int rc;
777 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
778 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
779 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
780 {
781 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
782 AssertRC(rc); /** @todo figure out how to handle this. */
783 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
784 }
785 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
786 AssertRC(rc); /** @todo figure out how to handle this. */
787 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
788 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
789 }
790 }
791
792 /*
793 * Advance and free previous bundle.
794 */
795 pToFree = pBundle;
796 pBundle = pBundle->pNext;
797
798 pToFree->pNext = NULL;
799 pToFree->cUsed = 0;
800 if (pToFree != &pSession->Bundle)
801 RTMemFree(pToFree);
802 }
803 Log2(("freeing memory - done\n"));
804
805 /*
806 * Deregister component factories.
807 */
808 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
809 Log2(("deregistering component factories:\n"));
810 if (pDevExt->pComponentFactoryHead)
811 {
812 PSUPDRVFACTORYREG pPrev = NULL;
813 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
814 while (pCur)
815 {
816 if (pCur->pSession == pSession)
817 {
818 /* unlink it */
819 PSUPDRVFACTORYREG pNext = pCur->pNext;
820 if (pPrev)
821 pPrev->pNext = pNext;
822 else
823 pDevExt->pComponentFactoryHead = pNext;
824
825 /* free it */
826 pCur->pNext = NULL;
827 pCur->pSession = NULL;
828 pCur->pFactory = NULL;
829 RTMemFree(pCur);
830
831 /* next */
832 pCur = pNext;
833 }
834 else
835 {
836 /* next */
837 pPrev = pCur;
838 pCur = pCur->pNext;
839 }
840 }
841 }
842 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
843 Log2(("deregistering component factories - done\n"));
844
845 /*
846 * Loaded images need to be dereferenced and possibly freed up.
847 */
848 RTSemFastMutexRequest(pDevExt->mtxLdr);
849 Log2(("freeing images:\n"));
850 if (pSession->pLdrUsage)
851 {
852 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
853 pSession->pLdrUsage = NULL;
854 while (pUsage)
855 {
856 void *pvFree = pUsage;
857 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
858 if (pImage->cUsage > pUsage->cUsage)
859 pImage->cUsage -= pUsage->cUsage;
860 else
861 supdrvLdrFree(pDevExt, pImage);
862 pUsage->pImage = NULL;
863 pUsage = pUsage->pNext;
864 RTMemFree(pvFree);
865 }
866 }
867 RTSemFastMutexRelease(pDevExt->mtxLdr);
868 Log2(("freeing images - done\n"));
869
870 /*
871 * Unmap the GIP.
872 */
873 Log2(("unmapping GIP:\n"));
874 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
875 {
876 SUPR0GipUnmap(pSession);
877 pSession->fGipReferenced = 0;
878 }
879 Log2(("unmapping GIP - done\n"));
880}
881
882
883/**
884 * Fast path I/O Control worker.
885 *
886 * @returns VBox status code that should be passed down to ring-3 unchanged.
887 * @param uIOCtl Function number.
888 * @param idCpu VMCPU id.
889 * @param pDevExt Device extension.
890 * @param pSession Session data.
891 */
892int VBOXCALL supdrvIOCtlFast(uintptr_t uIOCtl, unsigned idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
893{
894 /*
895 * We check the two prereqs after doing this only to allow the compiler to optimize things better.
896 */
897 if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
898 {
899 switch (uIOCtl)
900 {
901 case SUP_IOCTL_FAST_DO_RAW_RUN:
902#ifdef RT_WITH_W64_UNWIND_HACK
903 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
904#else
905 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
906#endif
907 break;
908 case SUP_IOCTL_FAST_DO_HWACC_RUN:
909#ifdef RT_WITH_W64_UNWIND_HACK
910 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
911#else
912 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
913#endif
914 break;
915 case SUP_IOCTL_FAST_DO_NOP:
916#ifdef RT_WITH_W64_UNWIND_HACK
917 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
918#else
919 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
920#endif
921 break;
922 default:
923 return VERR_INTERNAL_ERROR;
924 }
925 return VINF_SUCCESS;
926 }
927 return VERR_INTERNAL_ERROR;
928}
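/* Editor's note - an illustrative sketch, not part of the original file: the
 * OS specific ioctl entry point typically routes the three fast codes
 * straight to supdrvIOCtlFast() and everything else to supdrvIOCtl(),
 * roughly:
 *
 *     if (    uIOCtl == SUP_IOCTL_FAST_DO_RAW_RUN
 *         ||  uIOCtl == SUP_IOCTL_FAST_DO_HWACC_RUN
 *         ||  uIOCtl == SUP_IOCTL_FAST_DO_NOP)
 *         return supdrvIOCtlFast(uIOCtl, idCpu, pDevExt, pSession);
 *     return supdrvIOCtl(uIOCtl, pDevExt, pSession, pReqHdr);
 */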
929
930
931/**
932 * Helper for supdrvIOCtl. Check if pszStr contains any character of pszChars.
933 * We would use strpbrk here if it were on the RedHat kABI whitelist;
934 * see http://www.kerneldrivers.org/RHEL5.
935 *
936 * @return 1 if pszStr does contain any character of pszChars, 0 otherwise.
937 * @param pszStr String to check
938 * @param pszChars Character set
939 */
940static int supdrvCheckInvalidChar(const char *pszStr, const char *pszChars)
941{
942 int chCur;
943 while ((chCur = *pszStr++) != '\0')
944 {
945 int ch;
946 const char *psz = pszChars;
947 while ((ch = *psz++) != '\0')
948 if (ch == chCur)
949 return 1;
950
951 }
952 return 0;
953}
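/* Editor's note - an illustrative sketch, not part of the original file: the
 * SUP_IOCTL_LDR_OPEN validation below rejects module names containing any
 * character of a fixed set, e.g.
 *
 *     if (supdrvCheckInvalidChar(pReq->u.In.szName, ";:()[]{}"))
 *         return pReq->Hdr.rc = VERR_INVALID_PARAMETER;
 *
 * (the character set is shortened here; see the actual check in supdrvIOCtl)
 */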
954
955
956/**
957 * I/O Control worker.
958 *
959 * @returns 0 on success.
960 * @returns VERR_INVALID_PARAMETER if the request is invalid.
961 *
962 * @param uIOCtl Function number.
963 * @param pDevExt Device extension.
964 * @param pSession Session data.
965 * @param pReqHdr The request header.
966 */
967int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
968{
969 /*
970 * Validate the request.
971 */
972 /* this first check could probably be omitted as it's also done by the OS specific code... */
973 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
974 || pReqHdr->cbIn < sizeof(*pReqHdr)
975 || pReqHdr->cbOut < sizeof(*pReqHdr)))
976 {
977 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
978 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
979 return VERR_INVALID_PARAMETER;
980 }
981 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
982 {
983 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
984 {
985 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
986 return VERR_INVALID_PARAMETER;
987 }
988 }
989 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
990 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
991 {
992 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
993 return VERR_INVALID_PARAMETER;
994 }
995
996/*
997 * Validation macros
998 */
999#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1000 do { \
1001 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1002 { \
1003 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1004 (long)pReq->Hdr.cbIn, (long)(cbInExpect), (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1005 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1006 } \
1007 } while (0)
1008
1009#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1010
1011#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1012 do { \
1013 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1014 { \
1015 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1016 (long)pReq->Hdr.cbIn, (long)(cbInExpect))); \
1017 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1018 } \
1019 } while (0)
1020
1021#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1022 do { \
1023 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1024 { \
1025 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1026 (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1027 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1028 } \
1029 } while (0)
1030
1031#define REQ_CHECK_EXPR(Name, expr) \
1032 do { \
1033 if (RT_UNLIKELY(!(expr))) \
1034 { \
1035 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1036 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1037 } \
1038 } while (0)
1039
1040#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1041 do { \
1042 if (RT_UNLIKELY(!(expr))) \
1043 { \
1044 OSDBGPRINT( fmt ); \
1045 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1046 } \
1047 } while (0)
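/* Editor's note - an illustrative sketch, not part of the original file: for
 * a request 'Name', REQ_CHECK_SIZES(Name) above boils down to a header size
 * check against the Name##_SIZE_IN / Name##_SIZE_OUT constants, i.e. roughly
 * (SUP_IOCTL_FOO is a made-up request name):
 *
 *     if (   pReqHdr->cbIn  != SUP_IOCTL_FOO_SIZE_IN
 *         || pReqHdr->cbOut != SUP_IOCTL_FOO_SIZE_OUT)
 *         return pReq->Hdr.rc = VERR_INVALID_PARAMETER;
 */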
1048
1049
1050 /*
1051 * The switch.
1052 */
1053 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1054 {
1055 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1056 {
1057 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1058 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1059 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1060 {
1061 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1062 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1063 return 0;
1064 }
1065
1066#if 0
1067 /*
1068 * Call out to the OS specific code and let it do permission checks on the
1069 * client process.
1070 */
1071 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1072 {
1073 pReq->u.Out.u32Cookie = 0xffffffff;
1074 pReq->u.Out.u32SessionCookie = 0xffffffff;
1075 pReq->u.Out.u32SessionVersion = 0xffffffff;
1076 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1077 pReq->u.Out.pSession = NULL;
1078 pReq->u.Out.cFunctions = 0;
1079 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1080 return 0;
1081 }
1082#endif
1083
1084 /*
1085 * Match the version.
1086 * The current logic is very simple: match the major interface version.
1087 */
1088 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1089 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1090 {
1091 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1092 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1093 pReq->u.Out.u32Cookie = 0xffffffff;
1094 pReq->u.Out.u32SessionCookie = 0xffffffff;
1095 pReq->u.Out.u32SessionVersion = 0xffffffff;
1096 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1097 pReq->u.Out.pSession = NULL;
1098 pReq->u.Out.cFunctions = 0;
1099 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1100 return 0;
1101 }
1102
1103 /*
1104 * Fill in return data and be gone.
1105 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1106 * u32SessionVersion <= u32ReqVersion!
1107 */
1108 /** @todo Somehow validate the client and negotiate a secure cookie... */
1109 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1110 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1111 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1112 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1113 pReq->u.Out.pSession = pSession;
1114 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1115 pReq->Hdr.rc = VINF_SUCCESS;
1116 return 0;
1117 }
1118
1119 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1120 {
1121 /* validate */
1122 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1123 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1124
1125 /* execute */
1126 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1127 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1128 pReq->Hdr.rc = VINF_SUCCESS;
1129 return 0;
1130 }
1131
1132 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_INSTALL):
1133 {
1134 /* validate */
1135 PSUPIDTINSTALL pReq = (PSUPIDTINSTALL)pReqHdr;
1136 REQ_CHECK_SIZES(SUP_IOCTL_IDT_INSTALL);
1137
1138 /* execute */
1139 pReq->u.Out.u8Idt = 3;
1140 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1141 return 0;
1142 }
1143
1144 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_REMOVE):
1145 {
1146 /* validate */
1147 PSUPIDTREMOVE pReq = (PSUPIDTREMOVE)pReqHdr;
1148 REQ_CHECK_SIZES(SUP_IOCTL_IDT_REMOVE);
1149
1150 /* execute */
1151 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1152 return 0;
1153 }
1154
1155 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1156 {
1157 /* validate */
1158 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1159 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1160 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1161 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1162 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1163
1164 /* execute */
1165 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1166 if (RT_FAILURE(pReq->Hdr.rc))
1167 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1168 return 0;
1169 }
1170
1171 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1172 {
1173 /* validate */
1174 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1175 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1176
1177 /* execute */
1178 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1179 return 0;
1180 }
1181
1182 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1183 {
1184 /* validate */
1185 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1186 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1187
1188 /* execute */
1189 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1190 if (RT_FAILURE(pReq->Hdr.rc))
1191 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1192 return 0;
1193 }
1194
1195 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1196 {
1197 /* validate */
1198 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1199 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1200
1201 /* execute */
1202 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1203 return 0;
1204 }
1205
1206 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1207 {
1208 /* validate */
1209 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1210 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1211 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage > 0);
1212 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage < _1M*16);
1213 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1214 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1215 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, !supdrvCheckInvalidChar(pReq->u.In.szName, ";:()[]{}/\\|&*%#@!~`\"'"));
1216
1217 /* execute */
1218 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1219 return 0;
1220 }
1221
1222 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1223 {
1224 /* validate */
1225 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1226 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= sizeof(*pReq));
1227 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImage), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1228 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
1229 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1230 || ( pReq->u.In.offSymbols < pReq->u.In.cbImage
1231 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImage),
1232 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImage=%#lx\n", (long)pReq->u.In.offSymbols,
1233 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImage));
1234 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1235 || ( pReq->u.In.offStrTab < pReq->u.In.cbImage
1236 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImage
1237 && pReq->u.In.cbStrTab <= pReq->u.In.cbImage),
1238 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImage=%#lx\n", (long)pReq->u.In.offStrTab,
1239 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImage));
1240
1241 if (pReq->u.In.cSymbols)
1242 {
1243 uint32_t i;
1244 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.achImage[pReq->u.In.offSymbols];
1245 for (i = 0; i < pReq->u.In.cSymbols; i++)
1246 {
1247 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImage,
1248 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImage));
1249 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1250 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
1251 REQ_CHECK_EXPR_FMT(memchr(&pReq->u.In.achImage[pReq->u.In.offStrTab + paSyms[i].offName], '\0', pReq->u.In.cbStrTab - paSyms[i].offName),
1252 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
1253 }
1254 }
1255
1256 /* execute */
1257 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1258 return 0;
1259 }
1260
1261 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1262 {
1263 /* validate */
1264 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1265 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1266
1267 /* execute */
1268 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1269 return 0;
1270 }
1271
1272 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1273 {
1274 /* validate */
1275 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1276 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1277 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, memchr(pReq->u.In.szSymbol, '\0', sizeof(pReq->u.In.szSymbol)));
1278
1279 /* execute */
1280 pReq->Hdr.rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
1281 return 0;
1282 }
1283
1284 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0(0)):
1285 {
1286 /* validate */
1287 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1288 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1289 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1290
1291 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1292 {
1293 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1294
1295 /* execute */
1296 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1297#ifdef RT_WITH_W64_UNWIND_HACK
1298 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1299#else
1300 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1301#endif
1302 else
1303 pReq->Hdr.rc = VERR_WRONG_ORDER;
1304 }
1305 else
1306 {
1307 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1308 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1309 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1310 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1311 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1312
1313 /* execute */
1314 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1315#ifdef RT_WITH_W64_UNWIND_HACK
1316 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1317#else
1318 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1319#endif
1320 else
1321 pReq->Hdr.rc = VERR_WRONG_ORDER;
1322 }
1323
1324 if ( RT_FAILURE(pReq->Hdr.rc)
1325 && pReq->Hdr.rc != VERR_INTERRUPTED
1326 && pReq->Hdr.rc != VERR_TIMEOUT)
1327 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1328 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1329 else
1330 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1331 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1332 return 0;
1333 }
1334
1335 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1336 {
1337 /* validate */
1338 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1339 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1340
1341 /* execute */
1342 pReq->Hdr.rc = VINF_SUCCESS;
1343 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1344 return 0;
1345 }
1346
1347 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1348 {
1349 /* validate */
1350 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1351 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1352 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1353
1354 /* execute */
1355 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1356 if (RT_FAILURE(pReq->Hdr.rc))
1357 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1358 return 0;
1359 }
1360
1361 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1362 {
1363 /* validate */
1364 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1365 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1366
1367 /* execute */
1368 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1369 return 0;
1370 }
1371
1372 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1373 {
1374 /* validate */
1375 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1376 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1377
1378 /* execute */
1379 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1380 if (RT_SUCCESS(pReq->Hdr.rc))
1381 pReq->u.Out.pGipR0 = pDevExt->pGip;
1382 return 0;
1383 }
1384
1385 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
1386 {
1387 /* validate */
1388 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
1389 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
1390
1391 /* execute */
1392 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
1393 return 0;
1394 }
1395
1396 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
1397 {
1398 /* validate */
1399 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
1400 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
1401 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
1402 || ( VALID_PTR(pReq->u.In.pVMR0)
1403 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
1404 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
1405 /* execute */
1406 pSession->pVM = pReq->u.In.pVMR0;
1407 pReq->Hdr.rc = VINF_SUCCESS;
1408 return 0;
1409 }
1410
1411 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC):
1412 {
1413 /* validate */
1414 PSUPPAGEALLOC pReq = (PSUPPAGEALLOC)pReqHdr;
1415 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_SIZE_IN);
1416 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC, SUP_IOCTL_PAGE_ALLOC_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1417
1418 /* execute */
1419 pReq->Hdr.rc = SUPR0PageAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1420 if (RT_FAILURE(pReq->Hdr.rc))
1421 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1422 return 0;
1423 }
1424
1425 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
1426 {
1427 /* validate */
1428 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
1429 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
1430 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
1431 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
1432 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
1433 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
1434 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
1435 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
1436 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
1437
1438 /* execute */
1439 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
1440 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
1441 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
1442 &pReq->u.Out.aPages[0]);
1443 if (RT_FAILURE(pReq->Hdr.rc))
1444 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1445 return 0;
1446 }
1447
1448 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
1449 {
1450 /* validate */
1451 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
1452 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
1453 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
1454 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
1455 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1456 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
1457
1458 /* execute */
1459 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
1460 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
1461 if (RT_FAILURE(pReq->Hdr.rc))
1462 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1463 return 0;
1464 }
1465
1466 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
1467 {
1468 /* validate */
1469 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
1470 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
1471
1472 /* execute */
1473 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
1474 return 0;
1475 }
1476
1477 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE(0)):
1478 {
1479 /* validate */
1480 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
1481 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1482 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1483
1484 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
1485 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
1486 else
1487 {
1488 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
1489 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
1490 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
1491 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
1492 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
1493 }
1494 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1495
1496 /* execute */
1497 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
1498 return 0;
1499 }
1500
1501 default:
1502 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
1503 break;
1504 }
1505 return SUPDRV_ERR_GENERAL_FAILURE;
1506}
1507
1508
1509/**
1510 * Inter-Driver Communication (IDC) worker.
1511 *
1512 * @returns VBox status code.
1513 * @retval VINF_SUCCESS on success.
1514 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1515 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
1516 *
1517 * @param uReq The request (function) code.
1518 * @param pDevExt Device extension.
1519 * @param pSession Session data.
1520 * @param pReqHdr The request header.
1521 */
1522int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
1523{
1524 /*
1525 * The OS specific code has already validated the pSession
1526 * pointer and that the request size is greater than or equal
1527 * to the size of the header.
1528 *
1529 * So, just check that pSession is a kernel context session.
1530 */
1531 if (RT_UNLIKELY( pSession
1532 && pSession->R0Process != NIL_RTR0PROCESS))
1533 return VERR_INVALID_PARAMETER;
1534
1535/*
1536 * Validation macro.
1537 */
1538#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
1539 do { \
1540 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
1541 { \
1542 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
1543 (long)pReqHdr->cb, (long)(cbExpect))); \
1544 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1545 } \
1546 } while (0)
1547
1548 switch (uReq)
1549 {
1550 case SUPDRV_IDC_REQ_CONNECT:
1551 {
1552 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
1553 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
1554
1555 /*
1556 * Validate the cookie and other input.
1557 */
1558 if (pReq->Hdr.pSession != NULL)
1559 {
1560 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pReq->Hdr.pSession));
1561 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1562 }
1563 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
1564 {
1565 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1566 pReq->u.In.u32MagicCookie, SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
1567 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1568 }
1569 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
1570 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
1571 {
1572 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uReqVersion=%#x don't match!\n",
1573 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1574 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1575 }
1576
1577 /*
1578 * Match the version.
1579 * The current logic is very simple, match the major interface version.
1580 */
1581 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
1582 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
1583 {
1584 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1585 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, SUPDRV_IDC_VERSION));
1586 pReq->u.Out.pSession = NULL;
1587 pReq->u.Out.uSessionVersion = 0xffffffff;
1588 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1589 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1590 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1591 return VINF_SUCCESS;
1592 }
1593
1594 pReq->u.Out.pSession = NULL;
1595 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
1596 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1597 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1598
1599 /*
1600 * On NT we will already have a session associated with the
1601 * client, just like with the SUP_IOCTL_COOKIE request, while
1602 * the other platforms don't.
1603 */
1604#ifdef RT_OS_WINDOWS
1605 pReq->Hdr.rc = VINF_SUCCESS;
1606#else
1607 AssertReturn(!pSession, VERR_INTERNAL_ERROR);
1608 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, &pSession);
1609 if (RT_FAILURE(pReq->Hdr.rc))
1610 {
1611 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
1612 return VINF_SUCCESS;
1613 }
1614#endif
1615
1616 pReq->u.Out.pSession = pSession;
1617 pReq->Hdr.pSession = pSession;
1618
1619 return VINF_SUCCESS;
1620 }
1621
1622 case SUPDRV_IDC_REQ_DISCONNECT:
1623 {
1624 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
1625
1626#ifdef RT_OS_WINDOWS
1627 /* Windows will destroy the session when the file object is destroyed. */
1628#else
1629 supdrvCloseSession(pDevExt, pSession);
1630#endif
1631 return pReqHdr->rc = VINF_SUCCESS;
1632 }
1633
1634 case SUPDRV_IDC_REQ_GET_SYMBOL:
1635 {
1636 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
1637 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
1638
1639 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
1640 return VINF_SUCCESS;
1641 }
1642
1643 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
1644 {
1645 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
1646 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
1647
1648 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
1649 return VINF_SUCCESS;
1650 }
1651
1652 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
1653 {
1654 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
1655 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
1656
1657 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
1658 return VINF_SUCCESS;
1659 }
1660
1661 default:
1662 Log(("Unknown IDC %#lx\n", (long)uReq));
1663 break;
1664 }
1665
1666#undef REQ_CHECK_IDC_SIZE
1667 return VERR_NOT_SUPPORTED;
1668}
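
/*
 * Usage sketch (illustration only, not compiled into the driver): how a
 * kernel-mode client could drive supdrvIDC() to establish an IDC connection,
 * assuming it already holds the device extension pointer and that the request
 * header is the first member of SUPDRVIDCREQCONNECT. The mySketch* name is a
 * placeholder; real clients go through the OS specific IDC entry point.
 */
#if 0
static int mySketchIdcConnect(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION *ppSession)
{
    SUPDRVIDCREQCONNECT Req;
    int rc;

    /* Fill in the request header and the connect specific input. */
    Req.Hdr.cb              = sizeof(Req);
    Req.Hdr.rc              = VERR_INTERNAL_ERROR;
    Req.Hdr.pSession        = NULL;                               /* must be NULL on connect */
    Req.u.In.u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE;
    Req.u.In.uMinVersion    = SUPDRV_IDC_VERSION & UINT32_C(0xffff0000);
    Req.u.In.uReqVersion    = SUPDRV_IDC_VERSION;

    rc = supdrvIDC(SUPDRV_IDC_REQ_CONNECT, pDevExt, NULL /* pSession */, &Req.Hdr);
    if (RT_SUCCESS(rc))
        rc = Req.Hdr.rc;
    if (RT_SUCCESS(rc))
        *ppSession = Req.u.Out.pSession;
    return rc;
}
#endif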
1669
1670
1671/**
1672 * Register an object for reference counting.
1673 * The object is registered with one reference in the specified session.
1674 *
1675 * @returns Unique identifier on success (pointer).
1676 * All future references must use this identifier.
1677 * @returns NULL on failure.
1678 * @param pfnDestructor The destructor function which will be called when the reference count reaches 0.
1679 * @param pvUser1 The first user argument.
1680 * @param pvUser2 The second user argument.
1681 */
1682SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1683{
1684 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1685 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1686 PSUPDRVOBJ pObj;
1687 PSUPDRVUSAGE pUsage;
1688
1689 /*
1690 * Validate the input.
1691 */
1692 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
1693 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
1694 AssertPtrReturn(pfnDestructor, NULL);
1695
1696 /*
1697 * Allocate and initialize the object.
1698 */
1699 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1700 if (!pObj)
1701 return NULL;
1702 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1703 pObj->enmType = enmType;
1704 pObj->pNext = NULL;
1705 pObj->cUsage = 1;
1706 pObj->pfnDestructor = pfnDestructor;
1707 pObj->pvUser1 = pvUser1;
1708 pObj->pvUser2 = pvUser2;
1709 pObj->CreatorUid = pSession->Uid;
1710 pObj->CreatorGid = pSession->Gid;
1711 pObj->CreatorProcess= pSession->Process;
1712 supdrvOSObjInitCreator(pObj, pSession);
1713
1714 /*
1715 * Allocate the usage record.
1716 * (We keep freed usage records around to simplify SUPR0ObjAddRef().)
1717 */
1718 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1719
1720 pUsage = pDevExt->pUsageFree;
1721 if (pUsage)
1722 pDevExt->pUsageFree = pUsage->pNext;
1723 else
1724 {
1725 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1726 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1727 if (!pUsage)
1728 {
1729 RTMemFree(pObj);
1730 return NULL;
1731 }
1732 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1733 }
1734
1735 /*
1736 * Insert the object and create the session usage record.
1737 */
1738 /* The object. */
1739 pObj->pNext = pDevExt->pObjs;
1740 pDevExt->pObjs = pObj;
1741
1742 /* The session record. */
1743 pUsage->cUsage = 1;
1744 pUsage->pObj = pObj;
1745 pUsage->pNext = pSession->pUsage;
1746 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
1747 pSession->pUsage = pUsage;
1748
1749 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1750
1751 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser2=%p)\n", pObj, pvUser1, pvUser2));
1752 return pObj;
1753}
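
/*
 * Usage sketch (illustration only, not compiled into the driver): registering
 * an object together with a destructor. The mySketch* names are placeholders,
 * a valid ring-0 session is assumed, and SUPDRVOBJTYPE_VM is assumed to be one
 * of the valid object types.
 */
#if 0
static DECLCALLBACK(void) mySketchObjDestructor(void *pvObj, void *pvUser1, void *pvUser2)
{
    /* Called by SUPR0ObjRelease() when the last reference is dropped. */
    RTMemFree(pvUser1);
    NOREF(pvObj); NOREF(pvUser2);
}

static void *mySketchRegisterObj(PSUPDRVSESSION pSession)
{
    void *pvPayload = RTMemAllocZ(64);
    void *pvObj     = pvPayload
                    ? SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, mySketchObjDestructor, pvPayload, NULL)
                    : NULL;
    if (!pvObj)
        RTMemFree(pvPayload);
    return pvObj; /* carries one reference owned by pSession */
}
#endif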
1754
1755
1756/**
1757 * Increment the reference counter for the object, associating the reference
1758 * with the specified session.
1759 *
1760 * @returns IPRT status code.
1761 * @param pvObj The identifier returned by SUPR0ObjRegister().
1762 * @param pSession The session which is referencing the object.
1763 *
1764 * @remarks The caller should not own any spinlocks and must carefully protect
1765 * itself against potential race with the destructor so freed memory
1766 * isn't accessed here.
1767 */
1768SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1769{
1770 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1771 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1772 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1773 PSUPDRVUSAGE pUsagePre;
1774 PSUPDRVUSAGE pUsage;
1775
1776 /*
1777 * Validate the input.
1778 * Be ready for the destruction race (someone might be stuck in the
1779 * destructor waiting on a lock we own).
1780 */
1781 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1782 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
1783 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC + 1,
1784 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC + 1),
1785 VERR_INVALID_PARAMETER);
1786
1787 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1788
1789 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1790 {
1791 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1792
1793 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1794 return VERR_WRONG_ORDER;
1795 }
1796
1797 /*
1798 * Preallocate the usage record.
1799 */
1800 pUsagePre = pDevExt->pUsageFree;
1801 if (pUsagePre)
1802 pDevExt->pUsageFree = pUsagePre->pNext;
1803 else
1804 {
1805 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1806 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
1807 if (!pUsagePre)
1808 return VERR_NO_MEMORY;
1809
1810 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1811 }
1812
1813 /*
1814 * Reference the object.
1815 */
1816 pObj->cUsage++;
1817
1818 /*
1819 * Look for the session record.
1820 */
1821 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
1822 {
1823 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1824 if (pUsage->pObj == pObj)
1825 break;
1826 }
1827 if (pUsage)
1828 pUsage->cUsage++;
1829 else
1830 {
1831 /* create a new session record. */
1832 pUsagePre->cUsage = 1;
1833 pUsagePre->pObj = pObj;
1834 pUsagePre->pNext = pSession->pUsage;
1835 pSession->pUsage = pUsagePre;
1836 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
1837
1838 pUsagePre = NULL;
1839 }
1840
1841 /*
1842 * Put any unused usage record into the free list.
1843 */
1844 if (pUsagePre)
1845 {
1846 pUsagePre->pNext = pDevExt->pUsageFree;
1847 pDevExt->pUsageFree = pUsagePre;
1848 }
1849
1850 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1851
1852 return VINF_SUCCESS;
1853}
1854
1855
1856/**
1857 * Decrement / destroy a reference counter record for an object.
1858 *
1859 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
1860 *
1861 * @returns IPRT status code.
1862 * @param pvObj The identifier returned by SUPR0ObjRegister().
1863 * @param pSession The session which is referencing the object.
1864 */
1865SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
1866{
1867 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1868 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1869 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1870 bool fDestroy = false;
1871 PSUPDRVUSAGE pUsage;
1872 PSUPDRVUSAGE pUsagePrev;
1873
1874 /*
1875 * Validate the input.
1876 */
1877 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1878 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
1879 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
1880 VERR_INVALID_PARAMETER);
1881
1882 /*
1883 * Acquire the spinlock and look for the usage record.
1884 */
1885 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1886
1887 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
1888 pUsage;
1889 pUsagePrev = pUsage, pUsage = pUsage->pNext)
1890 {
1891 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1892 if (pUsage->pObj == pObj)
1893 {
1894 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
1895 if (pUsage->cUsage > 1)
1896 {
1897 pObj->cUsage--;
1898 pUsage->cUsage--;
1899 }
1900 else
1901 {
1902 /*
1903 * Free the session record.
1904 */
1905 if (pUsagePrev)
1906 pUsagePrev->pNext = pUsage->pNext;
1907 else
1908 pSession->pUsage = pUsage->pNext;
1909 pUsage->pNext = pDevExt->pUsageFree;
1910 pDevExt->pUsageFree = pUsage;
1911
1912 /* What about the object? */
1913 if (pObj->cUsage > 1)
1914 pObj->cUsage--;
1915 else
1916 {
1917 /*
1918 * Object is to be destroyed, unlink it.
1919 */
1920 pObj->u32Magic = SUPDRVOBJ_MAGIC + 1;
1921 fDestroy = true;
1922 if (pDevExt->pObjs == pObj)
1923 pDevExt->pObjs = pObj->pNext;
1924 else
1925 {
1926 PSUPDRVOBJ pObjPrev;
1927 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
1928 if (pObjPrev->pNext == pObj)
1929 {
1930 pObjPrev->pNext = pObj->pNext;
1931 break;
1932 }
1933 Assert(pObjPrev);
1934 }
1935 }
1936 }
1937 break;
1938 }
1939 }
1940
1941 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1942
1943 /*
1944 * Call the destructor and free the object if required.
1945 */
1946 if (fDestroy)
1947 {
1948 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
1949 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
1950 if (pObj->pfnDestructor)
1951#ifdef RT_WITH_W64_UNWIND_HACK
1952 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
1953#else
1954 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
1955#endif
1956 RTMemFree(pObj);
1957 }
1958
1959 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
1960 return pUsage ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
1961}
1962
1963/**
1964 * Verifies that the current process can access the specified object.
1965 *
1966 * @returns The following IPRT status code:
1967 * @retval VINF_SUCCESS if access was granted.
1968 * @retval VERR_PERMISSION_DENIED if denied access.
1969 * @retval VERR_INVALID_PARAMETER if invalid parameter.
1970 *
1971 * @param pvObj The identifier returned by SUPR0ObjRegister().
1972 * @param pSession The session which wishes to access the object.
1973 * @param pszObjName Object string name. This is optional and depends on the object type.
1974 *
1975 * @remark The caller is responsible for making sure the object isn't removed while
1976 * we're inside this function. If uncertain about this, just call AddRef before calling us.
1977 */
1978SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
1979{
1980 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1981 int rc;
1982
1983 /*
1984 * Validate the input.
1985 */
1986 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1987 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
1988 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
1989 VERR_INVALID_PARAMETER);
1990
1991 /*
1992 * Check access. (returns true if a decision has been made.)
1993 */
1994 rc = VERR_INTERNAL_ERROR;
1995 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
1996 return rc;
1997
1998 /*
1999 * Default policy is to allow the user to access his own
2000 * stuff but nothing else.
2001 */
2002 if (pObj->CreatorUid == pSession->Uid)
2003 return VINF_SUCCESS;
2004 return VERR_PERMISSION_DENIED;
2005}
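
/*
 * Usage sketch (illustration only, not compiled into the driver): a session
 * referencing an object it got hold of, checking access and dropping the
 * reference again. pvObj is assumed to originate from SUPR0ObjRegister() and
 * the mySketch* name is a placeholder.
 */
#if 0
static int mySketchUseObj(void *pvObj, PSUPDRVSESSION pSession)
{
    int rc = SUPR0ObjAddRef(pvObj, pSession);
    if (RT_SUCCESS(rc))
    {
        rc = SUPR0ObjVerifyAccess(pvObj, pSession, NULL /* pszObjName */);
        if (RT_SUCCESS(rc))
        {
            /* ... use the object ... */
        }
        SUPR0ObjRelease(pvObj, pSession);
    }
    return rc;
}
#endif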
2006
2007
2008/**
2009 * Lock pages.
2010 *
2011 * @returns IPRT status code.
2012 * @param pSession Session to which the locked memory should be associated.
2013 * @param pvR3 Start of the memory range to lock.
2014 * This must be page aligned.
2015 * @param cPages Number of pages to lock.
2016 * @param paPages Where to store the physical page addresses (one entry per page).
2017 */
2018SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
2019{
2020 int rc;
2021 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2022 const size_t cb = (size_t)cPages << PAGE_SHIFT;
2023 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
2024
2025 /*
2026 * Verify input.
2027 */
2028 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2029 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2030 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
2031 || !pvR3)
2032 {
2033 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
2034 return VERR_INVALID_PARAMETER;
2035 }
2036
2037#ifdef RT_OS_WINDOWS /* A temporary hack for windows, will be removed once all ring-3 code has been cleaned up. */
2038 /* First check if we allocated it using SUPPageAlloc; if so then we don't need to lock it again */
2039 rc = supdrvPageGetPhys(pSession, pvR3, cPages, paPages);
2040 if (RT_SUCCESS(rc))
2041 return rc;
2042#endif
2043
2044 /*
2045 * Let IPRT do the job.
2046 */
2047 Mem.eType = MEMREF_TYPE_LOCKED;
2048 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTR0ProcHandleSelf());
2049 if (RT_SUCCESS(rc))
2050 {
2051 uint32_t iPage = cPages;
2052 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
2053 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
2054
2055 while (iPage-- > 0)
2056 {
2057 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2058 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
2059 {
2060 AssertMsgFailed(("iPage=%d\n", iPage));
2061 rc = VERR_INTERNAL_ERROR;
2062 break;
2063 }
2064 }
2065 if (RT_SUCCESS(rc))
2066 rc = supdrvMemAdd(&Mem, pSession);
2067 if (RT_FAILURE(rc))
2068 {
2069 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
2070 AssertRC(rc2);
2071 }
2072 }
2073
2074 return rc;
2075}
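
/*
 * Usage sketch (illustration only, not compiled into the driver): locking a
 * page aligned ring-3 buffer and retrieving its physical page addresses. The
 * mySketch* name is a placeholder and the buffer is assumed to be valid in the
 * calling process.
 */
#if 0
static int mySketchLockUserBuffer(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages)
{
    PRTHCPHYS paPages = (PRTHCPHYS)RTMemAlloc(cPages * sizeof(RTHCPHYS));
    int       rc;
    if (!paPages)
        return VERR_NO_MEMORY;

    rc = SUPR0LockMem(pSession, pvR3, cPages, paPages);
    if (RT_SUCCESS(rc))
    {
        /* ... hand paPages[0..cPages-1] to whatever needs the physical addresses ... */
        rc = SUPR0UnlockMem(pSession, pvR3);
    }
    RTMemFree(paPages);
    return rc;
}
#endif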
2076
2077
2078/**
2079 * Unlocks the memory pointed to by pv.
2080 *
2081 * @returns IPRT status code.
2082 * @param pSession Session to which the memory was locked.
2083 * @param pvR3 Memory to unlock.
2084 */
2085SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2086{
2087 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2088 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2089#ifdef RT_OS_WINDOWS
2090 /*
2091 * Temporary hack for windows - SUPR0PageFree will unlock SUPR0PageAlloc
2092 * allocations; ignore this call.
2093 */
2094 if (supdrvPageWasLockedByPageAlloc(pSession, pvR3))
2095 {
2096 LogFlow(("Page will be unlocked in SUPR0PageFree -> ignore\n"));
2097 return VINF_SUCCESS;
2098 }
2099#endif
2100 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
2101}
2102
2103
2104/**
2105 * Allocates a chunk of page aligned memory with contiguous and fixed physical
2106 * backing.
2107 *
2108 * @returns IPRT status code.
2109 * @param pSession Session data.
2110 * @param cPages Number of pages to allocate.
2111 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
2112 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
2113 * @param pHCPhys Where to put the physical address of allocated memory.
2114 */
2115SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
2116{
2117 int rc;
2118 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2119 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
2120
2121 /*
2122 * Validate input.
2123 */
2124 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2125 if (!ppvR3 || !ppvR0 || !pHCPhys)
2126 {
2127 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
2128 pSession, ppvR0, ppvR3, pHCPhys));
2129 return VERR_INVALID_PARAMETER;
2130
2131 }
2132 if (cPages < 1 || cPages >= 256)
2133 {
2134 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2135 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2136 }
2137
2138 /*
2139 * Let IPRT do the job.
2140 */
2141 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
2142 if (RT_SUCCESS(rc))
2143 {
2144 int rc2;
2145 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2146 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2147 if (RT_SUCCESS(rc))
2148 {
2149 Mem.eType = MEMREF_TYPE_CONT;
2150 rc = supdrvMemAdd(&Mem, pSession);
2151 if (!rc)
2152 {
2153 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2154 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2155 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
2156 return 0;
2157 }
2158
2159 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2160 AssertRC(rc2);
2161 }
2162 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2163 AssertRC(rc2);
2164 }
2165
2166 return rc;
2167}
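
/*
 * Usage sketch (illustration only, not compiled into the driver): allocating a
 * physically contiguous chunk with both mappings and freeing it again through
 * the ring-3 address. The mySketch* name and the page count are placeholders.
 */
#if 0
static int mySketchContAlloc(PSUPDRVSESSION pSession)
{
    RTR0PTR  pvR0;
    RTR3PTR  pvR3;
    RTHCPHYS HCPhys;
    int rc = SUPR0ContAlloc(pSession, 16 /* cPages */, &pvR0, &pvR3, &HCPhys);
    if (RT_SUCCESS(rc))
    {
        /* ... give HCPhys to whatever needs a contiguous physical range, use pvR0 in ring-0 ... */
        rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pvR3);
    }
    return rc;
}
#endif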
2168
2169
2170/**
2171 * Frees memory allocated using SUPR0ContAlloc().
2172 *
2173 * @returns IPRT status code.
2174 * @param pSession The session to which the memory was allocated.
2175 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2176 */
2177SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2178{
2179 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2180 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2181 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
2182}
2183
2184
2185/**
2186 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
2187 *
2188 * The memory isn't zeroed.
2189 *
2190 * @returns IPRT status code.
2191 * @param pSession Session data.
2192 * @param cPages Number of pages to allocate.
2193 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
2194 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
2195 * @param paPages Where to put the physical addresses of allocated memory.
2196 */
2197SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2198{
2199 unsigned iPage;
2200 int rc;
2201 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2202 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
2203
2204 /*
2205 * Validate input.
2206 */
2207 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2208 if (!ppvR3 || !ppvR0 || !paPages)
2209 {
2210 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
2211 pSession, ppvR3, ppvR0, paPages));
2212 return VERR_INVALID_PARAMETER;
2213
2214 }
2215 if (cPages < 1 || cPages >= 256)
2216 {
2217 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2218 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2219 }
2220
2221 /*
2222 * Let IPRT do the work.
2223 */
2224 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
2225 if (RT_SUCCESS(rc))
2226 {
2227 int rc2;
2228 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2229 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2230 if (RT_SUCCESS(rc))
2231 {
2232 Mem.eType = MEMREF_TYPE_LOW;
2233 rc = supdrvMemAdd(&Mem, pSession);
2234 if (!rc)
2235 {
2236 for (iPage = 0; iPage < cPages; iPage++)
2237 {
2238 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2239 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
2240 }
2241 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2242 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2243 return 0;
2244 }
2245
2246 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2247 AssertRC(rc2);
2248 }
2249
2250 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2251 AssertRC(rc2);
2252 }
2253
2254 return rc;
2255}
2256
2257
2258/**
2259 * Frees memory allocated using SUPR0LowAlloc().
2260 *
2261 * @returns IPRT status code.
2262 * @param pSession The session to which the memory was allocated.
2263 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2264 */
2265SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2266{
2267 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2268 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2269 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
2270}
2271
2272
2273
2274/**
2275 * Allocates a chunk of memory with both R0 and R3 mappings.
2276 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
2277 *
2278 * @returns IPRT status code.
2279 * @param pSession The session to associate the allocation with.
2280 * @param cb Number of bytes to allocate.
2281 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2282 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2283 */
2284SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
2285{
2286 int rc;
2287 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2288 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
2289
2290 /*
2291 * Validate input.
2292 */
2293 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2294 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
2295 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2296 if (cb < 1 || cb >= _4M)
2297 {
2298 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
2299 return VERR_INVALID_PARAMETER;
2300 }
2301
2302 /*
2303 * Let IPRT do the work.
2304 */
2305 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
2306 if (RT_SUCCESS(rc))
2307 {
2308 int rc2;
2309 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2310 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2311 if (RT_SUCCESS(rc))
2312 {
2313 Mem.eType = MEMREF_TYPE_MEM;
2314 rc = supdrvMemAdd(&Mem, pSession);
2315 if (!rc)
2316 {
2317 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2318 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2319 return VINF_SUCCESS;
2320 }
2321
2322 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2323 AssertRC(rc2);
2324 }
2325
2326 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2327 AssertRC(rc2);
2328 }
2329
2330 return rc;
2331}
2332
2333
2334/**
2335 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
2336 *
2337 * @returns IPRT status code.
2338 * @param pSession The session to which the memory was allocated.
2339 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2340 * @param paPages Where to store the physical addresses.
2341 */
2342SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
2343{
2344 PSUPDRVBUNDLE pBundle;
2345 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2346 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
2347
2348 /*
2349 * Validate input.
2350 */
2351 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2352 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
2353 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
2354
2355 /*
2356 * Search for the address.
2357 */
2358 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2359 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2360 {
2361 if (pBundle->cUsed > 0)
2362 {
2363 unsigned i;
2364 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2365 {
2366 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2367 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2368 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2369 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2370 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
2371 )
2372 )
2373 {
2374 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2375 size_t iPage;
2376 for (iPage = 0; iPage < cPages; iPage++)
2377 {
2378 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2379 paPages[iPage].uReserved = 0;
2380 }
2381 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2382 return VINF_SUCCESS;
2383 }
2384 }
2385 }
2386 }
2387 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2388 Log(("Failed to find %p!!!\n", (void *)uPtr));
2389 return VERR_INVALID_PARAMETER;
2390}
2391
2392
2393/**
2394 * Free memory allocated by SUPR0MemAlloc().
2395 *
2396 * @returns IPRT status code.
2397 * @param pSession The session owning the allocation.
2398 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2399 */
2400SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2401{
2402 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2403 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2404 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
2405}
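
/*
 * Usage sketch (illustration only, not compiled into the driver): the
 * SUPR0MemAlloc / SUPR0MemGetPhys / SUPR0MemFree lifecycle for a small
 * double-mapped buffer. The mySketch* name and the four page size are
 * placeholders.
 */
#if 0
static int mySketchMemAlloc(PSUPDRVSESSION pSession)
{
    RTR0PTR pvR0;
    RTR3PTR pvR3;
    SUPPAGE aPages[4];                      /* one entry per page of the 4 page allocation below */
    int rc = SUPR0MemAlloc(pSession, 4 * PAGE_SIZE, &pvR0, &pvR3);
    if (RT_SUCCESS(rc))
    {
        rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
        /* ... aPages[i].Phys now holds the physical address of page i ... */
        SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
    }
    return rc;
}
#endif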
2406
2407
2408/**
2409 * Allocates a chunk of memory with only a ring-3 mapping.
2410 *
2411 * The memory is fixed and it's possible to query the physical addresses using
2412 * SUPR0MemGetPhys().
2413 *
2414 * @returns IPRT status code.
2415 * @param pSession The session to associate the allocation with.
2416 * @param cPages The number of pages to allocate.
2417 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2418 * @param paPages Where to store the addresses of the pages. Optional.
2419 */
2420SUPR0DECL(int) SUPR0PageAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2421{
2422 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2423 return SUPR0PageAllocEx(pSession, cPages, 0 /*fFlags*/, ppvR3, NULL, paPages);
2424}
2425
2426
2427/**
2428 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
2429 *
2430 * The memory is fixed and it's possible to query the physical addresses using
2431 * SUPR0MemGetPhys().
2432 *
2433 * @returns IPRT status code.
2434 * @param pSession The session to associate the allocation with.
2435 * @param cPages The number of pages to allocate.
2436 * @param fFlags Flags, reserved for the future. Must be zero.
2437 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2438 * NULL if no ring-3 mapping.
2439 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2440 * NULL if no ring-0 mapping.
2441 * @param paPages Where to store the addresses of the pages. Optional.
2442 */
2443SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
2444{
2445 int rc;
2446 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2447 LogFlow(("SUPR0PageAllocEx: pSession=%p cPages=%d ppvR3=%p\n", pSession, cPages, ppvR3));
2448
2449 /*
2450 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
2451 */
2452 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2453 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
2454 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2455 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
2456 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2457 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
2458 {
2459 Log(("SUPR0PageAllocEx: Illegal request cPages=%u; must be greater than 0 and not exceed VBOX_MAX_ALLOC_PAGE_COUNT (128MB).\n", cPages));
2460 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2461 }
2462
2463 /*
2464 * Let IPRT do the work.
2465 */
2466 if (ppvR0)
2467 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
2468 else
2469 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
2470 if (RT_SUCCESS(rc))
2471 {
2472 int rc2;
2473 if (ppvR3)
2474 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2475 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2476 else
2477 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
2478 if (RT_SUCCESS(rc))
2479 {
2480 Mem.eType = MEMREF_TYPE_PAGE;
2481 rc = supdrvMemAdd(&Mem, pSession);
2482 if (!rc)
2483 {
2484 if (ppvR3)
2485 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2486 if (ppvR0)
2487 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2488 if (paPages)
2489 {
2490 uint32_t iPage = cPages;
2491 while (iPage-- > 0)
2492 {
2493 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage); /* use the backing object; there may be no ring-3 mapping */
2494 Assert(paPages[iPage] != NIL_RTHCPHYS);
2495 }
2496 }
2497 return VINF_SUCCESS;
2498 }
2499
2500 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2501 AssertRC(rc2);
2502 }
2503
2504 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2505 AssertRC(rc2);
2506 }
2507 return rc;
2508}
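
/*
 * Usage sketch (illustration only, not compiled into the driver): allocating
 * pages with both a ring-0 and a ring-3 mapping and collecting the physical
 * addresses. The mySketch* name is a placeholder.
 */
#if 0
static int mySketchPageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages)
{
    RTR3PTR   pvR3;
    RTR0PTR   pvR0;
    PRTHCPHYS paPages = (PRTHCPHYS)RTMemAlloc(cPages * sizeof(RTHCPHYS));
    int       rc;
    if (!paPages)
        return VERR_NO_MEMORY;

    rc = SUPR0PageAllocEx(pSession, cPages, 0 /* fFlags */, &pvR3, &pvR0, paPages);
    if (RT_SUCCESS(rc))
    {
        /* ... use pvR0 in the driver and pvR3 in the owning process ... */
        SUPR0PageFree(pSession, pvR3);
    }
    RTMemFree(paPages);
    return rc;
}
#endif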
2509
2510
2511/**
2512 * Maps a portion of memory previously allocated by SUPR0PageAllocEx() or
2513 * locked by SUPR0LockMem() into kernel space.
2514 *
2515 * @returns IPRT status code.
2516 * @param pSession The session the memory belongs to.
2517 * @param pvR3 The ring-3 address of the memory.
2518 * @param offSub Offset into the memory chunk where the mapping should
2519 * start. This must be page aligned.
2520 * @param cbSub The size of the mapping. This must be page aligned
2521 * and cannot be zero.
2522 * @param fFlags Flags, reserved for the future. Must be zero.
2523 * @param ppvR0 Where to store the address of the ring-0 mapping.
2524 *
2525 * @remarks The mapping is undone when the backing memory object is freed.
2526 */
2527SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
2528 uint32_t fFlags, PRTR0PTR ppvR0)
2529{
2530 int rc;
2531 PSUPDRVBUNDLE pBundle;
2532 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2533 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
2534 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
2535
2536 /*
2537 * Validate input.
2538 */
2539 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2540 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2541 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2542 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2543 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2544 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
2545
2546 /*
2547 * Find the memory object.
2548 */
2549 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2550 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2551 {
2552 if (pBundle->cUsed > 0)
2553 {
2554 unsigned i;
2555 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2556 {
2557 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2558 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2559 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2560 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2561 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
2562 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2563 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
2564 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
2565 {
2566 hMemObj = pBundle->aMem[i].MemObj;
2567 break;
2568 }
2569 }
2570 }
2571 }
2572 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2573
2574 rc = VERR_INVALID_PARAMETER;
2575 if (hMemObj != NIL_RTR0MEMOBJ)
2576 {
2577 /*
2578 * Do some further input validation before calling IPRT.
2579 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
2580 */
2581 size_t cbMemObj = RTR0MemObjSize(hMemObj);
2582 if ( offSub < cbMemObj
2583 && cbSub <= cbMemObj
2584 && offSub + cbSub <= cbMemObj)
2585 {
2586 RTR0MEMOBJ hMapObj;
2587 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
2588 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
2589 if (RT_SUCCESS(rc))
2590 *ppvR0 = RTR0MemObjAddress(hMapObj);
2591 }
2592 else
2593 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
2594
2595 }
2596 return rc;
2597}
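
/*
 * Usage sketch (illustration only, not compiled into the driver): mapping the
 * second page of an existing SUPR0PageAllocEx() allocation into kernel space.
 * The mySketch* name is a placeholder and the allocation is assumed to be at
 * least two pages long.
 */
#if 0
static int mySketchMapSecondPage(PSUPDRVSESSION pSession, RTR3PTR pvR3, PRTR0PTR ppvR0)
{
    return SUPR0PageMapKernel(pSession, pvR3, 1 * PAGE_SIZE /* offSub */,
                              PAGE_SIZE /* cbSub */, 0 /* fFlags */, ppvR0);
}
#endif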
2598
2599
2600
2601#ifdef RT_OS_WINDOWS
2602/**
2603 * Check if the pages were locked by SUPR0PageAlloc
2604 *
2605 * This function will be removed along with the lock/unlock hacks when
2606 * we've cleaned up the ring-3 code properly.
2607 *
2608 * @returns boolean
2609 * @param pSession The session to which the memory was allocated.
2610 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2611 */
2612static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2613{
2614 PSUPDRVBUNDLE pBundle;
2615 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2616 LogFlow(("supdrvPageWasLockedByPageAlloc: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2617
2618 /*
2619 * Search for the address.
2620 */
2621 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2622 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2623 {
2624 if (pBundle->cUsed > 0)
2625 {
2626 unsigned i;
2627 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2628 {
2629 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2630 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2631 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2632 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2633 {
2634 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2635 return true;
2636 }
2637 }
2638 }
2639 }
2640 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2641 return false;
2642}
2643
2644
2645/**
2646 * Get the physical addresses of memory allocated using SUPR0PageAllocEx().
2647 *
2648 * This function will be removed along with the lock/unlock hacks when
2649 * we've cleaned up the ring-3 code properly.
2650 *
2651 * @returns IPRT status code.
2652 * @param pSession The session to which the memory was allocated.
2653 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2654 * @param cPages Number of pages in paPages
2655 * @param paPages Where to store the physical addresses.
2656 */
2657static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
2658{
2659 PSUPDRVBUNDLE pBundle;
2660 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2661 LogFlow(("supdrvPageGetPhys: pSession=%p pvR3=%p cPages=%#lx paPages=%p\n", pSession, (void *)pvR3, (long)cPages, paPages));
2662
2663 /*
2664 * Search for the address.
2665 */
2666 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2667 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2668 {
2669 if (pBundle->cUsed > 0)
2670 {
2671 unsigned i;
2672 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2673 {
2674 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2675 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2676 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2677 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2678 {
2679 uint32_t iPage;
2680 size_t cMaxPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2681 cPages = (uint32_t)RT_MIN(cMaxPages, cPages);
2682 for (iPage = 0; iPage < cPages; iPage++)
2683 paPages[iPage] = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2684 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2685 return VINF_SUCCESS;
2686 }
2687 }
2688 }
2689 }
2690 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2691 return VERR_INVALID_PARAMETER;
2692}
2693#endif /* RT_OS_WINDOWS */
2694
2695
2696/**
2697 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
2698 *
2699 * @returns IPRT status code.
2700 * @param pSession The session owning the allocation.
2701 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
2702 * SUPR0PageAllocEx().
2703 */
2704SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2705{
2706 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2707 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2708 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
2709}
2710
2711
2712/**
2713 * Maps the GIP into userspace and/or get the physical address of the GIP.
2714 *
2715 * @returns IPRT status code.
2716 * @param pSession Session to which the GIP mapping should belong.
2717 * @param ppGipR3 Where to store the address of the ring-3 mapping. (optional)
2718 * @param pHCPhysGip Where to store the physical address. (optional)
2719 *
2720 * @remark There is no reference counting on the mapping, so one call to this function
2721 * counts globally as one reference. One call to SUPR0GipUnmap() will unmap the GIP
2722 * and remove the session as a GIP user.
2723 */
2724SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGip)
2725{
2726 int rc = 0;
2727 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2728 RTR3PTR pGip = NIL_RTR3PTR;
2729 RTHCPHYS HCPhys = NIL_RTHCPHYS;
2730 LogFlow(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGip=%p\n", pSession, ppGipR3, pHCPhysGip));
2731
2732 /*
2733 * Validate
2734 */
2735 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2736 AssertPtrNullReturn(ppGipR3, VERR_INVALID_POINTER);
2737 AssertPtrNullReturn(pHCPhysGip, VERR_INVALID_POINTER);
2738
2739 RTSemFastMutexRequest(pDevExt->mtxGip);
2740 if (pDevExt->pGip)
2741 {
2742 /*
2743 * Map it?
2744 */
2745 if (ppGipR3)
2746 {
2747 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
2748 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (RTR3PTR)-1, 0,
2749 RTMEM_PROT_READ, RTR0ProcHandleSelf());
2750 if (RT_SUCCESS(rc))
2751 {
2752 pGip = RTR0MemObjAddressR3(pSession->GipMapObjR3);
2753 rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
2754 }
2755 }
2756
2757 /*
2758 * Get physical address.
2759 */
2760 if (pHCPhysGip && !rc)
2761 HCPhys = pDevExt->HCPhysGip;
2762
2763 /*
2764 * Reference globally.
2765 */
2766 if (!pSession->fGipReferenced && !rc)
2767 {
2768 pSession->fGipReferenced = 1;
2769 pDevExt->cGipUsers++;
2770 if (pDevExt->cGipUsers == 1)
2771 {
2772 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2773 unsigned i;
2774
2775 LogFlow(("SUPR0GipMap: Resumes GIP updating\n"));
2776
2777 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
2778 ASMAtomicXchgU32(&pGip->aCPUs[i].u32TransactionId, pGip->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
2779 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, 0);
2780
2781 rc = RTTimerStart(pDevExt->pGipTimer, 0);
2782 AssertRC(rc); rc = VINF_SUCCESS;
2783 }
2784 }
2785 }
2786 else
2787 {
2788 rc = SUPDRV_ERR_GENERAL_FAILURE;
2789 Log(("SUPR0GipMap: GIP is not available!\n"));
2790 }
2791 RTSemFastMutexRelease(pDevExt->mtxGip);
2792
2793 /*
2794 * Write returns.
2795 */
2796 if (pHCPhysGip)
2797 *pHCPhysGip = HCPhys;
2798 if (ppGipR3)
2799 *ppGipR3 = pGip;
2800
2801#ifdef DEBUG_DARWIN_GIP
2802 OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGip=%lx *ppGip=%p GipMapObjR3\n", rc, (unsigned long)HCPhys, pGip, pSession->GipMapObjR3));
2803#else
2804 LogFlow(("SUPR0GipMap: returns %d *pHCPhysGip=%lx *ppGipR3=%p\n", rc, (unsigned long)HCPhys, (void *)(uintptr_t)pGip));
2805#endif
2806 return rc;
2807}
2808
2809
2810/**
2811 * Unmaps any user mapping of the GIP and terminates all GIP access
2812 * from this session.
2813 *
2814 * @returns IPRT status code.
2815 * @param pSession Session to which the GIP mapping should belong.
2816 */
2817SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
2818{
2819 int rc = VINF_SUCCESS;
2820 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2821#ifdef DEBUG_DARWIN_GIP
2822 OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
2823 pSession,
2824 pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
2825 pSession->GipMapObjR3));
2826#else
2827 LogFlow(("SUPR0GipUnmap: pSession=%p\n", pSession));
2828#endif
2829 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2830
2831 RTSemFastMutexRequest(pDevExt->mtxGip);
2832
2833 /*
2834 * Unmap anything?
2835 */
2836 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
2837 {
2838 rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
2839 AssertRC(rc);
2840 if (RT_SUCCESS(rc))
2841 pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
2842 }
2843
2844 /*
2845 * Dereference global GIP.
2846 */
2847 if (pSession->fGipReferenced && !rc)
2848 {
2849 pSession->fGipReferenced = 0;
2850 if ( pDevExt->cGipUsers > 0
2851 && !--pDevExt->cGipUsers)
2852 {
2853 LogFlow(("SUPR0GipUnmap: Suspends GIP updating\n"));
2854 rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = 0;
2855 }
2856 }
2857
2858 RTSemFastMutexRelease(pDevExt->mtxGip);
2859
2860 return rc;
2861}
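
/*
 * Usage sketch (illustration only, not compiled into the driver): mapping the
 * GIP into the calling process, noting the physical address, and undoing it
 * again. The mySketch* name is a placeholder.
 */
#if 0
static int mySketchGipMap(PSUPDRVSESSION pSession)
{
    RTR3PTR  pGipR3;
    RTHCPHYS HCPhysGip;
    int rc = SUPR0GipMap(pSession, &pGipR3, &HCPhysGip);
    if (RT_SUCCESS(rc))
    {
        /* ... the process can now read the GIP at pGipR3 ... */
        rc = SUPR0GipUnmap(pSession);
    }
    return rc;
}
#endif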
2862
2863
2864/**
2865 * Register a component factory with the support driver.
2866 *
2867 * This is currently restricted to kernel sessions only.
2868 *
2869 * @returns VBox status code.
2870 * @retval VINF_SUCCESS on success.
2871 * @retval VERR_NO_MEMORY if we're out of memory.
2872 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
2873 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
2874 * @retval VERR_INVALID_PARAMETER on invalid parameter.
2875 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
2876 *
2877 * @param pSession The SUPDRV session (must be a ring-0 session).
2878 * @param pFactory Pointer to the component factory registration structure.
2879 *
2880 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
2881 */
2882SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
2883{
2884 PSUPDRVFACTORYREG pNewReg;
2885 const char *psz;
2886 int rc;
2887
2888 /*
2889 * Validate parameters.
2890 */
2891 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2892 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
2893 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
2894 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
2895 psz = (const char *)memchr(pFactory->szName, '\0', sizeof(pFactory->szName));
2896 AssertReturn(psz, VERR_INVALID_PARAMETER);
2897
2898 /*
2899 * Allocate and initialize a new registration structure.
2900 */
2901 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
2902 if (pNewReg)
2903 {
2904 pNewReg->pNext = NULL;
2905 pNewReg->pFactory = pFactory;
2906 pNewReg->pSession = pSession;
2907 pNewReg->cchName = psz - &pFactory->szName[0];
2908
2909 /*
2910 * Add it to the tail of the list after checking for prior registration.
2911 */
2912 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
2913 if (RT_SUCCESS(rc))
2914 {
2915 PSUPDRVFACTORYREG pPrev = NULL;
2916 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
2917 while (pCur && pCur->pFactory != pFactory)
2918 {
2919 pPrev = pCur;
2920 pCur = pCur->pNext;
2921 }
2922 if (!pCur)
2923 {
2924 if (pPrev)
2925 pPrev->pNext = pNewReg;
2926 else
2927 pSession->pDevExt->pComponentFactoryHead = pNewReg;
2928 rc = VINF_SUCCESS;
2929 }
2930 else
2931 rc = VERR_ALREADY_EXISTS;
2932
2933 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
2934 }
2935
2936 if (RT_FAILURE(rc))
2937 RTMemFree(pNewReg);
2938 }
2939 else
2940 rc = VERR_NO_MEMORY;
2941 return rc;
2942}
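
/*
 * Usage sketch (illustration only, not compiled into the driver): a kernel
 * component registering a factory. The mySketch* names, the factory name and
 * the interface handling are placeholders; the SUPDRVFACTORY member order
 * (szName first, then pfnQueryFactoryInterface) is assumed.
 */
#if 0
static DECLCALLBACK(void *) mySketchQueryFactoryIf(PCSUPDRVFACTORY pFactory, PSUPDRVSESSION pSession,
                                                   const char *pszInterfaceUuid)
{
    /* Compare pszInterfaceUuid against the UUIDs this factory implements. */
    NOREF(pFactory); NOREF(pSession); NOREF(pszInterfaceUuid);
    return NULL; /* no supported interface in this sketch */
}

static SUPDRVFACTORY g_MySketchFactory =
{
    /* szName */                    "MySketchComponent",
    /* pfnQueryFactoryInterface */  mySketchQueryFactoryIf
};

static int mySketchRegisterFactory(PSUPDRVSESSION pSession)
{
    return SUPR0ComponentRegisterFactory(pSession, &g_MySketchFactory);
}
#endif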
2943
2944
2945/**
2946 * Deregister a component factory.
2947 *
2948 * @returns VBox status code.
2949 * @retval VINF_SUCCESS on success.
2950 * @retval VERR_NOT_FOUND if the factory wasn't registered.
2951 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
2952 * @retval VERR_INVALID_PARAMETER on invalid parameter.
2953 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
2954 *
2955 * @param pSession The SUPDRV session (must be a ring-0 session).
2956 * @param pFactory Pointer to the component factory registration structure
2957 * previously passed to SUPR0ComponentRegisterFactory().
2958 *
2959 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
2960 */
2961SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
2962{
2963 int rc;
2964
2965 /*
2966 * Validate parameters.
2967 */
2968 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2969 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
2970 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
2971
2972 /*
2973 * Take the lock and look for the registration record.
2974 */
2975 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
2976 if (RT_SUCCESS(rc))
2977 {
2978 PSUPDRVFACTORYREG pPrev = NULL;
2979 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
2980 while (pCur && pCur->pFactory != pFactory)
2981 {
2982 pPrev = pCur;
2983 pCur = pCur->pNext;
2984 }
2985 if (pCur)
2986 {
2987 if (!pPrev)
2988 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
2989 else
2990 pPrev->pNext = pCur->pNext;
2991
2992 pCur->pNext = NULL;
2993 pCur->pFactory = NULL;
2994 pCur->pSession = NULL;
2995 rc = VINF_SUCCESS;
2996 }
2997 else
2998 rc = VERR_NOT_FOUND;
2999
3000 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3001
3002 RTMemFree(pCur);
3003 }
3004 return rc;
3005}
3006
3007
3008/**
3009 * Queries a component factory.
3010 *
3011 * @returns VBox status code.
3012 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3013 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3014 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
3015 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
3016 *
3017 * @param pSession The SUPDRV session.
3018 * @param pszName The name of the component factory.
3019 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
3020 * @param ppvFactoryIf Where to store the factory interface.
3021 */
3022SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
3023{
3024 const char *pszEnd;
3025 size_t cchName;
3026 int rc;
3027
3028 /*
3029 * Validate parameters.
3030 */
3031 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3032
3033 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
3034 pszEnd = memchr(pszName, '\0', RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
3035 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3036 cchName = pszEnd - pszName;
3037
3038 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
3039 pszEnd = memchr(pszInterfaceUuid, '\0', RTUUID_STR_LENGTH);
3040 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3041
3042 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
3043 *ppvFactoryIf = NULL;
3044
3045 /*
3046 * Take the lock and try all factories by this name.
3047 */
3048 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3049 if (RT_SUCCESS(rc))
3050 {
3051 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3052 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
3053 while (pCur)
3054 {
3055 if ( pCur->cchName == cchName
3056 && !memcmp(pCur->pFactory->szName, pszName, cchName))
3057 {
3058#ifdef RT_WITH_W64_UNWIND_HACK
3059 void *pvFactory = supdrvNtWrapQueryFactoryInterface((PFNRT)pCur->pFactory->pfnQueryFactoryInterface, pCur->pFactory, pSession, pszInterfaceUuid);
3060#else
3061 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
3062#endif
3063 if (pvFactory)
3064 {
3065 *ppvFactoryIf = pvFactory;
3066 rc = VINF_SUCCESS;
3067 break;
3068 }
3069 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
3070 }
3071
3072 /* next */
3073 pCur = pCur->pNext;
3074 }
3075
3076 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3077 }
3078 return rc;
3079}
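/*
 * Illustrative sketch (not part of the driver): a ring-0 client resolving a factory
 * interface by name. The component name, UUID string and EXAMPLEIF interface layout
 * below are hypothetical placeholders.
 */
#if 0 /* example only, not built */
typedef struct EXAMPLEIF
{
    int (*pfnDoWork)(struct EXAMPLEIF *pIf);
} EXAMPLEIF;

static int exampleUseFactory(PSUPDRVSESSION pSession)
{
    EXAMPLEIF *pIf = NULL;
    int rc = SUPR0ComponentQueryFactory(pSession, "ExampleComponent",
                                        "01234567-89ab-cdef-0123-456789abcdef", /* hypothetical interface UUID */
                                        (void **)&pIf);
    if (RT_SUCCESS(rc))
        rc = pIf->pfnDoWork(pIf);
    else if (rc == VERR_SUPDRV_COMPONENT_NOT_FOUND)
        Log(("exampleUseFactory: no such component registered\n"));
    else if (rc == VERR_SUPDRV_INTERFACE_NOT_SUPPORTED)
        Log(("exampleUseFactory: component found, interface not supported\n"));
    return rc;
}
#endif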
3080
3081
3082/**
3083 * Adds a memory object to the session.
3084 *
3085 * @returns IPRT status code.
3086 * @param pMem Memory tracking structure containing the
3087 * information to track.
3088 * @param pSession The session.
3089 */
3090static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
3091{
3092 PSUPDRVBUNDLE pBundle;
3093 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3094
3095 /*
3096 * Find free entry and record the allocation.
3097 */
3098 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3099 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3100 {
3101 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
3102 {
3103 unsigned i;
3104 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3105 {
3106 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
3107 {
3108 pBundle->cUsed++;
3109 pBundle->aMem[i] = *pMem;
3110 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3111 return VINF_SUCCESS;
3112 }
3113 }
3114 AssertFailed(); /* !!this can't be happening!!! */
3115 }
3116 }
3117 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3118
3119 /*
3120 * Need to allocate a new bundle.
3121 * Insert into the last entry in the bundle.
3122 */
3123 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
3124 if (!pBundle)
3125 return VERR_NO_MEMORY;
3126
3127 /* take last entry. */
3128 pBundle->cUsed++;
3129 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
3130
3131 /* insert into list. */
3132 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3133 pBundle->pNext = pSession->Bundle.pNext;
3134 pSession->Bundle.pNext = pBundle;
3135 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3136
3137 return VINF_SUCCESS;
3138}
3139
3140
3141/**
3142 * Releases a memory object referenced by pointer and type.
3143 *
3144 * @returns IPRT status code.
3145 * @param pSession Session data.
3146 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
3147 * @param eType Memory type.
3148 */
3149static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
3150{
3151 PSUPDRVBUNDLE pBundle;
3152 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3153
3154 /*
3155 * Validate input.
3156 */
3157 if (!uPtr)
3158 {
3159 Log(("Illegal address %p\n", (void *)uPtr));
3160 return VERR_INVALID_PARAMETER;
3161 }
3162
3163 /*
3164 * Search for the address.
3165 */
3166 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3167 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3168 {
3169 if (pBundle->cUsed > 0)
3170 {
3171 unsigned i;
3172 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3173 {
3174 if ( pBundle->aMem[i].eType == eType
3175 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3176 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3177 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3178 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
3179 )
3180 {
3181 /* Make a copy of it and release it outside the spinlock. */
3182 SUPDRVMEMREF Mem = pBundle->aMem[i];
3183 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
3184 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
3185 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
3186 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3187
3188 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
3189 {
3190 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
3191 AssertRC(rc); /** @todo figure out how to handle this. */
3192 }
3193 if (Mem.MemObj != NIL_RTR0MEMOBJ)
3194 {
3195 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
3196 AssertRC(rc); /** @todo figure out how to handle this. */
3197 }
3198 return VINF_SUCCESS;
3199 }
3200 }
3201 }
3202 }
3203 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3204 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
3205 return VERR_INVALID_PARAMETER;
3206}
3207
3208
3209/**
3210 * Opens an image. If it's the first time the image is opened, the caller must upload
3211 * the bits using supdrvIOCtl_LdrLoad() / SUP_IOCTL_LDR_LOAD.
3212 *
3213 * This is the 1st step of the loading.
3214 *
3215 * @returns IPRT status code.
3216 * @param pDevExt Device globals.
3217 * @param pSession Session data.
3218 * @param pReq The open request.
3219 */
3220static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
3221{
3222 PSUPDRVLDRIMAGE pImage;
3223 unsigned cb;
3224 void *pv;
3225 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pReq->u.In.szName, pReq->u.In.cbImage));
3226
3227 /*
3228 * Check if we got an instance of the image already.
3229 */
3230 RTSemFastMutexRequest(pDevExt->mtxLdr);
3231 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3232 {
3233 if (!strcmp(pImage->szName, pReq->u.In.szName))
3234 {
3235 pImage->cUsage++;
3236 pReq->u.Out.pvImageBase = pImage->pvImage;
3237 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
3238 supdrvLdrAddUsage(pSession, pImage);
3239 RTSemFastMutexRelease(pDevExt->mtxLdr);
3240 return VINF_SUCCESS;
3241 }
3242 }
3243 /* (not found - add it!) */
3244
3245 /*
3246 * Allocate memory.
3247 */
3248 cb = pReq->u.In.cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
3249 pv = RTMemExecAlloc(cb);
3250 if (!pv)
3251 {
3252 RTSemFastMutexRelease(pDevExt->mtxLdr);
3253 Log(("supdrvIOCtl_LdrOpen: RTMemExecAlloc(%u) failed\n", cb));
3254 return VERR_NO_MEMORY;
3255 }
3256
3257 /*
3258 * Setup and link in the LDR stuff.
3259 */
3260 pImage = (PSUPDRVLDRIMAGE)pv;
3261 pImage->pvImage = RT_ALIGN_P(pImage + 1, 32);
3262 pImage->cbImage = pReq->u.In.cbImage;
3263 pImage->pfnModuleInit = NULL;
3264 pImage->pfnModuleTerm = NULL;
3265 pImage->pfnServiceReqHandler = NULL;
3266 pImage->uState = SUP_IOCTL_LDR_OPEN;
3267 pImage->cUsage = 1;
3268 strcpy(pImage->szName, pReq->u.In.szName);
3269
3270 pImage->pNext = pDevExt->pLdrImages;
3271 pDevExt->pLdrImages = pImage;
3272
3273 supdrvLdrAddUsage(pSession, pImage);
3274
3275 pReq->u.Out.pvImageBase = pImage->pvImage;
3276 pReq->u.Out.fNeedsLoading = true;
3277 RTSemFastMutexRelease(pDevExt->mtxLdr);
3278
3279#if defined(RT_OS_WINDOWS) && defined(DEBUG)
3280 SUPR0Printf("VBoxDrv: windbg> .reload /f %s=%#p\n", pImage->szName, pImage->pvImage);
3281#endif
3282 return VINF_SUCCESS;
3283}
3284
3285
3286/**
3287 * Loads the image bits.
3288 *
3289 * This is the 2nd step of the loading.
3290 *
3291 * @returns IPRT status code.
3292 * @param pDevExt Device globals.
3293 * @param pSession Session data.
3294 * @param pReq The request.
3295 */
3296static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
3297{
3298 PSUPDRVLDRUSAGE pUsage;
3299 PSUPDRVLDRIMAGE pImage;
3300 int rc;
3301 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImage));
3302
3303 /*
3304 * Find the ldr image.
3305 */
3306 RTSemFastMutexRequest(pDevExt->mtxLdr);
3307 pUsage = pSession->pLdrUsage;
3308 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3309 pUsage = pUsage->pNext;
3310 if (!pUsage)
3311 {
3312 RTSemFastMutexRelease(pDevExt->mtxLdr);
3313 Log(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
3314 return VERR_INVALID_HANDLE;
3315 }
3316 pImage = pUsage->pImage;
3317 if (pImage->cbImage != pReq->u.In.cbImage)
3318 {
3319 RTSemFastMutexRelease(pDevExt->mtxLdr);
3320 Log(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pReq->u.In.cbImage));
3321 return VERR_INVALID_HANDLE;
3322 }
3323 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
3324 {
3325 unsigned uState = pImage->uState;
3326 RTSemFastMutexRelease(pDevExt->mtxLdr);
3327 if (uState != SUP_IOCTL_LDR_LOAD)
3328 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
3329 return SUPDRV_ERR_ALREADY_LOADED;
3330 }
3331 switch (pReq->u.In.eEPType)
3332 {
3333 case SUPLDRLOADEP_NOTHING:
3334 break;
3335
3336 case SUPLDRLOADEP_VMMR0:
3337 if ( !pReq->u.In.EP.VMMR0.pvVMMR0
3338 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryInt
3339 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryFast
3340 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryEx)
3341 {
3342 RTSemFastMutexRelease(pDevExt->mtxLdr);
3343 Log(("NULL pointer: pvVMMR0=%p pvVMMR0EntryInt=%p pvVMMR0EntryFast=%p pvVMMR0EntryEx=%p!\n",
3344 pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3345 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3346 return VERR_INVALID_PARAMETER;
3347 }
3348 /** @todo validate pReq->u.In.EP.VMMR0.pvVMMR0 against pvImage! */
3349 if ( (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryInt - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3350 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryFast - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3351 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryEx - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3352 {
3353 RTSemFastMutexRelease(pDevExt->mtxLdr);
3354 Log(("Out of range (%p LB %#x): pvVMMR0EntryInt=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p!\n",
3355 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3356 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3357 return VERR_INVALID_PARAMETER;
3358 }
3359 break;
3360
3361 case SUPLDRLOADEP_SERVICE:
3362 if (!pReq->u.In.EP.Service.pfnServiceReq)
3363 {
3364 RTSemFastMutexRelease(pDevExt->mtxLdr);
3365 Log(("NULL pointer: pfnServiceReq=%p!\n", pReq->u.In.EP.Service.pfnServiceReq));
3366 return VERR_INVALID_PARAMETER;
3367 }
3368 if ((uintptr_t)pReq->u.In.EP.Service.pfnServiceReq - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3369 {
3370 RTSemFastMutexRelease(pDevExt->mtxLdr);
3371 Log(("Out of range (%p LB %#x): pfnServiceReq=%p!\n",
3372 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.Service.pfnServiceReq));
3373 return VERR_INVALID_PARAMETER;
3374 }
3375 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
3376 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
3377 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
3378 {
3379 RTSemFastMutexRelease(pDevExt->mtxLdr);
3380 Log(("Bad reserved fields (%p LB %#x): apvReserved={%p,%p,%p} MBZ!\n",
3381 pImage->pvImage, pReq->u.In.cbImage,
3382 pReq->u.In.EP.Service.apvReserved[0],
3383 pReq->u.In.EP.Service.apvReserved[1],
3384 pReq->u.In.EP.Service.apvReserved[2]));
3385 return VERR_INVALID_PARAMETER;
3386 }
3387 break;
3388
3389 default:
3390 RTSemFastMutexRelease(pDevExt->mtxLdr);
3391 Log(("Invalid eEPType=%d\n", pReq->u.In.eEPType));
3392 return VERR_INVALID_PARAMETER;
3393 }
3394 if ( pReq->u.In.pfnModuleInit
3395 && (uintptr_t)pReq->u.In.pfnModuleInit - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3396 {
3397 RTSemFastMutexRelease(pDevExt->mtxLdr);
3398 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
3399 pReq->u.In.pfnModuleInit, pImage->pvImage, pReq->u.In.cbImage));
3400 return VERR_INVALID_PARAMETER;
3401 }
3402 if ( pReq->u.In.pfnModuleTerm
3403 && (uintptr_t)pReq->u.In.pfnModuleTerm - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3404 {
3405 RTSemFastMutexRelease(pDevExt->mtxLdr);
3406 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
3407 pReq->u.In.pfnModuleTerm, pImage->pvImage, pReq->u.In.cbImage));
3408 return VERR_INVALID_PARAMETER;
3409 }
3410
3411 /*
3412 * Copy the memory.
3413 */
3414 /* no need to do try/except as this is a buffered request. */
3415 memcpy(pImage->pvImage, &pReq->u.In.achImage[0], pImage->cbImage);
3416 pImage->uState = SUP_IOCTL_LDR_LOAD;
3417 pImage->pfnModuleInit = pReq->u.In.pfnModuleInit;
3418 pImage->pfnModuleTerm = pReq->u.In.pfnModuleTerm;
3419 pImage->offSymbols = pReq->u.In.offSymbols;
3420 pImage->cSymbols = pReq->u.In.cSymbols;
3421 pImage->offStrTab = pReq->u.In.offStrTab;
3422 pImage->cbStrTab = pReq->u.In.cbStrTab;
3423
3424 /*
3425 * Update any entry points.
3426 */
3427 switch (pReq->u.In.eEPType)
3428 {
3429 default:
3430 case SUPLDRLOADEP_NOTHING:
3431 rc = VINF_SUCCESS;
3432 break;
3433 case SUPLDRLOADEP_VMMR0:
3434 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3435 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
3436 break;
3437 case SUPLDRLOADEP_SERVICE:
3438 pImage->pfnServiceReqHandler = pReq->u.In.EP.Service.pfnServiceReq;
3439 rc = VINF_SUCCESS;
3440 break;
3441 }
3442
3443 /*
3444 * On success call the module initialization.
3445 */
3446 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
3447 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
3448 {
3449 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
3450#ifdef RT_WITH_W64_UNWIND_HACK
3451 rc = supdrvNtWrapModuleInit((PFNRT)pImage->pfnModuleInit);
3452#else
3453 rc = pImage->pfnModuleInit();
3454#endif
3455 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
3456 supdrvLdrUnsetVMMR0EPs(pDevExt);
3457 }
3458
3459 if (rc)
3460 pImage->uState = SUP_IOCTL_LDR_OPEN;
3461
3462 RTSemFastMutexRelease(pDevExt->mtxLdr);
3463 return rc;
3464}
3465
3466
3467/**
3468 * Frees a previously loaded (prep'ed) image.
3469 *
3470 * @returns IPRT status code.
3471 * @param pDevExt Device globals.
3472 * @param pSession Session data.
3473 * @param pReq The request.
3474 */
3475static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
3476{
3477 int rc;
3478 PSUPDRVLDRUSAGE pUsagePrev;
3479 PSUPDRVLDRUSAGE pUsage;
3480 PSUPDRVLDRIMAGE pImage;
3481 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
3482
3483 /*
3484 * Find the ldr image.
3485 */
3486 RTSemFastMutexRequest(pDevExt->mtxLdr);
3487 pUsagePrev = NULL;
3488 pUsage = pSession->pLdrUsage;
3489 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3490 {
3491 pUsagePrev = pUsage;
3492 pUsage = pUsage->pNext;
3493 }
3494 if (!pUsage)
3495 {
3496 RTSemFastMutexRelease(pDevExt->mtxLdr);
3497 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
3498 return VERR_INVALID_HANDLE;
3499 }
3500
3501 /*
3502 * Check if we can remove anything.
3503 */
3504 rc = VINF_SUCCESS;
3505 pImage = pUsage->pImage;
3506 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
3507 {
3508 /*
3509 * Check if there are any objects with destructors in the image, if
3510 * so leave it for the session cleanup routine so we get a chance to
3511 * clean things up in the right order and not leave them all dangling.
3512 */
3513 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3514 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
3515 if (pImage->cUsage <= 1)
3516 {
3517 PSUPDRVOBJ pObj;
3518 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
3519 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3520 {
3521 rc = VERR_SHARING_VIOLATION; /** @todo VERR_DANGLING_OBJECTS */
3522 break;
3523 }
3524 }
3525 else
3526 {
3527 PSUPDRVUSAGE pGenUsage;
3528 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
3529 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3530 {
3531 rc = VERR_SHARING_VIOLATION; /** @todo VERR_DANGLING_OBJECTS */
3532 break;
3533 }
3534 }
3535 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
3536 if (rc == VINF_SUCCESS)
3537 {
3538 /* unlink it */
3539 if (pUsagePrev)
3540 pUsagePrev->pNext = pUsage->pNext;
3541 else
3542 pSession->pLdrUsage = pUsage->pNext;
3543
3544 /* free it */
3545 pUsage->pImage = NULL;
3546 pUsage->pNext = NULL;
3547 RTMemFree(pUsage);
3548
3549 /*
3550 * Dereference the image.
3551 */
3552 if (pImage->cUsage <= 1)
3553 supdrvLdrFree(pDevExt, pImage);
3554 else
3555 pImage->cUsage--;
3556 }
3557 else
3558 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
3559 }
3560 else
3561 {
3562 /*
3563 * Dereference both image and usage.
3564 */
3565 pImage->cUsage--;
3566 pUsage->cUsage--;
3567 }
3568
3569 RTSemFastMutexRelease(pDevExt->mtxLdr);
3570 return VINF_SUCCESS;
3571}
3572
3573
3574/**
3575 * Gets the address of a symbol in an open image.
3576 *
3577 * @returns VINF_SUCCESS on success.
3578 * @returns VERR_* on failure.
3579 * @param pDevExt Device globals.
3580 * @param pSession Session data.
3581 * @param pReq The request buffer.
3582 */
3583static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
3584{
3585 PSUPDRVLDRIMAGE pImage;
3586 PSUPDRVLDRUSAGE pUsage;
3587 uint32_t i;
3588 PSUPLDRSYM paSyms;
3589 const char *pchStrings;
3590 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
3591 void *pvSymbol = NULL;
3592 int rc = VERR_GENERAL_FAILURE;
3593 Log3(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
3594
3595 /*
3596 * Find the ldr image.
3597 */
3598 RTSemFastMutexRequest(pDevExt->mtxLdr);
3599 pUsage = pSession->pLdrUsage;
3600 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3601 pUsage = pUsage->pNext;
3602 if (!pUsage)
3603 {
3604 RTSemFastMutexRelease(pDevExt->mtxLdr);
3605 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3606 return VERR_INVALID_HANDLE;
3607 }
3608 pImage = pUsage->pImage;
3609 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3610 {
3611 unsigned uState = pImage->uState;
3612 RTSemFastMutexRelease(pDevExt->mtxLdr);
3613 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3614 return VERR_ALREADY_LOADED;
3615 }
3616
3617 /*
3618 * Search the symbol strings.
3619 */
3620 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3621 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3622 for (i = 0; i < pImage->cSymbols; i++)
3623 {
3624 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3625 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3626 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
3627 {
3628 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3629 rc = VINF_SUCCESS;
3630 break;
3631 }
3632 }
3633 RTSemFastMutexRelease(pDevExt->mtxLdr);
3634 pReq->u.Out.pvSymbol = pvSymbol;
3635 return rc;
3636}
3637
3638
3639/**
3640 * Gets the address of a symbol in an open image or the support driver.
3641 *
3642 * @returns VINF_SUCCESS on success.
3643 * @returns VERR_* on failure.
3644 * @param pDevExt Device globals.
3645 * @param pSession Session data.
3646 * @param pReq The request buffer.
3647 */
3648static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
3649{
3650 int rc = VINF_SUCCESS;
3651 const char *pszSymbol = pReq->u.In.pszSymbol;
3652 const char *pszModule = pReq->u.In.pszModule;
3653 size_t cbSymbol;
3654 char const *pszEnd;
3655 uint32_t i;
3656
3657 /*
3658 * Input validation.
3659 */
3660 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
3661 pszEnd = (char *)memchr(pszSymbol, '\0', 512);
3662 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3663 cbSymbol = pszEnd - pszSymbol + 1;
3664
3665 if (pszModule)
3666 {
3667 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
3668 pszEnd = (char *)memchr(pszModule, '\0', 64);
3669 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3670 }
3671 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
3672
3673
3674 if ( !pszModule
3675 || !strcmp(pszModule, "SupDrv"))
3676 {
3677 /*
3678 * Search the support driver export table.
3679 */
3680 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
3681 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
3682 {
3683 pReq->u.Out.pfnSymbol = g_aFunctions[i].pfn;
3684 break;
3685 }
3686 }
3687 else
3688 {
3689 /*
3690 * Find the loader image.
3691 */
3692 PSUPDRVLDRIMAGE pImage;
3693
3694 RTSemFastMutexRequest(pDevExt->mtxLdr);
3695
3696 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3697 if (!strcmp(pImage->szName, pszModule))
3698 break;
3699 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
3700 {
3701 /*
3702 * Search the symbol strings.
3703 */
3704 const char *pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3705 PCSUPLDRSYM paSyms = (PCSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3706 for (i = 0; i < pImage->cSymbols; i++)
3707 {
3708 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3709 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3710 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
3711 {
3712 /*
3713 * Found it! Calc the symbol address and add a reference to the module.
3714 */
3715 pReq->u.Out.pfnSymbol = (PFNRT)((uint8_t *)pImage->pvImage + paSyms[i].offSymbol);
3716 rc = supdrvLdrAddUsage(pSession, pImage);
3717 break;
3718 }
3719 }
3720 }
3721 else
3722 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
3723
3724 RTSemFastMutexRelease(pDevExt->mtxLdr);
3725 }
3726 return rc;
3727}
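/*
 * Illustrative sketch (not part of the driver): how an IDC get-symbol request could be
 * filled in and handed to this worker. Only the request fields referenced above are
 * touched; header/dispatch setup is omitted and the symbol name is an assumption.
 */
#if 0 /* example only, not built */
static int exampleResolveIdcSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    SUPDRVIDCREQGETSYM Req;
    int                rc;

    Req.u.In.pszModule  = NULL;                 /* NULL (or "SupDrv") searches the driver's own export table. */
    Req.u.In.pszSymbol  = "SUPR0GetPagingMode"; /* assumed to be in g_aFunctions, for illustration only. */
    Req.u.Out.pfnSymbol = NULL;

    rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, &Req);
    if (RT_SUCCESS(rc) && Req.u.Out.pfnSymbol)
        Log(("exampleResolveIdcSymbol: resolved to %p\n", (void *)Req.u.Out.pfnSymbol));
    return rc;
}
#endif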
3728
3729
3730/**
3731 * Updates the VMMR0 entry point pointers.
3732 *
3733 * @returns IPRT status code.
3734 * @param pDevExt Device globals.
3735 * @param pSession Session data.
3736 * @param pVMMR0 VMMR0 image handle.
3737 * @param pvVMMR0EntryInt VMMR0EntryInt address.
3738 * @param pvVMMR0EntryFast VMMR0EntryFast address.
3739 * @param pvVMMR0EntryEx VMMR0EntryEx address.
3740 * @remark Caller must own the loader mutex.
3741 */
3742static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
3743{
3744 int rc = VINF_SUCCESS;
3745 LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryInt=%p\n", pvVMMR0, pvVMMR0EntryInt));
3746
3747
3748 /*
3749 * Check if not yet set.
3750 */
3751 if (!pDevExt->pvVMMR0)
3752 {
3753 pDevExt->pvVMMR0 = pvVMMR0;
3754 pDevExt->pfnVMMR0EntryInt = pvVMMR0EntryInt;
3755 pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
3756 pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
3757 }
3758 else
3759 {
3760 /*
3761 * Return failure or success depending on whether the values match or not.
3762 */
3763 if ( pDevExt->pvVMMR0 != pvVMMR0
3764 || (void *)pDevExt->pfnVMMR0EntryInt != pvVMMR0EntryInt
3765 || (void *)pDevExt->pfnVMMR0EntryFast != pvVMMR0EntryFast
3766 || (void *)pDevExt->pfnVMMR0EntryEx != pvVMMR0EntryEx)
3767 {
3768 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
3769 rc = VERR_INVALID_PARAMETER;
3770 }
3771 }
3772 return rc;
3773}
3774
3775
3776/**
3777 * Unsets the VMMR0 entry points installed by supdrvLdrSetVMMR0EPs.
3778 *
3779 * @param pDevExt Device globals.
3780 */
3781static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
3782{
3783 pDevExt->pvVMMR0 = NULL;
3784 pDevExt->pfnVMMR0EntryInt = NULL;
3785 pDevExt->pfnVMMR0EntryFast = NULL;
3786 pDevExt->pfnVMMR0EntryEx = NULL;
3787}
3788
3789
3790/**
3791 * Adds a usage reference in the specified session of an image.
3792 *
3793 * Called while owning the loader semaphore.
3794 *
3795 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
3796 * @param pSession Session in question.
3797 * @param pImage Image which the session is using.
3798 */
3799static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
3800{
3801 PSUPDRVLDRUSAGE pUsage;
3802 LogFlow(("supdrvLdrAddUsage: pImage=%p\n", pImage));
3803
3804 /*
3805 * Referenced it already?
3806 */
3807 pUsage = pSession->pLdrUsage;
3808 while (pUsage)
3809 {
3810 if (pUsage->pImage == pImage)
3811 {
3812 pUsage->cUsage++;
3813 return VINF_SUCCESS;
3814 }
3815 pUsage = pUsage->pNext;
3816 }
3817
3818 /*
3819 * Allocate new usage record.
3820 */
3821 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
3822 AssertReturn(pUsage, VERR_NO_MEMORY);
3823 pUsage->cUsage = 1;
3824 pUsage->pImage = pImage;
3825 pUsage->pNext = pSession->pLdrUsage;
3826 pSession->pLdrUsage = pUsage;
3827 return VINF_SUCCESS;
3828}
3829
3830
3831/**
3832 * Frees a load image.
3833 *
3834 * @param pDevExt Pointer to device extension.
3835 * @param pImage Pointer to the image we're gonna free.
3836 * This image must exist!
3837 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
3838 */
3839static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
3840{
3841 PSUPDRVLDRIMAGE pImagePrev;
3842 LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));
3843
3844 /* find it - arg. should've used doubly linked list. */
3845 Assert(pDevExt->pLdrImages);
3846 pImagePrev = NULL;
3847 if (pDevExt->pLdrImages != pImage)
3848 {
3849 pImagePrev = pDevExt->pLdrImages;
3850 while (pImagePrev->pNext != pImage)
3851 pImagePrev = pImagePrev->pNext;
3852 Assert(pImagePrev->pNext == pImage);
3853 }
3854
3855 /* unlink */
3856 if (pImagePrev)
3857 pImagePrev->pNext = pImage->pNext;
3858 else
3859 pDevExt->pLdrImages = pImage->pNext;
3860
3861 /* check if this is VMMR0.r0 and, if so, unset its entry point pointers. */
3862 if (pDevExt->pvVMMR0 == pImage->pvImage)
3863 supdrvLdrUnsetVMMR0EPs(pDevExt);
3864
3865 /* check for objects with destructors in this image. (Shouldn't happen.) */
3866 if (pDevExt->pObjs)
3867 {
3868 unsigned cObjs = 0;
3869 PSUPDRVOBJ pObj;
3870 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3871 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
3872 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
3873 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3874 {
3875 pObj->pfnDestructor = NULL;
3876 cObjs++;
3877 }
3878 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
3879 if (cObjs)
3880 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
3881 }
3882
3883 /* call termination function if fully loaded. */
3884 if ( pImage->pfnModuleTerm
3885 && pImage->uState == SUP_IOCTL_LDR_LOAD)
3886 {
3887 LogFlow(("supdrvLdrFree: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
3888#ifdef RT_WITH_W64_UNWIND_HACK
3889 supdrvNtWrapModuleTerm(pImage->pfnModuleTerm);
3890#else
3891 pImage->pfnModuleTerm();
3892#endif
3893 }
3894
3895 /* free the image */
3896 pImage->cUsage = 0;
3897 pImage->pNext = 0;
3898 pImage->uState = SUP_IOCTL_LDR_FREE;
3899 RTMemExecFree(pImage);
3900}
3901
3902
3903/**
3904 * Implements the service call request.
3905 *
3906 * @returns VBox status code.
3907 * @param pDevExt The device extension.
3908 * @param pSession The calling session.
3909 * @param pReq The request packet, valid.
3910 */
3911static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
3912{
3913#if !defined(RT_OS_WINDOWS) || defined(DEBUG)
3914 int rc;
3915
3916 /*
3917 * Find the module first in the module referenced by the calling session.
3918 */
3919 rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
3920 if (RT_SUCCESS(rc))
3921 {
3922 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
3923 PSUPDRVLDRUSAGE pUsage;
3924
3925 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
3926 if ( pUsage->pImage->pfnServiceReqHandler
3927 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
3928 {
3929 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
3930 break;
3931 }
3932 RTSemFastMutexRelease(pDevExt->mtxLdr);
3933
3934 if (pfnServiceReqHandler)
3935 {
3936 /*
3937 * Call it.
3938 */
3939 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
3940#ifdef RT_WITH_W64_UNWIND_HACK
3941 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
3942#else
3943 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
3944#endif
3945 else
3946#ifdef RT_WITH_W64_UNWIND_HACK
3947 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation,
3948 pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
3949#else
3950 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
3951#endif
3952 }
3953 else
3954 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
3955 }
3956
3957 /* log it */
3958 if ( RT_FAILURE(rc)
3959 && rc != VERR_INTERRUPTED
3960 && rc != VERR_TIMEOUT)
3961 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
3962 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
3963 else
3964 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
3965 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
3966 return rc;
3967#else /* RT_OS_WINDOWS && !DEBUG */
3968 return VERR_NOT_IMPLEMENTED;
3969#endif /* RT_OS_WINDOWS && !DEBUG */
3970}
3971
3972
3973/**
3974 * Gets the paging mode of the current CPU.
3975 *
3976 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
3977 */
3978SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
3979{
3980 SUPPAGINGMODE enmMode;
3981
3982 RTR0UINTREG cr0 = ASMGetCR0();
3983 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3984 enmMode = SUPPAGINGMODE_INVALID;
3985 else
3986 {
3987 RTR0UINTREG cr4 = ASMGetCR4();
3988 uint32_t fNXEPlusLMA = 0;
3989 if (cr4 & X86_CR4_PAE)
3990 {
3991 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
3992 if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
3993 {
3994 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
3995 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
3996 fNXEPlusLMA |= RT_BIT(0);
3997 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
3998 fNXEPlusLMA |= RT_BIT(1);
3999 }
4000 }
4001
4002 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
4003 {
4004 case 0:
4005 enmMode = SUPPAGINGMODE_32_BIT;
4006 break;
4007
4008 case X86_CR4_PGE:
4009 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
4010 break;
4011
4012 case X86_CR4_PAE:
4013 enmMode = SUPPAGINGMODE_PAE;
4014 break;
4015
4016 case X86_CR4_PAE | RT_BIT(0):
4017 enmMode = SUPPAGINGMODE_PAE_NX;
4018 break;
4019
4020 case X86_CR4_PAE | X86_CR4_PGE:
4021 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4022 break;
4023
4024 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4025 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4026 break;
4027
4028 case RT_BIT(1) | X86_CR4_PAE:
4029 enmMode = SUPPAGINGMODE_AMD64;
4030 break;
4031
4032 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
4033 enmMode = SUPPAGINGMODE_AMD64_NX;
4034 break;
4035
4036 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
4037 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
4038 break;
4039
4040 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4041 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4042 break;
4043
4044 default:
4045 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4046 enmMode = SUPPAGINGMODE_INVALID;
4047 break;
4048 }
4049 }
4050 return enmMode;
4051}
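/*
 * Illustrative sketch (not part of the driver): a typical caller pattern, classifying
 * the mode returned above. Only SUPPAGINGMODE_* values used by this function appear.
 */
#if 0 /* example only, not built */
static bool exampleIsLongMode(void)
{
    switch (SUPR0GetPagingMode())
    {
        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            return true;    /* 64-bit (long mode) paging is active. */
        default:
            return false;   /* 32-bit or PAE paging, or SUPPAGINGMODE_INVALID. */
    }
}
#endif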
4052
4053
4054/**
4055 * Enables or disables hardware virtualization extensions using native OS APIs.
4056 *
4057 * @returns VBox status code.
4058 * @retval VINF_SUCCESS on success.
4059 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4060 *
4061 * @param fEnable Whether to enable or disable.
4062 */
4063SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4064{
4065#ifdef RT_OS_DARWIN
4066 return supdrvOSEnableVTx(fEnable);
4067#else
4068 return VERR_NOT_SUPPORTED;
4069#endif
4070}
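/*
 * Illustrative sketch (not part of the driver): the enable/disable bracket a caller
 * might use around work that needs VT-x owned by the host OS (currently darwin only).
 */
#if 0 /* example only, not built */
static int exampleWithVTx(void)
{
    int rc = SUPR0EnableVTx(true /* fEnable */);
    if (rc == VERR_NOT_SUPPORTED)
        return rc;              /* No native support; the caller must manage VT-x itself. */
    if (RT_SUCCESS(rc))
    {
        /* ... do work that requires VT-x to be enabled ... */
        SUPR0EnableVTx(false /* fEnable */);
    }
    return rc;
}
#endif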
4071
4072
4073/**
4074 * Creates the GIP.
4075 *
4076 * @returns VBox status code.
4077 * @param pDevExt Instance data. GIP stuff may be updated.
4078 */
4079static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
4080{
4081 PSUPGLOBALINFOPAGE pGip;
4082 RTHCPHYS HCPhysGip;
4083 uint32_t u32SystemResolution;
4084 uint32_t u32Interval;
4085 int rc;
4086
4087 LogFlow(("supdrvGipCreate:\n"));
4088
4089 /* assert order */
4090 Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
4091 Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
4092 Assert(!pDevExt->pGipTimer);
4093
4094 /*
4095 * Allocate a suitable page with a default kernel mapping.
4096 */
4097 rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
4098 if (RT_FAILURE(rc))
4099 {
4100 OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
4101 return rc;
4102 }
4103 pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
4104 HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);
4105
4106#if 0 /** @todo Disabled this as we didn't do it before and it causes unnecessary stress on laptops.
4107 * It only applies to Windows and should probably be revisited later, if possible made part of the
4108 * timer code (return min granularity in RTTimerGetSystemGranularity and set it in RTTimerStart). */
4109 /*
4110 * Try bump up the system timer resolution.
4111 * The more interrupts the better...
4112 */
4113 if ( RT_SUCCESS(RTTimerRequestSystemGranularity( 488281 /* 2048 HZ */, &u32SystemResolution))
4114 || RT_SUCCESS(RTTimerRequestSystemGranularity( 500000 /* 2000 HZ */, &u32SystemResolution))
4115 || RT_SUCCESS(RTTimerRequestSystemGranularity( 976563 /* 1024 HZ */, &u32SystemResolution))
4116 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
4117 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1953125 /* 512 HZ */, &u32SystemResolution))
4118 || RT_SUCCESS(RTTimerRequestSystemGranularity( 2000000 /* 500 HZ */, &u32SystemResolution))
4119 || RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /* 256 HZ */, &u32SystemResolution))
4120 || RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /* 250 HZ */, &u32SystemResolution))
4121 || RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /* 128 HZ */, &u32SystemResolution))
4122 || RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /* 100 HZ */, &u32SystemResolution))
4123 || RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /* 64 HZ */, &u32SystemResolution))
4124 || RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /* 32 HZ */, &u32SystemResolution))
4125 )
4126 {
4127 Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
4128 pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
4129 }
4130#endif
4131
4132 /*
4133 * Find a reasonable update interval and initialize the structure.
4134 */
4135 u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
4136 while (u32Interval < 10000000 /* 10 ms */)
4137 u32Interval += u32SystemResolution;
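    /* For instance (illustration only): with a 1 ms (1 000 000 ns) system granularity the
       loop stops at exactly 10 ms, i.e. a 100 Hz GIP update rate; with a ~15.6 ms granularity
       the initial value already satisfies the condition, giving roughly 64 Hz. */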
4138
4139 supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);
4140
4141 /*
4142 * Create the timer.
4143 * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
4144 */
4145 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4146 {
4147 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL, supdrvGipAsyncTimer, pDevExt);
4148 if (rc == VERR_NOT_SUPPORTED)
4149 {
4150 OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
4151 pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
4152 }
4153 }
4154 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4155 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipSyncTimer, pDevExt);
4156 if (RT_SUCCESS(rc))
4157 {
4158 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4159 rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
4160 if (RT_SUCCESS(rc))
4161 {
4162 /*
4163 * We're good.
4164 */
4165 dprintf(("supdrvGipCreate: %ld ns interval.\n", (long)u32Interval));
4166 return VINF_SUCCESS;
4167 }
4168
4169 OSDBGPRINT(("supdrvGipCreate: failed to register MP event notification. rc=%d\n", rc));
4170 }
4171 else
4172 {
4173 OSDBGPRINT(("supdrvGipCreate: failed to create the GIP timer at %ld ns interval. rc=%d\n", (long)u32Interval, rc));
4174 Assert(!pDevExt->pGipTimer);
4175 }
4176 supdrvGipDestroy(pDevExt);
4177 return rc;
4178}
4179
4180
4181/**
4182 * Terminates the GIP.
4183 *
4184 * @param pDevExt Instance data. GIP stuff may be updated.
4185 */
4186static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
4187{
4188 int rc;
4189#ifdef DEBUG_DARWIN_GIP
4190 OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
4191 pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
4192 pDevExt->pGipTimer, pDevExt->GipMemObj));
4193#endif
4194
4195 /*
4196 * Invalidate the GIP data.
4197 */
4198 if (pDevExt->pGip)
4199 {
4200 supdrvGipTerm(pDevExt->pGip);
4201 pDevExt->pGip = NULL;
4202 }
4203
4204 /*
4205 * Destroy the timer and free the GIP memory object.
4206 */
4207 if (pDevExt->pGipTimer)
4208 {
4209 rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
4210 pDevExt->pGipTimer = NULL;
4211 }
4212
4213 if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
4214 {
4215 rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
4216 pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
4217 }
4218
4219 /*
4220 * Finally, release the system timer resolution request if one succeeded.
4221 */
4222 if (pDevExt->u32SystemTimerGranularityGrant)
4223 {
4224 rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
4225 pDevExt->u32SystemTimerGranularityGrant = 0;
4226 }
4227}
4228
4229
4230/**
4231 * Timer callback function for sync GIP mode.
4232 * @param pTimer The timer.
4233 * @param pvUser The device extension.
4234 */
4235static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4236{
4237 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4238 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4239
4240 supdrvGipUpdate(pDevExt->pGip, RTTimeSystemNanoTS());
4241
4242 ASMSetFlags(fOldFlags);
4243}
4244
4245
4246/**
4247 * Timer callback function for async GIP mode.
4248 * @param pTimer The timer.
4249 * @param pvUser The device extension.
4250 */
4251static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4252{
4253 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4254 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4255 RTCPUID idCpu = RTMpCpuId();
4256 uint64_t NanoTS = RTTimeSystemNanoTS();
4257
4258 /** @todo reset the transaction number and whatnot when iTick == 1. */
4259 if (pDevExt->idGipMaster == idCpu)
4260 supdrvGipUpdate(pDevExt->pGip, NanoTS);
4261 else
4262 supdrvGipUpdatePerCpu(pDevExt->pGip, NanoTS, ASMGetApicId());
4263
4264 ASMSetFlags(fOldFlags);
4265}
4266
4267
4268/**
4269 * Multiprocessor event notification callback.
4270 *
4271 * This is used to make sure that the GIP master role gets passed on to
4272 * another CPU.
4273 *
4274 * @param enmEvent The event.
4275 * @param idCpu The cpu it applies to.
4276 * @param pvUser Pointer to the device extension.
4277 */
4278static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
4279{
4280 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4281 if (enmEvent == RTMPEVENT_OFFLINE)
4282 {
4283 RTCPUID idGipMaster;
4284 ASMAtomicReadSize(&pDevExt->idGipMaster, &idGipMaster);
4285 if (idGipMaster == idCpu)
4286 {
4287 /*
4288 * Find a new GIP master.
4289 */
4290 bool fIgnored;
4291 unsigned i;
4292 RTCPUID idNewGipMaster = NIL_RTCPUID;
4293 RTCPUSET OnlineCpus;
4294 RTMpGetOnlineSet(&OnlineCpus);
4295
4296 for (i = 0; i < RTCPUSET_MAX_CPUS; i++)
4297 {
4298 RTCPUID idCurCpu = RTMpCpuIdFromSetIndex(i);
4299 if ( RTCpuSetIsMember(&OnlineCpus, idCurCpu)
4300 && idCurCpu != idGipMaster)
4301 {
4302 idNewGipMaster = idCurCpu;
4303 break;
4304 }
4305 }
4306
4307 dprintf(("supdrvGipMpEvent: Gip master %#lx -> %#lx\n", (long)idGipMaster, (long)idNewGipMaster));
4308 ASMAtomicCmpXchgSize(&pDevExt->idGipMaster, idNewGipMaster, idGipMaster, fIgnored);
4309 NOREF(fIgnored);
4310 }
4311 }
4312}
4313
4314
4315/**
4316 * Initializes the GIP data.
4317 *
4318 * @returns IPRT status code.
4319 * @param pDevExt Pointer to the device instance data.
4320 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4321 * @param HCPhys The physical address of the GIP.
4322 * @param u64NanoTS The current nanosecond timestamp.
4323 * @param uUpdateHz The update frequency.
4324 */
4325int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
4326{
4327 unsigned i;
4328#ifdef DEBUG_DARWIN_GIP
4329 OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4330#else
4331 LogFlow(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4332#endif
4333
4334 /*
4335 * Initialize the structure.
4336 */
4337 memset(pGip, 0, PAGE_SIZE);
4338 pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
4339 pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
4340 pGip->u32Mode = supdrvGipDeterminTscMode(pDevExt);
4341 pGip->u32UpdateHz = uUpdateHz;
4342 pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
4343 pGip->u64NanoTSLastUpdateHz = u64NanoTS;
4344
4345 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4346 {
4347 pGip->aCPUs[i].u32TransactionId = 2;
4348 pGip->aCPUs[i].u64NanoTS = u64NanoTS;
4349 pGip->aCPUs[i].u64TSC = ASMReadTSC();
4350
4351 /*
4352 * We don't know the following values until we've executed updates.
4353 * So, we'll just insert very high values.
4354 */
4355 pGip->aCPUs[i].u64CpuHz = _4G + 1;
4356 pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
4357 pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
4358 pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
4359 pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
4360 pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
4361 pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
4362 pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
4363 pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
4364 pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
4365 }
4366
4367 /*
4368 * Link it to the device extension.
4369 */
4370 pDevExt->pGip = pGip;
4371 pDevExt->HCPhysGip = HCPhys;
4372 pDevExt->cGipUsers = 0;
4373
4374 return VINF_SUCCESS;
4375}
4376
4377
4378/**
4379 * Callback used by supdrvDetermineAsyncTsc to read the TSC on a CPU.
4380 *
4381 * @param idCpu Ignored.
4382 * @param pvUser1 Where to put the TSC.
4383 * @param pvUser2 Ignored.
4384 */
4385static DECLCALLBACK(void) supdrvDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
4386{
4387#if 1
4388 ASMAtomicWriteU64((uint64_t volatile *)pvUser1, ASMReadTSC());
4389#else
4390 *(uint64_t *)pvUser1 = ASMReadTSC();
4391#endif
4392}
4393
4394
4395/**
4396 * Determine if Async GIP mode is required because of TSC drift.
4397 *
4398 * When using the default/normal timer code it is essential that the time stamp counter
4399 * (TSC) never runs backwards, that is, a read of the counter should return a bigger
4400 * value than any previous read. This is guaranteed by the latest
4401 * AMD CPUs and by newer Intel CPUs which never enter the C2 state (P4). In any other
4402 * case we have to choose the asynchronous timer mode.
4403 *
4404 * @param poffMin Pointer to the determined difference between different cores.
4405 * @return false if the time stamp counters appear to be in sync, true otherwise.
4406 */
4407bool VBOXCALL supdrvDetermineAsyncTsc(uint64_t *poffMin)
4408{
4409 /*
4410 * Just iterate over all the CPUs 8 times and make sure that the TSC is
4411 * always increasing. We don't bother taking TSC rollover into account.
4412 */
4413 RTCPUSET CpuSet;
4414 int iLastCpu = RTCpuLastIndex(RTMpGetSet(&CpuSet));
4415 int iCpu;
4416 int cLoops = 8;
4417 bool fAsync = false;
4418 int rc = VINF_SUCCESS;
4419 uint64_t offMax = 0;
4420 uint64_t offMin = ~(uint64_t)0;
4421 uint64_t PrevTsc = ASMReadTSC();
4422
4423 while (cLoops-- > 0)
4424 {
4425 for (iCpu = 0; iCpu <= iLastCpu; iCpu++)
4426 {
4427 uint64_t CurTsc;
4428 rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(iCpu), supdrvDetermineAsyncTscWorker, &CurTsc, NULL);
4429 if (RT_SUCCESS(rc))
4430 {
4431 if (CurTsc <= PrevTsc)
4432 {
4433 fAsync = true;
4434 offMin = offMax = PrevTsc - CurTsc;
4435 dprintf(("supdrvDetermineAsyncTsc: iCpu=%d cLoops=%d CurTsc=%llx PrevTsc=%llx\n",
4436 iCpu, cLoops, CurTsc, PrevTsc));
4437 break;
4438 }
4439
4440 /* Gather statistics (except the first time). */
4441 if (iCpu != 0 || cLoops != 7)
4442 {
4443 uint64_t off = CurTsc - PrevTsc;
4444 if (off < offMin)
4445 offMin = off;
4446 if (off > offMax)
4447 offMax = off;
4448 dprintf2(("%d/%d: off=%llx\n", cLoops, iCpu, off));
4449 }
4450
4451 /* Next */
4452 PrevTsc = CurTsc;
4453 }
4454 else if (rc == VERR_NOT_SUPPORTED)
4455 break;
4456 else
4457 AssertMsg(rc == VERR_CPU_NOT_FOUND || rc == VERR_CPU_OFFLINE, ("%d\n", rc));
4458 }
4459
4460 /* broke out of the loop. */
4461 if (iCpu <= iLastCpu)
4462 break;
4463 }
4464
4465 *poffMin = offMin; /* Almost RTMpOnSpecific profiling. */
4466 dprintf(("supdrvDetermineAsyncTsc: returns %d; iLastCpu=%d rc=%d offMin=%llx offMax=%llx\n",
4467 fAsync, iLastCpu, rc, offMin, offMax));
4468#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_OS2) && !defined(RT_OS_WINDOWS)
4469 OSDBGPRINT(("vboxdrv: fAsync=%d offMin=%#lx offMax=%#lx\n", fAsync, (long)offMin, (long)offMax));
4470#endif
4471 return fAsync;
4472}
4473
4474
4475/**
4476 * Determine the GIP TSC mode.
4477 *
4478 * @returns The most suitable TSC mode.
4479 * @param pDevExt Pointer to the device instance data.
4480 */
4481static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt)
4482{
4483 /*
4484 * On SMP we're faced with two problems:
4485 * (1) There might be a skew between the CPUs, so that cpu0
4486 * returns a TSC that is slightly different from cpu1.
4487 * (2) Power management (and other things) may cause the TSC
4488 * to run at a non-constant speed, and cause the speed
4489 * to be different on the cpus. This will result in (1).
4490 *
4491 * So, on SMP systems we'll have to select the ASYNC update method
4492 * if there are symptoms of these problems.
4493 */
4494 if (RTMpGetCount() > 1)
4495 {
4496 uint32_t uEAX, uEBX, uECX, uEDX;
4497 uint64_t u64DiffCoresIgnored;
4498
4499 /* Permit the user and/or the OS specific bits to force async mode. */
4500 if (supdrvOSGetForcedAsyncTscMode(pDevExt))
4501 return SUPGIPMODE_ASYNC_TSC;
4502
4503 /* Check for current differences between the CPUs. */
4504 if (supdrvDetermineAsyncTsc(&u64DiffCoresIgnored))
4505 return SUPGIPMODE_ASYNC_TSC;
4506
4507 /*
4508 * If the CPU supports power management and is an AMD one we
4509 * won't trust it unless the TscInvariant bit is set.
4510 */
4511 /* Check for "AuthenticAMD" */
4512 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
4513 if ( uEAX >= 1
4514 && uEBX == X86_CPUID_VENDOR_AMD_EBX
4515 && uECX == X86_CPUID_VENDOR_AMD_ECX
4516 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
4517 {
4518 /* Check for APM support and that TscInvariant is cleared. */
4519 ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
4520 if (uEAX >= 0x80000007)
4521 {
4522 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
4523 if ( !(uEDX & RT_BIT(8))/* TscInvariant */
4524 && (uEDX & 0x3e)) /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
4525 return SUPGIPMODE_ASYNC_TSC;
4526 }
4527 }
4528 }
4529 return SUPGIPMODE_SYNC_TSC;
4530}
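/*
 * Worked illustration of the heuristic above (not part of the driver): on an SMP AMD
 * system where CPUID 0x80000007 reports EDX bit 8 (TscInvariant) clear but some of the
 * FID/VID/THERMTRIP/TM/STC bits set, SUPGIPMODE_ASYNC_TSC is selected; if TscInvariant
 * is set (or no power management features are reported), SUPGIPMODE_SYNC_TSC is kept
 * unless the direct TSC comparison in supdrvDetermineAsyncTsc() already saw a skew.
 */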
4531
4532
4533/**
4534 * Invalidates the GIP data upon termination.
4535 *
4536 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4537 */
4538void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
4539{
4540 unsigned i;
4541 pGip->u32Magic = 0;
4542 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4543 {
4544 pGip->aCPUs[i].u64NanoTS = 0;
4545 pGip->aCPUs[i].u64TSC = 0;
4546 pGip->aCPUs[i].iTSCHistoryHead = 0;
4547 }
4548}
4549
4550
4551/**
4552 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
4553 * updates all the per cpu data except the transaction id.
4554 *
4555 * @param pGip The GIP.
4556 * @param pGipCpu Pointer to the per cpu data.
4557 * @param u64NanoTS The current time stamp.
4558 */
4559static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS)
4560{
4561 uint64_t u64TSC;
4562 uint64_t u64TSCDelta;
4563 uint32_t u32UpdateIntervalTSC;
4564 uint32_t u32UpdateIntervalTSCSlack;
4565 unsigned iTSCHistoryHead;
4566 uint64_t u64CpuHz;
4567
4568 /*
4569 * Update the NanoTS.
4570 */
4571 ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);
4572
4573 /*
4574 * Calc TSC delta.
4575 */
4576 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
4577 u64TSC = ASMReadTSC();
4578 u64TSCDelta = u64TSC - pGipCpu->u64TSC;
4579 ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);
4580
4581 if (u64TSCDelta >> 32)
4582 {
4583 u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
4584 pGipCpu->cErrors++;
4585 }
4586
4587 /*
4588 * TSC History.
4589 */
4590 Assert(RT_ELEMENTS(pGipCpu->au32TSCHistory) == 8);
4591
4592 iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
4593 ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
4594 ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
4595
4596 /*
4597 * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
4598 */
4599 if (pGip->u32UpdateHz >= 1000)
4600 {
4601 uint32_t u32;
4602 u32 = pGipCpu->au32TSCHistory[0];
4603 u32 += pGipCpu->au32TSCHistory[1];
4604 u32 += pGipCpu->au32TSCHistory[2];
4605 u32 += pGipCpu->au32TSCHistory[3];
4606 u32 >>= 2;
4607 u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
4608 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
4609 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
4610 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
4611 u32UpdateIntervalTSC >>= 2;
4612 u32UpdateIntervalTSC += u32;
4613 u32UpdateIntervalTSC >>= 1;
4614
4615 /* Value chosen for a 2GHz Athlon64 running Linux 2.6.10/11. */
4616 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
4617 }
4618 else if (pGip->u32UpdateHz >= 90)
4619 {
4620 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4621 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
4622 u32UpdateIntervalTSC >>= 1;
4623
4624 /* Value chosen on a 2GHz ThinkPad running Windows. */
4625 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
4626 }
4627 else
4628 {
4629 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4630
4631 /* This value hasn't been checked yet... waiting for OS/2 and 33Hz timers. :-) */
4632 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
4633 }
4634 ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
4635
4636 /*
4637 * CpuHz.
4638 */
4639 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
4640 ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
4641}
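/*
 * Worked illustration of the CpuHz calculation above (not part of the driver): with a
 * 100 Hz update rate and an averaged update interval of 20 000 000 TSC ticks, the CPU
 * frequency estimate becomes 20 000 000 * 100 = 2 000 000 000 Hz, i.e. a 2 GHz TSC.
 */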
4642
4643
4644/**
4645 * Updates the GIP.
4646 *
4647 * @param pGip Pointer to the GIP.
4648 * @param u64NanoTS The current nanosecond timestamp.
4649 */
4650void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS)
4651{
4652 /*
4653 * Determine the relevant CPU data.
4654 */
4655 PSUPGIPCPU pGipCpu;
4656 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4657 pGipCpu = &pGip->aCPUs[0];
4658 else
4659 {
4660 unsigned iCpu = ASMGetApicId();
4661 if (RT_UNLIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
4662 return;
4663 pGipCpu = &pGip->aCPUs[iCpu];
4664 }
4665
4666 /*
4667 * Start update transaction.
4668 */
4669 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4670 {
4671 /* this can happen on win32 if we're taking too long and there are more CPUs around. shouldn't happen though. */
4672 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4673 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4674 pGipCpu->cErrors++;
4675 return;
4676 }
4677
4678 /*
4679 * Recalc the update frequency every 0x800th time.
4680 */
4681 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4682 {
4683 if (pGip->u64NanoTSLastUpdateHz)
4684 {
4685#ifdef RT_ARCH_AMD64 /** @todo fix 64-bit div here to work on x86 linux. */
4686 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4687 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
4688 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4689 {
4690 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4691 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4692 }
4693#endif
4694 }
4695 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4696 }
4697
4698 /*
4699 * Update the data.
4700 */
4701 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4702
4703 /*
4704 * Complete transaction.
4705 */
4706 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4707}
4708
4709
4710/**
4711 * Updates the per cpu GIP data for the calling cpu.
4712 *
4713 * @param pGip Pointer to the GIP.
4714 * @param u64NanoTS The current nanosecond timestamp.
4715 * @param iCpu The CPU index.
4716 */
4717void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, unsigned iCpu)
4718{
4719 PSUPGIPCPU pGipCpu;
4720
4721 if (RT_LIKELY(iCpu < RT_ELEMENTS(pGip->aCPUs)))
4722 {
4723 pGipCpu = &pGip->aCPUs[iCpu];
4724
4725 /*
4726 * Start update transaction.
4727 */
4728 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4729 {
4730 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4731 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4732 pGipCpu->cErrors++;
4733 return;
4734 }
4735
4736 /*
4737 * Update the data.
4738 */
4739 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4740
4741 /*
4742 * Complete transaction.
4743 */
4744 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4745 }
4746}
4747