VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.c@ 19564

Last change on this file since 19564 was 19454, checked in by vboxsync, 16 years ago

VMM++: More on poking. Fixed broken R0 stats (wrong way of calling into VMMR0), use NIL_VMCPUID instead of 0 to VMMR0EntryEx when it is supposed to be irrellevant. Use VMCPUID. Allow for and check NIL_VMCPUID. Fixed a few missing/wrong idCpu checks (paranoia mostly).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 188.7 KB
Line 
1/* $Revision: 19454 $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#define LOG_GROUP LOG_GROUP_SUP_DRV
35#include "SUPDrvInternal.h"
36#ifndef PAGE_SHIFT
37# include <iprt/param.h>
38#endif
39#include <iprt/alloc.h>
40#include <iprt/semaphore.h>
41#include <iprt/spinlock.h>
42#include <iprt/thread.h>
43#include <iprt/process.h>
44#include <iprt/mp.h>
45#include <iprt/power.h>
46#include <iprt/cpuset.h>
47#include <iprt/uuid.h>
48#include <VBox/param.h>
49#include <VBox/log.h>
50#include <VBox/err.h>
51#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
52# include <iprt/crc32.h>
53# include <iprt/net.h>
54# include <iprt/string.h>
55#endif
56/* VBox/x86.h not compatible with the Linux kernel sources */
57#ifdef RT_OS_LINUX
58# define X86_CPUID_VENDOR_AMD_EBX 0x68747541
59# define X86_CPUID_VENDOR_AMD_ECX 0x444d4163
60# define X86_CPUID_VENDOR_AMD_EDX 0x69746e65
61#else
62# include <VBox/x86.h>
63#endif
64
65/*
66 * Logging assignments:
67 * Log - useful stuff, like failures.
68 * LogFlow - program flow, except the really noisy bits.
69 * Log2 - Cleanup.
70 * Log3 - Loader flow noise.
71 * Log4 - Call VMMR0 flow noise.
72 * Log5 - Native yet-to-be-defined noise.
73 * Log6 - Native ioctl flow noise.
74 *
75 * Logging requires BUILD_TYPE=debug and possibly changes to the logger
 76 * instantiation in log-vbox.c(pp).
77 */
78
79
80/*******************************************************************************
81* Defined Constants And Macros *
82*******************************************************************************/
/* from x86.h - clashes with linux thus this duplication */
#undef X86_CR0_PG
#define X86_CR0_PG                              RT_BIT(31)  /**< CR0 bit 31: paging enabled. */
#undef X86_CR0_PE
#define X86_CR0_PE                              RT_BIT(0)   /**< CR0 bit 0: protected mode enabled. */
#undef X86_CPUID_AMD_FEATURE_EDX_NX
#define X86_CPUID_AMD_FEATURE_EDX_NX            RT_BIT(20)  /**< CPUID 0x80000001 EDX bit 20: No-Execute page support. */
#undef MSR_K6_EFER
#define MSR_K6_EFER                             0xc0000080  /**< Extended Feature Enable Register MSR number. */
#undef MSR_K6_EFER_NXE
#define MSR_K6_EFER_NXE                         RT_BIT(11)  /**< EFER bit 11: No-Execute enable. */
#undef MSR_K6_EFER_LMA
#define MSR_K6_EFER_LMA                         RT_BIT(10)  /**< EFER bit 10: Long Mode Active (read-only status bit). */
#undef X86_CR4_PGE
#define X86_CR4_PGE                             RT_BIT(7)   /**< CR4 bit 7: global pages enabled. */
#undef X86_CR4_PAE
#define X86_CR4_PAE                             RT_BIT(5)   /**< CR4 bit 5: Physical Address Extension enabled. */
#undef X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
#define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE     RT_BIT(29)  /**< CPUID 0x80000001 EDX bit 29: long mode (AMD64) support. */
102
103
104/** The frequency by which we recalculate the u32UpdateHz and
105 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
106#define GIP_UPDATEHZ_RECALC_FREQ 0x800
107
108/**
109 * Validates a session pointer.
110 *
111 * @returns true/false accordingly.
112 * @param pSession The session.
113 */
114#define SUP_IS_SESSION_VALID(pSession) \
115 ( VALID_PTR(pSession) \
116 && pSession->u32Cookie == BIRD_INV)
117
118/** @def VBOX_SVN_REV
119 * The makefile should define this if it can. */
120#ifndef VBOX_SVN_REV
121# define VBOX_SVN_REV 0
122#endif
123
124/*******************************************************************************
125* Internal Functions *
126*******************************************************************************/
127static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
128static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
129static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
130static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
131static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
132static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
133static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
134static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
135static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
136static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
137static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
138static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
139static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq);
140static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt);
141#ifdef RT_OS_WINDOWS
142static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
143static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3);
144#endif /* RT_OS_WINDOWS */
145static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
146static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
147static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
148static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
149static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
150
151#ifdef RT_WITH_W64_UNWIND_HACK
152DECLASM(int) supdrvNtWrapVMMR0EntryEx(PFNRT pfnVMMR0EntryEx, PVM pVM, VMCPUID idCpu, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession);
153DECLASM(int) supdrvNtWrapVMMR0EntryFast(PFNRT pfnVMMR0EntryFast, PVM pVM, VMCPUID idCpu, unsigned uOperation);
154DECLASM(void) supdrvNtWrapObjDestructor(PFNRT pfnDestruction, void *pvObj, void *pvUser1, void *pvUser2);
155DECLASM(void *) supdrvNtWrapQueryFactoryInterface(PFNRT pfnQueryFactoryInterface, struct SUPDRVFACTORY const *pSupDrvFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid);
156DECLASM(int) supdrvNtWrapModuleInit(PFNRT pfnModuleInit);
157DECLASM(void) supdrvNtWrapModuleTerm(PFNRT pfnModuleTerm);
158DECLASM(int) supdrvNtWrapServiceReqHandler(PFNRT pfnServiceReqHandler, PSUPDRVSESSION pSession, uint32_t uOperation, uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr);
159
160DECLASM(int) UNWIND_WRAP(SUPR0ComponentRegisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
161DECLASM(int) UNWIND_WRAP(SUPR0ComponentDeregisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
162DECLASM(int) UNWIND_WRAP(SUPR0ComponentQueryFactory)(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf);
163DECLASM(void *) UNWIND_WRAP(SUPR0ObjRegister)(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2);
164DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRef)(void *pvObj, PSUPDRVSESSION pSession);
165DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRefEx)(void *pvObj, PSUPDRVSESSION pSession, bool fNoPreempt);
166DECLASM(int) UNWIND_WRAP(SUPR0ObjRelease)(void *pvObj, PSUPDRVSESSION pSession);
167DECLASM(int) UNWIND_WRAP(SUPR0ObjVerifyAccess)(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName);
168DECLASM(int) UNWIND_WRAP(SUPR0LockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
169DECLASM(int) UNWIND_WRAP(SUPR0UnlockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
170DECLASM(int) UNWIND_WRAP(SUPR0ContAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys);
171DECLASM(int) UNWIND_WRAP(SUPR0ContFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
172DECLASM(int) UNWIND_WRAP(SUPR0LowAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages);
173DECLASM(int) UNWIND_WRAP(SUPR0LowFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
174DECLASM(int) UNWIND_WRAP(SUPR0MemAlloc)(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3);
175DECLASM(int) UNWIND_WRAP(SUPR0MemGetPhys)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages);
176DECLASM(int) UNWIND_WRAP(SUPR0MemFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
177DECLASM(int) UNWIND_WRAP(SUPR0PageAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages);
178DECLASM(int) UNWIND_WRAP(SUPR0PageFree)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
179//DECLASM(int) UNWIND_WRAP(SUPR0Printf)(const char *pszFormat, ...);
180DECLASM(SUPPAGINGMODE) UNWIND_WRAP(SUPR0GetPagingMode)(void);
181DECLASM(void *) UNWIND_WRAP(RTMemAlloc)(size_t cb) RT_NO_THROW;
182DECLASM(void *) UNWIND_WRAP(RTMemAllocZ)(size_t cb) RT_NO_THROW;
183DECLASM(void) UNWIND_WRAP(RTMemFree)(void *pv) RT_NO_THROW;
184DECLASM(void *) UNWIND_WRAP(RTMemDup)(const void *pvSrc, size_t cb) RT_NO_THROW;
185DECLASM(void *) UNWIND_WRAP(RTMemDupEx)(const void *pvSrc, size_t cbSrc, size_t cbExtra) RT_NO_THROW;
186DECLASM(void *) UNWIND_WRAP(RTMemRealloc)(void *pvOld, size_t cbNew) RT_NO_THROW;
187DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocLow)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
188DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPage)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
189DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhys)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
190DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhysNC)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
191DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocCont)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
192DECLASM(int) UNWIND_WRAP(RTR0MemObjEnterPhys)(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb);
193DECLASM(int) UNWIND_WRAP(RTR0MemObjLockUser)(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process);
194DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernel)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt);
195DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernelEx)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt, size_t offSub, size_t cbSub);
196DECLASM(int) UNWIND_WRAP(RTR0MemObjMapUser)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process);
197/*DECLASM(void *) UNWIND_WRAP(RTR0MemObjAddress)(RTR0MEMOBJ MemObj); - not necessary */
198/*DECLASM(RTR3PTR) UNWIND_WRAP(RTR0MemObjAddressR3)(RTR0MEMOBJ MemObj); - not necessary */
199/*DECLASM(size_t) UNWIND_WRAP(RTR0MemObjSize)(RTR0MEMOBJ MemObj); - not necessary */
200/*DECLASM(bool) UNWIND_WRAP(RTR0MemObjIsMapping)(RTR0MEMOBJ MemObj); - not necessary */
201/*DECLASM(RTHCPHYS) UNWIND_WRAP(RTR0MemObjGetPagePhysAddr)(RTR0MEMOBJ MemObj, size_t iPage); - not necessary */
202DECLASM(int) UNWIND_WRAP(RTR0MemObjFree)(RTR0MEMOBJ MemObj, bool fFreeMappings);
203/* RTProcSelf - not necessary */
204/* RTR0ProcHandleSelf - not necessary */
205DECLASM(int) UNWIND_WRAP(RTSemFastMutexCreate)(PRTSEMFASTMUTEX pMutexSem);
206DECLASM(int) UNWIND_WRAP(RTSemFastMutexDestroy)(RTSEMFASTMUTEX MutexSem);
207DECLASM(int) UNWIND_WRAP(RTSemFastMutexRequest)(RTSEMFASTMUTEX MutexSem);
208DECLASM(int) UNWIND_WRAP(RTSemFastMutexRelease)(RTSEMFASTMUTEX MutexSem);
209DECLASM(int) UNWIND_WRAP(RTSemEventCreate)(PRTSEMEVENT pEventSem);
210DECLASM(int) UNWIND_WRAP(RTSemEventSignal)(RTSEMEVENT EventSem);
211DECLASM(int) UNWIND_WRAP(RTSemEventWait)(RTSEMEVENT EventSem, unsigned cMillies);
212DECLASM(int) UNWIND_WRAP(RTSemEventWaitNoResume)(RTSEMEVENT EventSem, unsigned cMillies);
213DECLASM(int) UNWIND_WRAP(RTSemEventDestroy)(RTSEMEVENT EventSem);
214DECLASM(int) UNWIND_WRAP(RTSemEventMultiCreate)(PRTSEMEVENTMULTI pEventMultiSem);
215DECLASM(int) UNWIND_WRAP(RTSemEventMultiSignal)(RTSEMEVENTMULTI EventMultiSem);
216DECLASM(int) UNWIND_WRAP(RTSemEventMultiReset)(RTSEMEVENTMULTI EventMultiSem);
217DECLASM(int) UNWIND_WRAP(RTSemEventMultiWait)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
218DECLASM(int) UNWIND_WRAP(RTSemEventMultiWaitNoResume)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
219DECLASM(int) UNWIND_WRAP(RTSemEventMultiDestroy)(RTSEMEVENTMULTI EventMultiSem);
220DECLASM(int) UNWIND_WRAP(RTSpinlockCreate)(PRTSPINLOCK pSpinlock);
221DECLASM(int) UNWIND_WRAP(RTSpinlockDestroy)(RTSPINLOCK Spinlock);
222DECLASM(void) UNWIND_WRAP(RTSpinlockAcquire)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
223DECLASM(void) UNWIND_WRAP(RTSpinlockRelease)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
224DECLASM(void) UNWIND_WRAP(RTSpinlockAcquireNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
225DECLASM(void) UNWIND_WRAP(RTSpinlockReleaseNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
226/* RTTimeNanoTS - not necessary */
227/* RTTimeMilliTS - not necessary */
228/* RTTimeSystemNanoTS - not necessary */
229/* RTTimeSystemMilliTS - not necessary */
230/* RTThreadNativeSelf - not necessary */
231DECLASM(int) UNWIND_WRAP(RTThreadSleep)(unsigned cMillies);
232DECLASM(bool) UNWIND_WRAP(RTThreadYield)(void);
233#if 0
234/* RTThreadSelf - not necessary */
235DECLASM(int) UNWIND_WRAP(RTThreadCreate)(PRTTHREAD pThread, PFNRTTHREAD pfnThread, void *pvUser, size_t cbStack,
236 RTTHREADTYPE enmType, unsigned fFlags, const char *pszName);
237DECLASM(RTNATIVETHREAD) UNWIND_WRAP(RTThreadGetNative)(RTTHREAD Thread);
238DECLASM(int) UNWIND_WRAP(RTThreadWait)(RTTHREAD Thread, unsigned cMillies, int *prc);
239DECLASM(int) UNWIND_WRAP(RTThreadWaitNoResume)(RTTHREAD Thread, unsigned cMillies, int *prc);
240DECLASM(const char *) UNWIND_WRAP(RTThreadGetName)(RTTHREAD Thread);
241DECLASM(const char *) UNWIND_WRAP(RTThreadSelfName)(void);
242DECLASM(RTTHREADTYPE) UNWIND_WRAP(RTThreadGetType)(RTTHREAD Thread);
243DECLASM(int) UNWIND_WRAP(RTThreadUserSignal)(RTTHREAD Thread);
244DECLASM(int) UNWIND_WRAP(RTThreadUserReset)(RTTHREAD Thread);
245DECLASM(int) UNWIND_WRAP(RTThreadUserWait)(RTTHREAD Thread, unsigned cMillies);
246DECLASM(int) UNWIND_WRAP(RTThreadUserWaitNoResume)(RTTHREAD Thread, unsigned cMillies);
247#endif
248/* RTLogDefaultInstance - a bit of a gamble, but we do not want the overhead! */
249/* RTMpCpuId - not necessary */
250/* RTMpCpuIdFromSetIndex - not necessary */
251/* RTMpCpuIdToSetIndex - not necessary */
252/* RTMpIsCpuPossible - not necessary */
253/* RTMpGetCount - not necessary */
254/* RTMpGetMaxCpuId - not necessary */
255/* RTMpGetOnlineCount - not necessary */
256/* RTMpGetOnlineSet - not necessary */
257/* RTMpGetSet - not necessary */
258/* RTMpIsCpuOnline - not necessary */
259DECLASM(int) UNWIND_WRAP(RTMpIsCpuWorkPending)(void);
260DECLASM(int) UNWIND_WRAP(RTMpOnAll)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
261DECLASM(int) UNWIND_WRAP(RTMpOnOthers)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
262DECLASM(int) UNWIND_WRAP(RTMpOnSpecific)(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
263DECLASM(int) UNWIND_WRAP(RTMpPokeCpu)(RTCPUID idCpu);
264/* RTLogRelDefaultInstance - not necessary. */
265DECLASM(int) UNWIND_WRAP(RTLogSetDefaultInstanceThread)(PRTLOGGER pLogger, uintptr_t uKey);
266/* RTLogLogger - can't wrap this buster. */
267/* RTLogLoggerEx - can't wrap this buster. */
268DECLASM(void) UNWIND_WRAP(RTLogLoggerExV)(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args);
269/* RTLogPrintf - can't wrap this buster. */ /** @todo provide va_list log wrappers in RuntimeR0. */
270DECLASM(void) UNWIND_WRAP(RTLogPrintfV)(const char *pszFormat, va_list args);
271DECLASM(void) UNWIND_WRAP(AssertMsg1)(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction);
272/* AssertMsg2 - can't wrap this buster. */
273#endif /* RT_WITH_W64_UNWIND_HACK */
274
275
276/*******************************************************************************
277* Global Variables *
278*******************************************************************************/
/**
 * Array of the R0 SUP API.
 *
 * Table of symbols exported to ring-0 images loaded through the support
 * driver.  NOTE: the first ten entries carry absolute values, not function
 * pointers; their pfn members start out as 0 and are patched at runtime by
 * supdrvInitDevExt(), which indexes this table directly ([0]..[9]) — do not
 * reorder or insert entries before the "Normal function pointers" marker.
 */
static SUPFUNC g_aFunctions[] =
{
    /* name                                     function */
    /* Entries with absolute addresses determined at runtime, fixup
       code makes ugly ASSUMPTIONS about the order here: */
    { "SUPR0AbsIs64bit",                        (void *)0 },
    { "SUPR0Abs64bitKernelCS",                  (void *)0 },
    { "SUPR0Abs64bitKernelSS",                  (void *)0 },
    { "SUPR0Abs64bitKernelDS",                  (void *)0 },
    { "SUPR0AbsKernelCS",                       (void *)0 },
    { "SUPR0AbsKernelSS",                       (void *)0 },
    { "SUPR0AbsKernelDS",                       (void *)0 },
    { "SUPR0AbsKernelES",                       (void *)0 },
    { "SUPR0AbsKernelFS",                       (void *)0 },
    { "SUPR0AbsKernelGS",                       (void *)0 },
    /* Normal function pointers: */
    { "SUPR0ComponentRegisterFactory",          (void *)UNWIND_WRAP(SUPR0ComponentRegisterFactory) },
    { "SUPR0ComponentDeregisterFactory",        (void *)UNWIND_WRAP(SUPR0ComponentDeregisterFactory) },
    { "SUPR0ComponentQueryFactory",             (void *)UNWIND_WRAP(SUPR0ComponentQueryFactory) },
    { "SUPR0ObjRegister",                       (void *)UNWIND_WRAP(SUPR0ObjRegister) },
    { "SUPR0ObjAddRef",                         (void *)UNWIND_WRAP(SUPR0ObjAddRef) },
    { "SUPR0ObjAddRefEx",                       (void *)UNWIND_WRAP(SUPR0ObjAddRefEx) },
    { "SUPR0ObjRelease",                        (void *)UNWIND_WRAP(SUPR0ObjRelease) },
    { "SUPR0ObjVerifyAccess",                   (void *)UNWIND_WRAP(SUPR0ObjVerifyAccess) },
    { "SUPR0LockMem",                           (void *)UNWIND_WRAP(SUPR0LockMem) },
    { "SUPR0UnlockMem",                         (void *)UNWIND_WRAP(SUPR0UnlockMem) },
    { "SUPR0ContAlloc",                         (void *)UNWIND_WRAP(SUPR0ContAlloc) },
    { "SUPR0ContFree",                          (void *)UNWIND_WRAP(SUPR0ContFree) },
    { "SUPR0LowAlloc",                          (void *)UNWIND_WRAP(SUPR0LowAlloc) },
    { "SUPR0LowFree",                           (void *)UNWIND_WRAP(SUPR0LowFree) },
    { "SUPR0MemAlloc",                          (void *)UNWIND_WRAP(SUPR0MemAlloc) },
    { "SUPR0MemGetPhys",                        (void *)UNWIND_WRAP(SUPR0MemGetPhys) },
    { "SUPR0MemFree",                           (void *)UNWIND_WRAP(SUPR0MemFree) },
    { "SUPR0PageAlloc",                         (void *)UNWIND_WRAP(SUPR0PageAlloc) },
    { "SUPR0PageFree",                          (void *)UNWIND_WRAP(SUPR0PageFree) },
    { "SUPR0Printf",                            (void *)SUPR0Printf }, /** @todo needs wrapping? */
    { "SUPR0GetPagingMode",                     (void *)UNWIND_WRAP(SUPR0GetPagingMode) },
    { "SUPR0EnableVTx",                         (void *)SUPR0EnableVTx },
    { "RTMemAlloc",                             (void *)UNWIND_WRAP(RTMemAlloc) },
    { "RTMemAllocZ",                            (void *)UNWIND_WRAP(RTMemAllocZ) },
    { "RTMemFree",                              (void *)UNWIND_WRAP(RTMemFree) },
    /*{ "RTMemDup",                             (void *)UNWIND_WRAP(RTMemDup) },
    { "RTMemDupEx",                             (void *)UNWIND_WRAP(RTMemDupEx) },*/
    { "RTMemRealloc",                           (void *)UNWIND_WRAP(RTMemRealloc) },
    { "RTR0MemObjAllocLow",                     (void *)UNWIND_WRAP(RTR0MemObjAllocLow) },
    { "RTR0MemObjAllocPage",                    (void *)UNWIND_WRAP(RTR0MemObjAllocPage) },
    { "RTR0MemObjAllocPhys",                    (void *)UNWIND_WRAP(RTR0MemObjAllocPhys) },
    { "RTR0MemObjAllocPhysNC",                  (void *)UNWIND_WRAP(RTR0MemObjAllocPhysNC) },
    { "RTR0MemObjAllocCont",                    (void *)UNWIND_WRAP(RTR0MemObjAllocCont) },
    { "RTR0MemObjEnterPhys",                    (void *)UNWIND_WRAP(RTR0MemObjEnterPhys) },
    { "RTR0MemObjLockUser",                     (void *)UNWIND_WRAP(RTR0MemObjLockUser) },
    { "RTR0MemObjMapKernel",                    (void *)UNWIND_WRAP(RTR0MemObjMapKernel) },
    { "RTR0MemObjMapKernelEx",                  (void *)UNWIND_WRAP(RTR0MemObjMapKernelEx) },
    { "RTR0MemObjMapUser",                      (void *)UNWIND_WRAP(RTR0MemObjMapUser) },
    /* The following accessors are exported unwrapped: */
    { "RTR0MemObjAddress",                      (void *)RTR0MemObjAddress },
    { "RTR0MemObjAddressR3",                    (void *)RTR0MemObjAddressR3 },
    { "RTR0MemObjSize",                         (void *)RTR0MemObjSize },
    { "RTR0MemObjIsMapping",                    (void *)RTR0MemObjIsMapping },
    { "RTR0MemObjGetPagePhysAddr",              (void *)RTR0MemObjGetPagePhysAddr },
    { "RTR0MemObjFree",                         (void *)UNWIND_WRAP(RTR0MemObjFree) },
/* These don't work yet on linux - use fast mutexes!
    { "RTSemMutexCreate",                       (void *)RTSemMutexCreate },
    { "RTSemMutexRequest",                      (void *)RTSemMutexRequest },
    { "RTSemMutexRelease",                      (void *)RTSemMutexRelease },
    { "RTSemMutexDestroy",                      (void *)RTSemMutexDestroy },
*/
    { "RTProcSelf",                             (void *)RTProcSelf },
    { "RTR0ProcHandleSelf",                     (void *)RTR0ProcHandleSelf },
    { "RTSemFastMutexCreate",                   (void *)UNWIND_WRAP(RTSemFastMutexCreate) },
    { "RTSemFastMutexDestroy",                  (void *)UNWIND_WRAP(RTSemFastMutexDestroy) },
    { "RTSemFastMutexRequest",                  (void *)UNWIND_WRAP(RTSemFastMutexRequest) },
    { "RTSemFastMutexRelease",                  (void *)UNWIND_WRAP(RTSemFastMutexRelease) },
    { "RTSemEventCreate",                       (void *)UNWIND_WRAP(RTSemEventCreate) },
    { "RTSemEventSignal",                       (void *)UNWIND_WRAP(RTSemEventSignal) },
    { "RTSemEventWait",                         (void *)UNWIND_WRAP(RTSemEventWait) },
    { "RTSemEventWaitNoResume",                 (void *)UNWIND_WRAP(RTSemEventWaitNoResume) },
    { "RTSemEventDestroy",                      (void *)UNWIND_WRAP(RTSemEventDestroy) },
    { "RTSemEventMultiCreate",                  (void *)UNWIND_WRAP(RTSemEventMultiCreate) },
    { "RTSemEventMultiSignal",                  (void *)UNWIND_WRAP(RTSemEventMultiSignal) },
    { "RTSemEventMultiReset",                   (void *)UNWIND_WRAP(RTSemEventMultiReset) },
    { "RTSemEventMultiWait",                    (void *)UNWIND_WRAP(RTSemEventMultiWait) },
    { "RTSemEventMultiWaitNoResume",            (void *)UNWIND_WRAP(RTSemEventMultiWaitNoResume) },
    { "RTSemEventMultiDestroy",                 (void *)UNWIND_WRAP(RTSemEventMultiDestroy) },
    { "RTSpinlockCreate",                       (void *)UNWIND_WRAP(RTSpinlockCreate) },
    { "RTSpinlockDestroy",                      (void *)UNWIND_WRAP(RTSpinlockDestroy) },
    { "RTSpinlockAcquire",                      (void *)UNWIND_WRAP(RTSpinlockAcquire) },
    { "RTSpinlockRelease",                      (void *)UNWIND_WRAP(RTSpinlockRelease) },
    { "RTSpinlockAcquireNoInts",                (void *)UNWIND_WRAP(RTSpinlockAcquireNoInts) },
    { "RTSpinlockReleaseNoInts",                (void *)UNWIND_WRAP(RTSpinlockReleaseNoInts) },
    { "RTTimeNanoTS",                           (void *)RTTimeNanoTS },
    /* Note: the two "Millie" spellings below are the exported symbol names
       (presumably kept for compatibility with existing R0 images — verify
       before renaming); they map onto the correctly spelled functions. */
    { "RTTimeMillieTS",                         (void *)RTTimeMilliTS },
    { "RTTimeSystemNanoTS",                     (void *)RTTimeSystemNanoTS },
    { "RTTimeSystemMillieTS",                   (void *)RTTimeSystemMilliTS },
    { "RTThreadNativeSelf",                     (void *)RTThreadNativeSelf },
    { "RTThreadSleep",                          (void *)UNWIND_WRAP(RTThreadSleep) },
    { "RTThreadYield",                          (void *)UNWIND_WRAP(RTThreadYield) },
#if 0 /* Thread APIs, Part 2. */
    { "RTThreadSelf",                           (void *)UNWIND_WRAP(RTThreadSelf) },
    { "RTThreadCreate",                         (void *)UNWIND_WRAP(RTThreadCreate) }, /** @todo need to wrap the callback */
    { "RTThreadGetNative",                      (void *)UNWIND_WRAP(RTThreadGetNative) },
    { "RTThreadWait",                           (void *)UNWIND_WRAP(RTThreadWait) },
    { "RTThreadWaitNoResume",                   (void *)UNWIND_WRAP(RTThreadWaitNoResume) },
    { "RTThreadGetName",                        (void *)UNWIND_WRAP(RTThreadGetName) },
    { "RTThreadSelfName",                       (void *)UNWIND_WRAP(RTThreadSelfName) },
    { "RTThreadGetType",                        (void *)UNWIND_WRAP(RTThreadGetType) },
    { "RTThreadUserSignal",                     (void *)UNWIND_WRAP(RTThreadUserSignal) },
    { "RTThreadUserReset",                      (void *)UNWIND_WRAP(RTThreadUserReset) },
    { "RTThreadUserWait",                       (void *)UNWIND_WRAP(RTThreadUserWait) },
    { "RTThreadUserWaitNoResume",               (void *)UNWIND_WRAP(RTThreadUserWaitNoResume) },
#endif
    { "RTLogDefaultInstance",                   (void *)RTLogDefaultInstance },
    { "RTMpCpuId",                              (void *)RTMpCpuId },
    { "RTMpCpuIdFromSetIndex",                  (void *)RTMpCpuIdFromSetIndex },
    { "RTMpCpuIdToSetIndex",                    (void *)RTMpCpuIdToSetIndex },
    { "RTMpIsCpuPossible",                      (void *)RTMpIsCpuPossible },
    { "RTMpGetCount",                           (void *)RTMpGetCount },
    { "RTMpGetMaxCpuId",                        (void *)RTMpGetMaxCpuId },
    { "RTMpGetOnlineCount",                     (void *)RTMpGetOnlineCount },
    { "RTMpGetOnlineSet",                       (void *)RTMpGetOnlineSet },
    { "RTMpGetSet",                             (void *)RTMpGetSet },
    { "RTMpIsCpuOnline",                        (void *)RTMpIsCpuOnline },
    { "RTMpIsCpuWorkPending",                   (void *)UNWIND_WRAP(RTMpIsCpuWorkPending) },
    { "RTMpOnAll",                              (void *)UNWIND_WRAP(RTMpOnAll) },
    { "RTMpOnOthers",                           (void *)UNWIND_WRAP(RTMpOnOthers) },
    { "RTMpOnSpecific",                         (void *)UNWIND_WRAP(RTMpOnSpecific) },
    { "RTMpPokeCpu",                            (void *)UNWIND_WRAP(RTMpPokeCpu) },
    { "RTPowerNotificationRegister",            (void *)RTPowerNotificationRegister },
    { "RTPowerNotificationDeregister",          (void *)RTPowerNotificationDeregister },
    { "RTLogRelDefaultInstance",                (void *)RTLogRelDefaultInstance },
    { "RTLogSetDefaultInstanceThread",          (void *)UNWIND_WRAP(RTLogSetDefaultInstanceThread) },
    { "RTLogLogger",                            (void *)RTLogLogger }, /** @todo remove this */
    { "RTLogLoggerEx",                          (void *)RTLogLoggerEx }, /** @todo remove this */
    { "RTLogLoggerExV",                         (void *)UNWIND_WRAP(RTLogLoggerExV) },
    { "RTLogPrintf",                            (void *)RTLogPrintf }, /** @todo remove this */
    { "RTLogPrintfV",                           (void *)UNWIND_WRAP(RTLogPrintfV) },
    { "AssertMsg1",                             (void *)UNWIND_WRAP(AssertMsg1) },
    { "AssertMsg2",                             (void *)AssertMsg2 }, /** @todo replace this by RTAssertMsg2V */
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
    { "RTR0AssertPanicSystem",                  (void *)RTR0AssertPanicSystem },
#endif
#if defined(RT_OS_DARWIN)
    { "RTAssertMsg1",                           (void *)RTAssertMsg1 },
    { "RTAssertMsg2",                           (void *)RTAssertMsg2 },
    { "RTAssertMsg2V",                          (void *)RTAssertMsg2V },
#endif
};
428
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on darwin.
 *
 * Referencing these symbols forces the linker to keep them in this
 * module so dependent kernel modules can resolve them; the table is
 * never called through — it is NULL-terminated reference ballast only.
 */
PFNRT g_apfnVBoxDrvIPRTDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTErrConvertFromErrno,
    (PFNRT)RTNetIPv4IsHdrValid,
    (PFNRT)RTNetIPv4TCPChecksum,
    (PFNRT)RTNetIPv4UDPChecksum,
    (PFNRT)RTUuidCompare,
    (PFNRT)RTUuidCompareStr,
    (PFNRT)RTUuidFromStr,
    (PFNRT)RTStrDup,
    (PFNRT)RTStrFree,
    NULL
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
449
450
451/**
452 * Initializes the device extentsion structure.
453 *
454 * @returns IPRT status code.
455 * @param pDevExt The device extension to initialize.
456 */
457int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
458{
459 int rc;
460
461#ifdef SUPDRV_WITH_RELEASE_LOGGER
462 /*
463 * Create the release log.
464 */
465 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
466 PRTLOGGER pRelLogger;
467 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
468 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
469 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
470 if (RT_SUCCESS(rc))
471 RTLogRelSetDefaultInstance(pRelLogger);
472#endif
473
474 /*
475 * Initialize it.
476 */
477 memset(pDevExt, 0, sizeof(*pDevExt));
478 rc = RTSpinlockCreate(&pDevExt->Spinlock);
479 if (!rc)
480 {
481 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
482 if (!rc)
483 {
484 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
485 if (!rc)
486 {
487 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
488 if (!rc)
489 {
490 rc = supdrvGipCreate(pDevExt);
491 if (RT_SUCCESS(rc))
492 {
493 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
494
495 /*
496 * Fixup the absolute symbols.
497 *
498 * Because of the table indexing assumptions we'll have a little #ifdef orgy
499 * here rather than distributing this to OS specific files. At least for now.
500 */
501#ifdef RT_OS_DARWIN
502# if ARCH_BITS == 32
503 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
504 {
505 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
506 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
507 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
508 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
509 }
510 else
511 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[4].pfn = (void *)0;
512 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
513 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
514 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
515 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
516 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
517 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
518# else /* 64-bit darwin: */
519 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
520 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
521 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
522 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
523 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
524 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
525 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
526 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
527 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
528 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
529
530# endif
531#else /* !RT_OS_DARWIN */
532# if ARCH_BITS == 64
533 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
534 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
535 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
536 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
537# else
538 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[4].pfn = (void *)0;
539# endif
540 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
541 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
542 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
543 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
544 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
545 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
546#endif /* !RT_OS_DARWIN */
547 return VINF_SUCCESS;
548 }
549
550 RTSemFastMutexDestroy(pDevExt->mtxGip);
551 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
552 }
553 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
554 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
555 }
556 RTSemFastMutexDestroy(pDevExt->mtxLdr);
557 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
558 }
559 RTSpinlockDestroy(pDevExt->Spinlock);
560 pDevExt->Spinlock = NIL_RTSPINLOCK;
561 }
562#ifdef SUPDRV_WITH_RELEASE_LOGGER
563 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
564 RTLogDestroy(RTLogSetDefaultInstance(NULL));
565#endif
566
567 return rc;
568}
569
570
571/**
572 * Delete the device extension (e.g. cleanup members).
573 *
574 * @param pDevExt The device extension to delete.
575 */
576void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
577{
578 PSUPDRVOBJ pObj;
579 PSUPDRVUSAGE pUsage;
580
581 /*
582 * Kill mutexes and spinlocks.
583 */
584 RTSemFastMutexDestroy(pDevExt->mtxGip);
585 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
586 RTSemFastMutexDestroy(pDevExt->mtxLdr);
587 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
588 RTSpinlockDestroy(pDevExt->Spinlock);
589 pDevExt->Spinlock = NIL_RTSPINLOCK;
590 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
591 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
592
593 /*
594 * Free lists.
595 */
596 /* objects. */
597 pObj = pDevExt->pObjs;
598#if !defined(DEBUG_bird) || !defined(RT_OS_LINUX) /* breaks unloading, temporary, remove me! */
599 Assert(!pObj); /* (can trigger on forced unloads) */
600#endif
601 pDevExt->pObjs = NULL;
602 while (pObj)
603 {
604 void *pvFree = pObj;
605 pObj = pObj->pNext;
606 RTMemFree(pvFree);
607 }
608
609 /* usage records. */
610 pUsage = pDevExt->pUsageFree;
611 pDevExt->pUsageFree = NULL;
612 while (pUsage)
613 {
614 void *pvFree = pUsage;
615 pUsage = pUsage->pNext;
616 RTMemFree(pvFree);
617 }
618
619 /* kill the GIP. */
620 supdrvGipDestroy(pDevExt);
621
622#ifdef SUPDRV_WITH_RELEASE_LOGGER
623 /* destroy the loggers. */
624 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
625 RTLogDestroy(RTLogSetDefaultInstance(NULL));
626#endif
627}
628
629
630/**
631 * Create session.
632 *
633 * @returns IPRT status code.
634 * @param pDevExt Device extension.
635 * @param fUser Flag indicating whether this is a user or kernel session.
636 * @param ppSession Where to store the pointer to the session data.
637 */
638int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, PSUPDRVSESSION *ppSession)
639{
640 /*
641 * Allocate memory for the session data.
642 */
643 int rc = VERR_NO_MEMORY;
644 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
645 if (pSession)
646 {
647 /* Initialize session data. */
648 rc = RTSpinlockCreate(&pSession->Spinlock);
649 if (!rc)
650 {
651 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
652 pSession->pDevExt = pDevExt;
653 pSession->u32Cookie = BIRD_INV;
654 /*pSession->pLdrUsage = NULL;
655 pSession->pVM = NULL;
656 pSession->pUsage = NULL;
657 pSession->pGip = NULL;
658 pSession->fGipReferenced = false;
659 pSession->Bundle.cUsed = 0; */
660 pSession->Uid = NIL_RTUID;
661 pSession->Gid = NIL_RTGID;
662 if (fUser)
663 {
664 pSession->Process = RTProcSelf();
665 pSession->R0Process = RTR0ProcHandleSelf();
666 }
667 else
668 {
669 pSession->Process = NIL_RTPROCESS;
670 pSession->R0Process = NIL_RTR0PROCESS;
671 }
672
673 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
674 return VINF_SUCCESS;
675 }
676
677 RTMemFree(pSession);
678 *ppSession = NULL;
679 Log(("Failed to create spinlock, rc=%d!\n", rc));
680 }
681
682 return rc;
683}
684
685
686/**
687 * Shared code for cleaning up a session.
688 *
689 * @param pDevExt Device extension.
690 * @param pSession Session data.
691 * This data will be freed by this routine.
692 */
693void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
694{
695 /*
696 * Cleanup the session first.
697 */
698 supdrvCleanupSession(pDevExt, pSession);
699
700 /*
701 * Free the rest of the session stuff.
702 */
703 RTSpinlockDestroy(pSession->Spinlock);
704 pSession->Spinlock = NIL_RTSPINLOCK;
705 pSession->pDevExt = NULL;
706 RTMemFree(pSession);
707 LogFlow(("supdrvCloseSession: returns\n"));
708}
709
710
711/**
712 * Shared code for cleaning up a session (but not quite freeing it).
713 *
714 * This is primarily intended for MAC OS X where we have to clean up the memory
715 * stuff before the file handle is closed.
716 *
717 * @param pDevExt Device extension.
718 * @param pSession Session data.
719 * This data will be freed by this routine.
720 */
721void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
722{
723 PSUPDRVBUNDLE pBundle;
724 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
725
726 /*
727 * Remove logger instances related to this session.
728 */
729 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
730
731 /*
732 * Release object references made in this session.
733 * In theory there should be noone racing us in this session.
734 */
735 Log2(("release objects - start\n"));
736 if (pSession->pUsage)
737 {
738 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
739 PSUPDRVUSAGE pUsage;
740 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
741
742 while ((pUsage = pSession->pUsage) != NULL)
743 {
744 PSUPDRVOBJ pObj = pUsage->pObj;
745 pSession->pUsage = pUsage->pNext;
746
747 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
748 if (pUsage->cUsage < pObj->cUsage)
749 {
750 pObj->cUsage -= pUsage->cUsage;
751 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
752 }
753 else
754 {
755 /* Destroy the object and free the record. */
756 if (pDevExt->pObjs == pObj)
757 pDevExt->pObjs = pObj->pNext;
758 else
759 {
760 PSUPDRVOBJ pObjPrev;
761 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
762 if (pObjPrev->pNext == pObj)
763 {
764 pObjPrev->pNext = pObj->pNext;
765 break;
766 }
767 Assert(pObjPrev);
768 }
769 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
770
771 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
772 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
773 if (pObj->pfnDestructor)
774#ifdef RT_WITH_W64_UNWIND_HACK
775 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
776#else
777 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
778#endif
779 RTMemFree(pObj);
780 }
781
782 /* free it and continue. */
783 RTMemFree(pUsage);
784
785 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
786 }
787
788 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
789 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during desturction!\n"));
790 }
791 Log2(("release objects - done\n"));
792
793 /*
794 * Release memory allocated in the session.
795 *
796 * We do not serialize this as we assume that the application will
797 * not allocated memory while closing the file handle object.
798 */
799 Log2(("freeing memory:\n"));
800 pBundle = &pSession->Bundle;
801 while (pBundle)
802 {
803 PSUPDRVBUNDLE pToFree;
804 unsigned i;
805
806 /*
807 * Check and unlock all entries in the bundle.
808 */
809 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
810 {
811 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
812 {
813 int rc;
814 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
815 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
816 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
817 {
818 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
819 AssertRC(rc); /** @todo figure out how to handle this. */
820 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
821 }
822 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
823 AssertRC(rc); /** @todo figure out how to handle this. */
824 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
825 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
826 }
827 }
828
829 /*
830 * Advance and free previous bundle.
831 */
832 pToFree = pBundle;
833 pBundle = pBundle->pNext;
834
835 pToFree->pNext = NULL;
836 pToFree->cUsed = 0;
837 if (pToFree != &pSession->Bundle)
838 RTMemFree(pToFree);
839 }
840 Log2(("freeing memory - done\n"));
841
842 /*
843 * Deregister component factories.
844 */
845 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
846 Log2(("deregistering component factories:\n"));
847 if (pDevExt->pComponentFactoryHead)
848 {
849 PSUPDRVFACTORYREG pPrev = NULL;
850 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
851 while (pCur)
852 {
853 if (pCur->pSession == pSession)
854 {
855 /* unlink it */
856 PSUPDRVFACTORYREG pNext = pCur->pNext;
857 if (pPrev)
858 pPrev->pNext = pNext;
859 else
860 pDevExt->pComponentFactoryHead = pNext;
861
862 /* free it */
863 pCur->pNext = NULL;
864 pCur->pSession = NULL;
865 pCur->pFactory = NULL;
866 RTMemFree(pCur);
867
868 /* next */
869 pCur = pNext;
870 }
871 else
872 {
873 /* next */
874 pPrev = pCur;
875 pCur = pCur->pNext;
876 }
877 }
878 }
879 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
880 Log2(("deregistering component factories - done\n"));
881
882 /*
883 * Loaded images needs to be dereferenced and possibly freed up.
884 */
885 RTSemFastMutexRequest(pDevExt->mtxLdr);
886 Log2(("freeing images:\n"));
887 if (pSession->pLdrUsage)
888 {
889 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
890 pSession->pLdrUsage = NULL;
891 while (pUsage)
892 {
893 void *pvFree = pUsage;
894 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
895 if (pImage->cUsage > pUsage->cUsage)
896 pImage->cUsage -= pUsage->cUsage;
897 else
898 supdrvLdrFree(pDevExt, pImage);
899 pUsage->pImage = NULL;
900 pUsage = pUsage->pNext;
901 RTMemFree(pvFree);
902 }
903 }
904 RTSemFastMutexRelease(pDevExt->mtxLdr);
905 Log2(("freeing images - done\n"));
906
907 /*
908 * Unmap the GIP.
909 */
910 Log2(("umapping GIP:\n"));
911 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
912 {
913 SUPR0GipUnmap(pSession);
914 pSession->fGipReferenced = 0;
915 }
916 Log2(("umapping GIP - done\n"));
917}
918
919
920/**
921 * Fast path I/O Control worker.
922 *
923 * @returns VBox status code that should be passed down to ring-3 unchanged.
924 * @param uIOCtl Function number.
925 * @param idCpu VMCPU id.
926 * @param pDevExt Device extention.
927 * @param pSession Session data.
928 */
929int VBOXCALL supdrvIOCtlFast(uintptr_t uIOCtl, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
930{
931 /*
932 * We check the two prereqs after doing this only to allow the compiler to optimize things better.
933 */
934 if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
935 {
936 switch (uIOCtl)
937 {
938 case SUP_IOCTL_FAST_DO_RAW_RUN:
939#ifdef RT_WITH_W64_UNWIND_HACK
940 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
941#else
942 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
943#endif
944 break;
945 case SUP_IOCTL_FAST_DO_HWACC_RUN:
946#ifdef RT_WITH_W64_UNWIND_HACK
947 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
948#else
949 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
950#endif
951 break;
952 case SUP_IOCTL_FAST_DO_NOP:
953#ifdef RT_WITH_W64_UNWIND_HACK
954 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
955#else
956 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
957#endif
958 break;
959 default:
960 return VERR_INTERNAL_ERROR;
961 }
962 return VINF_SUCCESS;
963 }
964 return VERR_INTERNAL_ERROR;
965}
966
967
968/**
969 * Helper for supdrvIOCtl. Check if pszStr contains any character of pszChars.
970 * We would use strpbrk here if this function would be contained in the RedHat kABI white
971 * list, see http://www.kerneldrivers.org/RHEL5.
972 *
973 * @return 1 if pszStr does contain any character of pszChars, 0 otherwise.
974 * @param pszStr String to check
975 * @param pszChars Character set
976 */
977static int supdrvCheckInvalidChar(const char *pszStr, const char *pszChars)
978{
979 int chCur;
980 while ((chCur = *pszStr++) != '\0')
981 {
982 int ch;
983 const char *psz = pszChars;
984 while ((ch = *psz++) != '\0')
985 if (ch == chCur)
986 return 1;
987
988 }
989 return 0;
990}
991
992
993/**
994 * I/O Control worker.
995 *
996 * @returns 0 on success.
997 * @returns VERR_INVALID_PARAMETER if the request is invalid.
998 *
999 * @param uIOCtl Function number.
1000 * @param pDevExt Device extention.
1001 * @param pSession Session data.
1002 * @param pReqHdr The request header.
1003 */
1004int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1005{
1006 /*
1007 * Validate the request.
1008 */
1009 /* this first check could probably be omitted as its also done by the OS specific code... */
1010 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
1011 || pReqHdr->cbIn < sizeof(*pReqHdr)
1012 || pReqHdr->cbOut < sizeof(*pReqHdr)))
1013 {
1014 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
1015 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
1016 return VERR_INVALID_PARAMETER;
1017 }
1018 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
1019 {
1020 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
1021 {
1022 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
1023 return VERR_INVALID_PARAMETER;
1024 }
1025 }
1026 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
1027 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
1028 {
1029 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
1030 return VERR_INVALID_PARAMETER;
1031 }
1032
1033/*
1034 * Validation macros
1035 */
1036#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1037 do { \
1038 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1039 { \
1040 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1041 (long)pReq->Hdr.cbIn, (long)(cbInExpect), (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1042 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1043 } \
1044 } while (0)
1045
1046#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1047
1048#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1049 do { \
1050 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1051 { \
1052 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1053 (long)pReq->Hdr.cbIn, (long)(cbInExpect))); \
1054 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1055 } \
1056 } while (0)
1057
1058#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1059 do { \
1060 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1061 { \
1062 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1063 (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1064 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1065 } \
1066 } while (0)
1067
1068#define REQ_CHECK_EXPR(Name, expr) \
1069 do { \
1070 if (RT_UNLIKELY(!(expr))) \
1071 { \
1072 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1073 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1074 } \
1075 } while (0)
1076
1077#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1078 do { \
1079 if (RT_UNLIKELY(!(expr))) \
1080 { \
1081 OSDBGPRINT( fmt ); \
1082 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1083 } \
1084 } while (0)
1085
1086
1087 /*
1088 * The switch.
1089 */
1090 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1091 {
1092 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1093 {
1094 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1095 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1096 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1097 {
1098 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1099 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1100 return 0;
1101 }
1102
1103#if 0
1104 /*
1105 * Call out to the OS specific code and let it do permission checks on the
1106 * client process.
1107 */
1108 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1109 {
1110 pReq->u.Out.u32Cookie = 0xffffffff;
1111 pReq->u.Out.u32SessionCookie = 0xffffffff;
1112 pReq->u.Out.u32SessionVersion = 0xffffffff;
1113 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1114 pReq->u.Out.pSession = NULL;
1115 pReq->u.Out.cFunctions = 0;
1116 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1117 return 0;
1118 }
1119#endif
1120
1121 /*
1122 * Match the version.
1123 * The current logic is very simple, match the major interface version.
1124 */
1125 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1126 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1127 {
1128 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1129 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1130 pReq->u.Out.u32Cookie = 0xffffffff;
1131 pReq->u.Out.u32SessionCookie = 0xffffffff;
1132 pReq->u.Out.u32SessionVersion = 0xffffffff;
1133 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1134 pReq->u.Out.pSession = NULL;
1135 pReq->u.Out.cFunctions = 0;
1136 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1137 return 0;
1138 }
1139
1140 /*
1141 * Fill in return data and be gone.
1142 * N.B. The first one to change SUPDRV_IOC_VERSION shall makes sure that
1143 * u32SessionVersion <= u32ReqVersion!
1144 */
1145 /** @todo Somehow validate the client and negotiate a secure cookie... */
1146 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1147 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1148 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1149 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1150 pReq->u.Out.pSession = pSession;
1151 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1152 pReq->Hdr.rc = VINF_SUCCESS;
1153 return 0;
1154 }
1155
1156 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1157 {
1158 /* validate */
1159 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1160 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1161
1162 /* execute */
1163 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1164 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1165 pReq->Hdr.rc = VINF_SUCCESS;
1166 return 0;
1167 }
1168
1169 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_INSTALL):
1170 {
1171 /* validate */
1172 PSUPIDTINSTALL pReq = (PSUPIDTINSTALL)pReqHdr;
1173 REQ_CHECK_SIZES(SUP_IOCTL_IDT_INSTALL);
1174
1175 /* execute */
1176 pReq->u.Out.u8Idt = 3;
1177 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1178 return 0;
1179 }
1180
1181 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_REMOVE):
1182 {
1183 /* validate */
1184 PSUPIDTREMOVE pReq = (PSUPIDTREMOVE)pReqHdr;
1185 REQ_CHECK_SIZES(SUP_IOCTL_IDT_REMOVE);
1186
1187 /* execute */
1188 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1189 return 0;
1190 }
1191
1192 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1193 {
1194 /* validate */
1195 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1196 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1197 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1198 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1199 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1200
1201 /* execute */
1202 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1203 if (RT_FAILURE(pReq->Hdr.rc))
1204 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1205 return 0;
1206 }
1207
1208 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1209 {
1210 /* validate */
1211 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1212 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1213
1214 /* execute */
1215 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1216 return 0;
1217 }
1218
1219 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1220 {
1221 /* validate */
1222 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1223 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1224
1225 /* execute */
1226 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1227 if (RT_FAILURE(pReq->Hdr.rc))
1228 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1229 return 0;
1230 }
1231
1232 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1233 {
1234 /* validate */
1235 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1236 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1237
1238 /* execute */
1239 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1240 return 0;
1241 }
1242
1243 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1244 {
1245 /* validate */
1246 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1247 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1248 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage > 0);
1249 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage < _1M*16);
1250 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1251 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1252 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, !supdrvCheckInvalidChar(pReq->u.In.szName, ";:()[]{}/\\|&*%#@!~`\"'"));
1253
1254 /* execute */
1255 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1256 return 0;
1257 }
1258
1259 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1260 {
1261 /* validate */
1262 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1263 REQ_CHECK_EXPR(Name, pReq->Hdr.cbIn >= sizeof(*pReq));
1264 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImage), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1265 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
1266 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1267 || ( pReq->u.In.offSymbols < pReq->u.In.cbImage
1268 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImage),
1269 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImage=%#lx\n", (long)pReq->u.In.offSymbols,
1270 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImage));
1271 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1272 || ( pReq->u.In.offStrTab < pReq->u.In.cbImage
1273 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImage
1274 && pReq->u.In.cbStrTab <= pReq->u.In.cbImage),
1275 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImage=%#lx\n", (long)pReq->u.In.offStrTab,
1276 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImage));
1277
1278 if (pReq->u.In.cSymbols)
1279 {
1280 uint32_t i;
1281 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.achImage[pReq->u.In.offSymbols];
1282 for (i = 0; i < pReq->u.In.cSymbols; i++)
1283 {
1284 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImage,
1285 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImage));
1286 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1287 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
1288 REQ_CHECK_EXPR_FMT(memchr(&pReq->u.In.achImage[pReq->u.In.offStrTab + paSyms[i].offName], '\0', pReq->u.In.cbStrTab - paSyms[i].offName),
1289 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
1290 }
1291 }
1292
1293 /* execute */
1294 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1295 return 0;
1296 }
1297
1298 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1299 {
1300 /* validate */
1301 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1302 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1303
1304 /* execute */
1305 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1306 return 0;
1307 }
1308
1309 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1310 {
1311 /* validate */
1312 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1313 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1314 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, memchr(pReq->u.In.szSymbol, '\0', sizeof(pReq->u.In.szSymbol)));
1315
1316 /* execute */
1317 pReq->Hdr.rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
1318 return 0;
1319 }
1320
1321 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0(0)):
1322 {
1323 /* validate */
1324 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1325 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1326 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1327
1328 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1329 {
1330 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1331
1332 /* execute */
1333 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1334#ifdef RT_WITH_W64_UNWIND_HACK
1335 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1336#else
1337 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1338#endif
1339 else
1340 pReq->Hdr.rc = VERR_WRONG_ORDER;
1341 }
1342 else
1343 {
1344 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1345 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1346 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1347 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1348 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1349
1350 /* execute */
1351 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1352#ifdef RT_WITH_W64_UNWIND_HACK
1353 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1354#else
1355 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1356#endif
1357 else
1358 pReq->Hdr.rc = VERR_WRONG_ORDER;
1359 }
1360
1361 if ( RT_FAILURE(pReq->Hdr.rc)
1362 && pReq->Hdr.rc != VERR_INTERRUPTED
1363 && pReq->Hdr.rc != VERR_TIMEOUT)
1364 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1365 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1366 else
1367 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1368 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1369 return 0;
1370 }
1371
1372 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1373 {
1374 /* validate */
1375 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1376 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1377
1378 /* execute */
1379 pReq->Hdr.rc = VINF_SUCCESS;
1380 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1381 return 0;
1382 }
1383
1384 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1385 {
1386 /* validate */
1387 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1388 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1389 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1390
1391 /* execute */
1392 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1393 if (RT_FAILURE(pReq->Hdr.rc))
1394 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1395 return 0;
1396 }
1397
1398 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1399 {
1400 /* validate */
1401 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1402 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1403
1404 /* execute */
1405 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1406 return 0;
1407 }
1408
1409 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1410 {
1411 /* validate */
1412 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1413 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1414
1415 /* execute */
1416 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1417 if (RT_SUCCESS(pReq->Hdr.rc))
1418 pReq->u.Out.pGipR0 = pDevExt->pGip;
1419 return 0;
1420 }
1421
1422 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
1423 {
1424 /* validate */
1425 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
1426 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
1427
1428 /* execute */
1429 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
1430 return 0;
1431 }
1432
1433 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
1434 {
1435 /* validate */
1436 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
1437 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
1438 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
1439 || ( VALID_PTR(pReq->u.In.pVMR0)
1440 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
1441 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
1442 /* execute */
1443 pSession->pVM = pReq->u.In.pVMR0;
1444 pReq->Hdr.rc = VINF_SUCCESS;
1445 return 0;
1446 }
1447
1448 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC):
1449 {
1450 /* validate */
1451 PSUPPAGEALLOC pReq = (PSUPPAGEALLOC)pReqHdr;
1452 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_SIZE_IN);
1453 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC, SUP_IOCTL_PAGE_ALLOC_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1454
1455 /* execute */
1456 pReq->Hdr.rc = SUPR0PageAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1457 if (RT_FAILURE(pReq->Hdr.rc))
1458 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1459 return 0;
1460 }
1461
1462 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
1463 {
1464 /* validate */
1465 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
1466 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
1467 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
1468 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
1469 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
1470 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
1471 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
1472 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
1473 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
1474
1475 /* execute */
1476 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
1477 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
1478 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
1479 &pReq->u.Out.aPages[0]);
1480 if (RT_FAILURE(pReq->Hdr.rc))
1481 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1482 return 0;
1483 }
1484
1485 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
1486 {
1487 /* validate */
1488 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
1489 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
1490 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
1491 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
1492 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1493 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
1494
1495 /* execute */
1496 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
1497 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
1498 if (RT_FAILURE(pReq->Hdr.rc))
1499 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1500 return 0;
1501 }
1502
1503 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
1504 {
1505 /* validate */
1506 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
1507 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
1508
1509 /* execute */
1510 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
1511 return 0;
1512 }
1513
1514 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE(0)):
1515 {
1516 /* validate */
1517 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
1518 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1519 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1520
1521 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
1522 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
1523 else
1524 {
1525 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
1526 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
1527 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
1528 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
1529 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
1530 }
1531 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1532
1533 /* execute */
1534 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
1535 return 0;
1536 }
1537
1538 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS(0)):
1539 {
1540 /* validate */
1541 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
1542 size_t cbStrTab;
1543 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
1544 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
1545 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
1546 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
1547 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
1548 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
1549 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
1550 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
1551 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
1552 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
1553 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
1554
1555 /* execute */
1556 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pDevExt, pSession, pReq);
1557 return 0;
1558 }
1559
1560 default:
1561 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
1562 break;
1563 }
1564 return SUPDRV_ERR_GENERAL_FAILURE;
1565}
1566
1567
1568/**
1569 * Inter-Driver Communcation (IDC) worker.
1570 *
1571 * @returns VBox status code.
1572 * @retval VINF_SUCCESS on success.
1573 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1574 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
1575 *
1576 * @param uReq The request (function) code.
1577 * @param pDevExt Device extention.
1578 * @param pSession Session data.
1579 * @param pReqHdr The request header.
1580 */
1581int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
1582{
1583 /*
1584 * The OS specific code has already validated the pSession
1585 * pointer, and the request size being greater or equal to
1586 * size of the header.
1587 *
1588 * So, just check that pSession is a kernel context session.
1589 */
1590 if (RT_UNLIKELY( pSession
1591 && pSession->R0Process != NIL_RTR0PROCESS))
1592 return VERR_INVALID_PARAMETER;
1593
1594/*
1595 * Validation macro.
1596 */
1597#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
1598 do { \
1599 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
1600 { \
1601 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
1602 (long)pReqHdr->cb, (long)(cbExpect))); \
1603 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1604 } \
1605 } while (0)
1606
1607 switch (uReq)
1608 {
1609 case SUPDRV_IDC_REQ_CONNECT:
1610 {
1611 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
1612 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
1613
1614 /*
1615 * Validate the cookie and other input.
1616 */
1617 if (pReq->Hdr.pSession != NULL)
1618 {
1619 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pReq->Hdr.pSession));
1620 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1621 }
1622 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
1623 {
1624 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1625 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
1626 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1627 }
1628 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
1629 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
1630 {
1631 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1632 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1633 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1634 }
1635
1636 /*
1637 * Match the version.
1638 * The current logic is very simple, match the major interface version.
1639 */
1640 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
1641 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
1642 {
1643 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1644 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
1645 pReq->u.Out.pSession = NULL;
1646 pReq->u.Out.uSessionVersion = 0xffffffff;
1647 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1648 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1649 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1650 return VINF_SUCCESS;
1651 }
1652
1653 pReq->u.Out.pSession = NULL;
1654 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
1655 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1656 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1657
1658 /*
1659 * On NT we will already have a session associated with the
1660 * client, just like with the SUP_IOCTL_COOKIE request, while
1661 * the other doesn't.
1662 */
1663#ifdef RT_OS_WINDOWS
1664 pReq->Hdr.rc = VINF_SUCCESS;
1665#else
1666 AssertReturn(!pSession, VERR_INTERNAL_ERROR);
1667 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, &pSession);
1668 if (RT_FAILURE(pReq->Hdr.rc))
1669 {
1670 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
1671 return VINF_SUCCESS;
1672 }
1673#endif
1674
1675 pReq->u.Out.pSession = pSession;
1676 pReq->Hdr.pSession = pSession;
1677
1678 return VINF_SUCCESS;
1679 }
1680
1681 case SUPDRV_IDC_REQ_DISCONNECT:
1682 {
1683 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
1684
1685#ifdef RT_OS_WINDOWS
1686 /* Windows will destroy the session when the file object is destroyed. */
1687#else
1688 supdrvCloseSession(pDevExt, pSession);
1689#endif
1690 return pReqHdr->rc = VINF_SUCCESS;
1691 }
1692
1693 case SUPDRV_IDC_REQ_GET_SYMBOL:
1694 {
1695 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
1696 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
1697
1698 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
1699 return VINF_SUCCESS;
1700 }
1701
1702 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
1703 {
1704 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
1705 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
1706
1707 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
1708 return VINF_SUCCESS;
1709 }
1710
1711 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
1712 {
1713 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
1714 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
1715
1716 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
1717 return VINF_SUCCESS;
1718 }
1719
1720 default:
1721 Log(("Unknown IDC %#lx\n", (long)uReq));
1722 break;
1723 }
1724
1725#undef REQ_CHECK_IDC_SIZE
1726 return VERR_NOT_SUPPORTED;
1727}
1728
1729
1730/**
1731 * Register a object for reference counting.
1732 * The object is registered with one reference in the specified session.
1733 *
1734 * @returns Unique identifier on success (pointer).
1735 * All future reference must use this identifier.
1736 * @returns NULL on failure.
1737 * @param pfnDestructor The destructore function which will be called when the reference count reaches 0.
1738 * @param pvUser1 The first user argument.
1739 * @param pvUser2 The second user argument.
1740 */
1741SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1742{
1743 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1744 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1745 PSUPDRVOBJ pObj;
1746 PSUPDRVUSAGE pUsage;
1747
1748 /*
1749 * Validate the input.
1750 */
1751 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
1752 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
1753 AssertPtrReturn(pfnDestructor, NULL);
1754
1755 /*
1756 * Allocate and initialize the object.
1757 */
1758 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1759 if (!pObj)
1760 return NULL;
1761 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1762 pObj->enmType = enmType;
1763 pObj->pNext = NULL;
1764 pObj->cUsage = 1;
1765 pObj->pfnDestructor = pfnDestructor;
1766 pObj->pvUser1 = pvUser1;
1767 pObj->pvUser2 = pvUser2;
1768 pObj->CreatorUid = pSession->Uid;
1769 pObj->CreatorGid = pSession->Gid;
1770 pObj->CreatorProcess= pSession->Process;
1771 supdrvOSObjInitCreator(pObj, pSession);
1772
1773 /*
1774 * Allocate the usage record.
1775 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
1776 */
1777 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1778
1779 pUsage = pDevExt->pUsageFree;
1780 if (pUsage)
1781 pDevExt->pUsageFree = pUsage->pNext;
1782 else
1783 {
1784 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1785 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1786 if (!pUsage)
1787 {
1788 RTMemFree(pObj);
1789 return NULL;
1790 }
1791 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1792 }
1793
1794 /*
1795 * Insert the object and create the session usage record.
1796 */
1797 /* The object. */
1798 pObj->pNext = pDevExt->pObjs;
1799 pDevExt->pObjs = pObj;
1800
1801 /* The session record. */
1802 pUsage->cUsage = 1;
1803 pUsage->pObj = pObj;
1804 pUsage->pNext = pSession->pUsage;
1805 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
1806 pSession->pUsage = pUsage;
1807
1808 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1809
1810 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
1811 return pObj;
1812}
1813
1814
1815/**
1816 * Increment the reference counter for the object associating the reference
1817 * with the specified session.
1818 *
1819 * @returns IPRT status code.
1820 * @param pvObj The identifier returned by SUPR0ObjRegister().
1821 * @param pSession The session which is referencing the object.
1822 *
1823 * @remarks The caller should not own any spinlocks and must carefully protect
1824 * itself against potential race with the destructor so freed memory
1825 * isn't accessed here.
1826 */
1827SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1828{
1829 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
1830}
1831
1832
1833/**
1834 * Increment the reference counter for the object associating the reference
1835 * with the specified session.
1836 *
1837 * @returns IPRT status code.
1838 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
1839 * couldn't be allocated. (If you see this you're not doing the right
1840 * thing and it won't ever work reliably.)
1841 *
1842 * @param pvObj The identifier returned by SUPR0ObjRegister().
1843 * @param pSession The session which is referencing the object.
1844 * @param fNoBlocking Set if it's not OK to block. Never try to make the
1845 * first reference to an object in a session with this
1846 * argument set.
1847 *
1848 * @remarks The caller should not own any spinlocks and must carefully protect
1849 * itself against potential race with the destructor so freed memory
1850 * isn't accessed here.
1851 */
1852SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
1853{
1854 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1855 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1856 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1857 int rc = VINF_SUCCESS;
1858 PSUPDRVUSAGE pUsagePre;
1859 PSUPDRVUSAGE pUsage;
1860
1861 /*
1862 * Validate the input.
1863 * Be ready for the destruction race (someone might be stuck in the
1864 * destructor waiting a lock we own).
1865 */
1866 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1867 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
1868 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
1869 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
1870 VERR_INVALID_PARAMETER);
1871
1872 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1873
1874 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1875 {
1876 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1877
1878 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1879 return VERR_WRONG_ORDER;
1880 }
1881
1882 /*
1883 * Preallocate the usage record if we can.
1884 */
1885 pUsagePre = pDevExt->pUsageFree;
1886 if (pUsagePre)
1887 pDevExt->pUsageFree = pUsagePre->pNext;
1888 else if (!fNoBlocking)
1889 {
1890 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1891 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
1892 if (!pUsagePre)
1893 return VERR_NO_MEMORY;
1894
1895 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1896 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1897 {
1898 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1899
1900 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1901 return VERR_WRONG_ORDER;
1902 }
1903 }
1904
1905 /*
1906 * Reference the object.
1907 */
1908 pObj->cUsage++;
1909
1910 /*
1911 * Look for the session record.
1912 */
1913 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
1914 {
1915 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1916 if (pUsage->pObj == pObj)
1917 break;
1918 }
1919 if (pUsage)
1920 pUsage->cUsage++;
1921 else if (pUsagePre)
1922 {
1923 /* create a new session record. */
1924 pUsagePre->cUsage = 1;
1925 pUsagePre->pObj = pObj;
1926 pUsagePre->pNext = pSession->pUsage;
1927 pSession->pUsage = pUsagePre;
1928 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
1929
1930 pUsagePre = NULL;
1931 }
1932 else
1933 {
1934 pObj->cUsage--;
1935 rc = VERR_TRY_AGAIN;
1936 }
1937
1938 /*
1939 * Put any unused usage record into the free list..
1940 */
1941 if (pUsagePre)
1942 {
1943 pUsagePre->pNext = pDevExt->pUsageFree;
1944 pDevExt->pUsageFree = pUsagePre;
1945 }
1946
1947 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1948
1949 return rc;
1950}
1951
1952
1953/**
1954 * Decrement / destroy a reference counter record for an object.
1955 *
1956 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
1957 *
1958 * @returns IPRT status code.
1959 * @retval VINF_SUCCESS if not destroyed.
1960 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
1961 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
1962 * string builds.
1963 *
1964 * @param pvObj The identifier returned by SUPR0ObjRegister().
1965 * @param pSession The session which is referencing the object.
1966 */
1967SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
1968{
1969 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1970 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1971 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1972 int rc = VERR_INVALID_PARAMETER;
1973 PSUPDRVUSAGE pUsage;
1974 PSUPDRVUSAGE pUsagePrev;
1975
1976 /*
1977 * Validate the input.
1978 */
1979 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1980 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
1981 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
1982 VERR_INVALID_PARAMETER);
1983
1984 /*
1985 * Acquire the spinlock and look for the usage record.
1986 */
1987 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1988
1989 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
1990 pUsage;
1991 pUsagePrev = pUsage, pUsage = pUsage->pNext)
1992 {
1993 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1994 if (pUsage->pObj == pObj)
1995 {
1996 rc = VINF_SUCCESS;
1997 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
1998 if (pUsage->cUsage > 1)
1999 {
2000 pObj->cUsage--;
2001 pUsage->cUsage--;
2002 }
2003 else
2004 {
2005 /*
2006 * Free the session record.
2007 */
2008 if (pUsagePrev)
2009 pUsagePrev->pNext = pUsage->pNext;
2010 else
2011 pSession->pUsage = pUsage->pNext;
2012 pUsage->pNext = pDevExt->pUsageFree;
2013 pDevExt->pUsageFree = pUsage;
2014
2015 /* What about the object? */
2016 if (pObj->cUsage > 1)
2017 pObj->cUsage--;
2018 else
2019 {
2020 /*
2021 * Object is to be destroyed, unlink it.
2022 */
2023 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
2024 rc = VINF_OBJECT_DESTROYED;
2025 if (pDevExt->pObjs == pObj)
2026 pDevExt->pObjs = pObj->pNext;
2027 else
2028 {
2029 PSUPDRVOBJ pObjPrev;
2030 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
2031 if (pObjPrev->pNext == pObj)
2032 {
2033 pObjPrev->pNext = pObj->pNext;
2034 break;
2035 }
2036 Assert(pObjPrev);
2037 }
2038 }
2039 }
2040 break;
2041 }
2042 }
2043
2044 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2045
2046 /*
2047 * Call the destructor and free the object if required.
2048 */
2049 if (rc == VINF_OBJECT_DESTROYED)
2050 {
2051 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
2052 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
2053 if (pObj->pfnDestructor)
2054#ifdef RT_WITH_W64_UNWIND_HACK
2055 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
2056#else
2057 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
2058#endif
2059 RTMemFree(pObj);
2060 }
2061
2062 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
2063 return rc;
2064}
2065
2066
2067/**
2068 * Verifies that the current process can access the specified object.
2069 *
2070 * @returns The following IPRT status code:
2071 * @retval VINF_SUCCESS if access was granted.
2072 * @retval VERR_PERMISSION_DENIED if denied access.
2073 * @retval VERR_INVALID_PARAMETER if invalid parameter.
2074 *
2075 * @param pvObj The identifier returned by SUPR0ObjRegister().
2076 * @param pSession The session which wishes to access the object.
2077 * @param pszObjName Object string name. This is optional and depends on the object type.
2078 *
2079 * @remark The caller is responsible for making sure the object isn't removed while
2080 * we're inside this function. If uncertain about this, just call AddRef before calling us.
2081 */
2082SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
2083{
2084 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2085 int rc;
2086
2087 /*
2088 * Validate the input.
2089 */
2090 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2091 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2092 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2093 VERR_INVALID_PARAMETER);
2094
2095 /*
2096 * Check access. (returns true if a decision has been made.)
2097 */
2098 rc = VERR_INTERNAL_ERROR;
2099 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
2100 return rc;
2101
2102 /*
2103 * Default policy is to allow the user to access his own
2104 * stuff but nothing else.
2105 */
2106 if (pObj->CreatorUid == pSession->Uid)
2107 return VINF_SUCCESS;
2108 return VERR_PERMISSION_DENIED;
2109}
2110
2111
2112/**
2113 * Lock pages.
2114 *
2115 * @returns IPRT status code.
2116 * @param pSession Session to which the locked memory should be associated.
2117 * @param pvR3 Start of the memory range to lock.
2118 * This must be page aligned.
2119 * @param cPages Number of pages to lock.
2120 * @param paPages Where to put the physical addresses of locked memory.
2121 */
2122SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
2123{
2124 int rc;
2125 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2126 const size_t cb = (size_t)cPages << PAGE_SHIFT;
2127 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
2128
2129 /*
2130 * Verify input.
2131 */
2132 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2133 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2134 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
2135 || !pvR3)
2136 {
2137 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
2138 return VERR_INVALID_PARAMETER;
2139 }
2140
2141#ifdef RT_OS_WINDOWS /* A temporary hack for windows, will be removed once all ring-3 code has been cleaned up. */
2142 /* First check if we allocated it using SUPPageAlloc; if so then we don't need to lock it again */
2143 rc = supdrvPageGetPhys(pSession, pvR3, cPages, paPages);
2144 if (RT_SUCCESS(rc))
2145 return rc;
2146#endif
2147
2148 /*
2149 * Let IPRT do the job.
2150 */
2151 Mem.eType = MEMREF_TYPE_LOCKED;
2152 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTR0ProcHandleSelf());
2153 if (RT_SUCCESS(rc))
2154 {
2155 uint32_t iPage = cPages;
2156 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
2157 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
2158
2159 while (iPage-- > 0)
2160 {
2161 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2162 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
2163 {
2164 AssertMsgFailed(("iPage=%d\n", iPage));
2165 rc = VERR_INTERNAL_ERROR;
2166 break;
2167 }
2168 }
2169 if (RT_SUCCESS(rc))
2170 rc = supdrvMemAdd(&Mem, pSession);
2171 if (RT_FAILURE(rc))
2172 {
2173 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
2174 AssertRC(rc2);
2175 }
2176 }
2177
2178 return rc;
2179}
2180
2181
2182/**
2183 * Unlocks the memory pointed to by pv.
2184 *
2185 * @returns IPRT status code.
2186 * @param pSession Session to which the memory was locked.
2187 * @param pvR3 Memory to unlock.
2188 */
2189SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2190{
2191 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2192 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2193#ifdef RT_OS_WINDOWS
2194 /*
2195 * Temporary hack for windows - SUPR0PageFree will unlock SUPR0PageAlloc
2196 * allocations; ignore this call.
2197 */
2198 if (supdrvPageWasLockedByPageAlloc(pSession, pvR3))
2199 {
2200 LogFlow(("Page will be unlocked in SUPR0PageFree -> ignore\n"));
2201 return VINF_SUCCESS;
2202 }
2203#endif
2204 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
2205}
2206
2207
2208/**
2209 * Allocates a chunk of page aligned memory with contiguous and fixed physical
2210 * backing.
2211 *
2212 * @returns IPRT status code.
2213 * @param pSession Session data.
2214 * @param cPages Number of pages to allocate.
2215 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
2216 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
2217 * @param pHCPhys Where to put the physical address of allocated memory.
2218 */
2219SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
2220{
2221 int rc;
2222 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2223 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
2224
2225 /*
2226 * Validate input.
2227 */
2228 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2229 if (!ppvR3 || !ppvR0 || !pHCPhys)
2230 {
2231 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
2232 pSession, ppvR0, ppvR3, pHCPhys));
2233 return VERR_INVALID_PARAMETER;
2234
2235 }
2236 if (cPages < 1 || cPages >= 256)
2237 {
2238 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2239 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2240 }
2241
2242 /*
2243 * Let IPRT do the job.
2244 */
2245 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
2246 if (RT_SUCCESS(rc))
2247 {
2248 int rc2;
2249 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2250 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2251 if (RT_SUCCESS(rc))
2252 {
2253 Mem.eType = MEMREF_TYPE_CONT;
2254 rc = supdrvMemAdd(&Mem, pSession);
2255 if (!rc)
2256 {
2257 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2258 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2259 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
2260 return 0;
2261 }
2262
2263 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2264 AssertRC(rc2);
2265 }
2266 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2267 AssertRC(rc2);
2268 }
2269
2270 return rc;
2271}
2272
2273
2274/**
2275 * Frees memory allocated using SUPR0ContAlloc().
2276 *
2277 * @returns IPRT status code.
2278 * @param pSession The session to which the memory was allocated.
2279 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2280 */
2281SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2282{
2283 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2284 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2285 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
2286}
2287
2288
2289/**
2290 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
2291 *
2292 * The memory isn't zeroed.
2293 *
2294 * @returns IPRT status code.
2295 * @param pSession Session data.
2296 * @param cPages Number of pages to allocate.
2297 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
2298 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
2299 * @param paPages Where to put the physical addresses of allocated memory.
2300 */
2301SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2302{
2303 unsigned iPage;
2304 int rc;
2305 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2306 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
2307
2308 /*
2309 * Validate input.
2310 */
2311 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2312 if (!ppvR3 || !ppvR0 || !paPages)
2313 {
2314 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
2315 pSession, ppvR3, ppvR0, paPages));
2316 return VERR_INVALID_PARAMETER;
2317
2318 }
2319 if (cPages < 1 || cPages >= 256)
2320 {
2321 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2322 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2323 }
2324
2325 /*
2326 * Let IPRT do the work.
2327 */
2328 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
2329 if (RT_SUCCESS(rc))
2330 {
2331 int rc2;
2332 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2333 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2334 if (RT_SUCCESS(rc))
2335 {
2336 Mem.eType = MEMREF_TYPE_LOW;
2337 rc = supdrvMemAdd(&Mem, pSession);
2338 if (!rc)
2339 {
2340 for (iPage = 0; iPage < cPages; iPage++)
2341 {
2342 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2343 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", paPages[iPage]));
2344 }
2345 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2346 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2347 return 0;
2348 }
2349
2350 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2351 AssertRC(rc2);
2352 }
2353
2354 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2355 AssertRC(rc2);
2356 }
2357
2358 return rc;
2359}
2360
2361
2362/**
2363 * Frees memory allocated using SUPR0LowAlloc().
2364 *
2365 * @returns IPRT status code.
2366 * @param pSession The session to which the memory was allocated.
2367 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2368 */
2369SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2370{
2371 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2372 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2373 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
2374}
2375
2376
2377
2378/**
2379 * Allocates a chunk of memory with both R0 and R3 mappings.
2380 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
2381 *
2382 * @returns IPRT status code.
2383 * @param pSession The session to associated the allocation with.
2384 * @param cb Number of bytes to allocate.
2385 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2386 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2387 */
2388SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
2389{
2390 int rc;
2391 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2392 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
2393
2394 /*
2395 * Validate input.
2396 */
2397 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2398 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
2399 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2400 if (cb < 1 || cb >= _4M)
2401 {
2402 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
2403 return VERR_INVALID_PARAMETER;
2404 }
2405
2406 /*
2407 * Let IPRT do the work.
2408 */
2409 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
2410 if (RT_SUCCESS(rc))
2411 {
2412 int rc2;
2413 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2414 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2415 if (RT_SUCCESS(rc))
2416 {
2417 Mem.eType = MEMREF_TYPE_MEM;
2418 rc = supdrvMemAdd(&Mem, pSession);
2419 if (!rc)
2420 {
2421 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2422 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2423 return VINF_SUCCESS;
2424 }
2425
2426 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2427 AssertRC(rc2);
2428 }
2429
2430 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2431 AssertRC(rc2);
2432 }
2433
2434 return rc;
2435}
2436
2437
2438/**
2439 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
2440 *
2441 * @returns IPRT status code.
2442 * @param pSession The session to which the memory was allocated.
2443 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2444 * @param paPages Where to store the physical addresses.
2445 */
2446SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
2447{
2448 PSUPDRVBUNDLE pBundle;
2449 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2450 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
2451
2452 /*
2453 * Validate input.
2454 */
2455 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2456 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
2457 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
2458
2459 /*
2460 * Search for the address.
2461 */
2462 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2463 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2464 {
2465 if (pBundle->cUsed > 0)
2466 {
2467 unsigned i;
2468 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2469 {
2470 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2471 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2472 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2473 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2474 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
2475 )
2476 )
2477 {
2478 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2479 size_t iPage;
2480 for (iPage = 0; iPage < cPages; iPage++)
2481 {
2482 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2483 paPages[iPage].uReserved = 0;
2484 }
2485 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2486 return VINF_SUCCESS;
2487 }
2488 }
2489 }
2490 }
2491 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2492 Log(("Failed to find %p!!!\n", (void *)uPtr));
2493 return VERR_INVALID_PARAMETER;
2494}
2495
2496
2497/**
2498 * Free memory allocated by SUPR0MemAlloc().
2499 *
2500 * @returns IPRT status code.
2501 * @param pSession The session owning the allocation.
2502 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2503 */
2504SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2505{
2506 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2507 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2508 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
2509}
2510
2511
2512/**
2513 * Allocates a chunk of memory with only a R3 mappings.
2514 *
2515 * The memory is fixed and it's possible to query the physical addresses using
2516 * SUPR0MemGetPhys().
2517 *
2518 * @returns IPRT status code.
2519 * @param pSession The session to associated the allocation with.
2520 * @param cPages The number of pages to allocate.
2521 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2522 * @param paPages Where to store the addresses of the pages. Optional.
2523 */
2524SUPR0DECL(int) SUPR0PageAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2525{
2526 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2527 return SUPR0PageAllocEx(pSession, cPages, 0 /*fFlags*/, ppvR3, NULL, paPages);
2528}
2529
2530
2531/**
2532 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
2533 *
2534 * The memory is fixed and it's possible to query the physical addresses using
2535 * SUPR0MemGetPhys().
2536 *
2537 * @returns IPRT status code.
2538 * @param pSession The session to associated the allocation with.
2539 * @param cPages The number of pages to allocate.
2540 * @param fFlags Flags, reserved for the future. Must be zero.
2541 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2542 * NULL if no ring-3 mapping.
2543 * @param ppvR3 Where to store the address of the Ring-0 mapping.
2544 * NULL if no ring-0 mapping.
2545 * @param paPages Where to store the addresses of the pages. Optional.
2546 */
2547SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
2548{
2549 int rc;
2550 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2551 LogFlow(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
2552
2553 /*
2554 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
2555 */
2556 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2557 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
2558 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2559 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
2560 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2561 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
2562 {
2563 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than 128MB.\n", cPages));
2564 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2565 }
2566
2567 /*
2568 * Let IPRT do the work.
2569 */
2570 if (ppvR0)
2571 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
2572 else
2573 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
2574 if (RT_SUCCESS(rc))
2575 {
2576 int rc2;
2577 if (ppvR3)
2578 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2579 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2580 else
2581 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
2582 if (RT_SUCCESS(rc))
2583 {
2584 Mem.eType = MEMREF_TYPE_PAGE;
2585 rc = supdrvMemAdd(&Mem, pSession);
2586 if (!rc)
2587 {
2588 if (ppvR3)
2589 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2590 if (ppvR0)
2591 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2592 if (paPages)
2593 {
2594 uint32_t iPage = cPages;
2595 while (iPage-- > 0)
2596 {
2597 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
2598 Assert(paPages[iPage] != NIL_RTHCPHYS);
2599 }
2600 }
2601 return VINF_SUCCESS;
2602 }
2603
2604 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2605 AssertRC(rc2);
2606 }
2607
2608 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2609 AssertRC(rc2);
2610 }
2611 return rc;
2612}
2613
2614
/**
 * Maps a sub-range of a page-level allocation into kernel space.
 *
 * The ring-3 address is matched against the session's SUPR0PageAllocEx
 * allocations (MEMREF_TYPE_PAGE, via their ring-3 mapping) and against
 * locked-memory records (MEMREF_TYPE_LOCKED, via the object address).
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_PARAMETER if pvR3 matches no tracked allocation or
 *          the requested sub-range is out of bounds.
 * @param   pSession    The session the memory belongs to.
 * @param   pvR3        The ring-3 address of the memory.
 * @param   offSub      Byte offset of the sub-range to map; page aligned.
 * @param   cbSub       Size of the sub-range; page aligned and non-zero.
 * @param   fFlags      Flags, reserved for the future. Must be zero.
 * @param   ppvR0       Where to store the address of the ring-0 mapping.
 */
SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
                                  uint32_t fFlags, PRTR0PTR ppvR0)
{
    int rc;
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));

    /*
     * Validate input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub, VERR_INVALID_PARAMETER);

    /*
     * Find the memory object.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                /* Page allocations are identified by their ring-3 mapping address,
                   locked memory records by the (ring-3) object address itself. */
                if (    (   pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
                         && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                         && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                         && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
                    ||  (   pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
                         && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                         && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
                         && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
                {
                    hMemObj = pBundle->aMem[i].MemObj;
                    break;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    rc = VERR_INVALID_PARAMETER;
    if (hMemObj != NIL_RTR0MEMOBJ)
    {
        /*
         * Do some further input validations before calling IPRT.
         * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
         */
        size_t cbMemObj = RTR0MemObjSize(hMemObj);
        if (    offSub < cbMemObj
            &&  cbSub <= cbMemObj
            &&  offSub + cbSub <= cbMemObj) /* the first two checks make the sum safe against 32-bit wrap-around */
        {
            RTR0MEMOBJ hMapObj;
            rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
                                       RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
            if (RT_SUCCESS(rc))
                *ppvR0 = RTR0MemObjAddress(hMapObj);
        }
        else
            /* NOTE(review): cbMemObj is size_t but formatted with %#x - possibly
               truncated in the log on 64-bit hosts; confirm SUPR0Printf semantics. */
            SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);

    }
    return rc;
}
2702
2703
2704
2705#ifdef RT_OS_WINDOWS
2706/**
2707 * Check if the pages were locked by SUPR0PageAlloc
2708 *
2709 * This function will be removed along with the lock/unlock hacks when
2710 * we've cleaned up the ring-3 code properly.
2711 *
2712 * @returns boolean
2713 * @param pSession The session to which the memory was allocated.
2714 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2715 */
2716static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2717{
2718 PSUPDRVBUNDLE pBundle;
2719 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2720 LogFlow(("SUPR0PageIsLockedByPageAlloc: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2721
2722 /*
2723 * Search for the address.
2724 */
2725 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2726 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2727 {
2728 if (pBundle->cUsed > 0)
2729 {
2730 unsigned i;
2731 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2732 {
2733 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2734 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2735 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2736 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2737 {
2738 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2739 return true;
2740 }
2741 }
2742 }
2743 }
2744 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2745 return false;
2746}
2747
2748
/**
 * Get the physical addresses of memory allocated using SUPR0PageAllocEx().
 *
 * This function will be removed along with the lock/unlock hacks when
 * we've cleaned up the ring-3 code properly.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_PARAMETER if pvR3 doesn't match a MEMREF_TYPE_PAGE
 *          record with a ring-3 mapping.
 * @param pSession The session to which the memory was allocated.
 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
 * @param cPages Number of pages in paPages
 * @param paPages Where to store the physical addresses.
 */
static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    LogFlow(("supdrvPageGetPhys: pSession=%p pvR3=%p cPages=%#lx paPages=%p\n", pSession, (void *)pvR3, (long)cPages, paPages));

    /*
     * Search the session's bundles for a page allocation whose ring-3
     * mapping matches the given address.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (    pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    &&  pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                    &&  RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
                {
                    uint32_t iPage;
                    /* Clamp the request to the actual size of the allocation. */
                    size_t cMaxPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
                    cPages = (uint32_t)RT_MIN(cMaxPages, cPages);
                    for (iPage = 0; iPage < cPages; iPage++)
                        paPages[iPage] = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
    return VERR_INVALID_PARAMETER;
}
2797#endif /* RT_OS_WINDOWS */
2798
2799
2800/**
2801 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
2802 *
2803 * @returns IPRT status code.
2804 * @param pSession The session owning the allocation.
2805 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
2806 * SUPR0PageAllocEx().
2807 */
2808SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2809{
2810 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2811 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2812 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
2813}
2814
2815
2816/**
2817 * Maps the GIP into userspace and/or get the physical address of the GIP.
2818 *
2819 * @returns IPRT status code.
2820 * @param pSession Session to which the GIP mapping should belong.
2821 * @param ppGipR3 Where to store the address of the ring-3 mapping. (optional)
2822 * @param pHCPhysGip Where to store the physical address. (optional)
2823 *
2824 * @remark There is no reference counting on the mapping, so one call to this function
2825 * count globally as one reference. One call to SUPR0GipUnmap() is will unmap GIP
2826 * and remove the session as a GIP user.
2827 */
2828SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGip)
2829{
2830 int rc = VINF_SUCCESS;
2831 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2832 RTR3PTR pGip = NIL_RTR3PTR;
2833 RTHCPHYS HCPhys = NIL_RTHCPHYS;
2834 LogFlow(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGip=%p\n", pSession, ppGipR3, pHCPhysGip));
2835
2836 /*
2837 * Validate
2838 */
2839 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2840 AssertPtrNullReturn(ppGipR3, VERR_INVALID_POINTER);
2841 AssertPtrNullReturn(pHCPhysGip, VERR_INVALID_POINTER);
2842
2843 RTSemFastMutexRequest(pDevExt->mtxGip);
2844 if (pDevExt->pGip)
2845 {
2846 /*
2847 * Map it?
2848 */
2849 if (ppGipR3)
2850 {
2851 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
2852 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (RTR3PTR)-1, 0,
2853 RTMEM_PROT_READ, RTR0ProcHandleSelf());
2854 if (RT_SUCCESS(rc))
2855 {
2856 pGip = RTR0MemObjAddressR3(pSession->GipMapObjR3);
2857 rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
2858 }
2859 }
2860
2861 /*
2862 * Get physical address.
2863 */
2864 if (pHCPhysGip && !rc)
2865 HCPhys = pDevExt->HCPhysGip;
2866
2867 /*
2868 * Reference globally.
2869 */
2870 if (!pSession->fGipReferenced && !rc)
2871 {
2872 pSession->fGipReferenced = 1;
2873 pDevExt->cGipUsers++;
2874 if (pDevExt->cGipUsers == 1)
2875 {
2876 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2877 unsigned i;
2878
2879 LogFlow(("SUPR0GipMap: Resumes GIP updating\n"));
2880
2881 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
2882 ASMAtomicXchgU32(&pGip->aCPUs[i].u32TransactionId, pGip->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
2883 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, 0);
2884
2885 rc = RTTimerStart(pDevExt->pGipTimer, 0);
2886 AssertRC(rc); rc = VINF_SUCCESS;
2887 }
2888 }
2889 }
2890 else
2891 {
2892 rc = SUPDRV_ERR_GENERAL_FAILURE;
2893 Log(("SUPR0GipMap: GIP is not available!\n"));
2894 }
2895 RTSemFastMutexRelease(pDevExt->mtxGip);
2896
2897 /*
2898 * Write returns.
2899 */
2900 if (pHCPhysGip)
2901 *pHCPhysGip = HCPhys;
2902 if (ppGipR3)
2903 *ppGipR3 = pGip;
2904
2905#ifdef DEBUG_DARWIN_GIP
2906 OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGip=%lx pGip=%p\n", rc, (unsigned long)HCPhys, (void *)pGip));
2907#else
2908 LogFlow(( "SUPR0GipMap: returns %d *pHCPhysGip=%lx pGip=%p\n", rc, (unsigned long)HCPhys, (void *)pGip));
2909#endif
2910 return rc;
2911}
2912
2913
/**
 * Unmaps any user mapping of the GIP and terminates all GIP access
 * from this session.
 *
 * The last GIP user causes the GIP update timer to be suspended.
 *
 * @returns IPRT status code.
 * @param pSession Session to which the GIP mapping should belong.
 */
SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
{
    int rc = VINF_SUCCESS;
    PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
                pSession,
                pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
                pSession->GipMapObjR3));
#else
    LogFlow(("SUPR0GipUnmap: pSession=%p\n", pSession));
#endif
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);

    RTSemFastMutexRequest(pDevExt->mtxGip);

    /*
     * Unmap anything?  Only clear the handle if the free actually succeeded.
     */
    if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
    {
        rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
    }

    /*
     * Dereference global GIP.  (Skipped if the unmap above failed, so the
     * session stays registered as a user.)
     */
    if (pSession->fGipReferenced && !rc)
    {
        pSession->fGipReferenced = 0;
        if (    pDevExt->cGipUsers > 0
            &&  !--pDevExt->cGipUsers)
        {
            /* Last user gone - stop the GIP update timer. */
            LogFlow(("SUPR0GipUnmap: Suspends GIP updating\n"));
            rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = VINF_SUCCESS;
        }
    }

    RTSemFastMutexRelease(pDevExt->mtxGip);

    return rc;
}
2966
2967
/**
 * Register a component factory with the support driver.
 *
 * This is currently restricted to kernel sessions only.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS on success.
 * @retval VERR_NO_MEMORY if we're out of memory.
 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
 * @retval VERR_INVALID_PARAMETER on invalid parameter.
 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
 *
 * @param pSession The SUPDRV session (must be a ring-0 session).
 * @param pFactory Pointer to the component factory registration structure.
 *
 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
 */
SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
{
    PSUPDRVFACTORYREG pNewReg;
    const char *psz;
    int rc;

    /*
     * Validate parameters.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED); /* kernel sessions only */
    AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
    AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
    /* The name must be properly terminated within the szName buffer. */
    psz = (const char *)memchr(pFactory->szName, '\0', sizeof(pFactory->szName));
    AssertReturn(psz, VERR_INVALID_PARAMETER);

    /*
     * Allocate and initialize a new registration structure.
     */
    pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
    if (pNewReg)
    {
        pNewReg->pNext = NULL;
        pNewReg->pFactory = pFactory;
        pNewReg->pSession = pSession;
        pNewReg->cchName = psz - &pFactory->szName[0]; /* cached for fast compares in QueryFactory */

        /*
         * Add it to the tail of the list after checking for prior registration.
         */
        rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
        if (RT_SUCCESS(rc))
        {
            PSUPDRVFACTORYREG pPrev = NULL;
            PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
            /* Walk to the end, bailing out if the same factory pointer is found. */
            while (pCur && pCur->pFactory != pFactory)
            {
                pPrev = pCur;
                pCur = pCur->pNext;
            }
            if (!pCur)
            {
                if (pPrev)
                    pPrev->pNext = pNewReg;
                else
                    pSession->pDevExt->pComponentFactoryHead = pNewReg;
                rc = VINF_SUCCESS;
            }
            else
                rc = VERR_ALREADY_EXISTS;

            RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
        }

        /* Discard the registration record on any failure (lock or duplicate). */
        if (RT_FAILURE(rc))
            RTMemFree(pNewReg);
    }
    else
        rc = VERR_NO_MEMORY;
    return rc;
}
3047
3048
/**
 * Deregister a component factory.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS on success.
 * @retval VERR_NOT_FOUND if the factory wasn't registered.
 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
 * @retval VERR_INVALID_PARAMETER on invalid parameter.
 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
 *
 * @param pSession The SUPDRV session (must be a ring-0 session).
 * @param pFactory Pointer to the component factory registration structure
 *                 previously passed SUPR0ComponentRegisterFactory().
 *
 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
 */
SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
{
    int rc;

    /*
     * Validate parameters.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED); /* kernel sessions only */
    AssertPtrReturn(pFactory, VERR_INVALID_POINTER);

    /*
     * Take the lock and look for the registration record.
     */
    rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
    if (RT_SUCCESS(rc))
    {
        PSUPDRVFACTORYREG pPrev = NULL;
        PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
        while (pCur && pCur->pFactory != pFactory)
        {
            pPrev = pCur;
            pCur = pCur->pNext;
        }
        if (pCur)
        {
            /* Unlink and scrub the record; it is freed after the lock is dropped. */
            if (!pPrev)
                pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
            else
                pPrev->pNext = pCur->pNext;

            pCur->pNext = NULL;
            pCur->pFactory = NULL;
            pCur->pSession = NULL;
            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NOT_FOUND;

        RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);

        /* pCur is NULL on the not-found path; presumably RTMemFree ignores
           NULL like free() - NOTE(review): confirm against the IPRT docs. */
        RTMemFree(pCur);
    }
    return rc;
}
3110
3111
/**
 * Queries a component factory.
 *
 * All registered factories matching pszName are asked for the interface in
 * turn; the first non-NULL answer wins.
 *
 * @returns VBox status code.
 * @retval VERR_INVALID_PARAMETER on invalid parameter.
 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
 *
 * @param pSession The SUPDRV session.
 * @param pszName The name of the component factory.
 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
 * @param ppvFactoryIf Where to store the factory interface.
 */
SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
{
    const char *pszEnd;
    size_t cchName;
    int rc;

    /*
     * Validate parameters.  Both strings must be terminated within the
     * lengths the factory structures can hold.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);

    AssertPtrReturn(pszName, VERR_INVALID_POINTER);
    pszEnd = memchr(pszName, '\0', RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
    AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
    cchName = pszEnd - pszName;

    AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
    pszEnd = memchr(pszInterfaceUuid, '\0', RTUUID_STR_LENGTH);
    AssertReturn(pszEnd, VERR_INVALID_PARAMETER);

    AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
    *ppvFactoryIf = NULL; /* make sure the output is sane on all error paths */

    /*
     * Take the lock and try all factories by this name.
     */
    rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
    if (RT_SUCCESS(rc))
    {
        PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
        rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
        while (pCur)
        {
            if (    pCur->cchName == cchName
                &&  !memcmp(pCur->pFactory->szName, pszName, cchName))
            {
#ifdef RT_WITH_W64_UNWIND_HACK
                void *pvFactory = supdrvNtWrapQueryFactoryInterface((PFNRT)pCur->pFactory->pfnQueryFactoryInterface, pCur->pFactory, pSession, pszInterfaceUuid);
#else
                void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
#endif
                if (pvFactory)
                {
                    *ppvFactoryIf = pvFactory;
                    rc = VINF_SUCCESS;
                    break;
                }
                /* Name matched but the interface didn't; keep looking but
                   remember the more specific status. */
                rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
            }

            /* next */
            pCur = pCur->pNext;
        }

        RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
    }
    return rc;
}
3184
3185
/**
 * Adds a memory object to the session.
 *
 * The record is stored in the first free slot of an existing bundle; when
 * every bundle is full a new one is allocated and pushed onto the list.
 *
 * @returns IPRT status code.
 * @retval  VERR_NO_MEMORY if a new bundle couldn't be allocated.
 * @param pMem Memory tracking structure containing the
 *             information to track.  Copied by value; ownership of the
 *             referenced memory objects passes to the session on success.
 * @param pSession The session.
 */
static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;

    /*
     * Find free entry and record the allocation.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                /* A NIL MemObj marks a free slot. */
                if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
                {
                    pBundle->cUsed++;
                    pBundle->aMem[i] = *pMem;
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
                    return VINF_SUCCESS;
                }
            }
            AssertFailed(); /* !!this can't be happening!!! */
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    /*
     * Need to allocate a new bundle.
     * Insert into the last entry in the bundle.
     * (The allocation is done outside the spinlock since it may sleep.)
     */
    pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
    if (!pBundle)
        return VERR_NO_MEMORY;

    /* take last entry. */
    pBundle->cUsed++;
    pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;

    /* insert into list. */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    pBundle->pNext = pSession->Bundle.pNext;
    pSession->Bundle.pNext = pBundle;
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    return VINF_SUCCESS;
}
3243
3244
/**
 * Releases a memory object referenced by pointer and type.
 *
 * The record is detached under the spinlock and the actual freeing is done
 * afterwards, since RTR0MemObjFree must not be called while holding it.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_PARAMETER if uPtr is zero or matches no record of
 *          the given type.
 * @param pSession Session data.
 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
 * @param eType Memory type.
 */
static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;

    /*
     * Validate input.
     */
    if (!uPtr)
    {
        Log(("Illegal address %p\n", (void *)uPtr));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Search for the address.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                /* Match the record on type and on either the ring-0 address
                   or the ring-3 mapping address, when one exists. */
                if (    pBundle->aMem[i].eType == eType
                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    &&  (   (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
                         || (   pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                             && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
                   )
                {
                    /* Make a copy of it and release it outside the spinlock. */
                    SUPDRVMEMREF Mem = pBundle->aMem[i];
                    pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
                    pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
                    pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

                    /* Free the user mapping first, then the memory object with
                       any remaining mappings. */
                    if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MapObjR3, false);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    if (Mem.MemObj != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
    Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
    return VERR_INVALID_PARAMETER;
}
3311
3312
/**
 * Opens an image. If it's the first time it's opened the call must upload
 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
 *
 * This is the 1st step of the loading.
 *
 * @returns IPRT status code.
 * @param pDevExt Device globals.
 * @param pSession Session data.
 * @param pReq The open request.  On success pvImageBase and fNeedsLoading
 *             are filled in; fNeedsLoading is false only when an already
 *             loaded instance of the image was found.
 */
static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
{
    PSUPDRVLDRIMAGE pImage;
    unsigned cb;
    void *pv;
    size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
    LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pReq->u.In.szName, pReq->u.In.cbImage));

    /*
     * Check if we got an instance of the image already.
     */
    RTSemFastMutexRequest(pDevExt->mtxLdr);
    for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
    {
        /* Compare the length (terminator position) first, then the bytes. */
        if (    pImage->szName[cchName] == '\0'
            &&  !memcmp(pImage->szName, pReq->u.In.szName, cchName))
        {
            pImage->cUsage++;
            pReq->u.Out.pvImageBase = pImage->pvImage;
            pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
            supdrvLdrAddUsage(pSession, pImage);
            RTSemFastMutexRelease(pDevExt->mtxLdr);
            return VINF_SUCCESS;
        }
    }
    /* (not found - add it!) */

    /*
     * Allocate memory.  The image bits live in the same executable block,
     * right after the 32-byte aligned header.
     * NOTE(review): cbImage + sizeof(SUPDRVLDRIMAGE) + 31 could wrap for a
     * huge cbImage - presumably the ioctl layer bounds cbImage; confirm.
     */
    cb = pReq->u.In.cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
    pv = RTMemExecAlloc(cb);
    if (!pv)
    {
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        Log(("supdrvIOCtl_LdrOpen: RTMemExecAlloc(%u) failed\n", cb));
        return VERR_NO_MEMORY;
    }

    /*
     * Setup and link in the LDR stuff.
     */
    pImage = (PSUPDRVLDRIMAGE)pv;
    pImage->pvImage = RT_ALIGN_P(pImage + 1, 32); /* image bits follow the header, 32-byte aligned */
    pImage->cbImage = pReq->u.In.cbImage;
    pImage->pfnModuleInit = NULL;
    pImage->pfnModuleTerm = NULL;
    pImage->pfnServiceReqHandler = NULL;
    pImage->uState = SUP_IOCTL_LDR_OPEN;
    pImage->cUsage = 1;
    memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);

    pImage->pNext = pDevExt->pLdrImages;
    pDevExt->pLdrImages = pImage;

    supdrvLdrAddUsage(pSession, pImage);

    pReq->u.Out.pvImageBase = pImage->pvImage;
    pReq->u.Out.fNeedsLoading = true;
    RTSemFastMutexRelease(pDevExt->mtxLdr);

#if defined(RT_OS_WINDOWS) && defined(DEBUG)
    SUPR0Printf("VBoxDrv: windbg> .reload /f %s=%#p\n", pImage->szName, pImage->pvImage);
#endif
    return VINF_SUCCESS;
}
3390
3391
3392/**
3393 * Loads the image bits.
3394 *
3395 * This is the 2nd step of the loading.
3396 *
3397 * @returns IPRT status code.
3398 * @param pDevExt Device globals.
3399 * @param pSession Session data.
3400 * @param pReq The request.
3401 */
3402static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
3403{
3404 PSUPDRVLDRUSAGE pUsage;
3405 PSUPDRVLDRIMAGE pImage;
3406 int rc;
3407 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImage));
3408
3409 /*
3410 * Find the ldr image.
3411 */
3412 RTSemFastMutexRequest(pDevExt->mtxLdr);
3413 pUsage = pSession->pLdrUsage;
3414 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3415 pUsage = pUsage->pNext;
3416 if (!pUsage)
3417 {
3418 RTSemFastMutexRelease(pDevExt->mtxLdr);
3419 Log(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
3420 return VERR_INVALID_HANDLE;
3421 }
3422 pImage = pUsage->pImage;
3423 if (pImage->cbImage != pReq->u.In.cbImage)
3424 {
3425 RTSemFastMutexRelease(pDevExt->mtxLdr);
3426 Log(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pReq->u.In.cbImage));
3427 return VERR_INVALID_HANDLE;
3428 }
3429 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
3430 {
3431 unsigned uState = pImage->uState;
3432 RTSemFastMutexRelease(pDevExt->mtxLdr);
3433 if (uState != SUP_IOCTL_LDR_LOAD)
3434 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
3435 return SUPDRV_ERR_ALREADY_LOADED;
3436 }
3437 switch (pReq->u.In.eEPType)
3438 {
3439 case SUPLDRLOADEP_NOTHING:
3440 break;
3441
3442 case SUPLDRLOADEP_VMMR0:
3443 if ( !pReq->u.In.EP.VMMR0.pvVMMR0
3444 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryInt
3445 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryFast
3446 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryEx)
3447 {
3448 RTSemFastMutexRelease(pDevExt->mtxLdr);
3449 Log(("NULL pointer: pvVMMR0=%p pvVMMR0EntryInt=%p pvVMMR0EntryFast=%p pvVMMR0EntryEx=%p!\n",
3450 pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3451 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3452 return VERR_INVALID_PARAMETER;
3453 }
3454 /** @todo validate pReq->u.In.EP.VMMR0.pvVMMR0 against pvImage! */
3455 if ( (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryInt - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3456 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryFast - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3457 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryEx - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3458 {
3459 RTSemFastMutexRelease(pDevExt->mtxLdr);
3460 Log(("Out of range (%p LB %#x): pvVMMR0EntryInt=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p is NULL!\n",
3461 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3462 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3463 return VERR_INVALID_PARAMETER;
3464 }
3465 break;
3466
3467 case SUPLDRLOADEP_SERVICE:
3468 if (!pReq->u.In.EP.Service.pfnServiceReq)
3469 {
3470 RTSemFastMutexRelease(pDevExt->mtxLdr);
3471 Log(("NULL pointer: pfnServiceReq=%p!\n", pReq->u.In.EP.Service.pfnServiceReq));
3472 return VERR_INVALID_PARAMETER;
3473 }
3474 if ((uintptr_t)pReq->u.In.EP.Service.pfnServiceReq - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3475 {
3476 RTSemFastMutexRelease(pDevExt->mtxLdr);
3477 Log(("Out of range (%p LB %#x): pfnServiceReq=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p is NULL!\n",
3478 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.Service.pfnServiceReq));
3479 return VERR_INVALID_PARAMETER;
3480 }
3481 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
3482 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
3483 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
3484 {
3485 RTSemFastMutexRelease(pDevExt->mtxLdr);
3486 Log(("Out of range (%p LB %#x): apvReserved={%p,%p,%p} MBZ!\n",
3487 pImage->pvImage, pReq->u.In.cbImage,
3488 pReq->u.In.EP.Service.apvReserved[0],
3489 pReq->u.In.EP.Service.apvReserved[1],
3490 pReq->u.In.EP.Service.apvReserved[2]));
3491 return VERR_INVALID_PARAMETER;
3492 }
3493 break;
3494
3495 default:
3496 RTSemFastMutexRelease(pDevExt->mtxLdr);
3497 Log(("Invalid eEPType=%d\n", pReq->u.In.eEPType));
3498 return VERR_INVALID_PARAMETER;
3499 }
3500 if ( pReq->u.In.pfnModuleInit
3501 && (uintptr_t)pReq->u.In.pfnModuleInit - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3502 {
3503 RTSemFastMutexRelease(pDevExt->mtxLdr);
3504 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
3505 pReq->u.In.pfnModuleInit, pImage->pvImage, pReq->u.In.cbImage));
3506 return VERR_INVALID_PARAMETER;
3507 }
3508 if ( pReq->u.In.pfnModuleTerm
3509 && (uintptr_t)pReq->u.In.pfnModuleTerm - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3510 {
3511 RTSemFastMutexRelease(pDevExt->mtxLdr);
3512 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
3513 pReq->u.In.pfnModuleTerm, pImage->pvImage, pReq->u.In.cbImage));
3514 return VERR_INVALID_PARAMETER;
3515 }
3516
3517 /*
3518 * Copy the memory.
3519 */
3520 /* no need to do try/except as this is a buffered request. */
3521 memcpy(pImage->pvImage, &pReq->u.In.achImage[0], pImage->cbImage);
3522 pImage->uState = SUP_IOCTL_LDR_LOAD;
3523 pImage->pfnModuleInit = pReq->u.In.pfnModuleInit;
3524 pImage->pfnModuleTerm = pReq->u.In.pfnModuleTerm;
3525 pImage->offSymbols = pReq->u.In.offSymbols;
3526 pImage->cSymbols = pReq->u.In.cSymbols;
3527 pImage->offStrTab = pReq->u.In.offStrTab;
3528 pImage->cbStrTab = pReq->u.In.cbStrTab;
3529
3530 /*
3531 * Update any entry points.
3532 */
3533 switch (pReq->u.In.eEPType)
3534 {
3535 default:
3536 case SUPLDRLOADEP_NOTHING:
3537 rc = VINF_SUCCESS;
3538 break;
3539 case SUPLDRLOADEP_VMMR0:
3540 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3541 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
3542 break;
3543 case SUPLDRLOADEP_SERVICE:
3544 pImage->pfnServiceReqHandler = pReq->u.In.EP.Service.pfnServiceReq;
3545 rc = VINF_SUCCESS;
3546 break;
3547 }
3548
3549 /*
3550 * On success call the module initialization.
3551 */
3552 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
3553 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
3554 {
3555 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
3556#ifdef RT_WITH_W64_UNWIND_HACK
3557 rc = supdrvNtWrapModuleInit((PFNRT)pImage->pfnModuleInit);
3558#else
3559 rc = pImage->pfnModuleInit();
3560#endif
3561 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
3562 supdrvLdrUnsetVMMR0EPs(pDevExt);
3563 }
3564
3565 if (rc)
3566 pImage->uState = SUP_IOCTL_LDR_OPEN;
3567
3568 RTSemFastMutexRelease(pDevExt->mtxLdr);
3569 return rc;
3570}
3571
3572
3573/**
3574 * Frees a previously loaded (prep'ed) image.
3575 *
3576 * @returns IPRT status code.
3577 * @param pDevExt Device globals.
3578 * @param pSession Session data.
3579 * @param pReq The request.
3580 */
3581static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
3582{
3583 int rc;
3584 PSUPDRVLDRUSAGE pUsagePrev;
3585 PSUPDRVLDRUSAGE pUsage;
3586 PSUPDRVLDRIMAGE pImage;
3587 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
3588
3589 /*
3590 * Find the ldr image.
3591 */
3592 RTSemFastMutexRequest(pDevExt->mtxLdr);
3593 pUsagePrev = NULL;
3594 pUsage = pSession->pLdrUsage;
3595 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3596 {
3597 pUsagePrev = pUsage;
3598 pUsage = pUsage->pNext;
3599 }
3600 if (!pUsage)
3601 {
3602 RTSemFastMutexRelease(pDevExt->mtxLdr);
3603 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
3604 return VERR_INVALID_HANDLE;
3605 }
3606
3607 /*
3608 * Check if we can remove anything.
3609 */
3610 rc = VINF_SUCCESS;
3611 pImage = pUsage->pImage;
3612 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
3613 {
3614 /*
3615 * Check if there are any objects with destructors in the image, if
3616 * so leave it for the session cleanup routine so we get a chance to
3617 * clean things up in the right order and not leave them all dangling.
3618 */
3619 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3620 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
3621 if (pImage->cUsage <= 1)
3622 {
3623 PSUPDRVOBJ pObj;
3624 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
3625 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3626 {
3627 rc = VERR_DANGLING_OBJECTS;
3628 break;
3629 }
3630 }
3631 else
3632 {
3633 PSUPDRVUSAGE pGenUsage;
3634 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
3635 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3636 {
3637 rc = VERR_DANGLING_OBJECTS;
3638 break;
3639 }
3640 }
3641 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
3642 if (rc == VINF_SUCCESS)
3643 {
3644 /* unlink it */
3645 if (pUsagePrev)
3646 pUsagePrev->pNext = pUsage->pNext;
3647 else
3648 pSession->pLdrUsage = pUsage->pNext;
3649
3650 /* free it */
3651 pUsage->pImage = NULL;
3652 pUsage->pNext = NULL;
3653 RTMemFree(pUsage);
3654
3655 /*
3656 * Derefrence the image.
3657 */
3658 if (pImage->cUsage <= 1)
3659 supdrvLdrFree(pDevExt, pImage);
3660 else
3661 pImage->cUsage--;
3662 }
3663 else
3664 {
3665 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
3666 rc = VINF_SUCCESS; /** @todo BRANCH-2.1: remove this after branching. */
3667 }
3668 }
3669 else
3670 {
3671 /*
3672 * Dereference both image and usage.
3673 */
3674 pImage->cUsage--;
3675 pUsage->cUsage--;
3676 }
3677
3678 RTSemFastMutexRelease(pDevExt->mtxLdr);
3679 return rc;
3680}
3681
3682
3683/**
3684 * Gets the address of a symbol in an open image.
3685 *
3686 * @returns 0 on success.
3687 * @returns SUPDRV_ERR_* on failure.
3688 * @param pDevExt Device globals.
3689 * @param pSession Session data.
3690 * @param pReq The request buffer.
3691 */
3692static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
3693{
3694 PSUPDRVLDRIMAGE pImage;
3695 PSUPDRVLDRUSAGE pUsage;
3696 uint32_t i;
3697 PSUPLDRSYM paSyms;
3698 const char *pchStrings;
3699 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
3700 void *pvSymbol = NULL;
3701 int rc = VERR_GENERAL_FAILURE;
3702 Log3(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
3703
3704 /*
3705 * Find the ldr image.
3706 */
3707 RTSemFastMutexRequest(pDevExt->mtxLdr);
3708 pUsage = pSession->pLdrUsage;
3709 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3710 pUsage = pUsage->pNext;
3711 if (!pUsage)
3712 {
3713 RTSemFastMutexRelease(pDevExt->mtxLdr);
3714 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3715 return VERR_INVALID_HANDLE;
3716 }
3717 pImage = pUsage->pImage;
3718 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3719 {
3720 unsigned uState = pImage->uState;
3721 RTSemFastMutexRelease(pDevExt->mtxLdr);
3722 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3723 return VERR_ALREADY_LOADED;
3724 }
3725
3726 /*
3727 * Search the symbol strings.
3728 */
3729 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3730 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3731 for (i = 0; i < pImage->cSymbols; i++)
3732 {
3733 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3734 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3735 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
3736 {
3737 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3738 rc = VINF_SUCCESS;
3739 break;
3740 }
3741 }
3742 RTSemFastMutexRelease(pDevExt->mtxLdr);
3743 pReq->u.Out.pvSymbol = pvSymbol;
3744 return rc;
3745}
3746
3747
3748/**
3749 * Gets the address of a symbol in an open image or the support driver.
3750 *
3751 * @returns VINF_SUCCESS on success.
3752 * @returns
3753 * @param pDevExt Device globals.
3754 * @param pSession Session data.
3755 * @param pReq The request buffer.
3756 */
3757static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
3758{
3759 int rc = VINF_SUCCESS;
3760 const char *pszSymbol = pReq->u.In.pszSymbol;
3761 const char *pszModule = pReq->u.In.pszModule;
3762 size_t cbSymbol;
3763 char const *pszEnd;
3764 uint32_t i;
3765
3766 /*
3767 * Input validation.
3768 */
3769 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
3770 pszEnd = (char *)memchr(pszSymbol, '\0', 512);
3771 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3772 cbSymbol = pszEnd - pszSymbol + 1;
3773
3774 if (pszModule)
3775 {
3776 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
3777 pszEnd = (char *)memchr(pszModule, '\0', 64);
3778 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3779 }
3780 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
3781
3782
3783 if ( !pszModule
3784 || !strcmp(pszModule, "SupDrv"))
3785 {
3786 /*
3787 * Search the support driver export table.
3788 */
3789 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
3790 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
3791 {
3792 pReq->u.Out.pfnSymbol = g_aFunctions[i].pfn;
3793 break;
3794 }
3795 }
3796 else
3797 {
3798 /*
3799 * Find the loader image.
3800 */
3801 PSUPDRVLDRIMAGE pImage;
3802
3803 RTSemFastMutexRequest(pDevExt->mtxLdr);
3804
3805 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3806 if (!strcmp(pImage->szName, pszModule))
3807 break;
3808 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
3809 {
3810 /*
3811 * Search the symbol strings.
3812 */
3813 const char *pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3814 PCSUPLDRSYM paSyms = (PCSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3815 for (i = 0; i < pImage->cSymbols; i++)
3816 {
3817 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3818 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3819 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
3820 {
3821 /*
3822 * Found it! Calc the symbol address and add a reference to the module.
3823 */
3824 pReq->u.Out.pfnSymbol = (PFNRT)((uint8_t *)pImage->pvImage + paSyms[i].offSymbol);
3825 rc = supdrvLdrAddUsage(pSession, pImage);
3826 break;
3827 }
3828 }
3829 }
3830 else
3831 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
3832
3833 RTSemFastMutexRelease(pDevExt->mtxLdr);
3834 }
3835 return rc;
3836}
3837
3838
3839/**
3840 * Updates the VMMR0 entry point pointers.
3841 *
3842 * @returns IPRT status code.
3843 * @param pDevExt Device globals.
3844 * @param pSession Session data.
3845 * @param pVMMR0 VMMR0 image handle.
3846 * @param pvVMMR0EntryInt VMMR0EntryInt address.
3847 * @param pvVMMR0EntryFast VMMR0EntryFast address.
3848 * @param pvVMMR0EntryEx VMMR0EntryEx address.
3849 * @remark Caller must own the loader mutex.
3850 */
3851static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
3852{
3853 int rc = VINF_SUCCESS;
3854 LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryInt=%p\n", pvVMMR0, pvVMMR0EntryInt));
3855
3856
3857 /*
3858 * Check if not yet set.
3859 */
3860 if (!pDevExt->pvVMMR0)
3861 {
3862 pDevExt->pvVMMR0 = pvVMMR0;
3863 pDevExt->pfnVMMR0EntryInt = pvVMMR0EntryInt;
3864 pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
3865 pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
3866 }
3867 else
3868 {
3869 /*
3870 * Return failure or success depending on whether the values match or not.
3871 */
3872 if ( pDevExt->pvVMMR0 != pvVMMR0
3873 || (void *)pDevExt->pfnVMMR0EntryInt != pvVMMR0EntryInt
3874 || (void *)pDevExt->pfnVMMR0EntryFast != pvVMMR0EntryFast
3875 || (void *)pDevExt->pfnVMMR0EntryEx != pvVMMR0EntryEx)
3876 {
3877 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
3878 rc = VERR_INVALID_PARAMETER;
3879 }
3880 }
3881 return rc;
3882}
3883
3884
3885/**
3886 * Unsets the VMMR0 entry point installed by supdrvLdrSetR0EP.
3887 *
3888 * @param pDevExt Device globals.
3889 */
3890static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
3891{
3892 pDevExt->pvVMMR0 = NULL;
3893 pDevExt->pfnVMMR0EntryInt = NULL;
3894 pDevExt->pfnVMMR0EntryFast = NULL;
3895 pDevExt->pfnVMMR0EntryEx = NULL;
3896}
3897
3898
3899/**
3900 * Adds a usage reference in the specified session of an image.
3901 *
3902 * Called while owning the loader semaphore.
3903 *
3904 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
3905 * @param pSession Session in question.
3906 * @param pImage Image which the session is using.
3907 */
3908static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
3909{
3910 PSUPDRVLDRUSAGE pUsage;
3911 LogFlow(("supdrvLdrAddUsage: pImage=%p\n", pImage));
3912
3913 /*
3914 * Referenced it already?
3915 */
3916 pUsage = pSession->pLdrUsage;
3917 while (pUsage)
3918 {
3919 if (pUsage->pImage == pImage)
3920 {
3921 pUsage->cUsage++;
3922 return VINF_SUCCESS;
3923 }
3924 pUsage = pUsage->pNext;
3925 }
3926
3927 /*
3928 * Allocate new usage record.
3929 */
3930 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
3931 AssertReturn(pUsage, VERR_NO_MEMORY);
3932 pUsage->cUsage = 1;
3933 pUsage->pImage = pImage;
3934 pUsage->pNext = pSession->pLdrUsage;
3935 pSession->pLdrUsage = pUsage;
3936 return VINF_SUCCESS;
3937}
3938
3939
3940/**
3941 * Frees a load image.
3942 *
3943 * @param pDevExt Pointer to device extension.
3944 * @param pImage Pointer to the image we're gonna free.
3945 * This image must exit!
3946 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
3947 */
3948static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
3949{
3950 PSUPDRVLDRIMAGE pImagePrev;
3951 LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));
3952
3953 /* find it - arg. should've used doubly linked list. */
3954 Assert(pDevExt->pLdrImages);
3955 pImagePrev = NULL;
3956 if (pDevExt->pLdrImages != pImage)
3957 {
3958 pImagePrev = pDevExt->pLdrImages;
3959 while (pImagePrev->pNext != pImage)
3960 pImagePrev = pImagePrev->pNext;
3961 Assert(pImagePrev->pNext == pImage);
3962 }
3963
3964 /* unlink */
3965 if (pImagePrev)
3966 pImagePrev->pNext = pImage->pNext;
3967 else
3968 pDevExt->pLdrImages = pImage->pNext;
3969
3970 /* check if this is VMMR0.r0 unset its entry point pointers. */
3971 if (pDevExt->pvVMMR0 == pImage->pvImage)
3972 supdrvLdrUnsetVMMR0EPs(pDevExt);
3973
3974 /* check for objects with destructors in this image. (Shouldn't happen.) */
3975 if (pDevExt->pObjs)
3976 {
3977 unsigned cObjs = 0;
3978 PSUPDRVOBJ pObj;
3979 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3980 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
3981 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
3982 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3983 {
3984 pObj->pfnDestructor = NULL;
3985 cObjs++;
3986 }
3987 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
3988 if (cObjs)
3989 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
3990 }
3991
3992 /* call termination function if fully loaded. */
3993 if ( pImage->pfnModuleTerm
3994 && pImage->uState == SUP_IOCTL_LDR_LOAD)
3995 {
3996 LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
3997#ifdef RT_WITH_W64_UNWIND_HACK
3998 supdrvNtWrapModuleTerm(pImage->pfnModuleTerm);
3999#else
4000 pImage->pfnModuleTerm();
4001#endif
4002 }
4003
4004 /* free the image */
4005 pImage->cUsage = 0;
4006 pImage->pNext = 0;
4007 pImage->uState = SUP_IOCTL_LDR_FREE;
4008 RTMemExecFree(pImage);
4009}
4010
4011
4012/**
4013 * Implements the service call request.
4014 *
4015 * @returns VBox status code.
4016 * @param pDevExt The device extension.
4017 * @param pSession The calling session.
4018 * @param pReq The request packet, valid.
4019 */
4020static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
4021{
4022#if !defined(RT_OS_WINDOWS) || defined(DEBUG)
4023 int rc;
4024
4025 /*
4026 * Find the module first in the module referenced by the calling session.
4027 */
4028 rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
4029 if (RT_SUCCESS(rc))
4030 {
4031 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
4032 PSUPDRVLDRUSAGE pUsage;
4033
4034 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
4035 if ( pUsage->pImage->pfnServiceReqHandler
4036 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
4037 {
4038 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
4039 break;
4040 }
4041 RTSemFastMutexRelease(pDevExt->mtxLdr);
4042
4043 if (pfnServiceReqHandler)
4044 {
4045 /*
4046 * Call it.
4047 */
4048 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
4049#ifdef RT_WITH_W64_UNWIND_HACK
4050 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
4051#else
4052 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
4053#endif
4054 else
4055#ifdef RT_WITH_W64_UNWIND_HACK
4056 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation,
4057 pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
4058#else
4059 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
4060#endif
4061 }
4062 else
4063 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
4064 }
4065
4066 /* log it */
4067 if ( RT_FAILURE(rc)
4068 && rc != VERR_INTERRUPTED
4069 && rc != VERR_TIMEOUT)
4070 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4071 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4072 else
4073 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4074 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4075 return rc;
4076#else /* RT_OS_WINDOWS && !DEBUG */
4077 return VERR_NOT_IMPLEMENTED;
4078#endif /* RT_OS_WINDOWS && !DEBUG */
4079}
4080
4081
4082/**
4083 * Implements the logger settings request.
4084 *
4085 * @returns VBox status code.
4086 * @param pDevExt The device extension.
4087 * @param pSession The caller's session.
4088 * @param pReq The request.
4089 */
4090static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq)
4091{
4092 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
4093 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
4094 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
4095 PRTLOGGER pLogger = NULL;
4096 int rc;
4097
4098 /*
4099 * Some further validation.
4100 */
4101 switch (pReq->u.In.fWhat)
4102 {
4103 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
4104 case SUPLOGGERSETTINGS_WHAT_CREATE:
4105 break;
4106
4107 case SUPLOGGERSETTINGS_WHAT_DESTROY:
4108 if (*pszGroup || *pszFlags || *pszDest)
4109 return VERR_INVALID_PARAMETER;
4110 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
4111 return VERR_ACCESS_DENIED;
4112 break;
4113
4114 default:
4115 return VERR_INTERNAL_ERROR;
4116 }
4117
4118 /*
4119 * Get the logger.
4120 */
4121 switch (pReq->u.In.fWhich)
4122 {
4123 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4124 pLogger = RTLogGetDefaultInstance();
4125 break;
4126
4127 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4128 pLogger = RTLogRelDefaultInstance();
4129 break;
4130
4131 default:
4132 return VERR_INTERNAL_ERROR;
4133 }
4134
4135 /*
4136 * Do the job.
4137 */
4138 switch (pReq->u.In.fWhat)
4139 {
4140 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
4141 if (pLogger)
4142 {
4143 rc = RTLogFlags(pLogger, pszFlags);
4144 if (RT_SUCCESS(rc))
4145 rc = RTLogGroupSettings(pLogger, pszGroup);
4146 NOREF(pszDest);
4147 }
4148 else
4149 rc = VERR_NOT_FOUND;
4150 break;
4151
4152 case SUPLOGGERSETTINGS_WHAT_CREATE:
4153 {
4154 if (pLogger)
4155 rc = VERR_ALREADY_EXISTS;
4156 else
4157 {
4158 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
4159
4160 rc = RTLogCreate(&pLogger,
4161 0 /* fFlags */,
4162 pszGroup,
4163 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
4164 ? "VBOX_LOG"
4165 : "VBOX_RELEASE_LOG",
4166 RT_ELEMENTS(s_apszGroups),
4167 s_apszGroups,
4168 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
4169 NULL);
4170 if (RT_SUCCESS(rc))
4171 {
4172 rc = RTLogFlags(pLogger, pszFlags);
4173 NOREF(pszDest);
4174 if (RT_SUCCESS(rc))
4175 {
4176 switch (pReq->u.In.fWhich)
4177 {
4178 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4179 pLogger = RTLogSetDefaultInstance(pLogger);
4180 break;
4181 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4182 pLogger = RTLogRelSetDefaultInstance(pLogger);
4183 break;
4184 }
4185 }
4186 RTLogDestroy(pLogger);
4187 }
4188 }
4189 break;
4190 }
4191
4192 case SUPLOGGERSETTINGS_WHAT_DESTROY:
4193 switch (pReq->u.In.fWhich)
4194 {
4195 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4196 pLogger = RTLogSetDefaultInstance(NULL);
4197 break;
4198 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4199 pLogger = RTLogRelSetDefaultInstance(NULL);
4200 break;
4201 }
4202 rc = RTLogDestroy(pLogger);
4203 break;
4204
4205 default:
4206 {
4207 rc = VERR_INTERNAL_ERROR;
4208 break;
4209 }
4210 }
4211
4212 return rc;
4213}
4214
4215
4216/**
4217 * Gets the paging mode of the current CPU.
4218 *
4219 * @returns Paging mode, SUPPAGEINGMODE_INVALID on error.
4220 */
4221SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
4222{
4223 SUPPAGINGMODE enmMode;
4224
4225 RTR0UINTREG cr0 = ASMGetCR0();
4226 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
4227 enmMode = SUPPAGINGMODE_INVALID;
4228 else
4229 {
4230 RTR0UINTREG cr4 = ASMGetCR4();
4231 uint32_t fNXEPlusLMA = 0;
4232 if (cr4 & X86_CR4_PAE)
4233 {
4234 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
4235 if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
4236 {
4237 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
4238 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
4239 fNXEPlusLMA |= RT_BIT(0);
4240 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
4241 fNXEPlusLMA |= RT_BIT(1);
4242 }
4243 }
4244
4245 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
4246 {
4247 case 0:
4248 enmMode = SUPPAGINGMODE_32_BIT;
4249 break;
4250
4251 case X86_CR4_PGE:
4252 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
4253 break;
4254
4255 case X86_CR4_PAE:
4256 enmMode = SUPPAGINGMODE_PAE;
4257 break;
4258
4259 case X86_CR4_PAE | RT_BIT(0):
4260 enmMode = SUPPAGINGMODE_PAE_NX;
4261 break;
4262
4263 case X86_CR4_PAE | X86_CR4_PGE:
4264 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4265 break;
4266
4267 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4268 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4269 break;
4270
4271 case RT_BIT(1) | X86_CR4_PAE:
4272 enmMode = SUPPAGINGMODE_AMD64;
4273 break;
4274
4275 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
4276 enmMode = SUPPAGINGMODE_AMD64_NX;
4277 break;
4278
4279 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
4280 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
4281 break;
4282
4283 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4284 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4285 break;
4286
4287 default:
4288 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4289 enmMode = SUPPAGINGMODE_INVALID;
4290 break;
4291 }
4292 }
4293 return enmMode;
4294}
4295
4296
4297/**
4298 * Enables or disabled hardware virtualization extensions using native OS APIs.
4299 *
4300 * @returns VBox status code.
4301 * @retval VINF_SUCCESS on success.
4302 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4303 *
4304 * @param fEnable Whether to enable or disable.
4305 */
4306SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4307{
4308#ifdef RT_OS_DARWIN
4309 return supdrvOSEnableVTx(fEnable);
4310#else
4311 return VERR_NOT_SUPPORTED;
4312#endif
4313}
4314
4315
4316/**
4317 * Creates the GIP.
4318 *
4319 * @returns VBox status code.
4320 * @param pDevExt Instance data. GIP stuff may be updated.
4321 */
4322static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
4323{
4324 PSUPGLOBALINFOPAGE pGip;
4325 RTHCPHYS HCPhysGip;
4326 uint32_t u32SystemResolution;
4327 uint32_t u32Interval;
4328 int rc;
4329
4330 LogFlow(("supdrvGipCreate:\n"));
4331
4332 /* assert order */
4333 Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
4334 Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
4335 Assert(!pDevExt->pGipTimer);
4336
4337 /*
4338 * Allocate a suitable page with a default kernel mapping.
4339 */
4340 rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
4341 if (RT_FAILURE(rc))
4342 {
4343 OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
4344 return rc;
4345 }
4346 pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
4347 HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);
4348
4349#if 0 /** @todo Disabled this as we didn't used to do it before and causes unnecessary stress on laptops.
4350 * It only applies to Windows and should probably revisited later, if possible made part of the
4351 * timer code (return min granularity in RTTimerGetSystemGranularity and set it in RTTimerStart). */
4352 /*
4353 * Try bump up the system timer resolution.
4354 * The more interrupts the better...
4355 */
4356 if ( RT_SUCCESS(RTTimerRequestSystemGranularity( 488281 /* 2048 HZ */, &u32SystemResolution))
4357 || RT_SUCCESS(RTTimerRequestSystemGranularity( 500000 /* 2000 HZ */, &u32SystemResolution))
4358 || RT_SUCCESS(RTTimerRequestSystemGranularity( 976563 /* 1024 HZ */, &u32SystemResolution))
4359 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
4360 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1953125 /* 512 HZ */, &u32SystemResolution))
4361 || RT_SUCCESS(RTTimerRequestSystemGranularity( 2000000 /* 500 HZ */, &u32SystemResolution))
4362 || RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /* 256 HZ */, &u32SystemResolution))
4363 || RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /* 250 HZ */, &u32SystemResolution))
4364 || RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /* 128 HZ */, &u32SystemResolution))
4365 || RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /* 100 HZ */, &u32SystemResolution))
4366 || RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /* 64 HZ */, &u32SystemResolution))
4367 || RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /* 32 HZ */, &u32SystemResolution))
4368 )
4369 {
4370 Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
4371 pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
4372 }
4373#endif
4374
4375 /*
4376 * Find a reasonable update interval and initialize the structure.
4377 */
4378 u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
4379 while (u32Interval < 10000000 /* 10 ms */)
4380 u32Interval += u32SystemResolution;
4381
4382 supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);
4383
4384 /*
4385 * Create the timer.
4386 * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
4387 */
4388 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4389 {
4390 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL, supdrvGipAsyncTimer, pDevExt);
4391 if (rc == VERR_NOT_SUPPORTED)
4392 {
4393 OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
4394 pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
4395 }
4396 }
4397 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4398 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipSyncTimer, pDevExt);
4399 if (RT_SUCCESS(rc))
4400 {
4401 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4402 rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
4403 if (RT_SUCCESS(rc))
4404 {
4405 /*
4406 * We're good.
4407 */
4408 dprintf(("supdrvGipCreate: %ld ns interval.\n", (long)u32Interval));
4409 return VINF_SUCCESS;
4410 }
4411
4412 OSDBGPRINT(("supdrvGipCreate: failed register MP event notfication. rc=%d\n", rc));
4413 }
4414 else
4415 {
4416 OSDBGPRINT(("supdrvGipCreate: failed create GIP timer at %ld ns interval. rc=%d\n", (long)u32Interval, rc));
4417 Assert(!pDevExt->pGipTimer);
4418 }
4419 supdrvGipDestroy(pDevExt);
4420 return rc;
4421}
4422
4423
4424/**
4425 * Terminates the GIP.
4426 *
4427 * @param pDevExt Instance data. GIP stuff may be updated.
4428 */
4429static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
4430{
4431 int rc;
4432#ifdef DEBUG_DARWIN_GIP
4433 OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
4434 pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
4435 pDevExt->pGipTimer, pDevExt->GipMemObj));
4436#endif
4437
4438 /*
4439 * Invalid the GIP data.
4440 */
4441 if (pDevExt->pGip)
4442 {
4443 supdrvGipTerm(pDevExt->pGip);
4444 pDevExt->pGip = NULL;
4445 }
4446
4447 /*
4448 * Destroy the timer and free the GIP memory object.
4449 */
4450 if (pDevExt->pGipTimer)
4451 {
4452 rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
4453 pDevExt->pGipTimer = NULL;
4454 }
4455
4456 if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
4457 {
4458 rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
4459 pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
4460 }
4461
4462 /*
4463 * Finally, release the system timer resolution request if one succeeded.
4464 */
4465 if (pDevExt->u32SystemTimerGranularityGrant)
4466 {
4467 rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
4468 pDevExt->u32SystemTimerGranularityGrant = 0;
4469 }
4470}
4471
4472
4473/**
4474 * Timer callback function sync GIP mode.
4475 * @param pTimer The timer.
4476 * @param pvUser The device extension.
4477 */
4478static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4479{
4480 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4481 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4482
4483 supdrvGipUpdate(pDevExt->pGip, RTTimeSystemNanoTS());
4484
4485 ASMSetFlags(fOldFlags);
4486}
4487
4488
4489/**
4490 * Timer callback function for async GIP mode.
4491 * @param pTimer The timer.
4492 * @param pvUser The device extension.
4493 */
4494static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4495{
4496 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4497 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4498 RTCPUID idCpu = RTMpCpuId();
4499 uint64_t NanoTS = RTTimeSystemNanoTS();
4500
4501 /** @todo reset the transaction number and whatnot when iTick == 1. */
4502 if (pDevExt->idGipMaster == idCpu)
4503 supdrvGipUpdate(pDevExt->pGip, NanoTS);
4504 else
4505 supdrvGipUpdatePerCpu(pDevExt->pGip, NanoTS, ASMGetApicId());
4506
4507 ASMSetFlags(fOldFlags);
4508}
4509
4510
4511/**
4512 * Multiprocessor event notification callback.
4513 *
4514 * This is used to make sue that the GIP master gets passed on to
4515 * another CPU.
4516 *
4517 * @param enmEvent The event.
4518 * @param idCpu The cpu it applies to.
4519 * @param pvUser Pointer to the device extension.
4520 */
4521static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
4522{
4523 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4524 if (enmEvent == RTMPEVENT_OFFLINE)
4525 {
4526 RTCPUID idGipMaster;
4527 ASMAtomicReadSize(&pDevExt->idGipMaster, &idGipMaster);
4528 if (idGipMaster == idCpu)
4529 {
4530 /*
4531 * Find a new GIP master.
4532 */
4533 bool fIgnored;
4534 unsigned i;
4535 RTCPUID idNewGipMaster = NIL_RTCPUID;
4536 RTCPUSET OnlineCpus;
4537 RTMpGetOnlineSet(&OnlineCpus);
4538
4539 for (i = 0; i < RTCPUSET_MAX_CPUS; i++)
4540 {
4541 RTCPUID idCurCpu = RTMpCpuIdFromSetIndex(i);
4542 if ( RTCpuSetIsMember(&OnlineCpus, idCurCpu)
4543 && idCurCpu != idGipMaster)
4544 {
4545 idNewGipMaster = idCurCpu;
4546 break;
4547 }
4548 }
4549
4550 dprintf(("supdrvGipMpEvent: Gip master %#lx -> %#lx\n", (long)idGipMaster, (long)idNewGipMaster));
4551 ASMAtomicCmpXchgSize(&pDevExt->idGipMaster, idNewGipMaster, idGipMaster, fIgnored);
4552 NOREF(fIgnored);
4553 }
4554 }
4555}
4556
4557
4558/**
4559 * Initializes the GIP data.
4560 *
4561 * @returns IPRT status code.
4562 * @param pDevExt Pointer to the device instance data.
4563 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4564 * @param HCPhys The physical address of the GIP.
4565 * @param u64NanoTS The current nanosecond timestamp.
4566 * @param uUpdateHz The update freqence.
4567 */
4568int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
4569{
4570 unsigned i;
4571#ifdef DEBUG_DARWIN_GIP
4572 OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4573#else
4574 LogFlow(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4575#endif
4576
4577 /*
4578 * Initialize the structure.
4579 */
4580 memset(pGip, 0, PAGE_SIZE);
4581 pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
4582 pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
4583 pGip->u32Mode = supdrvGipDeterminTscMode(pDevExt);
4584 pGip->u32UpdateHz = uUpdateHz;
4585 pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
4586 pGip->u64NanoTSLastUpdateHz = u64NanoTS;
4587
4588 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4589 {
4590 pGip->aCPUs[i].u32TransactionId = 2;
4591 pGip->aCPUs[i].u64NanoTS = u64NanoTS;
4592 pGip->aCPUs[i].u64TSC = ASMReadTSC();
4593
4594 /*
4595 * We don't know the following values until we've executed updates.
4596 * So, we'll just insert very high values.
4597 */
4598 pGip->aCPUs[i].u64CpuHz = _4G + 1;
4599 pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
4600 pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
4601 pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
4602 pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
4603 pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
4604 pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
4605 pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
4606 pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
4607 pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
4608 }
4609
4610 /*
4611 * Link it to the device extension.
4612 */
4613 pDevExt->pGip = pGip;
4614 pDevExt->HCPhysGip = HCPhys;
4615 pDevExt->cGipUsers = 0;
4616
4617 return VINF_SUCCESS;
4618}
4619
4620
4621/**
4622 * Callback used by supdrvDetermineAsyncTSC to read the TSC on a CPU.
4623 *
4624 * @param idCpu Ignored.
4625 * @param pvUser1 Where to put the TSC.
4626 * @param pvUser2 Ignored.
4627 */
4628static DECLCALLBACK(void) supdrvDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
4629{
4630#if 1
4631 ASMAtomicWriteU64((uint64_t volatile *)pvUser1, ASMReadTSC());
4632#else
4633 *(uint64_t *)pvUser1 = ASMReadTSC();
4634#endif
4635}
4636
4637
/**
 * Determine if Async GIP mode is required because of TSC drift.
 *
 * When using the default/normal timer code it is essential that the time stamp counter
 * (TSC) runs never backwards, that is, a read operation to the counter should return
 * a bigger value than any previous read operation. This is guaranteed by the latest
 * AMD CPUs and by newer Intel CPUs which never enter the C2 state (P4). In any other
 * case we have to choose the asynchronous timer mode.
 *
 * @param   poffMin     Where to store the smallest observed TSC delta between
 *                      consecutive cross-CPU reads (statistics output).
 * @return  false if the time stamp counters appear to be synchronized,
 *          true otherwise (async mode required).
 */
bool VBOXCALL supdrvDetermineAsyncTsc(uint64_t *poffMin)
{
    /*
     * Just iterate all the cpus 8 times and make sure that the TSC is
     * ever increasing. We don't bother taking TSC rollover into account.
     */
    RTCPUSET CpuSet;
    int iLastCpu = RTCpuLastIndex(RTMpGetSet(&CpuSet));
    int iCpu;
    int cLoops = 8;
    bool fAsync = false;
    int rc = VINF_SUCCESS;
    uint64_t offMax = 0;
    uint64_t offMin = ~(uint64_t)0;
    uint64_t PrevTsc = ASMReadTSC();

    while (cLoops-- > 0)
    {
        for (iCpu = 0; iCpu <= iLastCpu; iCpu++)
        {
            uint64_t CurTsc;
            /* Hop to CPU iCpu and read its TSC. */
            rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(iCpu), supdrvDetermineAsyncTscWorker, &CurTsc, NULL);
            if (RT_SUCCESS(rc))
            {
                if (CurTsc <= PrevTsc)
                {
                    /* The TSC went backwards relative to the previous CPU's
                       reading -> the counters are not synchronized. */
                    fAsync = true;
                    offMin = offMax = PrevTsc - CurTsc;
                    dprintf(("supdrvDetermineAsyncTsc: iCpu=%d cLoops=%d CurTsc=%llx PrevTsc=%llx\n",
                             iCpu, cLoops, CurTsc, PrevTsc));
                    break;
                }

                /* Gather statistics (except the first time, where the delta
                   includes the setup cost and would skew the numbers). */
                if (iCpu != 0 || cLoops != 7)
                {
                    uint64_t off = CurTsc - PrevTsc;
                    if (off < offMin)
                        offMin = off;
                    if (off > offMax)
                        offMax = off;
                    dprintf2(("%d/%d: off=%llx\n", cLoops, iCpu, off));
                }

                /* Next */
                PrevTsc = CurTsc;
            }
            else if (rc == VERR_NOT_SUPPORTED)
                /* Cross-CPU calls not supported on this host; give up probing. */
                break;
            else
                /* A CPU going away mid-probe is tolerated; anything else asserts. */
                AssertMsg(rc == VERR_CPU_NOT_FOUND || rc == VERR_CPU_OFFLINE, ("%d\n", rc));
        }

        /* iCpu <= iLastCpu means the inner loop hit a 'break' (async detected
           or RTMpOnSpecific unsupported); stop the outer loop too. */
        if (iCpu <= iLastCpu)
            break;
    }

    *poffMin = offMin; /* Almost RTMpOnSpecific profiling. */
    dprintf(("supdrvDetermineAsyncTsc: returns %d; iLastCpu=%d rc=%d offMin=%llx offMax=%llx\n",
             fAsync, iLastCpu, rc, offMin, offMax));
#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_OS2) && !defined(RT_OS_WINDOWS)
    OSDBGPRINT(("vboxdrv: fAsync=%d offMin=%#lx offMax=%#lx\n", fAsync, (long)offMin, (long)offMax));
#endif
    return fAsync;
}
4716
4717
/**
 * Determine the GIP TSC mode.
 *
 * @returns The most suitable TSC mode.
 * @param   pDevExt     Pointer to the device instance data.
 */
static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt)
{
    /*
     * On SMP we're faced with two problems:
     *      (1) There might be a skew between the CPUs, so that cpu0
     *          returns a TSC that is slightly different from cpu1.
     *      (2) Power management (and other things) may cause the TSC
     *          to run at a non-constant speed, and cause the speed
     *          to be different on the cpus. This will result in (1).
     *
     * So, on SMP systems we'll have to select the ASYNC update method
     * if there are symptoms of these problems.
     */
    if (RTMpGetCount() > 1)
    {
        uint32_t uEAX, uEBX, uECX, uEDX;
        uint64_t u64DiffCoresIgnored;

        /* Permit the user and/or the OS specific bits to force async mode. */
        if (supdrvOSGetForcedAsyncTscMode(pDevExt))
            return SUPGIPMODE_ASYNC_TSC;

        /* Empirically probe for TSC differences between the cpus right now. */
        if (supdrvDetermineAsyncTsc(&u64DiffCoresIgnored))
            return SUPGIPMODE_ASYNC_TSC;

        /*
         * If the CPU supports power management and is an AMD one, we
         * won't trust it unless the TscInvariant bit is set.
         */
        /* Check for "AuthenticAMD" (vendor string in EBX/ECX/EDX of leaf 0). */
        ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
        if (    uEAX >= 1
            &&  uEBX == X86_CPUID_VENDOR_AMD_EBX
            &&  uECX == X86_CPUID_VENDOR_AMD_ECX
            &&  uEDX == X86_CPUID_VENDOR_AMD_EDX)
        {
            /* Check that the APM leaf (0x80000007) exists, then go async only
               when TscInvariant is clear AND some power-management feature
               that can change the TSC rate is advertised. */
            ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
            if (uEAX >= 0x80000007)
            {
                ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
                if (    !(uEDX & RT_BIT(8))/* TscInvariant */
                    &&  (uEDX & 0x3e))  /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
                    return SUPGIPMODE_ASYNC_TSC;
            }
        }
    }
    return SUPGIPMODE_SYNC_TSC;
}
4774
4775
4776/**
4777 * Invalidates the GIP data upon termination.
4778 *
4779 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4780 */
4781void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
4782{
4783 unsigned i;
4784 pGip->u32Magic = 0;
4785 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4786 {
4787 pGip->aCPUs[i].u64NanoTS = 0;
4788 pGip->aCPUs[i].u64TSC = 0;
4789 pGip->aCPUs[i].iTSCHistoryHead = 0;
4790 }
4791}
4792
4793
/**
 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
 * updates all the per cpu data except the transaction id.
 *
 * The caller has already started an update transaction (odd transaction id),
 * so readers can detect that the fields are being rewritten.
 *
 * @param   pGip            The GIP.
 * @param   pGipCpu         Pointer to the per cpu data.
 * @param   u64NanoTS       The current time stamp.
 */
static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS)
{
    uint64_t u64TSC;
    uint64_t u64TSCDelta;
    uint32_t u32UpdateIntervalTSC;
    uint32_t u32UpdateIntervalTSCSlack;
    unsigned iTSCHistoryHead;
    uint64_t u64CpuHz;

    /*
     * Update the NanoTS.
     */
    ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);

    /*
     * Calc TSC delta.
     */
    /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
    u64TSC = ASMReadTSC();
    u64TSCDelta = u64TSC - pGipCpu->u64TSC;
    ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);

    /* A delta that doesn't fit in 32 bits means we weren't called for a long
       time (or the TSC jumped); fall back on the previous interval value and
       count it as an error rather than poisoning the history. */
    if (u64TSCDelta >> 32)
    {
        u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
        pGipCpu->cErrors++;
    }

    /*
     * TSC History: an 8-entry ring buffer of recent TSC deltas.
     */
    Assert(RT_ELEMENTS(pGipCpu->au32TSCHistory) == 8);

    iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
    ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
    ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);

    /*
     * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
     */
    if (pGip->u32UpdateHz >= 1000)
    {
        /* High rate: average over all 8 history entries, computed in two
           halves of four entries each. */
        uint32_t u32;
        u32 = pGipCpu->au32TSCHistory[0];
        u32 += pGipCpu->au32TSCHistory[1];
        u32 += pGipCpu->au32TSCHistory[2];
        u32 += pGipCpu->au32TSCHistory[3];
        u32 >>= 2;
        u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
        u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
        u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
        u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
        u32UpdateIntervalTSC >>= 2;
        u32UpdateIntervalTSC += u32;
        u32UpdateIntervalTSC >>= 1;

        /* Value chosen for a 2GHz Athlon64 running linux 2.6.10/11, . */
        u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
    }
    else if (pGip->u32UpdateHz >= 90)
    {
        /* Medium rate: average of the current delta and the previous entry. */
        u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
        u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
        u32UpdateIntervalTSC >>= 1;

        /* value chosen on a 2GHz thinkpad running windows */
        u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
    }
    else
    {
        /* Low rate: just use the current delta as-is. */
        u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;

        /* This value hasn't been checked yet.. waiting for OS/2 and 33Hz timers.. :-) */
        u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
    }
    /* Publish the interval with some slack added to absorb jitter. */
    ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);

    /*
     * CpuHz: TSC ticks per interval times intervals per second.
     */
    u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
    ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
}
4885
4886
4887/**
4888 * Updates the GIP.
4889 *
4890 * @param pGip Pointer to the GIP.
4891 * @param u64NanoTS The current nanosecond timesamp.
4892 */
4893void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS)
4894{
4895 /*
4896 * Determin the relevant CPU data.
4897 */
4898 PSUPGIPCPU pGipCpu;
4899 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4900 pGipCpu = &pGip->aCPUs[0];
4901 else
4902 {
4903 unsigned iCpu = ASMGetApicId();
4904 if (RT_LIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
4905 return;
4906 pGipCpu = &pGip->aCPUs[iCpu];
4907 }
4908
4909 /*
4910 * Start update transaction.
4911 */
4912 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4913 {
4914 /* this can happen on win32 if we're taking to long and there are more CPUs around. shouldn't happen though. */
4915 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4916 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4917 pGipCpu->cErrors++;
4918 return;
4919 }
4920
4921 /*
4922 * Recalc the update frequency every 0x800th time.
4923 */
4924 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4925 {
4926 if (pGip->u64NanoTSLastUpdateHz)
4927 {
4928#ifdef RT_ARCH_AMD64 /** @todo fix 64-bit div here to work on x86 linux. */
4929 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4930 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
4931 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4932 {
4933 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4934 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4935 }
4936#endif
4937 }
4938 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4939 }
4940
4941 /*
4942 * Update the data.
4943 */
4944 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4945
4946 /*
4947 * Complete transaction.
4948 */
4949 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4950}
4951
4952
4953/**
4954 * Updates the per cpu GIP data for the calling cpu.
4955 *
4956 * @param pGip Pointer to the GIP.
4957 * @param u64NanoTS The current nanosecond timesamp.
4958 * @param iCpu The CPU index.
4959 */
4960void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, unsigned iCpu)
4961{
4962 PSUPGIPCPU pGipCpu;
4963
4964 if (RT_LIKELY(iCpu < RT_ELEMENTS(pGip->aCPUs)))
4965 {
4966 pGipCpu = &pGip->aCPUs[iCpu];
4967
4968 /*
4969 * Start update transaction.
4970 */
4971 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4972 {
4973 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4974 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4975 pGipCpu->cErrors++;
4976 return;
4977 }
4978
4979 /*
4980 * Update the data.
4981 */
4982 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4983
4984 /*
4985 * Complete transaction.
4986 */
4987 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4988 }
4989}
4990
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette