VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.c@ 19167

Last change on this file since 19167 was 18850, checked in by vboxsync, 16 years ago

SUPDrv.c: 64-bit darwin selectors.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 188.4 KB
Line 
1/* $Revision: 18850 $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#define LOG_GROUP LOG_GROUP_SUP_DRV
35#include "SUPDrvInternal.h"
36#ifndef PAGE_SHIFT
37# include <iprt/param.h>
38#endif
39#include <iprt/alloc.h>
40#include <iprt/semaphore.h>
41#include <iprt/spinlock.h>
42#include <iprt/thread.h>
43#include <iprt/process.h>
44#include <iprt/mp.h>
45#include <iprt/power.h>
46#include <iprt/cpuset.h>
47#include <iprt/uuid.h>
48#include <VBox/param.h>
49#include <VBox/log.h>
50#include <VBox/err.h>
51#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
52# include <iprt/crc32.h>
53# include <iprt/net.h>
54#endif
55/* VBox/x86.h not compatible with the Linux kernel sources */
56#ifdef RT_OS_LINUX
57# define X86_CPUID_VENDOR_AMD_EBX 0x68747541
58# define X86_CPUID_VENDOR_AMD_ECX 0x444d4163
59# define X86_CPUID_VENDOR_AMD_EDX 0x69746e65
60#else
61# include <VBox/x86.h>
62#endif
63
64/*
65 * Logging assignments:
66 * Log - useful stuff, like failures.
67 * LogFlow - program flow, except the really noisy bits.
68 * Log2 - Cleanup.
69 * Log3 - Loader flow noise.
70 * Log4 - Call VMMR0 flow noise.
71 * Log5 - Native yet-to-be-defined noise.
72 * Log6 - Native ioctl flow noise.
73 *
74 * Logging requires BUILD_TYPE=debug and possibly changes to the logger
75 * instantiation in log-vbox.c(pp).
76 */
77
78
79/*******************************************************************************
80* Defined Constants And Macros *
81*******************************************************************************/
82/* from x86.h - clashes with linux thus this duplication */
83#undef X86_CR0_PG
84#define X86_CR0_PG RT_BIT(31)
85#undef X86_CR0_PE
86#define X86_CR0_PE RT_BIT(0)
87#undef X86_CPUID_AMD_FEATURE_EDX_NX
88#define X86_CPUID_AMD_FEATURE_EDX_NX RT_BIT(20)
89#undef MSR_K6_EFER
90#define MSR_K6_EFER 0xc0000080
91#undef MSR_K6_EFER_NXE
92#define MSR_K6_EFER_NXE RT_BIT(11)
93#undef MSR_K6_EFER_LMA
94#define MSR_K6_EFER_LMA RT_BIT(10)
95#undef X86_CR4_PGE
96#define X86_CR4_PGE RT_BIT(7)
97#undef X86_CR4_PAE
98#define X86_CR4_PAE RT_BIT(5)
99#undef X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
100#define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE RT_BIT(29)
101
102
103/** The frequency by which we recalculate the u32UpdateHz and
104 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
105#define GIP_UPDATEHZ_RECALC_FREQ 0x800
106
107/**
108 * Validates a session pointer.
109 *
110 * @returns true/false accordingly.
111 * @param pSession The session.
112 */
113#define SUP_IS_SESSION_VALID(pSession) \
114 ( VALID_PTR(pSession) \
115 && pSession->u32Cookie == BIRD_INV)
116
117/** @def VBOX_SVN_REV
118 * The makefile should define this if it can. */
119#ifndef VBOX_SVN_REV
120# define VBOX_SVN_REV 0
121#endif
122
123/*******************************************************************************
124* Internal Functions *
125*******************************************************************************/
126static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
127static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
128static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
129static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
130static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
131static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
132static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
133static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
134static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
135static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
136static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
137static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
138static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq);
139static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt);
140#ifdef RT_OS_WINDOWS
141static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
142static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3);
143#endif /* RT_OS_WINDOWS */
144static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
145static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
146static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
147static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
148static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
149
150#ifdef RT_WITH_W64_UNWIND_HACK
151DECLASM(int) supdrvNtWrapVMMR0EntryEx(PFNRT pfnVMMR0EntryEx, PVM pVM, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession);
152DECLASM(int) supdrvNtWrapVMMR0EntryFast(PFNRT pfnVMMR0EntryFast, PVM pVM, unsigned idCpu, unsigned uOperation);
153DECLASM(void) supdrvNtWrapObjDestructor(PFNRT pfnDestruction, void *pvObj, void *pvUser1, void *pvUser2);
154DECLASM(void *) supdrvNtWrapQueryFactoryInterface(PFNRT pfnQueryFactoryInterface, struct SUPDRVFACTORY const *pSupDrvFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid);
155DECLASM(int) supdrvNtWrapModuleInit(PFNRT pfnModuleInit);
156DECLASM(void) supdrvNtWrapModuleTerm(PFNRT pfnModuleTerm);
157DECLASM(int) supdrvNtWrapServiceReqHandler(PFNRT pfnServiceReqHandler, PSUPDRVSESSION pSession, uint32_t uOperation, uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr);
158
159DECLASM(int) UNWIND_WRAP(SUPR0ComponentRegisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
160DECLASM(int) UNWIND_WRAP(SUPR0ComponentDeregisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
161DECLASM(int) UNWIND_WRAP(SUPR0ComponentQueryFactory)(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf);
162DECLASM(void *) UNWIND_WRAP(SUPR0ObjRegister)(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2);
163DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRef)(void *pvObj, PSUPDRVSESSION pSession);
164DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRefEx)(void *pvObj, PSUPDRVSESSION pSession, bool fNoPreempt);
165DECLASM(int) UNWIND_WRAP(SUPR0ObjRelease)(void *pvObj, PSUPDRVSESSION pSession);
166DECLASM(int) UNWIND_WRAP(SUPR0ObjVerifyAccess)(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName);
167DECLASM(int) UNWIND_WRAP(SUPR0LockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
168DECLASM(int) UNWIND_WRAP(SUPR0UnlockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
169DECLASM(int) UNWIND_WRAP(SUPR0ContAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys);
170DECLASM(int) UNWIND_WRAP(SUPR0ContFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
171DECLASM(int) UNWIND_WRAP(SUPR0LowAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages);
172DECLASM(int) UNWIND_WRAP(SUPR0LowFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
173DECLASM(int) UNWIND_WRAP(SUPR0MemAlloc)(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3);
174DECLASM(int) UNWIND_WRAP(SUPR0MemGetPhys)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages);
175DECLASM(int) UNWIND_WRAP(SUPR0MemFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
176DECLASM(int) UNWIND_WRAP(SUPR0PageAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages);
177DECLASM(int) UNWIND_WRAP(SUPR0PageFree)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
178//DECLASM(int) UNWIND_WRAP(SUPR0Printf)(const char *pszFormat, ...);
179DECLASM(SUPPAGINGMODE) UNWIND_WRAP(SUPR0GetPagingMode)(void);
180DECLASM(void *) UNWIND_WRAP(RTMemAlloc)(size_t cb) RT_NO_THROW;
181DECLASM(void *) UNWIND_WRAP(RTMemAllocZ)(size_t cb) RT_NO_THROW;
182DECLASM(void) UNWIND_WRAP(RTMemFree)(void *pv) RT_NO_THROW;
183DECLASM(void *) UNWIND_WRAP(RTMemDup)(const void *pvSrc, size_t cb) RT_NO_THROW;
184DECLASM(void *) UNWIND_WRAP(RTMemDupEx)(const void *pvSrc, size_t cbSrc, size_t cbExtra) RT_NO_THROW;
185DECLASM(void *) UNWIND_WRAP(RTMemRealloc)(void *pvOld, size_t cbNew) RT_NO_THROW;
186DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocLow)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
187DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPage)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
188DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhys)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
189DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhysNC)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
190DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocCont)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
191DECLASM(int) UNWIND_WRAP(RTR0MemObjEnterPhys)(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb);
192DECLASM(int) UNWIND_WRAP(RTR0MemObjLockUser)(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process);
193DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernel)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt);
194DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernelEx)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt, size_t offSub, size_t cbSub);
195DECLASM(int) UNWIND_WRAP(RTR0MemObjMapUser)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process);
196/*DECLASM(void *) UNWIND_WRAP(RTR0MemObjAddress)(RTR0MEMOBJ MemObj); - not necessary */
197/*DECLASM(RTR3PTR) UNWIND_WRAP(RTR0MemObjAddressR3)(RTR0MEMOBJ MemObj); - not necessary */
198/*DECLASM(size_t) UNWIND_WRAP(RTR0MemObjSize)(RTR0MEMOBJ MemObj); - not necessary */
199/*DECLASM(bool) UNWIND_WRAP(RTR0MemObjIsMapping)(RTR0MEMOBJ MemObj); - not necessary */
200/*DECLASM(RTHCPHYS) UNWIND_WRAP(RTR0MemObjGetPagePhysAddr)(RTR0MEMOBJ MemObj, size_t iPage); - not necessary */
201DECLASM(int) UNWIND_WRAP(RTR0MemObjFree)(RTR0MEMOBJ MemObj, bool fFreeMappings);
202/* RTProcSelf - not necessary */
203/* RTR0ProcHandleSelf - not necessary */
204DECLASM(int) UNWIND_WRAP(RTSemFastMutexCreate)(PRTSEMFASTMUTEX pMutexSem);
205DECLASM(int) UNWIND_WRAP(RTSemFastMutexDestroy)(RTSEMFASTMUTEX MutexSem);
206DECLASM(int) UNWIND_WRAP(RTSemFastMutexRequest)(RTSEMFASTMUTEX MutexSem);
207DECLASM(int) UNWIND_WRAP(RTSemFastMutexRelease)(RTSEMFASTMUTEX MutexSem);
208DECLASM(int) UNWIND_WRAP(RTSemEventCreate)(PRTSEMEVENT pEventSem);
209DECLASM(int) UNWIND_WRAP(RTSemEventSignal)(RTSEMEVENT EventSem);
210DECLASM(int) UNWIND_WRAP(RTSemEventWait)(RTSEMEVENT EventSem, unsigned cMillies);
211DECLASM(int) UNWIND_WRAP(RTSemEventWaitNoResume)(RTSEMEVENT EventSem, unsigned cMillies);
212DECLASM(int) UNWIND_WRAP(RTSemEventDestroy)(RTSEMEVENT EventSem);
213DECLASM(int) UNWIND_WRAP(RTSemEventMultiCreate)(PRTSEMEVENTMULTI pEventMultiSem);
214DECLASM(int) UNWIND_WRAP(RTSemEventMultiSignal)(RTSEMEVENTMULTI EventMultiSem);
215DECLASM(int) UNWIND_WRAP(RTSemEventMultiReset)(RTSEMEVENTMULTI EventMultiSem);
216DECLASM(int) UNWIND_WRAP(RTSemEventMultiWait)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
217DECLASM(int) UNWIND_WRAP(RTSemEventMultiWaitNoResume)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
218DECLASM(int) UNWIND_WRAP(RTSemEventMultiDestroy)(RTSEMEVENTMULTI EventMultiSem);
219DECLASM(int) UNWIND_WRAP(RTSpinlockCreate)(PRTSPINLOCK pSpinlock);
220DECLASM(int) UNWIND_WRAP(RTSpinlockDestroy)(RTSPINLOCK Spinlock);
221DECLASM(void) UNWIND_WRAP(RTSpinlockAcquire)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
222DECLASM(void) UNWIND_WRAP(RTSpinlockRelease)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
223DECLASM(void) UNWIND_WRAP(RTSpinlockAcquireNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
224DECLASM(void) UNWIND_WRAP(RTSpinlockReleaseNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
225/* RTTimeNanoTS - not necessary */
226/* RTTimeMilliTS - not necessary */
227/* RTTimeSystemNanoTS - not necessary */
228/* RTTimeSystemMilliTS - not necessary */
229/* RTThreadNativeSelf - not necessary */
230DECLASM(int) UNWIND_WRAP(RTThreadSleep)(unsigned cMillies);
231DECLASM(bool) UNWIND_WRAP(RTThreadYield)(void);
232#if 0
233/* RTThreadSelf - not necessary */
234DECLASM(int) UNWIND_WRAP(RTThreadCreate)(PRTTHREAD pThread, PFNRTTHREAD pfnThread, void *pvUser, size_t cbStack,
235 RTTHREADTYPE enmType, unsigned fFlags, const char *pszName);
236DECLASM(RTNATIVETHREAD) UNWIND_WRAP(RTThreadGetNative)(RTTHREAD Thread);
237DECLASM(int) UNWIND_WRAP(RTThreadWait)(RTTHREAD Thread, unsigned cMillies, int *prc);
238DECLASM(int) UNWIND_WRAP(RTThreadWaitNoResume)(RTTHREAD Thread, unsigned cMillies, int *prc);
239DECLASM(const char *) UNWIND_WRAP(RTThreadGetName)(RTTHREAD Thread);
240DECLASM(const char *) UNWIND_WRAP(RTThreadSelfName)(void);
241DECLASM(RTTHREADTYPE) UNWIND_WRAP(RTThreadGetType)(RTTHREAD Thread);
242DECLASM(int) UNWIND_WRAP(RTThreadUserSignal)(RTTHREAD Thread);
243DECLASM(int) UNWIND_WRAP(RTThreadUserReset)(RTTHREAD Thread);
244DECLASM(int) UNWIND_WRAP(RTThreadUserWait)(RTTHREAD Thread, unsigned cMillies);
245DECLASM(int) UNWIND_WRAP(RTThreadUserWaitNoResume)(RTTHREAD Thread, unsigned cMillies);
246#endif
247/* RTLogDefaultInstance - a bit of a gamble, but we do not want the overhead! */
248/* RTMpCpuId - not necessary */
249/* RTMpCpuIdFromSetIndex - not necessary */
250/* RTMpCpuIdToSetIndex - not necessary */
251/* RTMpIsCpuPossible - not necessary */
252/* RTMpGetCount - not necessary */
253/* RTMpGetMaxCpuId - not necessary */
254/* RTMpGetOnlineCount - not necessary */
255/* RTMpGetOnlineSet - not necessary */
256/* RTMpGetSet - not necessary */
257/* RTMpIsCpuOnline - not necessary */
258DECLASM(int) UNWIND_WRAP(RTMpOnAll)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
259DECLASM(int) UNWIND_WRAP(RTMpOnOthers)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
260DECLASM(int) UNWIND_WRAP(RTMpOnSpecific)(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
261DECLASM(int) UNWIND_WRAP(RTMpIsCpuWorkPending)(void);
262/* RTLogRelDefaultInstance - not necessary. */
263DECLASM(int) UNWIND_WRAP(RTLogSetDefaultInstanceThread)(PRTLOGGER pLogger, uintptr_t uKey);
264/* RTLogLogger - can't wrap this buster. */
265/* RTLogLoggerEx - can't wrap this buster. */
266DECLASM(void) UNWIND_WRAP(RTLogLoggerExV)(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args);
267/* RTLogPrintf - can't wrap this buster. */ /** @todo provide va_list log wrappers in RuntimeR0. */
268DECLASM(void) UNWIND_WRAP(RTLogPrintfV)(const char *pszFormat, va_list args);
269DECLASM(void) UNWIND_WRAP(AssertMsg1)(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction);
270/* AssertMsg2 - can't wrap this buster. */
271#endif /* RT_WITH_W64_UNWIND_HACK */
272
273
274/*******************************************************************************
275* Global Variables *
276*******************************************************************************/
/**
 * Array of the R0 SUP API.
 *
 * This is the symbol table exported to ring-0 client modules (VMMR0 &c.)
 * by the loader code.  NOTE: the first ten entries are placeholders whose
 * addresses are patched at runtime by supdrvInitDevExt(); their order must
 * not change.
 */
static SUPFUNC g_aFunctions[] =
{
    /* name                                     function */
    /* Entries with absolute addresses determined at runtime, fixup
       code makes ugly ASSUMPTIONS about the order here: */
    { "SUPR0AbsIs64bit",                        (void *)0 },
    { "SUPR0Abs64bitKernelCS",                  (void *)0 },
    { "SUPR0Abs64bitKernelSS",                  (void *)0 },
    { "SUPR0Abs64bitKernelDS",                  (void *)0 },
    { "SUPR0AbsKernelCS",                       (void *)0 },
    { "SUPR0AbsKernelSS",                       (void *)0 },
    { "SUPR0AbsKernelDS",                       (void *)0 },
    { "SUPR0AbsKernelES",                       (void *)0 },
    { "SUPR0AbsKernelFS",                       (void *)0 },
    { "SUPR0AbsKernelGS",                       (void *)0 },
    /* Normal function pointers: */
    { "SUPR0ComponentRegisterFactory",          (void *)UNWIND_WRAP(SUPR0ComponentRegisterFactory) },
    { "SUPR0ComponentDeregisterFactory",        (void *)UNWIND_WRAP(SUPR0ComponentDeregisterFactory) },
    { "SUPR0ComponentQueryFactory",             (void *)UNWIND_WRAP(SUPR0ComponentQueryFactory) },
    { "SUPR0ObjRegister",                       (void *)UNWIND_WRAP(SUPR0ObjRegister) },
    { "SUPR0ObjAddRef",                         (void *)UNWIND_WRAP(SUPR0ObjAddRef) },
    { "SUPR0ObjAddRefEx",                       (void *)UNWIND_WRAP(SUPR0ObjAddRefEx) },
    { "SUPR0ObjRelease",                        (void *)UNWIND_WRAP(SUPR0ObjRelease) },
    { "SUPR0ObjVerifyAccess",                   (void *)UNWIND_WRAP(SUPR0ObjVerifyAccess) },
    { "SUPR0LockMem",                           (void *)UNWIND_WRAP(SUPR0LockMem) },
    { "SUPR0UnlockMem",                         (void *)UNWIND_WRAP(SUPR0UnlockMem) },
    { "SUPR0ContAlloc",                         (void *)UNWIND_WRAP(SUPR0ContAlloc) },
    { "SUPR0ContFree",                          (void *)UNWIND_WRAP(SUPR0ContFree) },
    { "SUPR0LowAlloc",                          (void *)UNWIND_WRAP(SUPR0LowAlloc) },
    { "SUPR0LowFree",                           (void *)UNWIND_WRAP(SUPR0LowFree) },
    { "SUPR0MemAlloc",                          (void *)UNWIND_WRAP(SUPR0MemAlloc) },
    { "SUPR0MemGetPhys",                        (void *)UNWIND_WRAP(SUPR0MemGetPhys) },
    { "SUPR0MemFree",                           (void *)UNWIND_WRAP(SUPR0MemFree) },
    { "SUPR0PageAlloc",                         (void *)UNWIND_WRAP(SUPR0PageAlloc) },
    { "SUPR0PageFree",                          (void *)UNWIND_WRAP(SUPR0PageFree) },
    { "SUPR0Printf",                            (void *)SUPR0Printf }, /** @todo needs wrapping? */
    { "SUPR0GetPagingMode",                     (void *)UNWIND_WRAP(SUPR0GetPagingMode) },
    { "SUPR0EnableVTx",                         (void *)SUPR0EnableVTx },
    { "RTMemAlloc",                             (void *)UNWIND_WRAP(RTMemAlloc) },
    { "RTMemAllocZ",                            (void *)UNWIND_WRAP(RTMemAllocZ) },
    { "RTMemFree",                              (void *)UNWIND_WRAP(RTMemFree) },
    /*{ "RTMemDup",                               (void *)UNWIND_WRAP(RTMemDup) },
    { "RTMemDupEx",                             (void *)UNWIND_WRAP(RTMemDupEx) },*/
    { "RTMemRealloc",                           (void *)UNWIND_WRAP(RTMemRealloc) },
    { "RTR0MemObjAllocLow",                     (void *)UNWIND_WRAP(RTR0MemObjAllocLow) },
    { "RTR0MemObjAllocPage",                    (void *)UNWIND_WRAP(RTR0MemObjAllocPage) },
    { "RTR0MemObjAllocPhys",                    (void *)UNWIND_WRAP(RTR0MemObjAllocPhys) },
    { "RTR0MemObjAllocPhysNC",                  (void *)UNWIND_WRAP(RTR0MemObjAllocPhysNC) },
    { "RTR0MemObjAllocCont",                    (void *)UNWIND_WRAP(RTR0MemObjAllocCont) },
    { "RTR0MemObjEnterPhys",                    (void *)UNWIND_WRAP(RTR0MemObjEnterPhys) },
    { "RTR0MemObjLockUser",                     (void *)UNWIND_WRAP(RTR0MemObjLockUser) },
    { "RTR0MemObjMapKernel",                    (void *)UNWIND_WRAP(RTR0MemObjMapKernel) },
    { "RTR0MemObjMapKernelEx",                  (void *)UNWIND_WRAP(RTR0MemObjMapKernelEx) },
    { "RTR0MemObjMapUser",                      (void *)UNWIND_WRAP(RTR0MemObjMapUser) },
    /* The following getters are pure reads and need no unwind wrapping: */
    { "RTR0MemObjAddress",                      (void *)RTR0MemObjAddress },
    { "RTR0MemObjAddressR3",                    (void *)RTR0MemObjAddressR3 },
    { "RTR0MemObjSize",                         (void *)RTR0MemObjSize },
    { "RTR0MemObjIsMapping",                    (void *)RTR0MemObjIsMapping },
    { "RTR0MemObjGetPagePhysAddr",              (void *)RTR0MemObjGetPagePhysAddr },
    { "RTR0MemObjFree",                         (void *)UNWIND_WRAP(RTR0MemObjFree) },
/* These don't work yet on linux - use fast mutexes!
    { "RTSemMutexCreate",                       (void *)RTSemMutexCreate },
    { "RTSemMutexRequest",                      (void *)RTSemMutexRequest },
    { "RTSemMutexRelease",                      (void *)RTSemMutexRelease },
    { "RTSemMutexDestroy",                      (void *)RTSemMutexDestroy },
*/
    { "RTProcSelf",                             (void *)RTProcSelf },
    { "RTR0ProcHandleSelf",                     (void *)RTR0ProcHandleSelf },
    { "RTSemFastMutexCreate",                   (void *)UNWIND_WRAP(RTSemFastMutexCreate) },
    { "RTSemFastMutexDestroy",                  (void *)UNWIND_WRAP(RTSemFastMutexDestroy) },
    { "RTSemFastMutexRequest",                  (void *)UNWIND_WRAP(RTSemFastMutexRequest) },
    { "RTSemFastMutexRelease",                  (void *)UNWIND_WRAP(RTSemFastMutexRelease) },
    { "RTSemEventCreate",                       (void *)UNWIND_WRAP(RTSemEventCreate) },
    { "RTSemEventSignal",                       (void *)UNWIND_WRAP(RTSemEventSignal) },
    { "RTSemEventWait",                         (void *)UNWIND_WRAP(RTSemEventWait) },
    { "RTSemEventWaitNoResume",                 (void *)UNWIND_WRAP(RTSemEventWaitNoResume) },
    { "RTSemEventDestroy",                      (void *)UNWIND_WRAP(RTSemEventDestroy) },
    { "RTSemEventMultiCreate",                  (void *)UNWIND_WRAP(RTSemEventMultiCreate) },
    { "RTSemEventMultiSignal",                  (void *)UNWIND_WRAP(RTSemEventMultiSignal) },
    { "RTSemEventMultiReset",                   (void *)UNWIND_WRAP(RTSemEventMultiReset) },
    { "RTSemEventMultiWait",                    (void *)UNWIND_WRAP(RTSemEventMultiWait) },
    { "RTSemEventMultiWaitNoResume",            (void *)UNWIND_WRAP(RTSemEventMultiWaitNoResume) },
    { "RTSemEventMultiDestroy",                 (void *)UNWIND_WRAP(RTSemEventMultiDestroy) },
    { "RTSpinlockCreate",                       (void *)UNWIND_WRAP(RTSpinlockCreate) },
    { "RTSpinlockDestroy",                      (void *)UNWIND_WRAP(RTSpinlockDestroy) },
    { "RTSpinlockAcquire",                      (void *)UNWIND_WRAP(RTSpinlockAcquire) },
    { "RTSpinlockRelease",                      (void *)UNWIND_WRAP(RTSpinlockRelease) },
    { "RTSpinlockAcquireNoInts",                (void *)UNWIND_WRAP(RTSpinlockAcquireNoInts) },
    { "RTSpinlockReleaseNoInts",                (void *)UNWIND_WRAP(RTSpinlockReleaseNoInts) },
    { "RTTimeNanoTS",                           (void *)RTTimeNanoTS },
    /* NOTE: the "Millie" spellings below are historic exported names; do not "fix" them. */
    { "RTTimeMillieTS",                         (void *)RTTimeMilliTS },
    { "RTTimeSystemNanoTS",                     (void *)RTTimeSystemNanoTS },
    { "RTTimeSystemMillieTS",                   (void *)RTTimeSystemMilliTS },
    { "RTThreadNativeSelf",                     (void *)RTThreadNativeSelf },
    { "RTThreadSleep",                          (void *)UNWIND_WRAP(RTThreadSleep) },
    { "RTThreadYield",                          (void *)UNWIND_WRAP(RTThreadYield) },
#if 0 /* Thread APIs, Part 2. */
    { "RTThreadSelf",                           (void *)UNWIND_WRAP(RTThreadSelf) },
    { "RTThreadCreate",                         (void *)UNWIND_WRAP(RTThreadCreate) }, /** @todo need to wrap the callback */
    { "RTThreadGetNative",                      (void *)UNWIND_WRAP(RTThreadGetNative) },
    { "RTThreadWait",                           (void *)UNWIND_WRAP(RTThreadWait) },
    { "RTThreadWaitNoResume",                   (void *)UNWIND_WRAP(RTThreadWaitNoResume) },
    { "RTThreadGetName",                        (void *)UNWIND_WRAP(RTThreadGetName) },
    { "RTThreadSelfName",                       (void *)UNWIND_WRAP(RTThreadSelfName) },
    { "RTThreadGetType",                        (void *)UNWIND_WRAP(RTThreadGetType) },
    { "RTThreadUserSignal",                     (void *)UNWIND_WRAP(RTThreadUserSignal) },
    { "RTThreadUserReset",                      (void *)UNWIND_WRAP(RTThreadUserReset) },
    { "RTThreadUserWait",                       (void *)UNWIND_WRAP(RTThreadUserWait) },
    { "RTThreadUserWaitNoResume",               (void *)UNWIND_WRAP(RTThreadUserWaitNoResume) },
#endif
    { "RTLogDefaultInstance",                   (void *)RTLogDefaultInstance },
    { "RTMpCpuId",                              (void *)RTMpCpuId },
    { "RTMpCpuIdFromSetIndex",                  (void *)RTMpCpuIdFromSetIndex },
    { "RTMpCpuIdToSetIndex",                    (void *)RTMpCpuIdToSetIndex },
    { "RTMpIsCpuPossible",                      (void *)RTMpIsCpuPossible },
    { "RTMpGetCount",                           (void *)RTMpGetCount },
    { "RTMpGetMaxCpuId",                        (void *)RTMpGetMaxCpuId },
    { "RTMpGetOnlineCount",                     (void *)RTMpGetOnlineCount },
    { "RTMpGetOnlineSet",                       (void *)RTMpGetOnlineSet },
    { "RTMpGetSet",                             (void *)RTMpGetSet },
    { "RTMpIsCpuOnline",                        (void *)RTMpIsCpuOnline },
    { "RTMpIsCpuWorkPending",                   (void *)UNWIND_WRAP(RTMpIsCpuWorkPending) },
    { "RTMpOnAll",                              (void *)UNWIND_WRAP(RTMpOnAll) },
    { "RTMpOnOthers",                           (void *)UNWIND_WRAP(RTMpOnOthers) },
    { "RTMpOnSpecific",                         (void *)UNWIND_WRAP(RTMpOnSpecific) },
    { "RTPowerNotificationRegister",            (void *)RTPowerNotificationRegister },
    { "RTPowerNotificationDeregister",          (void *)RTPowerNotificationDeregister },
    { "RTLogRelDefaultInstance",                (void *)RTLogRelDefaultInstance },
    { "RTLogSetDefaultInstanceThread",          (void *)UNWIND_WRAP(RTLogSetDefaultInstanceThread) },
    { "RTLogLogger",                            (void *)RTLogLogger }, /** @todo remove this */
    { "RTLogLoggerEx",                          (void *)RTLogLoggerEx }, /** @todo remove this */
    { "RTLogLoggerExV",                         (void *)UNWIND_WRAP(RTLogLoggerExV) },
    { "RTLogPrintf",                            (void *)RTLogPrintf }, /** @todo remove this */
    { "RTLogPrintfV",                           (void *)UNWIND_WRAP(RTLogPrintfV) },
    { "AssertMsg1",                             (void *)UNWIND_WRAP(AssertMsg1) },
    { "AssertMsg2",                             (void *)AssertMsg2 }, /** @todo replace this by RTAssertMsg2V */
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
    { "RTR0AssertPanicSystem",                  (void *)RTR0AssertPanicSystem },
#endif
#if defined(RT_OS_DARWIN)
    { "RTAssertMsg1",                           (void *)RTAssertMsg1 },
    { "RTAssertMsg2",                           (void *)RTAssertMsg2 },
    { "RTAssertMsg2V",                          (void *)RTAssertMsg2V },
#endif
};
425
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on darwin.
 *
 * The array is never called through; merely taking the addresses forces
 * the linker to pull these IPRT objects into the driver binary.
 */
PFNRT g_apfnVBoxDrvIPRTDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTErrConvertFromErrno,
    (PFNRT)RTNetIPv4IsHdrValid,
    (PFNRT)RTNetIPv4TCPChecksum,
    (PFNRT)RTNetIPv4UDPChecksum,
    (PFNRT)RTUuidCompare,
    (PFNRT)RTUuidCompareStr,
    (PFNRT)RTUuidFromStr,
    NULL
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
444
445
446/**
447 * Initializes the device extentsion structure.
448 *
449 * @returns IPRT status code.
450 * @param pDevExt The device extension to initialize.
451 */
452int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
453{
454 int rc;
455
456#ifdef SUPDRV_WITH_RELEASE_LOGGER
457 /*
458 * Create the release log.
459 */
460 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
461 PRTLOGGER pRelLogger;
462 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
463 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
464 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
465 if (RT_SUCCESS(rc))
466 RTLogRelSetDefaultInstance(pRelLogger);
467#endif
468
469 /*
470 * Initialize it.
471 */
472 memset(pDevExt, 0, sizeof(*pDevExt));
473 rc = RTSpinlockCreate(&pDevExt->Spinlock);
474 if (!rc)
475 {
476 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
477 if (!rc)
478 {
479 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
480 if (!rc)
481 {
482 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
483 if (!rc)
484 {
485 rc = supdrvGipCreate(pDevExt);
486 if (RT_SUCCESS(rc))
487 {
488 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
489
490 /*
491 * Fixup the absolute symbols.
492 *
493 * Because of the table indexing assumptions we'll have a little #ifdef orgy
494 * here rather than distributing this to OS specific files. At least for now.
495 */
496#ifdef RT_OS_DARWIN
497# if ARCH_BITS == 32
498 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
499 {
500 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
501 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
502 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
503 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
504 }
505 else
506 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[4].pfn = (void *)0;
507 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
508 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
509 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
510 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
511 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
512 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
513# else /* 64-bit darwin: */
514 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
515 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
516 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
517 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
518 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
519 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
520 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
521 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
522 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
523 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
524
525# endif
526#else /* !RT_OS_DARWIN */
527# if ARCH_BITS == 64
528 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
529 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
530 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
531 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
532# else
533 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[4].pfn = (void *)0;
534# endif
535 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
536 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
537 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
538 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
539 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
540 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
541#endif /* !RT_OS_DARWIN */
542 return VINF_SUCCESS;
543 }
544
545 RTSemFastMutexDestroy(pDevExt->mtxGip);
546 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
547 }
548 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
549 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
550 }
551 RTSemFastMutexDestroy(pDevExt->mtxLdr);
552 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
553 }
554 RTSpinlockDestroy(pDevExt->Spinlock);
555 pDevExt->Spinlock = NIL_RTSPINLOCK;
556 }
557#ifdef SUPDRV_WITH_RELEASE_LOGGER
558 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
559 RTLogDestroy(RTLogSetDefaultInstance(NULL));
560#endif
561
562 return rc;
563}
564
565
566/**
567 * Delete the device extension (e.g. cleanup members).
568 *
569 * @param pDevExt The device extension to delete.
570 */
571void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
572{
573 PSUPDRVOBJ pObj;
574 PSUPDRVUSAGE pUsage;
575
576 /*
577 * Kill mutexes and spinlocks.
578 */
579 RTSemFastMutexDestroy(pDevExt->mtxGip);
580 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
581 RTSemFastMutexDestroy(pDevExt->mtxLdr);
582 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
583 RTSpinlockDestroy(pDevExt->Spinlock);
584 pDevExt->Spinlock = NIL_RTSPINLOCK;
585 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
586 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
587
588 /*
589 * Free lists.
590 */
591 /* objects. */
592 pObj = pDevExt->pObjs;
593#if !defined(DEBUG_bird) || !defined(RT_OS_LINUX) /* breaks unloading, temporary, remove me! */
594 Assert(!pObj); /* (can trigger on forced unloads) */
595#endif
596 pDevExt->pObjs = NULL;
597 while (pObj)
598 {
599 void *pvFree = pObj;
600 pObj = pObj->pNext;
601 RTMemFree(pvFree);
602 }
603
604 /* usage records. */
605 pUsage = pDevExt->pUsageFree;
606 pDevExt->pUsageFree = NULL;
607 while (pUsage)
608 {
609 void *pvFree = pUsage;
610 pUsage = pUsage->pNext;
611 RTMemFree(pvFree);
612 }
613
614 /* kill the GIP. */
615 supdrvGipDestroy(pDevExt);
616
617#ifdef SUPDRV_WITH_RELEASE_LOGGER
618 /* destroy the loggers. */
619 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
620 RTLogDestroy(RTLogSetDefaultInstance(NULL));
621#endif
622}
623
624
625/**
626 * Create session.
627 *
628 * @returns IPRT status code.
629 * @param pDevExt Device extension.
630 * @param fUser Flag indicating whether this is a user or kernel session.
631 * @param ppSession Where to store the pointer to the session data.
632 */
633int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, PSUPDRVSESSION *ppSession)
634{
635 /*
636 * Allocate memory for the session data.
637 */
638 int rc = VERR_NO_MEMORY;
639 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
640 if (pSession)
641 {
642 /* Initialize session data. */
643 rc = RTSpinlockCreate(&pSession->Spinlock);
644 if (!rc)
645 {
646 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
647 pSession->pDevExt = pDevExt;
648 pSession->u32Cookie = BIRD_INV;
649 /*pSession->pLdrUsage = NULL;
650 pSession->pVM = NULL;
651 pSession->pUsage = NULL;
652 pSession->pGip = NULL;
653 pSession->fGipReferenced = false;
654 pSession->Bundle.cUsed = 0; */
655 pSession->Uid = NIL_RTUID;
656 pSession->Gid = NIL_RTGID;
657 if (fUser)
658 {
659 pSession->Process = RTProcSelf();
660 pSession->R0Process = RTR0ProcHandleSelf();
661 }
662 else
663 {
664 pSession->Process = NIL_RTPROCESS;
665 pSession->R0Process = NIL_RTR0PROCESS;
666 }
667
668 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
669 return VINF_SUCCESS;
670 }
671
672 RTMemFree(pSession);
673 *ppSession = NULL;
674 Log(("Failed to create spinlock, rc=%d!\n", rc));
675 }
676
677 return rc;
678}
679
680
681/**
682 * Shared code for cleaning up a session.
683 *
684 * @param pDevExt Device extension.
685 * @param pSession Session data.
686 * This data will be freed by this routine.
687 */
688void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
689{
690 /*
691 * Cleanup the session first.
692 */
693 supdrvCleanupSession(pDevExt, pSession);
694
695 /*
696 * Free the rest of the session stuff.
697 */
698 RTSpinlockDestroy(pSession->Spinlock);
699 pSession->Spinlock = NIL_RTSPINLOCK;
700 pSession->pDevExt = NULL;
701 RTMemFree(pSession);
702 LogFlow(("supdrvCloseSession: returns\n"));
703}
704
705
/**
 * Shared code for cleaning up a session (but not quite freeing it).
 *
 * This is primarily intended for MAC OS X where we have to clean up the memory
 * stuff before the file handle is closed.
 *
 * Releases, in order: the per-session logger instance, all object references
 * (running destructors where the session held the last reference), all memory
 * bundles, component factory registrations, loaded image references, and
 * finally the GIP mapping.  The session structure itself is NOT freed here;
 * the caller (e.g. supdrvCloseSession) does that.
 *
 * @param   pDevExt     Device extension.
 * @param   pSession    Session data.
 *                      This data will be freed by this routine.
 */
void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    PSUPDRVBUNDLE pBundle;
    LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));

    /*
     * Remove logger instances related to this session.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);

    /*
     * Release object references made in this session.
     * In theory there should be noone racing us in this session.
     */
    Log2(("release objects - start\n"));
    if (pSession->pUsage)
    {
        RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
        PSUPDRVUSAGE pUsage;
        RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);

        /* Pop usage records off the session list one at a time; the spinlock
           is held at the top of every iteration and dropped before any
           destructor callback or RTMemFree. */
        while ((pUsage = pSession->pUsage) != NULL)
        {
            PSUPDRVOBJ pObj = pUsage->pObj;
            pSession->pUsage = pUsage->pNext;

            AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
            if (pUsage->cUsage < pObj->cUsage)
            {
                /* Other sessions still reference the object; just subtract
                   this session's share of the global count. */
                pObj->cUsage -= pUsage->cUsage;
                RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
            }
            else
            {
                /* Destroy the object and free the record. */
                /* Unlink pObj from the device-wide object list first. */
                if (pDevExt->pObjs == pObj)
                    pDevExt->pObjs = pObj->pNext;
                else
                {
                    PSUPDRVOBJ pObjPrev;
                    for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
                        if (pObjPrev->pNext == pObj)
                        {
                            pObjPrev->pNext = pObj->pNext;
                            break;
                        }
                    Assert(pObjPrev);
                }
                /* Drop the spinlock before invoking the destructor - the
                   callback may block or reenter the driver. */
                RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);

                Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
                     pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
                if (pObj->pfnDestructor)
#ifdef RT_WITH_W64_UNWIND_HACK
                    supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
#else
                    pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
#endif
                RTMemFree(pObj);
            }

            /* free it and continue. */
            RTMemFree(pUsage);

            /* Re-acquire for the next iteration of the while loop. */
            RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
        }

        RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
        AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during desturction!\n"));
    }
    Log2(("release objects - done\n"));

    /*
     * Release memory allocated in the session.
     *
     * We do not serialize this as we assume that the application will
     * not allocated memory while closing the file handle object.
     */
    Log2(("freeing memory:\n"));
    pBundle = &pSession->Bundle;
    while (pBundle)
    {
        PSUPDRVBUNDLE pToFree;
        unsigned i;

        /*
         * Check and unlock all entries in the bundle.
         */
        for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
        {
            if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
            {
                int rc;
                Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
                      (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
                /* Free the ring-3 mapping (if any) before the backing memory. */
                if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
                {
                    rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
                    AssertRC(rc); /** @todo figure out how to handle this. */
                    pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
                }
                rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
                AssertRC(rc); /** @todo figure out how to handle this. */
                pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
                pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
            }
        }

        /*
         * Advance and free previous bundle.
         */
        pToFree = pBundle;
        pBundle = pBundle->pNext;

        pToFree->pNext = NULL;
        pToFree->cUsed = 0;
        /* The first bundle is embedded in the session structure and must
           not be freed separately. */
        if (pToFree != &pSession->Bundle)
            RTMemFree(pToFree);
    }
    Log2(("freeing memory - done\n"));

    /*
     * Deregister component factories.
     */
    RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
    Log2(("deregistering component factories:\n"));
    if (pDevExt->pComponentFactoryHead)
    {
        PSUPDRVFACTORYREG pPrev = NULL;
        PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
        while (pCur)
        {
            /* Only drop registrations owned by this session. */
            if (pCur->pSession == pSession)
            {
                /* unlink it */
                PSUPDRVFACTORYREG pNext = pCur->pNext;
                if (pPrev)
                    pPrev->pNext = pNext;
                else
                    pDevExt->pComponentFactoryHead = pNext;

                /* free it */
                pCur->pNext = NULL;
                pCur->pSession = NULL;
                pCur->pFactory = NULL;
                RTMemFree(pCur);

                /* next */
                pCur = pNext;
            }
            else
            {
                /* next */
                pPrev = pCur;
                pCur = pCur->pNext;
            }
        }
    }
    RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
    Log2(("deregistering component factories - done\n"));

    /*
     * Loaded images needs to be dereferenced and possibly freed up.
     */
    RTSemFastMutexRequest(pDevExt->mtxLdr);
    Log2(("freeing images:\n"));
    if (pSession->pLdrUsage)
    {
        PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
        pSession->pLdrUsage = NULL;
        while (pUsage)
        {
            void *pvFree = pUsage;
            PSUPDRVLDRIMAGE pImage = pUsage->pImage;
            /* Drop this session's references; unload the image entirely when
               this was the last user. */
            if (pImage->cUsage > pUsage->cUsage)
                pImage->cUsage -= pUsage->cUsage;
            else
                supdrvLdrFree(pDevExt, pImage);
            pUsage->pImage = NULL;
            pUsage = pUsage->pNext;
            RTMemFree(pvFree);
        }
    }
    RTSemFastMutexRelease(pDevExt->mtxLdr);
    Log2(("freeing images - done\n"));

    /*
     * Unmap the GIP.
     */
    Log2(("umapping GIP:\n"));
    if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
    {
        SUPR0GipUnmap(pSession);
        pSession->fGipReferenced = 0;
    }
    Log2(("umapping GIP - done\n"));
}
913
914
915/**
916 * Fast path I/O Control worker.
917 *
918 * @returns VBox status code that should be passed down to ring-3 unchanged.
919 * @param uIOCtl Function number.
920 * @param idCpu VMCPU id.
921 * @param pDevExt Device extention.
922 * @param pSession Session data.
923 */
924int VBOXCALL supdrvIOCtlFast(uintptr_t uIOCtl, unsigned idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
925{
926 /*
927 * We check the two prereqs after doing this only to allow the compiler to optimize things better.
928 */
929 if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
930 {
931 switch (uIOCtl)
932 {
933 case SUP_IOCTL_FAST_DO_RAW_RUN:
934#ifdef RT_WITH_W64_UNWIND_HACK
935 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
936#else
937 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
938#endif
939 break;
940 case SUP_IOCTL_FAST_DO_HWACC_RUN:
941#ifdef RT_WITH_W64_UNWIND_HACK
942 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
943#else
944 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
945#endif
946 break;
947 case SUP_IOCTL_FAST_DO_NOP:
948#ifdef RT_WITH_W64_UNWIND_HACK
949 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
950#else
951 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
952#endif
953 break;
954 default:
955 return VERR_INTERNAL_ERROR;
956 }
957 return VINF_SUCCESS;
958 }
959 return VERR_INTERNAL_ERROR;
960}
961
962
/**
 * Helper for supdrvIOCtl.  Checks whether pszStr contains any character from
 * the set pszChars.  This is a hand-rolled strpbrk(), kept local because
 * strpbrk isn't on the RedHat kABI white list, see
 * http://www.kerneldrivers.org/RHEL5.
 *
 * @return 1 if pszStr does contain any character of pszChars, 0 otherwise.
 * @param   pszStr      String to check
 * @param   pszChars    Character set
 */
static int supdrvCheckInvalidChar(const char *pszStr, const char *pszChars)
{
    const char *psz;
    for (psz = pszStr; *psz != '\0'; psz++)
    {
        /* Scan the forbidden set for the current character. */
        const char *pszSet;
        for (pszSet = pszChars; *pszSet != '\0'; pszSet++)
            if (*pszSet == *psz)
                return 1;
    }
    return 0;
}
986
987
988/**
989 * I/O Control worker.
990 *
991 * @returns 0 on success.
992 * @returns VERR_INVALID_PARAMETER if the request is invalid.
993 *
994 * @param uIOCtl Function number.
995 * @param pDevExt Device extention.
996 * @param pSession Session data.
997 * @param pReqHdr The request header.
998 */
999int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1000{
1001 /*
1002 * Validate the request.
1003 */
1004 /* this first check could probably be omitted as its also done by the OS specific code... */
1005 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
1006 || pReqHdr->cbIn < sizeof(*pReqHdr)
1007 || pReqHdr->cbOut < sizeof(*pReqHdr)))
1008 {
1009 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
1010 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
1011 return VERR_INVALID_PARAMETER;
1012 }
1013 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
1014 {
1015 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
1016 {
1017 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
1018 return VERR_INVALID_PARAMETER;
1019 }
1020 }
1021 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
1022 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
1023 {
1024 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
1025 return VERR_INVALID_PARAMETER;
1026 }
1027
1028/*
1029 * Validation macros
1030 */
1031#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1032 do { \
1033 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1034 { \
1035 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1036 (long)pReq->Hdr.cbIn, (long)(cbInExpect), (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1037 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1038 } \
1039 } while (0)
1040
1041#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1042
1043#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1044 do { \
1045 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1046 { \
1047 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1048 (long)pReq->Hdr.cbIn, (long)(cbInExpect))); \
1049 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1050 } \
1051 } while (0)
1052
1053#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1054 do { \
1055 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1056 { \
1057 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1058 (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1059 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1060 } \
1061 } while (0)
1062
1063#define REQ_CHECK_EXPR(Name, expr) \
1064 do { \
1065 if (RT_UNLIKELY(!(expr))) \
1066 { \
1067 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1068 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1069 } \
1070 } while (0)
1071
1072#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1073 do { \
1074 if (RT_UNLIKELY(!(expr))) \
1075 { \
1076 OSDBGPRINT( fmt ); \
1077 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1078 } \
1079 } while (0)
1080
1081
1082 /*
1083 * The switch.
1084 */
1085 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1086 {
1087 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1088 {
1089 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1090 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1091 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1092 {
1093 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1094 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1095 return 0;
1096 }
1097
1098#if 0
1099 /*
1100 * Call out to the OS specific code and let it do permission checks on the
1101 * client process.
1102 */
1103 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1104 {
1105 pReq->u.Out.u32Cookie = 0xffffffff;
1106 pReq->u.Out.u32SessionCookie = 0xffffffff;
1107 pReq->u.Out.u32SessionVersion = 0xffffffff;
1108 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1109 pReq->u.Out.pSession = NULL;
1110 pReq->u.Out.cFunctions = 0;
1111 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1112 return 0;
1113 }
1114#endif
1115
1116 /*
1117 * Match the version.
1118 * The current logic is very simple, match the major interface version.
1119 */
1120 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1121 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1122 {
1123 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1124 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1125 pReq->u.Out.u32Cookie = 0xffffffff;
1126 pReq->u.Out.u32SessionCookie = 0xffffffff;
1127 pReq->u.Out.u32SessionVersion = 0xffffffff;
1128 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1129 pReq->u.Out.pSession = NULL;
1130 pReq->u.Out.cFunctions = 0;
1131 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1132 return 0;
1133 }
1134
1135 /*
1136 * Fill in return data and be gone.
1137 * N.B. The first one to change SUPDRV_IOC_VERSION shall makes sure that
1138 * u32SessionVersion <= u32ReqVersion!
1139 */
1140 /** @todo Somehow validate the client and negotiate a secure cookie... */
1141 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1142 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1143 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1144 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1145 pReq->u.Out.pSession = pSession;
1146 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1147 pReq->Hdr.rc = VINF_SUCCESS;
1148 return 0;
1149 }
1150
1151 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1152 {
1153 /* validate */
1154 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1155 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1156
1157 /* execute */
1158 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1159 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1160 pReq->Hdr.rc = VINF_SUCCESS;
1161 return 0;
1162 }
1163
1164 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_INSTALL):
1165 {
1166 /* validate */
1167 PSUPIDTINSTALL pReq = (PSUPIDTINSTALL)pReqHdr;
1168 REQ_CHECK_SIZES(SUP_IOCTL_IDT_INSTALL);
1169
1170 /* execute */
1171 pReq->u.Out.u8Idt = 3;
1172 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1173 return 0;
1174 }
1175
1176 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_REMOVE):
1177 {
1178 /* validate */
1179 PSUPIDTREMOVE pReq = (PSUPIDTREMOVE)pReqHdr;
1180 REQ_CHECK_SIZES(SUP_IOCTL_IDT_REMOVE);
1181
1182 /* execute */
1183 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1184 return 0;
1185 }
1186
1187 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1188 {
1189 /* validate */
1190 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1191 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1192 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1193 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1194 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1195
1196 /* execute */
1197 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1198 if (RT_FAILURE(pReq->Hdr.rc))
1199 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1200 return 0;
1201 }
1202
1203 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1204 {
1205 /* validate */
1206 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1207 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1208
1209 /* execute */
1210 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1211 return 0;
1212 }
1213
1214 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1215 {
1216 /* validate */
1217 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1218 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1219
1220 /* execute */
1221 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1222 if (RT_FAILURE(pReq->Hdr.rc))
1223 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1224 return 0;
1225 }
1226
1227 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1228 {
1229 /* validate */
1230 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1231 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1232
1233 /* execute */
1234 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1235 return 0;
1236 }
1237
1238 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1239 {
1240 /* validate */
1241 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1242 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1243 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage > 0);
1244 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage < _1M*16);
1245 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1246 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1247 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, !supdrvCheckInvalidChar(pReq->u.In.szName, ";:()[]{}/\\|&*%#@!~`\"'"));
1248
1249 /* execute */
1250 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1251 return 0;
1252 }
1253
1254 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1255 {
1256 /* validate */
1257 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1258 REQ_CHECK_EXPR(Name, pReq->Hdr.cbIn >= sizeof(*pReq));
1259 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImage), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1260 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
1261 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1262 || ( pReq->u.In.offSymbols < pReq->u.In.cbImage
1263 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImage),
1264 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImage=%#lx\n", (long)pReq->u.In.offSymbols,
1265 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImage));
1266 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1267 || ( pReq->u.In.offStrTab < pReq->u.In.cbImage
1268 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImage
1269 && pReq->u.In.cbStrTab <= pReq->u.In.cbImage),
1270 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImage=%#lx\n", (long)pReq->u.In.offStrTab,
1271 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImage));
1272
1273 if (pReq->u.In.cSymbols)
1274 {
1275 uint32_t i;
1276 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.achImage[pReq->u.In.offSymbols];
1277 for (i = 0; i < pReq->u.In.cSymbols; i++)
1278 {
1279 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImage,
1280 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImage));
1281 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1282 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
1283 REQ_CHECK_EXPR_FMT(memchr(&pReq->u.In.achImage[pReq->u.In.offStrTab + paSyms[i].offName], '\0', pReq->u.In.cbStrTab - paSyms[i].offName),
1284 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
1285 }
1286 }
1287
1288 /* execute */
1289 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1290 return 0;
1291 }
1292
1293 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1294 {
1295 /* validate */
1296 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1297 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1298
1299 /* execute */
1300 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1301 return 0;
1302 }
1303
1304 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1305 {
1306 /* validate */
1307 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1308 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1309 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, memchr(pReq->u.In.szSymbol, '\0', sizeof(pReq->u.In.szSymbol)));
1310
1311 /* execute */
1312 pReq->Hdr.rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
1313 return 0;
1314 }
1315
1316 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0(0)):
1317 {
1318 /* validate */
1319 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1320 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1321 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1322
1323 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1324 {
1325 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1326
1327 /* execute */
1328 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1329#ifdef RT_WITH_W64_UNWIND_HACK
1330 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1331#else
1332 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1333#endif
1334 else
1335 pReq->Hdr.rc = VERR_WRONG_ORDER;
1336 }
1337 else
1338 {
1339 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1340 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1341 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1342 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1343 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1344
1345 /* execute */
1346 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1347#ifdef RT_WITH_W64_UNWIND_HACK
1348 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1349#else
1350 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1351#endif
1352 else
1353 pReq->Hdr.rc = VERR_WRONG_ORDER;
1354 }
1355
1356 if ( RT_FAILURE(pReq->Hdr.rc)
1357 && pReq->Hdr.rc != VERR_INTERRUPTED
1358 && pReq->Hdr.rc != VERR_TIMEOUT)
1359 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1360 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1361 else
1362 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1363 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1364 return 0;
1365 }
1366
1367 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1368 {
1369 /* validate */
1370 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1371 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1372
1373 /* execute */
1374 pReq->Hdr.rc = VINF_SUCCESS;
1375 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1376 return 0;
1377 }
1378
1379 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1380 {
1381 /* validate */
1382 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1383 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1384 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1385
1386 /* execute */
1387 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1388 if (RT_FAILURE(pReq->Hdr.rc))
1389 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1390 return 0;
1391 }
1392
1393 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1394 {
1395 /* validate */
1396 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1397 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1398
1399 /* execute */
1400 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1401 return 0;
1402 }
1403
1404 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1405 {
1406 /* validate */
1407 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1408 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1409
1410 /* execute */
1411 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1412 if (RT_SUCCESS(pReq->Hdr.rc))
1413 pReq->u.Out.pGipR0 = pDevExt->pGip;
1414 return 0;
1415 }
1416
1417 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
1418 {
1419 /* validate */
1420 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
1421 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
1422
1423 /* execute */
1424 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
1425 return 0;
1426 }
1427
1428 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
1429 {
1430 /* validate */
1431 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
1432 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
1433 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
1434 || ( VALID_PTR(pReq->u.In.pVMR0)
1435 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
1436 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
1437 /* execute */
1438 pSession->pVM = pReq->u.In.pVMR0;
1439 pReq->Hdr.rc = VINF_SUCCESS;
1440 return 0;
1441 }
1442
1443 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC):
1444 {
1445 /* validate */
1446 PSUPPAGEALLOC pReq = (PSUPPAGEALLOC)pReqHdr;
1447 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_SIZE_IN);
1448 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC, SUP_IOCTL_PAGE_ALLOC_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1449
1450 /* execute */
1451 pReq->Hdr.rc = SUPR0PageAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1452 if (RT_FAILURE(pReq->Hdr.rc))
1453 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1454 return 0;
1455 }
1456
1457 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
1458 {
1459 /* validate */
1460 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
1461 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
1462 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
1463 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
1464 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
1465 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
1466 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
1467 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
1468 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
1469
1470 /* execute */
1471 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
1472 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
1473 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
1474 &pReq->u.Out.aPages[0]);
1475 if (RT_FAILURE(pReq->Hdr.rc))
1476 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1477 return 0;
1478 }
1479
1480 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
1481 {
1482 /* validate */
1483 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
1484 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
1485 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
1486 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
1487 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1488 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
1489
1490 /* execute */
1491 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
1492 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
1493 if (RT_FAILURE(pReq->Hdr.rc))
1494 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1495 return 0;
1496 }
1497
1498 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
1499 {
1500 /* validate */
1501 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
1502 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
1503
1504 /* execute */
1505 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
1506 return 0;
1507 }
1508
1509 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE(0)):
1510 {
1511 /* validate */
1512 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
1513 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1514 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1515
1516 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
1517 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
1518 else
1519 {
1520 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
1521 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
1522 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
1523 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
1524 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
1525 }
1526 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1527
1528 /* execute */
1529 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
1530 return 0;
1531 }
1532
1533 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS(0)):
1534 {
1535 /* validate */
1536 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
1537 size_t cbStrTab;
1538 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
1539 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
1540 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
1541 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
1542 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
1543 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
1544 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
1545 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
1546 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
1547 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
1548 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
1549
1550 /* execute */
1551 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pDevExt, pSession, pReq);
1552 return 0;
1553 }
1554
1555 default:
1556 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
1557 break;
1558 }
1559 return SUPDRV_ERR_GENERAL_FAILURE;
1560}
1561
1562
1563/**
1564 * Inter-Driver Communcation (IDC) worker.
1565 *
1566 * @returns VBox status code.
1567 * @retval VINF_SUCCESS on success.
1568 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1569 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
1570 *
1571 * @param uReq The request (function) code.
1572 * @param pDevExt Device extention.
1573 * @param pSession Session data.
1574 * @param pReqHdr The request header.
1575 */
1576int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
1577{
1578 /*
1579 * The OS specific code has already validated the pSession
1580 * pointer, and the request size being greater or equal to
1581 * size of the header.
1582 *
1583 * So, just check that pSession is a kernel context session.
1584 */
1585 if (RT_UNLIKELY( pSession
1586 && pSession->R0Process != NIL_RTR0PROCESS))
1587 return VERR_INVALID_PARAMETER;
1588
1589/*
1590 * Validation macro.
1591 */
1592#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
1593 do { \
1594 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
1595 { \
1596 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
1597 (long)pReqHdr->cb, (long)(cbExpect))); \
1598 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1599 } \
1600 } while (0)
1601
1602 switch (uReq)
1603 {
1604 case SUPDRV_IDC_REQ_CONNECT:
1605 {
1606 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
1607 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
1608
1609 /*
1610 * Validate the cookie and other input.
1611 */
1612 if (pReq->Hdr.pSession != NULL)
1613 {
1614 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pReq->Hdr.pSession));
1615 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1616 }
1617 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
1618 {
1619 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1620 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
1621 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1622 }
1623 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
1624 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
1625 {
1626 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1627 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1628 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1629 }
1630
1631 /*
1632 * Match the version.
1633 * The current logic is very simple, match the major interface version.
1634 */
1635 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
1636 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
1637 {
1638 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1639 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
1640 pReq->u.Out.pSession = NULL;
1641 pReq->u.Out.uSessionVersion = 0xffffffff;
1642 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1643 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1644 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1645 return VINF_SUCCESS;
1646 }
1647
1648 pReq->u.Out.pSession = NULL;
1649 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
1650 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1651 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1652
1653 /*
1654 * On NT we will already have a session associated with the
1655 * client, just like with the SUP_IOCTL_COOKIE request, while
1656 * the other doesn't.
1657 */
1658#ifdef RT_OS_WINDOWS
1659 pReq->Hdr.rc = VINF_SUCCESS;
1660#else
1661 AssertReturn(!pSession, VERR_INTERNAL_ERROR);
1662 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, &pSession);
1663 if (RT_FAILURE(pReq->Hdr.rc))
1664 {
1665 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
1666 return VINF_SUCCESS;
1667 }
1668#endif
1669
1670 pReq->u.Out.pSession = pSession;
1671 pReq->Hdr.pSession = pSession;
1672
1673 return VINF_SUCCESS;
1674 }
1675
1676 case SUPDRV_IDC_REQ_DISCONNECT:
1677 {
1678 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
1679
1680#ifdef RT_OS_WINDOWS
1681 /* Windows will destroy the session when the file object is destroyed. */
1682#else
1683 supdrvCloseSession(pDevExt, pSession);
1684#endif
1685 return pReqHdr->rc = VINF_SUCCESS;
1686 }
1687
1688 case SUPDRV_IDC_REQ_GET_SYMBOL:
1689 {
1690 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
1691 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
1692
1693 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
1694 return VINF_SUCCESS;
1695 }
1696
1697 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
1698 {
1699 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
1700 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
1701
1702 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
1703 return VINF_SUCCESS;
1704 }
1705
1706 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
1707 {
1708 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
1709 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
1710
1711 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
1712 return VINF_SUCCESS;
1713 }
1714
1715 default:
1716 Log(("Unknown IDC %#lx\n", (long)uReq));
1717 break;
1718 }
1719
1720#undef REQ_CHECK_IDC_SIZE
1721 return VERR_NOT_SUPPORTED;
1722}
1723
1724
1725/**
1726 * Register a object for reference counting.
1727 * The object is registered with one reference in the specified session.
1728 *
1729 * @returns Unique identifier on success (pointer).
1730 * All future reference must use this identifier.
1731 * @returns NULL on failure.
1732 * @param pfnDestructor The destructore function which will be called when the reference count reaches 0.
1733 * @param pvUser1 The first user argument.
1734 * @param pvUser2 The second user argument.
1735 */
1736SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1737{
1738 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1739 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1740 PSUPDRVOBJ pObj;
1741 PSUPDRVUSAGE pUsage;
1742
1743 /*
1744 * Validate the input.
1745 */
1746 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
1747 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
1748 AssertPtrReturn(pfnDestructor, NULL);
1749
1750 /*
1751 * Allocate and initialize the object.
1752 */
1753 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1754 if (!pObj)
1755 return NULL;
1756 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1757 pObj->enmType = enmType;
1758 pObj->pNext = NULL;
1759 pObj->cUsage = 1;
1760 pObj->pfnDestructor = pfnDestructor;
1761 pObj->pvUser1 = pvUser1;
1762 pObj->pvUser2 = pvUser2;
1763 pObj->CreatorUid = pSession->Uid;
1764 pObj->CreatorGid = pSession->Gid;
1765 pObj->CreatorProcess= pSession->Process;
1766 supdrvOSObjInitCreator(pObj, pSession);
1767
1768 /*
1769 * Allocate the usage record.
1770 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
1771 */
1772 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1773
1774 pUsage = pDevExt->pUsageFree;
1775 if (pUsage)
1776 pDevExt->pUsageFree = pUsage->pNext;
1777 else
1778 {
1779 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1780 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1781 if (!pUsage)
1782 {
1783 RTMemFree(pObj);
1784 return NULL;
1785 }
1786 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1787 }
1788
1789 /*
1790 * Insert the object and create the session usage record.
1791 */
1792 /* The object. */
1793 pObj->pNext = pDevExt->pObjs;
1794 pDevExt->pObjs = pObj;
1795
1796 /* The session record. */
1797 pUsage->cUsage = 1;
1798 pUsage->pObj = pObj;
1799 pUsage->pNext = pSession->pUsage;
1800 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
1801 pSession->pUsage = pUsage;
1802
1803 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1804
1805 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
1806 return pObj;
1807}
1808
1809
1810/**
1811 * Increment the reference counter for the object associating the reference
1812 * with the specified session.
1813 *
1814 * @returns IPRT status code.
1815 * @param pvObj The identifier returned by SUPR0ObjRegister().
1816 * @param pSession The session which is referencing the object.
1817 *
1818 * @remarks The caller should not own any spinlocks and must carefully protect
1819 * itself against potential race with the destructor so freed memory
1820 * isn't accessed here.
1821 */
1822SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1823{
1824 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
1825}
1826
1827
1828/**
1829 * Increment the reference counter for the object associating the reference
1830 * with the specified session.
1831 *
1832 * @returns IPRT status code.
1833 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
1834 * couldn't be allocated. (If you see this you're not doing the right
1835 * thing and it won't ever work reliably.)
1836 *
1837 * @param pvObj The identifier returned by SUPR0ObjRegister().
1838 * @param pSession The session which is referencing the object.
1839 * @param fNoBlocking Set if it's not OK to block. Never try to make the
1840 * first reference to an object in a session with this
1841 * argument set.
1842 *
1843 * @remarks The caller should not own any spinlocks and must carefully protect
1844 * itself against potential race with the destructor so freed memory
1845 * isn't accessed here.
1846 */
1847SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
1848{
1849 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1850 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1851 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1852 int rc = VINF_SUCCESS;
1853 PSUPDRVUSAGE pUsagePre;
1854 PSUPDRVUSAGE pUsage;
1855
1856 /*
1857 * Validate the input.
1858 * Be ready for the destruction race (someone might be stuck in the
1859 * destructor waiting a lock we own).
1860 */
1861 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1862 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
1863 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
1864 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
1865 VERR_INVALID_PARAMETER);
1866
1867 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1868
1869 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1870 {
1871 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1872
1873 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1874 return VERR_WRONG_ORDER;
1875 }
1876
1877 /*
1878 * Preallocate the usage record if we can.
1879 */
1880 pUsagePre = pDevExt->pUsageFree;
1881 if (pUsagePre)
1882 pDevExt->pUsageFree = pUsagePre->pNext;
1883 else if (!fNoBlocking)
1884 {
1885 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1886 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
1887 if (!pUsagePre)
1888 return VERR_NO_MEMORY;
1889
1890 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1891 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1892 {
1893 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1894
1895 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1896 return VERR_WRONG_ORDER;
1897 }
1898 }
1899
1900 /*
1901 * Reference the object.
1902 */
1903 pObj->cUsage++;
1904
1905 /*
1906 * Look for the session record.
1907 */
1908 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
1909 {
1910 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1911 if (pUsage->pObj == pObj)
1912 break;
1913 }
1914 if (pUsage)
1915 pUsage->cUsage++;
1916 else if (pUsagePre)
1917 {
1918 /* create a new session record. */
1919 pUsagePre->cUsage = 1;
1920 pUsagePre->pObj = pObj;
1921 pUsagePre->pNext = pSession->pUsage;
1922 pSession->pUsage = pUsagePre;
1923 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
1924
1925 pUsagePre = NULL;
1926 }
1927 else
1928 {
1929 pObj->cUsage--;
1930 rc = VERR_TRY_AGAIN;
1931 }
1932
1933 /*
1934 * Put any unused usage record into the free list..
1935 */
1936 if (pUsagePre)
1937 {
1938 pUsagePre->pNext = pDevExt->pUsageFree;
1939 pDevExt->pUsageFree = pUsagePre;
1940 }
1941
1942 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1943
1944 return rc;
1945}
1946
1947
1948/**
1949 * Decrement / destroy a reference counter record for an object.
1950 *
1951 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
1952 *
1953 * @returns IPRT status code.
1954 * @retval VINF_SUCCESS if not destroyed.
1955 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
1956 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
1957 * string builds.
1958 *
1959 * @param pvObj The identifier returned by SUPR0ObjRegister().
1960 * @param pSession The session which is referencing the object.
1961 */
1962SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
1963{
1964 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1965 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1966 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1967 int rc = VERR_INVALID_PARAMETER;
1968 PSUPDRVUSAGE pUsage;
1969 PSUPDRVUSAGE pUsagePrev;
1970
1971 /*
1972 * Validate the input.
1973 */
1974 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1975 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
1976 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
1977 VERR_INVALID_PARAMETER);
1978
1979 /*
1980 * Acquire the spinlock and look for the usage record.
1981 */
1982 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1983
1984 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
1985 pUsage;
1986 pUsagePrev = pUsage, pUsage = pUsage->pNext)
1987 {
1988 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1989 if (pUsage->pObj == pObj)
1990 {
1991 rc = VINF_SUCCESS;
1992 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
1993 if (pUsage->cUsage > 1)
1994 {
1995 pObj->cUsage--;
1996 pUsage->cUsage--;
1997 }
1998 else
1999 {
2000 /*
2001 * Free the session record.
2002 */
2003 if (pUsagePrev)
2004 pUsagePrev->pNext = pUsage->pNext;
2005 else
2006 pSession->pUsage = pUsage->pNext;
2007 pUsage->pNext = pDevExt->pUsageFree;
2008 pDevExt->pUsageFree = pUsage;
2009
2010 /* What about the object? */
2011 if (pObj->cUsage > 1)
2012 pObj->cUsage--;
2013 else
2014 {
2015 /*
2016 * Object is to be destroyed, unlink it.
2017 */
2018 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
2019 rc = VINF_OBJECT_DESTROYED;
2020 if (pDevExt->pObjs == pObj)
2021 pDevExt->pObjs = pObj->pNext;
2022 else
2023 {
2024 PSUPDRVOBJ pObjPrev;
2025 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
2026 if (pObjPrev->pNext == pObj)
2027 {
2028 pObjPrev->pNext = pObj->pNext;
2029 break;
2030 }
2031 Assert(pObjPrev);
2032 }
2033 }
2034 }
2035 break;
2036 }
2037 }
2038
2039 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2040
2041 /*
2042 * Call the destructor and free the object if required.
2043 */
2044 if (rc == VINF_OBJECT_DESTROYED)
2045 {
2046 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
2047 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
2048 if (pObj->pfnDestructor)
2049#ifdef RT_WITH_W64_UNWIND_HACK
2050 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
2051#else
2052 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
2053#endif
2054 RTMemFree(pObj);
2055 }
2056
2057 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
2058 return rc;
2059}
2060
2061
2062/**
2063 * Verifies that the current process can access the specified object.
2064 *
2065 * @returns The following IPRT status code:
2066 * @retval VINF_SUCCESS if access was granted.
2067 * @retval VERR_PERMISSION_DENIED if denied access.
2068 * @retval VERR_INVALID_PARAMETER if invalid parameter.
2069 *
2070 * @param pvObj The identifier returned by SUPR0ObjRegister().
2071 * @param pSession The session which wishes to access the object.
2072 * @param pszObjName Object string name. This is optional and depends on the object type.
2073 *
2074 * @remark The caller is responsible for making sure the object isn't removed while
2075 * we're inside this function. If uncertain about this, just call AddRef before calling us.
2076 */
2077SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
2078{
2079 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2080 int rc;
2081
2082 /*
2083 * Validate the input.
2084 */
2085 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2086 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2087 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2088 VERR_INVALID_PARAMETER);
2089
2090 /*
2091 * Check access. (returns true if a decision has been made.)
2092 */
2093 rc = VERR_INTERNAL_ERROR;
2094 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
2095 return rc;
2096
2097 /*
2098 * Default policy is to allow the user to access his own
2099 * stuff but nothing else.
2100 */
2101 if (pObj->CreatorUid == pSession->Uid)
2102 return VINF_SUCCESS;
2103 return VERR_PERMISSION_DENIED;
2104}
2105
2106
2107/**
2108 * Lock pages.
2109 *
2110 * @returns IPRT status code.
2111 * @param pSession Session to which the locked memory should be associated.
2112 * @param pvR3 Start of the memory range to lock.
2113 * This must be page aligned.
2114 * @param cPages Number of pages to lock.
2115 * @param paPages Where to put the physical addresses of locked memory.
2116 */
2117SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
2118{
2119 int rc;
2120 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2121 const size_t cb = (size_t)cPages << PAGE_SHIFT;
2122 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
2123
2124 /*
2125 * Verify input.
2126 */
2127 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2128 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2129 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
2130 || !pvR3)
2131 {
2132 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
2133 return VERR_INVALID_PARAMETER;
2134 }
2135
2136#ifdef RT_OS_WINDOWS /* A temporary hack for windows, will be removed once all ring-3 code has been cleaned up. */
2137 /* First check if we allocated it using SUPPageAlloc; if so then we don't need to lock it again */
2138 rc = supdrvPageGetPhys(pSession, pvR3, cPages, paPages);
2139 if (RT_SUCCESS(rc))
2140 return rc;
2141#endif
2142
2143 /*
2144 * Let IPRT do the job.
2145 */
2146 Mem.eType = MEMREF_TYPE_LOCKED;
2147 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTR0ProcHandleSelf());
2148 if (RT_SUCCESS(rc))
2149 {
2150 uint32_t iPage = cPages;
2151 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
2152 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
2153
2154 while (iPage-- > 0)
2155 {
2156 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2157 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
2158 {
2159 AssertMsgFailed(("iPage=%d\n", iPage));
2160 rc = VERR_INTERNAL_ERROR;
2161 break;
2162 }
2163 }
2164 if (RT_SUCCESS(rc))
2165 rc = supdrvMemAdd(&Mem, pSession);
2166 if (RT_FAILURE(rc))
2167 {
2168 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
2169 AssertRC(rc2);
2170 }
2171 }
2172
2173 return rc;
2174}
2175
2176
2177/**
2178 * Unlocks the memory pointed to by pv.
2179 *
2180 * @returns IPRT status code.
2181 * @param pSession Session to which the memory was locked.
2182 * @param pvR3 Memory to unlock.
2183 */
2184SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2185{
2186 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2187 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2188#ifdef RT_OS_WINDOWS
2189 /*
2190 * Temporary hack for windows - SUPR0PageFree will unlock SUPR0PageAlloc
2191 * allocations; ignore this call.
2192 */
2193 if (supdrvPageWasLockedByPageAlloc(pSession, pvR3))
2194 {
2195 LogFlow(("Page will be unlocked in SUPR0PageFree -> ignore\n"));
2196 return VINF_SUCCESS;
2197 }
2198#endif
2199 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
2200}
2201
2202
2203/**
2204 * Allocates a chunk of page aligned memory with contiguous and fixed physical
2205 * backing.
2206 *
2207 * @returns IPRT status code.
2208 * @param pSession Session data.
2209 * @param cPages Number of pages to allocate.
2210 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
2211 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
2212 * @param pHCPhys Where to put the physical address of allocated memory.
2213 */
2214SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
2215{
2216 int rc;
2217 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2218 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
2219
2220 /*
2221 * Validate input.
2222 */
2223 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2224 if (!ppvR3 || !ppvR0 || !pHCPhys)
2225 {
2226 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
2227 pSession, ppvR0, ppvR3, pHCPhys));
2228 return VERR_INVALID_PARAMETER;
2229
2230 }
2231 if (cPages < 1 || cPages >= 256)
2232 {
2233 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2234 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2235 }
2236
2237 /*
2238 * Let IPRT do the job.
2239 */
2240 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
2241 if (RT_SUCCESS(rc))
2242 {
2243 int rc2;
2244 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2245 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2246 if (RT_SUCCESS(rc))
2247 {
2248 Mem.eType = MEMREF_TYPE_CONT;
2249 rc = supdrvMemAdd(&Mem, pSession);
2250 if (!rc)
2251 {
2252 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2253 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2254 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
2255 return 0;
2256 }
2257
2258 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2259 AssertRC(rc2);
2260 }
2261 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2262 AssertRC(rc2);
2263 }
2264
2265 return rc;
2266}
2267
2268
2269/**
2270 * Frees memory allocated using SUPR0ContAlloc().
2271 *
2272 * @returns IPRT status code.
2273 * @param pSession The session to which the memory was allocated.
2274 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2275 */
2276SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2277{
2278 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2279 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2280 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
2281}
2282
2283
2284/**
2285 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
2286 *
2287 * The memory isn't zeroed.
2288 *
2289 * @returns IPRT status code.
2290 * @param pSession Session data.
2291 * @param cPages Number of pages to allocate.
2292 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
2293 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
2294 * @param paPages Where to put the physical addresses of allocated memory.
2295 */
2296SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2297{
2298 unsigned iPage;
2299 int rc;
2300 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2301 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
2302
2303 /*
2304 * Validate input.
2305 */
2306 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2307 if (!ppvR3 || !ppvR0 || !paPages)
2308 {
2309 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
2310 pSession, ppvR3, ppvR0, paPages));
2311 return VERR_INVALID_PARAMETER;
2312
2313 }
2314 if (cPages < 1 || cPages >= 256)
2315 {
2316 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2317 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2318 }
2319
2320 /*
2321 * Let IPRT do the work.
2322 */
2323 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
2324 if (RT_SUCCESS(rc))
2325 {
2326 int rc2;
2327 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2328 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2329 if (RT_SUCCESS(rc))
2330 {
2331 Mem.eType = MEMREF_TYPE_LOW;
2332 rc = supdrvMemAdd(&Mem, pSession);
2333 if (!rc)
2334 {
2335 for (iPage = 0; iPage < cPages; iPage++)
2336 {
2337 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2338 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", paPages[iPage]));
2339 }
2340 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2341 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2342 return 0;
2343 }
2344
2345 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2346 AssertRC(rc2);
2347 }
2348
2349 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2350 AssertRC(rc2);
2351 }
2352
2353 return rc;
2354}
2355
2356
2357/**
2358 * Frees memory allocated using SUPR0LowAlloc().
2359 *
2360 * @returns IPRT status code.
2361 * @param pSession The session to which the memory was allocated.
2362 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2363 */
2364SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2365{
2366 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2367 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2368 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
2369}
2370
2371
2372
2373/**
2374 * Allocates a chunk of memory with both R0 and R3 mappings.
2375 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
2376 *
2377 * @returns IPRT status code.
2378 * @param pSession The session to associated the allocation with.
2379 * @param cb Number of bytes to allocate.
2380 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2381 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2382 */
2383SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
2384{
2385 int rc;
2386 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2387 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
2388
2389 /*
2390 * Validate input.
2391 */
2392 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2393 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
2394 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2395 if (cb < 1 || cb >= _4M)
2396 {
2397 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
2398 return VERR_INVALID_PARAMETER;
2399 }
2400
2401 /*
2402 * Let IPRT do the work.
2403 */
2404 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
2405 if (RT_SUCCESS(rc))
2406 {
2407 int rc2;
2408 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2409 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2410 if (RT_SUCCESS(rc))
2411 {
2412 Mem.eType = MEMREF_TYPE_MEM;
2413 rc = supdrvMemAdd(&Mem, pSession);
2414 if (!rc)
2415 {
2416 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2417 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2418 return VINF_SUCCESS;
2419 }
2420
2421 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2422 AssertRC(rc2);
2423 }
2424
2425 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2426 AssertRC(rc2);
2427 }
2428
2429 return rc;
2430}
2431
2432
2433/**
2434 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
2435 *
2436 * @returns IPRT status code.
2437 * @param pSession The session to which the memory was allocated.
2438 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2439 * @param paPages Where to store the physical addresses.
2440 */
2441SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
2442{
2443 PSUPDRVBUNDLE pBundle;
2444 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2445 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
2446
2447 /*
2448 * Validate input.
2449 */
2450 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2451 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
2452 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
2453
2454 /*
2455 * Search for the address.
2456 */
2457 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2458 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2459 {
2460 if (pBundle->cUsed > 0)
2461 {
2462 unsigned i;
2463 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2464 {
2465 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2466 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2467 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2468 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2469 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
2470 )
2471 )
2472 {
2473 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2474 size_t iPage;
2475 for (iPage = 0; iPage < cPages; iPage++)
2476 {
2477 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2478 paPages[iPage].uReserved = 0;
2479 }
2480 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2481 return VINF_SUCCESS;
2482 }
2483 }
2484 }
2485 }
2486 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2487 Log(("Failed to find %p!!!\n", (void *)uPtr));
2488 return VERR_INVALID_PARAMETER;
2489}
2490
2491
/**
 * Free memory allocated by SUPR0MemAlloc().
 *
 * Thin wrapper: validates the session and delegates the lookup and release
 * to supdrvMemRelease() with the MEMREF_TYPE_MEM type filter.
 *
 * @returns IPRT status code.
 * @param   pSession    The session owning the allocation.
 * @param   uPtr        The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
 */
SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
{
    LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
}
2505
2506
/**
 * Allocates a chunk of memory with only a ring-3 mapping.
 *
 * The memory is fixed and it's possible to query the physical addresses using
 * SUPR0MemGetPhys().
 *
 * Convenience wrapper over SUPR0PageAllocEx() with no flags and no ring-0
 * mapping.
 *
 * @returns IPRT status code.
 * @param   pSession    The session to associated the allocation with.
 * @param   cPages      The number of pages to allocate.
 * @param   ppvR3       Where to store the address of the Ring-3 mapping.
 * @param   paPages     Where to store the addresses of the pages. Optional.
 */
SUPR0DECL(int) SUPR0PageAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages)
{
    AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
    return SUPR0PageAllocEx(pSession, cPages, 0 /*fFlags*/, ppvR3, NULL, paPages);
}
2524
2525
2526/**
2527 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
2528 *
2529 * The memory is fixed and it's possible to query the physical addresses using
2530 * SUPR0MemGetPhys().
2531 *
2532 * @returns IPRT status code.
2533 * @param pSession The session to associated the allocation with.
2534 * @param cPages The number of pages to allocate.
2535 * @param fFlags Flags, reserved for the future. Must be zero.
2536 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2537 * NULL if no ring-3 mapping.
2538 * @param ppvR3 Where to store the address of the Ring-0 mapping.
2539 * NULL if no ring-0 mapping.
2540 * @param paPages Where to store the addresses of the pages. Optional.
2541 */
2542SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
2543{
2544 int rc;
2545 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2546 LogFlow(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
2547
2548 /*
2549 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
2550 */
2551 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2552 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
2553 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2554 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
2555 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2556 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
2557 {
2558 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than 128MB.\n", cPages));
2559 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2560 }
2561
2562 /*
2563 * Let IPRT do the work.
2564 */
2565 if (ppvR0)
2566 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
2567 else
2568 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
2569 if (RT_SUCCESS(rc))
2570 {
2571 int rc2;
2572 if (ppvR3)
2573 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2574 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2575 else
2576 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
2577 if (RT_SUCCESS(rc))
2578 {
2579 Mem.eType = MEMREF_TYPE_PAGE;
2580 rc = supdrvMemAdd(&Mem, pSession);
2581 if (!rc)
2582 {
2583 if (ppvR3)
2584 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2585 if (ppvR0)
2586 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2587 if (paPages)
2588 {
2589 uint32_t iPage = cPages;
2590 while (iPage-- > 0)
2591 {
2592 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
2593 Assert(paPages[iPage] != NIL_RTHCPHYS);
2594 }
2595 }
2596 return VINF_SUCCESS;
2597 }
2598
2599 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2600 AssertRC(rc2);
2601 }
2602
2603 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2604 AssertRC(rc2);
2605 }
2606 return rc;
2607}
2608
2609
/**
 * Maps a portion of a page allocation into kernel (ring-0) space.
 *
 * The memory must have been allocated for this session either by
 * SUPR0PageAllocEx() with a ring-3 mapping (MEMREF_TYPE_PAGE) or tracked as
 * locked memory without a ring-3 mapping object (MEMREF_TYPE_LOCKED) - see
 * the lookup loop below.
 *
 * @returns IPRT status code.
 * @param   pSession    The session the allocation belongs to.
 * @param   pvR3        The ring-3 address of the allocation.
 * @param   offSub      Byte offset into the allocation of the area to map;
 *                      must be page aligned.
 * @param   cbSub       Number of bytes to map; page aligned and non-zero.
 * @param   fFlags      Flags, reserved for the future. Must be zero.
 * @param   ppvR0       Where to store the ring-0 address of the new mapping.
 */
SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
                                  uint32_t fFlags, PRTR0PTR ppvR0)
{
    int             rc;
    PSUPDRVBUNDLE   pBundle;
    RTSPINLOCKTMP   SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    RTR0MEMOBJ      hMemObj = NIL_RTR0MEMOBJ;
    LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));

    /*
     * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub, VERR_INVALID_PARAMETER);

    /*
     * Find the memory object backing pvR3 in this session's bundles.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (    (   pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
                         && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                         && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                         && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
                    ||  (   pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
                         && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                         && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
                         && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
                {
                    hMemObj = pBundle->aMem[i].MemObj;
                    break;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    rc = VERR_INVALID_PARAMETER;
    if (hMemObj != NIL_RTR0MEMOBJ)
    {
        /*
         * Do some furter input validations before calling IPRT.
         * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
         *
         * NOTE(review): offSub + cbSub is computed in 32-bit arithmetic when
         * size_t is 32-bit and could in theory wrap for huge values - TODO
         * confirm callers can never pass such values.
         */
        size_t cbMemObj = RTR0MemObjSize(hMemObj);
        if (    offSub < cbMemObj
            &&  cbSub <= cbMemObj
            &&  offSub + cbSub <= cbMemObj)
        {
            RTR0MEMOBJ hMapObj;
            rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
                                       RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
            if (RT_SUCCESS(rc))
                *ppvR0 = RTR0MemObjAddress(hMapObj);
        }
        else
            SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);

    }
    return rc;
}
2697
2698
2699
2700#ifdef RT_OS_WINDOWS
2701/**
2702 * Check if the pages were locked by SUPR0PageAlloc
2703 *
2704 * This function will be removed along with the lock/unlock hacks when
2705 * we've cleaned up the ring-3 code properly.
2706 *
2707 * @returns boolean
2708 * @param pSession The session to which the memory was allocated.
2709 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2710 */
2711static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2712{
2713 PSUPDRVBUNDLE pBundle;
2714 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2715 LogFlow(("SUPR0PageIsLockedByPageAlloc: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2716
2717 /*
2718 * Search for the address.
2719 */
2720 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2721 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2722 {
2723 if (pBundle->cUsed > 0)
2724 {
2725 unsigned i;
2726 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2727 {
2728 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2729 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2730 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2731 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2732 {
2733 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2734 return true;
2735 }
2736 }
2737 }
2738 }
2739 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2740 return false;
2741}
2742
2743
/**
 * Get the physical addresses of memory allocated using SUPR0PageAllocEx().
 *
 * This function will be removed along with the lock/unlock hacks when
 * we've cleaned up the ring-3 code properly.
 *
 * @returns IPRT status code.
 * @param   pSession    The session to which the memory was allocated.
 * @param   pvR3        The Ring-3 address returned by SUPR0PageAlloc().
 * @param   cPages      Number of pages in paPages. Silently clamped to the
 *                      actual page count of the allocation if larger.
 * @param   paPages     Where to store the physical addresses.
 */
static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    LogFlow(("supdrvPageGetPhys: pSession=%p pvR3=%p cPages=%#lx paPages=%p\n", pSession, (void *)pvR3, (long)cPages, paPages));

    /*
     * Search the bundles for a page allocation whose ring-3 mapping matches.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (    pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    &&  pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                    &&  RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
                {
                    uint32_t iPage;
                    /* Don't read beyond the end of the memory object. */
                    size_t cMaxPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
                    cPages = (uint32_t)RT_MIN(cMaxPages, cPages);
                    for (iPage = 0; iPage < cPages; iPage++)
                        paPages[iPage] = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
    return VERR_INVALID_PARAMETER;
}
2792#endif /* RT_OS_WINDOWS */
2793
2794
/**
 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
 *
 * Thin wrapper: validates the session and delegates to supdrvMemRelease()
 * with the MEMREF_TYPE_PAGE type filter.
 *
 * @returns IPRT status code.
 * @param   pSession    The session owning the allocation.
 * @param   pvR3        The Ring-3 address returned by SUPR0PageAlloc() or
 *                      SUPR0PageAllocEx().
 */
SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
{
    LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
}
2809
2810
/**
 * Maps the GIP into userspace and/or get the physical address of the GIP.
 *
 * @returns IPRT status code.
 * @param   pSession    Session to which the GIP mapping should belong.
 * @param   ppGipR3     Where to store the address of the ring-3 mapping. (optional)
 * @param   pHCPhysGip  Where to store the physical address. (optional)
 *
 * @remark  There is no reference counting on the mapping, so one call to this function
 *          count globally as one reference. One call to SUPR0GipUnmap() is will unmap GIP
 *          and remove the session as a GIP user.
 */
SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGip)
{
    int             rc = VINF_SUCCESS;
    PSUPDRVDEVEXT   pDevExt = pSession->pDevExt;
    RTR3PTR         pGip = NIL_RTR3PTR;
    RTHCPHYS        HCPhys = NIL_RTHCPHYS;
    LogFlow(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGip=%p\n", pSession, ppGipR3, pHCPhysGip));

    /*
     * Validate
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(ppGipR3, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pHCPhysGip, VERR_INVALID_POINTER);

    RTSemFastMutexRequest(pDevExt->mtxGip);
    if (pDevExt->pGip)
    {
        /*
         * Map it?  The session caches at most one ring-3 GIP mapping; reuse it
         * if it already exists.
         */
        if (ppGipR3)
        {
            if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
                rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (RTR3PTR)-1, 0,
                                       RTMEM_PROT_READ, RTR0ProcHandleSelf());
            if (RT_SUCCESS(rc))
            {
                pGip = RTR0MemObjAddressR3(pSession->GipMapObjR3);
                rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
            }
        }

        /*
         * Get physical address.
         */
        if (pHCPhysGip && !rc)
            HCPhys = pDevExt->HCPhysGip;

        /*
         * Reference globally.  The first user (re)starts the GIP update timer
         * after resetting the per-CPU transaction ids.
         */
        if (!pSession->fGipReferenced && !rc)
        {
            pSession->fGipReferenced = 1;
            pDevExt->cGipUsers++;
            if (pDevExt->cGipUsers == 1)
            {
                /* NOTE(review): this pGip shadows the RTR3PTR pGip declared above;
                   confusing but harmless since the outer one isn't used in here. */
                PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
                unsigned i;

                LogFlow(("SUPR0GipMap: Resumes GIP updating\n"));

                for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
                    ASMAtomicXchgU32(&pGip->aCPUs[i].u32TransactionId, pGip->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
                ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, 0);

                rc = RTTimerStart(pDevExt->pGipTimer, 0);
                AssertRC(rc); rc = VINF_SUCCESS;
            }
        }
    }
    else
    {
        rc = SUPDRV_ERR_GENERAL_FAILURE;
        Log(("SUPR0GipMap: GIP is not available!\n"));
    }
    RTSemFastMutexRelease(pDevExt->mtxGip);

    /*
     * Write returns.
     */
    if (pHCPhysGip)
        *pHCPhysGip = HCPhys;
    if (ppGipR3)
        *ppGipR3 = pGip;

#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGip=%lx pGip=%p\n", rc, (unsigned long)HCPhys, (void *)pGip));
#else
    LogFlow((   "SUPR0GipMap: returns %d *pHCPhysGip=%lx pGip=%p\n", rc, (unsigned long)HCPhys, (void *)pGip));
#endif
    return rc;
}
2907
2908
/**
 * Unmaps any user mapping of the GIP and terminates all GIP access
 * from this session.
 *
 * @returns IPRT status code.
 * @param   pSession    Session to which the GIP mapping should belong.
 */
SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
{
    int             rc = VINF_SUCCESS;
    PSUPDRVDEVEXT   pDevExt = pSession->pDevExt;
#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
                pSession,
                pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
                pSession->GipMapObjR3));
#else
    LogFlow(("SUPR0GipUnmap: pSession=%p\n", pSession));
#endif
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);

    RTSemFastMutexRequest(pDevExt->mtxGip);

    /*
     * Unmap anything?  Only clear the handle once the free succeeded.
     */
    if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
    {
        rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
    }

    /*
     * Dereference global GIP.  The last user stops the GIP update timer.
     * (Skipped if the unmap above failed - rc is still set.)
     */
    if (pSession->fGipReferenced && !rc)
    {
        pSession->fGipReferenced = 0;
        if (    pDevExt->cGipUsers > 0
            &&  !--pDevExt->cGipUsers)
        {
            LogFlow(("SUPR0GipUnmap: Suspends GIP updating\n"));
            rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = VINF_SUCCESS;
        }
    }

    RTSemFastMutexRelease(pDevExt->mtxGip);

    return rc;
}
2961
2962
/**
 * Register a component factory with the support driver.
 *
 * This is currently restricted to kernel sessions only.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if we're out of memory.
 * @retval  VERR_ALREADY_EXISTS if the factory has already been registered.
 * @retval  VERR_ACCESS_DENIED if it isn't a kernel session.
 * @retval  VERR_INVALID_PARAMETER on invalid parameter.
 * @retval  VERR_INVALID_POINTER on invalid pointer parameter.
 *
 * @param   pSession    The SUPDRV session (must be a ring-0 session).
 * @param   pFactory    Pointer to the component factory registration structure.
 *
 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
 */
SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
{
    PSUPDRVFACTORYREG pNewReg;
    const char *psz;
    int rc;

    /*
     * Validate parameters.  The memchr check both proves szName is
     * terminated within bounds and yields the name length.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
    AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
    AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
    psz = (const char *)memchr(pFactory->szName, '\0', sizeof(pFactory->szName));
    AssertReturn(psz, VERR_INVALID_PARAMETER);

    /*
     * Allocate and initialize a new registration structure.
     */
    pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
    if (pNewReg)
    {
        pNewReg->pNext = NULL;
        pNewReg->pFactory = pFactory;
        pNewReg->pSession = pSession;
        pNewReg->cchName = psz - &pFactory->szName[0];

        /*
         * Add it to the tail of the list after checking for prior registration.
         */
        rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
        if (RT_SUCCESS(rc))
        {
            PSUPDRVFACTORYREG pPrev = NULL;
            PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
            while (pCur && pCur->pFactory != pFactory)
            {
                pPrev = pCur;
                pCur = pCur->pNext;
            }
            if (!pCur)
            {
                /* pPrev is the current tail (or NULL for an empty list). */
                if (pPrev)
                    pPrev->pNext = pNewReg;
                else
                    pSession->pDevExt->pComponentFactoryHead = pNewReg;
                rc = VINF_SUCCESS;
            }
            else
                rc = VERR_ALREADY_EXISTS;

            RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
        }

        /* Not linked in on failure - release the registration record. */
        if (RT_FAILURE(rc))
            RTMemFree(pNewReg);
    }
    else
        rc = VERR_NO_MEMORY;
    return rc;
}
3042
3043
/**
 * Deregister a component factory.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NOT_FOUND if the factory wasn't registered.
 * @retval  VERR_ACCESS_DENIED if it isn't a kernel session.
 * @retval  VERR_INVALID_PARAMETER on invalid parameter.
 * @retval  VERR_INVALID_POINTER on invalid pointer parameter.
 *
 * @param   pSession    The SUPDRV session (must be a ring-0 session).
 * @param   pFactory    Pointer to the component factory registration structure
 *                      previously passed SUPR0ComponentRegisterFactory().
 *
 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
 */
SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
{
    int rc;

    /*
     * Validate parameters.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
    AssertPtrReturn(pFactory, VERR_INVALID_POINTER);

    /*
     * Take the lock and look for the registration record.
     */
    rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
    if (RT_SUCCESS(rc))
    {
        PSUPDRVFACTORYREG pPrev = NULL;
        PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
        while (pCur && pCur->pFactory != pFactory)
        {
            pPrev = pCur;
            pCur = pCur->pNext;
        }
        if (pCur)
        {
            /* Unlink and scrub the record before freeing it outside the lock. */
            if (!pPrev)
                pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
            else
                pPrev->pNext = pCur->pNext;

            pCur->pNext = NULL;
            pCur->pFactory = NULL;
            pCur->pSession = NULL;
            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NOT_FOUND;

        RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);

        /* pCur is NULL in the not-found case; presumably RTMemFree treats
           NULL as a no-op like free() - TODO confirm against IPRT docs. */
        RTMemFree(pCur);
    }
    return rc;
}
3105
3106
/**
 * Queries a component factory.
 *
 * @returns VBox status code.
 * @retval  VERR_INVALID_PARAMETER on invalid parameter.
 * @retval  VERR_INVALID_POINTER on invalid pointer parameter.
 * @retval  VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
 * @retval  VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
 *
 * @param   pSession            The SUPDRV session.
 * @param   pszName             The name of the component factory.
 * @param   pszInterfaceUuid    The UUID of the factory interface (stringified).
 * @param   ppvFactoryIf        Where to store the factory interface.
 */
SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
{
    const char *pszEnd;
    size_t cchName;
    int rc;

    /*
     * Validate parameters.  memchr proves the strings are terminated within
     * the sizes the factory structures can hold.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);

    AssertPtrReturn(pszName, VERR_INVALID_POINTER);
    pszEnd = memchr(pszName, '\0', RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
    AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
    cchName = pszEnd - pszName;

    AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
    pszEnd = memchr(pszInterfaceUuid, '\0', RTUUID_STR_LENGTH);
    AssertReturn(pszEnd, VERR_INVALID_PARAMETER);

    AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
    *ppvFactoryIf = NULL;

    /*
     * Take the lock and try all factories by this name.  More than one factory
     * may share a name; keep going until one supports the requested interface.
     */
    rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
    if (RT_SUCCESS(rc))
    {
        PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
        rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
        while (pCur)
        {
            if (    pCur->cchName == cchName
                &&  !memcmp(pCur->pFactory->szName, pszName, cchName))
            {
#ifdef RT_WITH_W64_UNWIND_HACK
                void *pvFactory = supdrvNtWrapQueryFactoryInterface((PFNRT)pCur->pFactory->pfnQueryFactoryInterface, pCur->pFactory, pSession, pszInterfaceUuid);
#else
                void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
#endif
                if (pvFactory)
                {
                    *ppvFactoryIf = pvFactory;
                    rc = VINF_SUCCESS;
                    break;
                }
                rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
            }

            /* next */
            pCur = pCur->pNext;
        }

        RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
    }
    return rc;
}
3179
3180
/**
 * Adds a memory object to the session.
 *
 * @returns IPRT status code.
 * @param   pMem        Memory tracking structure containing the
 *                      information to track.
 * @param   pSession    The session.
 */
static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;

    /*
     * Find free entry and record the allocation.  Done under the spinlock;
     * if no slot is free we drop the lock, allocate a new bundle, and retry
     * the insertion below.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
                {
                    pBundle->cUsed++;
                    pBundle->aMem[i] = *pMem;
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
                    return VINF_SUCCESS;
                }
            }
            /* cUsed said there was a free slot but none was found. */
            AssertFailed();             /* !!this can't be happening!!! */
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    /*
     * Need to allocate a new bundle.
     * Insert into the last entry in the bundle.
     */
    pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
    if (!pBundle)
        return VERR_NO_MEMORY;

    /* take last entry. */
    pBundle->cUsed++;
    pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;

    /* insert into list. */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    pBundle->pNext = pSession->Bundle.pNext;
    pSession->Bundle.pNext = pBundle;
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    return VINF_SUCCESS;
}
3238
3239
/**
 * Releases a memory object referenced by pointer and type.
 *
 * @returns IPRT status code.
 * @param   pSession    Session data.
 * @param   uPtr        Pointer to memory. This is matched against both the R0 and R3 addresses.
 * @param   eType       Memory type.
 */
static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;

    /*
     * Validate input.
     */
    if (!uPtr)
    {
        Log(("Illegal address %p\n", (void *)uPtr));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Search for the address.  Matching records are copied out and released
     * after dropping the spinlock, since RTR0MemObjFree may not be callable
     * while holding it.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (    pBundle->aMem[i].eType == eType
                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    &&  (   (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
                         || (   pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                             && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
                   )
                {
                    /* Make a copy of it and release it outside the spinlock. */
                    SUPDRVMEMREF Mem = pBundle->aMem[i];
                    pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
                    pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
                    pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

                    /* Free the ring-3 mapping first, then the backing object
                       (with fFreeMappings so any other mappings go too). */
                    if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MapObjR3, false);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    if (Mem.MemObj != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
    Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
    return VERR_INVALID_PARAMETER;
}
3306
3307
/**
 * Opens an image. If it's the first time it's opened the call must upload
 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
 *
 * This is the 1st step of the loading.
 *
 * @returns IPRT status code.
 * @param   pDevExt     Device globals.
 * @param   pSession    Session data.
 * @param   pReq        The open request.
 */
static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
{
    PSUPDRVLDRIMAGE pImage;
    unsigned cb;
    void *pv;
    size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
    LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pReq->u.In.szName, pReq->u.In.cbImage));

    /*
     * Check if we got an instance of the image already.  If so, just bump
     * the usage counts and tell the caller whether loading is still needed.
     */
    RTSemFastMutexRequest(pDevExt->mtxLdr);
    for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
    {
        /* Compare length (terminator position) and content in one go. */
        if (    pImage->szName[cchName] == '\0'
            &&  !memcmp(pImage->szName, pReq->u.In.szName, cchName))
        {
            pImage->cUsage++;
            pReq->u.Out.pvImageBase   = pImage->pvImage;
            pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
            supdrvLdrAddUsage(pSession, pImage);
            RTSemFastMutexRelease(pDevExt->mtxLdr);
            return VINF_SUCCESS;
        }
    }
    /* (not found - add it!) */

    /*
     * Allocate memory.  Header + image bits + 31 bytes slack so the image
     * can be aligned on a 32 byte boundary below.  Assumes cbImage was
     * range-checked by the IOCtl dispatcher - TODO confirm.
     */
    cb = pReq->u.In.cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
    pv = RTMemExecAlloc(cb);
    if (!pv)
    {
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        Log(("supdrvIOCtl_LdrOpen: RTMemExecAlloc(%u) failed\n", cb));
        return VERR_NO_MEMORY;
    }

    /*
     * Setup and link in the LDR stuff.
     */
    pImage = (PSUPDRVLDRIMAGE)pv;
    pImage->pvImage         = RT_ALIGN_P(pImage + 1, 32);
    pImage->cbImage         = pReq->u.In.cbImage;
    pImage->pfnModuleInit   = NULL;
    pImage->pfnModuleTerm   = NULL;
    pImage->pfnServiceReqHandler = NULL;
    pImage->uState          = SUP_IOCTL_LDR_OPEN;
    pImage->cUsage          = 1;
    memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);

    pImage->pNext           = pDevExt->pLdrImages;
    pDevExt->pLdrImages     = pImage;

    supdrvLdrAddUsage(pSession, pImage);

    pReq->u.Out.pvImageBase = pImage->pvImage;
    pReq->u.Out.fNeedsLoading = true;
    RTSemFastMutexRelease(pDevExt->mtxLdr);

#if defined(RT_OS_WINDOWS) && defined(DEBUG)
    SUPR0Printf("VBoxDrv: windbg> .reload /f %s=%#p\n", pImage->szName, pImage->pvImage);
#endif
    return VINF_SUCCESS;
}
3385
3386
3387/**
3388 * Loads the image bits.
3389 *
3390 * This is the 2nd step of the loading.
3391 *
3392 * @returns IPRT status code.
3393 * @param pDevExt Device globals.
3394 * @param pSession Session data.
3395 * @param pReq The request.
3396 */
3397static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
3398{
3399 PSUPDRVLDRUSAGE pUsage;
3400 PSUPDRVLDRIMAGE pImage;
3401 int rc;
3402 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImage));
3403
3404 /*
3405 * Find the ldr image.
3406 */
3407 RTSemFastMutexRequest(pDevExt->mtxLdr);
3408 pUsage = pSession->pLdrUsage;
3409 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3410 pUsage = pUsage->pNext;
3411 if (!pUsage)
3412 {
3413 RTSemFastMutexRelease(pDevExt->mtxLdr);
3414 Log(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
3415 return VERR_INVALID_HANDLE;
3416 }
3417 pImage = pUsage->pImage;
3418 if (pImage->cbImage != pReq->u.In.cbImage)
3419 {
3420 RTSemFastMutexRelease(pDevExt->mtxLdr);
3421 Log(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pReq->u.In.cbImage));
3422 return VERR_INVALID_HANDLE;
3423 }
3424 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
3425 {
3426 unsigned uState = pImage->uState;
3427 RTSemFastMutexRelease(pDevExt->mtxLdr);
3428 if (uState != SUP_IOCTL_LDR_LOAD)
3429 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
3430 return SUPDRV_ERR_ALREADY_LOADED;
3431 }
3432 switch (pReq->u.In.eEPType)
3433 {
3434 case SUPLDRLOADEP_NOTHING:
3435 break;
3436
3437 case SUPLDRLOADEP_VMMR0:
3438 if ( !pReq->u.In.EP.VMMR0.pvVMMR0
3439 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryInt
3440 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryFast
3441 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryEx)
3442 {
3443 RTSemFastMutexRelease(pDevExt->mtxLdr);
3444 Log(("NULL pointer: pvVMMR0=%p pvVMMR0EntryInt=%p pvVMMR0EntryFast=%p pvVMMR0EntryEx=%p!\n",
3445 pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3446 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3447 return VERR_INVALID_PARAMETER;
3448 }
3449 /** @todo validate pReq->u.In.EP.VMMR0.pvVMMR0 against pvImage! */
3450 if ( (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryInt - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3451 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryFast - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3452 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryEx - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3453 {
3454 RTSemFastMutexRelease(pDevExt->mtxLdr);
3455 Log(("Out of range (%p LB %#x): pvVMMR0EntryInt=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p is NULL!\n",
3456 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3457 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3458 return VERR_INVALID_PARAMETER;
3459 }
3460 break;
3461
3462 case SUPLDRLOADEP_SERVICE:
3463 if (!pReq->u.In.EP.Service.pfnServiceReq)
3464 {
3465 RTSemFastMutexRelease(pDevExt->mtxLdr);
3466 Log(("NULL pointer: pfnServiceReq=%p!\n", pReq->u.In.EP.Service.pfnServiceReq));
3467 return VERR_INVALID_PARAMETER;
3468 }
3469 if ((uintptr_t)pReq->u.In.EP.Service.pfnServiceReq - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3470 {
3471 RTSemFastMutexRelease(pDevExt->mtxLdr);
3472 Log(("Out of range (%p LB %#x): pfnServiceReq=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p is NULL!\n",
3473 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.Service.pfnServiceReq));
3474 return VERR_INVALID_PARAMETER;
3475 }
3476 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
3477 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
3478 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
3479 {
3480 RTSemFastMutexRelease(pDevExt->mtxLdr);
3481 Log(("Out of range (%p LB %#x): apvReserved={%p,%p,%p} MBZ!\n",
3482 pImage->pvImage, pReq->u.In.cbImage,
3483 pReq->u.In.EP.Service.apvReserved[0],
3484 pReq->u.In.EP.Service.apvReserved[1],
3485 pReq->u.In.EP.Service.apvReserved[2]));
3486 return VERR_INVALID_PARAMETER;
3487 }
3488 break;
3489
3490 default:
3491 RTSemFastMutexRelease(pDevExt->mtxLdr);
3492 Log(("Invalid eEPType=%d\n", pReq->u.In.eEPType));
3493 return VERR_INVALID_PARAMETER;
3494 }
3495 if ( pReq->u.In.pfnModuleInit
3496 && (uintptr_t)pReq->u.In.pfnModuleInit - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3497 {
3498 RTSemFastMutexRelease(pDevExt->mtxLdr);
3499 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
3500 pReq->u.In.pfnModuleInit, pImage->pvImage, pReq->u.In.cbImage));
3501 return VERR_INVALID_PARAMETER;
3502 }
3503 if ( pReq->u.In.pfnModuleTerm
3504 && (uintptr_t)pReq->u.In.pfnModuleTerm - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3505 {
3506 RTSemFastMutexRelease(pDevExt->mtxLdr);
3507 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
3508 pReq->u.In.pfnModuleTerm, pImage->pvImage, pReq->u.In.cbImage));
3509 return VERR_INVALID_PARAMETER;
3510 }
3511
3512 /*
3513 * Copy the memory.
3514 */
3515 /* no need to do try/except as this is a buffered request. */
3516 memcpy(pImage->pvImage, &pReq->u.In.achImage[0], pImage->cbImage);
3517 pImage->uState = SUP_IOCTL_LDR_LOAD;
3518 pImage->pfnModuleInit = pReq->u.In.pfnModuleInit;
3519 pImage->pfnModuleTerm = pReq->u.In.pfnModuleTerm;
3520 pImage->offSymbols = pReq->u.In.offSymbols;
3521 pImage->cSymbols = pReq->u.In.cSymbols;
3522 pImage->offStrTab = pReq->u.In.offStrTab;
3523 pImage->cbStrTab = pReq->u.In.cbStrTab;
3524
3525 /*
3526 * Update any entry points.
3527 */
3528 switch (pReq->u.In.eEPType)
3529 {
3530 default:
3531 case SUPLDRLOADEP_NOTHING:
3532 rc = VINF_SUCCESS;
3533 break;
3534 case SUPLDRLOADEP_VMMR0:
3535 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3536 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
3537 break;
3538 case SUPLDRLOADEP_SERVICE:
3539 pImage->pfnServiceReqHandler = pReq->u.In.EP.Service.pfnServiceReq;
3540 rc = VINF_SUCCESS;
3541 break;
3542 }
3543
3544 /*
3545 * On success call the module initialization.
3546 */
3547 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
3548 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
3549 {
3550 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
3551#ifdef RT_WITH_W64_UNWIND_HACK
3552 rc = supdrvNtWrapModuleInit((PFNRT)pImage->pfnModuleInit);
3553#else
3554 rc = pImage->pfnModuleInit();
3555#endif
3556 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
3557 supdrvLdrUnsetVMMR0EPs(pDevExt);
3558 }
3559
3560 if (rc)
3561 pImage->uState = SUP_IOCTL_LDR_OPEN;
3562
3563 RTSemFastMutexRelease(pDevExt->mtxLdr);
3564 return rc;
3565}
3566
3567
/**
 * Frees a previously loaded (prep'ed) image.
 *
 * The image is only actually released when this session holds the last
 * usage reference.  If objects with destructors inside the image are still
 * registered, the free is deferred to the session cleanup path so things
 * get torn down in the right order.
 *
 * @returns IPRT status code.
 * @param   pDevExt     Device globals.
 * @param   pSession    Session data.
 * @param   pReq        The request.
 */
static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
{
    int             rc;
    PSUPDRVLDRUSAGE pUsagePrev;
    PSUPDRVLDRUSAGE pUsage;
    PSUPDRVLDRIMAGE pImage;
    LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));

    /*
     * Find the ldr image in this session's usage list, keeping track of the
     * predecessor so we can unlink the node later.
     */
    RTSemFastMutexRequest(pDevExt->mtxLdr);
    pUsagePrev = NULL;
    pUsage = pSession->pLdrUsage;
    while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
    {
        pUsagePrev = pUsage;
        pUsage = pUsage->pNext;
    }
    if (!pUsage)
    {
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
        return VERR_INVALID_HANDLE;
    }

    /*
     * Check if we can remove anything.
     */
    rc = VINF_SUCCESS;
    pImage = pUsage->pImage;
    if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
    {
        /*
         * Check if there are any objects with destructors in the image, if
         * so leave it for the session cleanup routine so we get a chance to
         * clean things up in the right order and not leave them all dangling.
         */
        RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
        if (pImage->cUsage <= 1)
        {
            /* Last image reference overall: scan the global object list. */
            PSUPDRVOBJ pObj;
            for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
                if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
                {
                    rc = VERR_DANGLING_OBJECTS;
                    break;
                }
        }
        else
        {
            /* Only this session's reference goes: scan this session's objects. */
            PSUPDRVUSAGE pGenUsage;
            for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
                if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
                {
                    rc = VERR_DANGLING_OBJECTS;
                    break;
                }
        }
        RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
        if (rc == VINF_SUCCESS)
        {
            /* unlink it */
            if (pUsagePrev)
                pUsagePrev->pNext = pUsage->pNext;
            else
                pSession->pLdrUsage = pUsage->pNext;

            /* free it */
            pUsage->pImage = NULL;
            pUsage->pNext = NULL;
            RTMemFree(pUsage);

            /*
             * Derefrence the image.
             */
            if (pImage->cUsage <= 1)
                supdrvLdrFree(pDevExt, pImage);
            else
                pImage->cUsage--;
        }
        else
        {
            /* Dangling objects found: keep the image around and report success
               anyway (deliberate override, see the @todo below). */
            Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
            rc = VINF_SUCCESS; /** @todo BRANCH-2.1: remove this after branching. */
        }
    }
    else
    {
        /*
         * Dereference both image and usage.
         */
        pImage->cUsage--;
        pUsage->cUsage--;
    }

    RTSemFastMutexRelease(pDevExt->mtxLdr);
    return rc;
}
3676
3677
3678/**
3679 * Gets the address of a symbol in an open image.
3680 *
3681 * @returns 0 on success.
3682 * @returns SUPDRV_ERR_* on failure.
3683 * @param pDevExt Device globals.
3684 * @param pSession Session data.
3685 * @param pReq The request buffer.
3686 */
3687static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
3688{
3689 PSUPDRVLDRIMAGE pImage;
3690 PSUPDRVLDRUSAGE pUsage;
3691 uint32_t i;
3692 PSUPLDRSYM paSyms;
3693 const char *pchStrings;
3694 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
3695 void *pvSymbol = NULL;
3696 int rc = VERR_GENERAL_FAILURE;
3697 Log3(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
3698
3699 /*
3700 * Find the ldr image.
3701 */
3702 RTSemFastMutexRequest(pDevExt->mtxLdr);
3703 pUsage = pSession->pLdrUsage;
3704 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3705 pUsage = pUsage->pNext;
3706 if (!pUsage)
3707 {
3708 RTSemFastMutexRelease(pDevExt->mtxLdr);
3709 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3710 return VERR_INVALID_HANDLE;
3711 }
3712 pImage = pUsage->pImage;
3713 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3714 {
3715 unsigned uState = pImage->uState;
3716 RTSemFastMutexRelease(pDevExt->mtxLdr);
3717 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3718 return VERR_ALREADY_LOADED;
3719 }
3720
3721 /*
3722 * Search the symbol strings.
3723 */
3724 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3725 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3726 for (i = 0; i < pImage->cSymbols; i++)
3727 {
3728 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3729 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3730 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
3731 {
3732 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3733 rc = VINF_SUCCESS;
3734 break;
3735 }
3736 }
3737 RTSemFastMutexRelease(pDevExt->mtxLdr);
3738 pReq->u.Out.pvSymbol = pvSymbol;
3739 return rc;
3740}
3741
3742
/**
 * Gets the address of a symbol in an open image or the support driver.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VERR_* on validation or lookup failure.
 * @param   pDevExt     Device globals.
 * @param   pSession    Session data.
 * @param   pReq        The request buffer.
 */
static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
{
    int             rc = VINF_SUCCESS;
    const char     *pszSymbol = pReq->u.In.pszSymbol;
    const char     *pszModule = pReq->u.In.pszModule;
    size_t          cbSymbol;
    char const     *pszEnd;
    uint32_t        i;

    /*
     * Input validation.  The symbol name must be properly terminated within
     * 512 bytes, the optional module name within 64 bytes.
     */
    AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
    pszEnd = (char *)memchr(pszSymbol, '\0', 512);
    AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
    cbSymbol = pszEnd - pszSymbol + 1;

    if (pszModule)
    {
        AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
        pszEnd = (char *)memchr(pszModule, '\0', 64);
        AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
    }
    Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));


    if (    !pszModule
        ||  !strcmp(pszModule, "SupDrv"))
    {
        /*
         * Search the support driver export table.
         * NOTE(review): when no entry matches, rc stays VINF_SUCCESS and
         * pReq->u.Out.pfnSymbol is left untouched - presumably the caller
         * pre-initializes it; verify before relying on it.
         */
        for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
            if (!strcmp(g_aFunctions[i].szName, pszSymbol))
            {
                pReq->u.Out.pfnSymbol = g_aFunctions[i].pfn;
                break;
            }
    }
    else
    {
        /*
         * Find the loader image in the global (device-wide) image list.
         */
        PSUPDRVLDRIMAGE pImage;

        RTSemFastMutexRequest(pDevExt->mtxLdr);

        for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
            if (!strcmp(pImage->szName, pszModule))
                break;
        if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
        {
            /*
             * Search the symbol strings.
             */
            const char *pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
            PCSUPLDRSYM paSyms     = (PCSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
            for (i = 0; i < pImage->cSymbols; i++)
            {
                if (    paSyms[i].offSymbol < pImage->cbImage /* paranoia */
                    &&  paSyms[i].offName + cbSymbol <= pImage->cbStrTab
                    &&  !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
                {
                    /*
                     * Found it! Calc the symbol address and add a reference to the module.
                     */
                    pReq->u.Out.pfnSymbol = (PFNRT)((uint8_t *)pImage->pvImage + paSyms[i].offSymbol);
                    rc = supdrvLdrAddUsage(pSession, pImage);
                    break;
                }
            }
        }
        else
            rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;

        RTSemFastMutexRelease(pDevExt->mtxLdr);
    }
    return rc;
}
3832
3833
3834/**
3835 * Updates the VMMR0 entry point pointers.
3836 *
3837 * @returns IPRT status code.
3838 * @param pDevExt Device globals.
3839 * @param pSession Session data.
3840 * @param pVMMR0 VMMR0 image handle.
3841 * @param pvVMMR0EntryInt VMMR0EntryInt address.
3842 * @param pvVMMR0EntryFast VMMR0EntryFast address.
3843 * @param pvVMMR0EntryEx VMMR0EntryEx address.
3844 * @remark Caller must own the loader mutex.
3845 */
3846static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
3847{
3848 int rc = VINF_SUCCESS;
3849 LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryInt=%p\n", pvVMMR0, pvVMMR0EntryInt));
3850
3851
3852 /*
3853 * Check if not yet set.
3854 */
3855 if (!pDevExt->pvVMMR0)
3856 {
3857 pDevExt->pvVMMR0 = pvVMMR0;
3858 pDevExt->pfnVMMR0EntryInt = pvVMMR0EntryInt;
3859 pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
3860 pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
3861 }
3862 else
3863 {
3864 /*
3865 * Return failure or success depending on whether the values match or not.
3866 */
3867 if ( pDevExt->pvVMMR0 != pvVMMR0
3868 || (void *)pDevExt->pfnVMMR0EntryInt != pvVMMR0EntryInt
3869 || (void *)pDevExt->pfnVMMR0EntryFast != pvVMMR0EntryFast
3870 || (void *)pDevExt->pfnVMMR0EntryEx != pvVMMR0EntryEx)
3871 {
3872 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
3873 rc = VERR_INVALID_PARAMETER;
3874 }
3875 }
3876 return rc;
3877}
3878
3879
/**
 * Unsets the VMMR0 entry point installed by supdrvLdrSetR0EP.
 *
 * Clears the image handle first, then the three entry point pointers.
 * NOTE(review): the store order may matter to code that checks pvVMMR0
 * before using the pfn members - confirm before reordering.
 *
 * @param pDevExt Device globals.
 */
static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
{
    pDevExt->pvVMMR0 = NULL;
    pDevExt->pfnVMMR0EntryInt = NULL;
    pDevExt->pfnVMMR0EntryFast = NULL;
    pDevExt->pfnVMMR0EntryEx = NULL;
}
3892
3893
3894/**
3895 * Adds a usage reference in the specified session of an image.
3896 *
3897 * Called while owning the loader semaphore.
3898 *
3899 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
3900 * @param pSession Session in question.
3901 * @param pImage Image which the session is using.
3902 */
3903static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
3904{
3905 PSUPDRVLDRUSAGE pUsage;
3906 LogFlow(("supdrvLdrAddUsage: pImage=%p\n", pImage));
3907
3908 /*
3909 * Referenced it already?
3910 */
3911 pUsage = pSession->pLdrUsage;
3912 while (pUsage)
3913 {
3914 if (pUsage->pImage == pImage)
3915 {
3916 pUsage->cUsage++;
3917 return VINF_SUCCESS;
3918 }
3919 pUsage = pUsage->pNext;
3920 }
3921
3922 /*
3923 * Allocate new usage record.
3924 */
3925 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
3926 AssertReturn(pUsage, VERR_NO_MEMORY);
3927 pUsage->cUsage = 1;
3928 pUsage->pImage = pImage;
3929 pUsage->pNext = pSession->pLdrUsage;
3930 pSession->pLdrUsage = pUsage;
3931 return VINF_SUCCESS;
3932}
3933
3934
/**
 * Frees a load image.
 *
 * Teardown order: unlink from the device image list, drop the VMMR0 entry
 * points if this image supplied them, neutralize any dangling object
 * destructors pointing into the image, run the module termination callback,
 * and finally release the executable memory.
 *
 * @param   pDevExt     Pointer to device extension.
 * @param   pImage      Pointer to the image we're gonna free.
 *                      This image must exit!
 * @remark  The caller MUST own SUPDRVDEVEXT::mtxLdr!
 */
static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
{
    PSUPDRVLDRIMAGE pImagePrev;
    LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));

    /* find it - arg. should've used doubly linked list. */
    Assert(pDevExt->pLdrImages);
    pImagePrev = NULL;
    if (pDevExt->pLdrImages != pImage)
    {
        pImagePrev = pDevExt->pLdrImages;
        while (pImagePrev->pNext != pImage)
            pImagePrev = pImagePrev->pNext;
        Assert(pImagePrev->pNext == pImage);
    }

    /* unlink */
    if (pImagePrev)
        pImagePrev->pNext = pImage->pNext;
    else
        pDevExt->pLdrImages = pImage->pNext;

    /* check if this is VMMR0.r0 unset its entry point pointers. */
    if (pDevExt->pvVMMR0 == pImage->pvImage)
        supdrvLdrUnsetVMMR0EPs(pDevExt);

    /* check for objects with destructors in this image. (Shouldn't happen.) */
    if (pDevExt->pObjs)
    {
        unsigned        cObjs = 0;
        PSUPDRVOBJ      pObj;
        RTSPINLOCKTMP   SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
        for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
            /* A destructor address inside [pvImage, pvImage + cbImage) would
               become a wild call after the free - NULL it out instead. */
            if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
            {
                pObj->pfnDestructor = NULL;
                cObjs++;
            }
        RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
        if (cObjs)
            OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
    }

    /* call termination function if fully loaded. */
    if (    pImage->pfnModuleTerm
        &&  pImage->uState == SUP_IOCTL_LDR_LOAD)
    {
        LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
#ifdef RT_WITH_W64_UNWIND_HACK
        supdrvNtWrapModuleTerm(pImage->pfnModuleTerm);
#else
        pImage->pfnModuleTerm();
#endif
    }

    /* free the image */
    pImage->cUsage = 0;
    pImage->pNext  = 0;
    pImage->uState = SUP_IOCTL_LDR_FREE;
    RTMemExecFree(pImage);
}
4005
4006
/**
 * Implements the service call request.
 *
 * Looks up the named service module among the images referenced by the
 * calling session and invokes its registered request handler.  On Windows
 * release builds this is compiled out and returns VERR_NOT_IMPLEMENTED.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pReq        The request packet, valid.
 */
static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
{
#if !defined(RT_OS_WINDOWS) || defined(DEBUG)
    int rc;

    /*
     * Find the module first in the module referenced by the calling session.
     */
    rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
    if (RT_SUCCESS(rc))
    {
        PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
        PSUPDRVLDRUSAGE pUsage;

        for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
            if (    pUsage->pImage->pfnServiceReqHandler
                &&  !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
            {
                pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
                break;
            }
        RTSemFastMutexRelease(pDevExt->mtxLdr);

        if (pfnServiceReqHandler)
        {
            /*
             * Call it.  A request exactly sized SUP_IOCTL_CALL_SERVICE_SIZE(0)
             * carries no payload, so NULL is passed for the request header.
             * (Each if/else arm below expands to a single statement via the
             * RT_WITH_W64_UNWIND_HACK preprocessor selection.)
             */
            if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
#ifdef RT_WITH_W64_UNWIND_HACK
                rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
#else
                rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
#endif
            else
#ifdef RT_WITH_W64_UNWIND_HACK
                rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation,
                                                   pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
#else
                rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
#endif
        }
        else
            rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
    }

    /* log it */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_INTERRUPTED
        &&  rc != VERR_TIMEOUT)
        Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
             rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
    else
        Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
              rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
    return rc;
#else  /* RT_OS_WINDOWS && !DEBUG */
    return VERR_NOT_IMPLEMENTED;
#endif /* RT_OS_WINDOWS && !DEBUG */
}
4075
4076
/**
 * Implements the logger settings request.
 *
 * Depending on fWhat this adjusts the flags/groups of an existing logger,
 * creates a new default (debug or release) logger instance, or destroys
 * the current default debug logger.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The caller's session.
 * @param   pReq        The request.
 */
static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq)
{
    const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
    const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
    const char *pszDest  = &pReq->u.In.szStrings[pReq->u.In.offDestination];
    PRTLOGGER   pLogger  = NULL;
    int         rc;

    /*
     * Some further validation.
     * DESTROY takes no setting strings and may not target the release logger.
     */
    switch (pReq->u.In.fWhat)
    {
        case SUPLOGGERSETTINGS_WHAT_SETTINGS:
        case SUPLOGGERSETTINGS_WHAT_CREATE:
            break;

        case SUPLOGGERSETTINGS_WHAT_DESTROY:
            if (*pszGroup || *pszFlags || *pszDest)
                return VERR_INVALID_PARAMETER;
            if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
                return VERR_ACCESS_DENIED;
            break;

        default:
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Get the logger.
     */
    switch (pReq->u.In.fWhich)
    {
        case SUPLOGGERSETTINGS_WHICH_DEBUG:
            pLogger = RTLogGetDefaultInstance();
            break;

        case SUPLOGGERSETTINGS_WHICH_RELEASE:
            pLogger = RTLogRelDefaultInstance();
            break;

        default:
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Do the job.
     */
    switch (pReq->u.In.fWhat)
    {
        case SUPLOGGERSETTINGS_WHAT_SETTINGS:
            if (pLogger)
            {
                rc = RTLogFlags(pLogger, pszFlags);
                if (RT_SUCCESS(rc))
                    rc = RTLogGroupSettings(pLogger, pszGroup);
                NOREF(pszDest);
            }
            else
                rc = VERR_NOT_FOUND;
            break;

        case SUPLOGGERSETTINGS_WHAT_CREATE:
        {
            if (pLogger)
                rc = VERR_ALREADY_EXISTS;
            else
            {
                static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;

                rc = RTLogCreate(&pLogger,
                                 0 /* fFlags */,
                                 pszGroup,
                                 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
                                 ? "VBOX_LOG"
                                 : "VBOX_RELEASE_LOG",
                                 RT_ELEMENTS(s_apszGroups),
                                 s_apszGroups,
                                 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
                                 NULL);
                if (RT_SUCCESS(rc))
                {
                    rc = RTLogFlags(pLogger, pszFlags);
                    NOREF(pszDest);
                    if (RT_SUCCESS(rc))
                    {
                        /* Install the new logger as the default instance.  The
                           RTLog*SetDefaultInstance calls return the PREVIOUS
                           default (NULL here), so the RTLogDestroy below frees
                           the old instance on success, or the freshly created
                           logger when RTLogFlags failed. */
                        switch (pReq->u.In.fWhich)
                        {
                            case SUPLOGGERSETTINGS_WHICH_DEBUG:
                                pLogger = RTLogSetDefaultInstance(pLogger);
                                break;
                            case SUPLOGGERSETTINGS_WHICH_RELEASE:
                                pLogger = RTLogRelSetDefaultInstance(pLogger);
                                break;
                        }
                    }
                    RTLogDestroy(pLogger);
                }
            }
            break;
        }

        case SUPLOGGERSETTINGS_WHAT_DESTROY:
            /* Detach the default instance first, then destroy it. */
            switch (pReq->u.In.fWhich)
            {
                case SUPLOGGERSETTINGS_WHICH_DEBUG:
                    pLogger = RTLogSetDefaultInstance(NULL);
                    break;
                case SUPLOGGERSETTINGS_WHICH_RELEASE:
                    pLogger = RTLogRelSetDefaultInstance(NULL);
                    break;
            }
            rc = RTLogDestroy(pLogger);
            break;

        default:
        {
            rc = VERR_INTERNAL_ERROR;
            break;
        }
    }

    return rc;
}
4209
4210
/**
 * Gets the paging mode of the current CPU.
 *
 * Derived from CR0 (paging enabled?), CR4 (PAE/PGE) and, when PAE is on,
 * the EFER NXE/LMA bits reported via the AMD extended CPUID leaf.
 *
 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
 */
SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
{
    SUPPAGINGMODE enmMode;

    RTR0UINTREG cr0 = ASMGetCR0();
    if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
        enmMode = SUPPAGINGMODE_INVALID;
    else
    {
        RTR0UINTREG cr4 = ASMGetCR4();
        /* Bit 0 = EFER.NXE active, bit 1 = EFER.LMA active (long mode). */
        uint32_t fNXEPlusLMA = 0;
        if (cr4 & X86_CR4_PAE)
        {
            uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
            if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
            {
                uint64_t efer = ASMRdMsr(MSR_K6_EFER);
                if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
                    fNXEPlusLMA |= RT_BIT(0);
                if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
                    fNXEPlusLMA |= RT_BIT(1);
            }
        }

        /* Map the combined CR4.PAE/PGE + NXE/LMA bits to a paging mode. */
        switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
        {
            case 0:
                enmMode = SUPPAGINGMODE_32_BIT;
                break;

            case X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
                break;

            case X86_CR4_PAE:
                enmMode = SUPPAGINGMODE_PAE;
                break;

            case X86_CR4_PAE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_PAE_NX;
                break;

            case X86_CR4_PAE | X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_PAE_GLOBAL;
                break;

            case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
                /* NOTE(review): maps to PAE_GLOBAL (not PAE_GLOBAL_NX) - looks
                   deliberate given the separate NX-less constant above, but
                   worth confirming. */
                enmMode = SUPPAGINGMODE_PAE_GLOBAL;
                break;

            case RT_BIT(1) | X86_CR4_PAE:
                enmMode = SUPPAGINGMODE_AMD64;
                break;

            case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_AMD64_NX;
                break;

            case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
                break;

            case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
                break;

            default:
                AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
                enmMode = SUPPAGINGMODE_INVALID;
                break;
        }
    }
    return enmMode;
}
4290
4291
4292/**
4293 * Enables or disabled hardware virtualization extensions using native OS APIs.
4294 *
4295 * @returns VBox status code.
4296 * @retval VINF_SUCCESS on success.
4297 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4298 *
4299 * @param fEnable Whether to enable or disable.
4300 */
4301SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4302{
4303#ifdef RT_OS_DARWIN
4304 return supdrvOSEnableVTx(fEnable);
4305#else
4306 return VERR_NOT_SUPPORTED;
4307#endif
4308}
4309
4310
/**
 * Creates the GIP.
 *
 * Allocates the GIP page, picks an update interval based on the system timer
 * granularity, and creates the update timer - preferring a per-CPU (omni)
 * timer for async-TSC systems with a synchronous-mode fallback.
 *
 * @returns VBox status code.
 * @param   pDevExt     Instance data. GIP stuff may be updated.
 */
static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
{
    PSUPGLOBALINFOPAGE pGip;
    RTHCPHYS HCPhysGip;
    uint32_t u32SystemResolution;
    uint32_t u32Interval;
    int rc;

    LogFlow(("supdrvGipCreate:\n"));

    /* assert order */
    Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
    Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
    Assert(!pDevExt->pGipTimer);

    /*
     * Allocate a suitable page with a default kernel mapping.
     */
    rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
    if (RT_FAILURE(rc))
    {
        OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
        return rc;
    }
    pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
    HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);

#if 0 /** @todo Disabled this as we didn't used to do it before and causes unnecessary stress on laptops.
       * It only applies to Windows and should probably revisited later, if possible made part of the
       * timer code (return min granularity in RTTimerGetSystemGranularity and set it in RTTimerStart). */
    /*
     * Try bump up the system timer resolution.
     * The more interrupts the better...
     */
    if (   RT_SUCCESS(RTTimerRequestSystemGranularity(  488281 /* 2048 HZ */, &u32SystemResolution))
        || RT_SUCCESS(RTTimerRequestSystemGranularity(  500000 /* 2000 HZ */, &u32SystemResolution))
        || RT_SUCCESS(RTTimerRequestSystemGranularity(  976563 /* 1024 HZ */, &u32SystemResolution))
        || RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
        || RT_SUCCESS(RTTimerRequestSystemGranularity( 1953125 /*  512 HZ */, &u32SystemResolution))
        || RT_SUCCESS(RTTimerRequestSystemGranularity( 2000000 /*  500 HZ */, &u32SystemResolution))
        || RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /*  256 HZ */, &u32SystemResolution))
        || RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /*  250 HZ */, &u32SystemResolution))
        || RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /*  128 HZ */, &u32SystemResolution))
        || RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /*  100 HZ */, &u32SystemResolution))
        || RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /*   64 HZ */, &u32SystemResolution))
        || RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /*   32 HZ */, &u32SystemResolution))
       )
    {
        Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
        pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
    }
#endif

    /*
     * Find a reasonable update interval and initialize the structure.
     * The interval is the smallest multiple of the system granularity
     * that is at least 10 ms.
     */
    u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
    while (u32Interval < 10000000 /* 10 ms */)
        u32Interval += u32SystemResolution;

    supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);

    /*
     * Create the timer.
     * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
     */
    if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
    {
        rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL, supdrvGipAsyncTimer, pDevExt);
        if (rc == VERR_NOT_SUPPORTED)
        {
            OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
            pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
        }
    }
    if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
        rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipSyncTimer, pDevExt);
    if (RT_SUCCESS(rc))
    {
        /* Async mode needs CPU on/offline notifications to migrate the GIP master. */
        if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
            rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
        if (RT_SUCCESS(rc))
        {
            /*
             * We're good.
             */
            dprintf(("supdrvGipCreate: %ld ns interval.\n", (long)u32Interval));
            return VINF_SUCCESS;
        }

        OSDBGPRINT(("supdrvGipCreate: failed register MP event notfication. rc=%d\n", rc));
    }
    else
    {
        OSDBGPRINT(("supdrvGipCreate: failed create GIP timer at %ld ns interval. rc=%d\n", (long)u32Interval, rc));
        Assert(!pDevExt->pGipTimer);
    }
    /* Failure: supdrvGipDestroy copes with the partially initialized state. */
    supdrvGipDestroy(pDevExt);
    return rc;
}
4417
4418
/**
 * Terminates the GIP.
 *
 * Safe to call on a partially initialized state (used as the error path of
 * supdrvGipCreate): each step is guarded by a presence check.
 *
 * @param   pDevExt     Instance data. GIP stuff may be updated.
 */
static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
{
    int rc;
#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
                pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
                pDevExt->pGipTimer, pDevExt->GipMemObj));
#endif

    /*
     * Invalid the GIP data.
     */
    if (pDevExt->pGip)
    {
        supdrvGipTerm(pDevExt->pGip);
        pDevExt->pGip = NULL;
    }

    /*
     * Destroy the timer and free the GIP memory object.
     */
    if (pDevExt->pGipTimer)
    {
        rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
        pDevExt->pGipTimer = NULL;
    }

    if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
    {
        rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
        pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
    }

    /*
     * Finally, release the system timer resolution request if one succeeded.
     */
    if (pDevExt->u32SystemTimerGranularityGrant)
    {
        rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
        pDevExt->u32SystemTimerGranularityGrant = 0;
    }
}
4466
4467
4468/**
4469 * Timer callback function sync GIP mode.
4470 * @param pTimer The timer.
4471 * @param pvUser The device extension.
4472 */
4473static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4474{
4475 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4476 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4477
4478 supdrvGipUpdate(pDevExt->pGip, RTTimeSystemNanoTS());
4479
4480 ASMSetFlags(fOldFlags);
4481}
4482
4483
4484/**
4485 * Timer callback function for async GIP mode.
4486 * @param pTimer The timer.
4487 * @param pvUser The device extension.
4488 */
4489static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4490{
4491 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4492 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4493 RTCPUID idCpu = RTMpCpuId();
4494 uint64_t NanoTS = RTTimeSystemNanoTS();
4495
4496 /** @todo reset the transaction number and whatnot when iTick == 1. */
4497 if (pDevExt->idGipMaster == idCpu)
4498 supdrvGipUpdate(pDevExt->pGip, NanoTS);
4499 else
4500 supdrvGipUpdatePerCpu(pDevExt->pGip, NanoTS, ASMGetApicId());
4501
4502 ASMSetFlags(fOldFlags);
4503}
4504
4505
4506/**
4507 * Multiprocessor event notification callback.
4508 *
4509 * This is used to make sue that the GIP master gets passed on to
4510 * another CPU.
4511 *
4512 * @param enmEvent The event.
4513 * @param idCpu The cpu it applies to.
4514 * @param pvUser Pointer to the device extension.
4515 */
4516static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
4517{
4518 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4519 if (enmEvent == RTMPEVENT_OFFLINE)
4520 {
4521 RTCPUID idGipMaster;
4522 ASMAtomicReadSize(&pDevExt->idGipMaster, &idGipMaster);
4523 if (idGipMaster == idCpu)
4524 {
4525 /*
4526 * Find a new GIP master.
4527 */
4528 bool fIgnored;
4529 unsigned i;
4530 RTCPUID idNewGipMaster = NIL_RTCPUID;
4531 RTCPUSET OnlineCpus;
4532 RTMpGetOnlineSet(&OnlineCpus);
4533
4534 for (i = 0; i < RTCPUSET_MAX_CPUS; i++)
4535 {
4536 RTCPUID idCurCpu = RTMpCpuIdFromSetIndex(i);
4537 if ( RTCpuSetIsMember(&OnlineCpus, idCurCpu)
4538 && idCurCpu != idGipMaster)
4539 {
4540 idNewGipMaster = idCurCpu;
4541 break;
4542 }
4543 }
4544
4545 dprintf(("supdrvGipMpEvent: Gip master %#lx -> %#lx\n", (long)idGipMaster, (long)idNewGipMaster));
4546 ASMAtomicCmpXchgSize(&pDevExt->idGipMaster, idNewGipMaster, idGipMaster, fIgnored);
4547 NOREF(fIgnored);
4548 }
4549 }
4550}
4551
4552
4553/**
4554 * Initializes the GIP data.
4555 *
4556 * @returns IPRT status code.
4557 * @param pDevExt Pointer to the device instance data.
4558 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4559 * @param HCPhys The physical address of the GIP.
4560 * @param u64NanoTS The current nanosecond timestamp.
4561 * @param uUpdateHz The update freqence.
4562 */
4563int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
4564{
4565 unsigned i;
4566#ifdef DEBUG_DARWIN_GIP
4567 OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4568#else
4569 LogFlow(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4570#endif
4571
4572 /*
4573 * Initialize the structure.
4574 */
4575 memset(pGip, 0, PAGE_SIZE);
4576 pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
4577 pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
4578 pGip->u32Mode = supdrvGipDeterminTscMode(pDevExt);
4579 pGip->u32UpdateHz = uUpdateHz;
4580 pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
4581 pGip->u64NanoTSLastUpdateHz = u64NanoTS;
4582
4583 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4584 {
4585 pGip->aCPUs[i].u32TransactionId = 2;
4586 pGip->aCPUs[i].u64NanoTS = u64NanoTS;
4587 pGip->aCPUs[i].u64TSC = ASMReadTSC();
4588
4589 /*
4590 * We don't know the following values until we've executed updates.
4591 * So, we'll just insert very high values.
4592 */
4593 pGip->aCPUs[i].u64CpuHz = _4G + 1;
4594 pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
4595 pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
4596 pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
4597 pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
4598 pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
4599 pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
4600 pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
4601 pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
4602 pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
4603 }
4604
4605 /*
4606 * Link it to the device extension.
4607 */
4608 pDevExt->pGip = pGip;
4609 pDevExt->HCPhysGip = HCPhys;
4610 pDevExt->cGipUsers = 0;
4611
4612 return VINF_SUCCESS;
4613}
4614
4615
4616/**
4617 * Callback used by supdrvDetermineAsyncTSC to read the TSC on a CPU.
4618 *
4619 * @param idCpu Ignored.
4620 * @param pvUser1 Where to put the TSC.
4621 * @param pvUser2 Ignored.
4622 */
4623static DECLCALLBACK(void) supdrvDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
4624{
4625#if 1
4626 ASMAtomicWriteU64((uint64_t volatile *)pvUser1, ASMReadTSC());
4627#else
4628 *(uint64_t *)pvUser1 = ASMReadTSC();
4629#endif
4630}
4631
4632
4633/**
4634 * Determine if Async GIP mode is required because of TSC drift.
4635 *
4636 * When using the default/normal timer code it is essential that the time stamp counter
4637 * (TSC) runs never backwards, that is, a read operation to the counter should return
4638 * a bigger value than any previous read operation. This is guaranteed by the latest
4639 * AMD CPUs and by newer Intel CPUs which never enter the C2 state (P4). In any other
4640 * case we have to choose the asynchronous timer mode.
4641 *
4642 * @param poffMin Pointer to the determined difference between different cores.
4643 * @return false if the time stamp counters appear to be synchron, true otherwise.
4644 */
4645bool VBOXCALL supdrvDetermineAsyncTsc(uint64_t *poffMin)
4646{
4647 /*
4648 * Just iterate all the cpus 8 times and make sure that the TSC is
4649 * ever increasing. We don't bother taking TSC rollover into account.
4650 */
4651 RTCPUSET CpuSet;
4652 int iLastCpu = RTCpuLastIndex(RTMpGetSet(&CpuSet));
4653 int iCpu;
4654 int cLoops = 8;
4655 bool fAsync = false;
4656 int rc = VINF_SUCCESS;
4657 uint64_t offMax = 0;
4658 uint64_t offMin = ~(uint64_t)0;
4659 uint64_t PrevTsc = ASMReadTSC();
4660
4661 while (cLoops-- > 0)
4662 {
4663 for (iCpu = 0; iCpu <= iLastCpu; iCpu++)
4664 {
4665 uint64_t CurTsc;
4666 rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(iCpu), supdrvDetermineAsyncTscWorker, &CurTsc, NULL);
4667 if (RT_SUCCESS(rc))
4668 {
4669 if (CurTsc <= PrevTsc)
4670 {
4671 fAsync = true;
4672 offMin = offMax = PrevTsc - CurTsc;
4673 dprintf(("supdrvDetermineAsyncTsc: iCpu=%d cLoops=%d CurTsc=%llx PrevTsc=%llx\n",
4674 iCpu, cLoops, CurTsc, PrevTsc));
4675 break;
4676 }
4677
4678 /* Gather statistics (except the first time). */
4679 if (iCpu != 0 || cLoops != 7)
4680 {
4681 uint64_t off = CurTsc - PrevTsc;
4682 if (off < offMin)
4683 offMin = off;
4684 if (off > offMax)
4685 offMax = off;
4686 dprintf2(("%d/%d: off=%llx\n", cLoops, iCpu, off));
4687 }
4688
4689 /* Next */
4690 PrevTsc = CurTsc;
4691 }
4692 else if (rc == VERR_NOT_SUPPORTED)
4693 break;
4694 else
4695 AssertMsg(rc == VERR_CPU_NOT_FOUND || rc == VERR_CPU_OFFLINE, ("%d\n", rc));
4696 }
4697
4698 /* broke out of the loop. */
4699 if (iCpu <= iLastCpu)
4700 break;
4701 }
4702
4703 *poffMin = offMin; /* Almost RTMpOnSpecific profiling. */
4704 dprintf(("supdrvDetermineAsyncTsc: returns %d; iLastCpu=%d rc=%d offMin=%llx offMax=%llx\n",
4705 fAsync, iLastCpu, rc, offMin, offMax));
4706#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_OS2) && !defined(RT_OS_WINDOWS)
4707 OSDBGPRINT(("vboxdrv: fAsync=%d offMin=%#lx offMax=%#lx\n", fAsync, (long)offMin, (long)offMax));
4708#endif
4709 return fAsync;
4710}
4711
4712
4713/**
4714 * Determin the GIP TSC mode.
4715 *
4716 * @returns The most suitable TSC mode.
4717 * @param pDevExt Pointer to the device instance data.
4718 */
4719static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt)
4720{
4721 /*
4722 * On SMP we're faced with two problems:
4723 * (1) There might be a skew between the CPU, so that cpu0
4724 * returns a TSC that is sligtly different from cpu1.
4725 * (2) Power management (and other things) may cause the TSC
4726 * to run at a non-constant speed, and cause the speed
4727 * to be different on the cpus. This will result in (1).
4728 *
4729 * So, on SMP systems we'll have to select the ASYNC update method
4730 * if there are symphoms of these problems.
4731 */
4732 if (RTMpGetCount() > 1)
4733 {
4734 uint32_t uEAX, uEBX, uECX, uEDX;
4735 uint64_t u64DiffCoresIgnored;
4736
4737 /* Permit the user and/or the OS specfic bits to force async mode. */
4738 if (supdrvOSGetForcedAsyncTscMode(pDevExt))
4739 return SUPGIPMODE_ASYNC_TSC;
4740
4741 /* Try check for current differences between the cpus. */
4742 if (supdrvDetermineAsyncTsc(&u64DiffCoresIgnored))
4743 return SUPGIPMODE_ASYNC_TSC;
4744
4745 /*
4746 * If the CPU supports power management and is an AMD one we
4747 * won't trust it unless it has the TscInvariant bit is set.
4748 */
4749 /* Check for "AuthenticAMD" */
4750 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
4751 if ( uEAX >= 1
4752 && uEBX == X86_CPUID_VENDOR_AMD_EBX
4753 && uECX == X86_CPUID_VENDOR_AMD_ECX
4754 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
4755 {
4756 /* Check for APM support and that TscInvariant is cleared. */
4757 ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
4758 if (uEAX >= 0x80000007)
4759 {
4760 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
4761 if ( !(uEDX & RT_BIT(8))/* TscInvariant */
4762 && (uEDX & 0x3e)) /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
4763 return SUPGIPMODE_ASYNC_TSC;
4764 }
4765 }
4766 }
4767 return SUPGIPMODE_SYNC_TSC;
4768}
4769
4770
4771/**
4772 * Invalidates the GIP data upon termination.
4773 *
4774 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4775 */
4776void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
4777{
4778 unsigned i;
4779 pGip->u32Magic = 0;
4780 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4781 {
4782 pGip->aCPUs[i].u64NanoTS = 0;
4783 pGip->aCPUs[i].u64TSC = 0;
4784 pGip->aCPUs[i].iTSCHistoryHead = 0;
4785 }
4786}
4787
4788
4789/**
4790 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
4791 * updates all the per cpu data except the transaction id.
4792 *
4793 * @param pGip The GIP.
4794 * @param pGipCpu Pointer to the per cpu data.
4795 * @param u64NanoTS The current time stamp.
4796 */
4797static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS)
4798{
4799 uint64_t u64TSC;
4800 uint64_t u64TSCDelta;
4801 uint32_t u32UpdateIntervalTSC;
4802 uint32_t u32UpdateIntervalTSCSlack;
4803 unsigned iTSCHistoryHead;
4804 uint64_t u64CpuHz;
4805
4806 /*
4807 * Update the NanoTS.
4808 */
4809 ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);
4810
4811 /*
4812 * Calc TSC delta.
4813 */
4814 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
4815 u64TSC = ASMReadTSC();
4816 u64TSCDelta = u64TSC - pGipCpu->u64TSC;
4817 ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);
4818
4819 if (u64TSCDelta >> 32)
4820 {
4821 u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
4822 pGipCpu->cErrors++;
4823 }
4824
4825 /*
4826 * TSC History.
4827 */
4828 Assert(RT_ELEMENTS(pGipCpu->au32TSCHistory) == 8);
4829
4830 iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
4831 ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
4832 ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
4833
4834 /*
4835 * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
4836 */
4837 if (pGip->u32UpdateHz >= 1000)
4838 {
4839 uint32_t u32;
4840 u32 = pGipCpu->au32TSCHistory[0];
4841 u32 += pGipCpu->au32TSCHistory[1];
4842 u32 += pGipCpu->au32TSCHistory[2];
4843 u32 += pGipCpu->au32TSCHistory[3];
4844 u32 >>= 2;
4845 u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
4846 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
4847 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
4848 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
4849 u32UpdateIntervalTSC >>= 2;
4850 u32UpdateIntervalTSC += u32;
4851 u32UpdateIntervalTSC >>= 1;
4852
4853 /* Value choosen for a 2GHz Athlon64 running linux 2.6.10/11, . */
4854 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
4855 }
4856 else if (pGip->u32UpdateHz >= 90)
4857 {
4858 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4859 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
4860 u32UpdateIntervalTSC >>= 1;
4861
4862 /* value choosen on a 2GHz thinkpad running windows */
4863 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
4864 }
4865 else
4866 {
4867 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4868
4869 /* This value hasn't be checked yet.. waiting for OS/2 and 33Hz timers.. :-) */
4870 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
4871 }
4872 ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
4873
4874 /*
4875 * CpuHz.
4876 */
4877 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
4878 ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
4879}
4880
4881
4882/**
4883 * Updates the GIP.
4884 *
4885 * @param pGip Pointer to the GIP.
4886 * @param u64NanoTS The current nanosecond timesamp.
4887 */
4888void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS)
4889{
4890 /*
4891 * Determin the relevant CPU data.
4892 */
4893 PSUPGIPCPU pGipCpu;
4894 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4895 pGipCpu = &pGip->aCPUs[0];
4896 else
4897 {
4898 unsigned iCpu = ASMGetApicId();
4899 if (RT_LIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
4900 return;
4901 pGipCpu = &pGip->aCPUs[iCpu];
4902 }
4903
4904 /*
4905 * Start update transaction.
4906 */
4907 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4908 {
4909 /* this can happen on win32 if we're taking to long and there are more CPUs around. shouldn't happen though. */
4910 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4911 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4912 pGipCpu->cErrors++;
4913 return;
4914 }
4915
4916 /*
4917 * Recalc the update frequency every 0x800th time.
4918 */
4919 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4920 {
4921 if (pGip->u64NanoTSLastUpdateHz)
4922 {
4923#ifdef RT_ARCH_AMD64 /** @todo fix 64-bit div here to work on x86 linux. */
4924 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4925 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
4926 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4927 {
4928 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4929 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4930 }
4931#endif
4932 }
4933 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4934 }
4935
4936 /*
4937 * Update the data.
4938 */
4939 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4940
4941 /*
4942 * Complete transaction.
4943 */
4944 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4945}
4946
4947
4948/**
4949 * Updates the per cpu GIP data for the calling cpu.
4950 *
4951 * @param pGip Pointer to the GIP.
4952 * @param u64NanoTS The current nanosecond timesamp.
4953 * @param iCpu The CPU index.
4954 */
4955void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, unsigned iCpu)
4956{
4957 PSUPGIPCPU pGipCpu;
4958
4959 if (RT_LIKELY(iCpu < RT_ELEMENTS(pGip->aCPUs)))
4960 {
4961 pGipCpu = &pGip->aCPUs[iCpu];
4962
4963 /*
4964 * Start update transaction.
4965 */
4966 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4967 {
4968 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4969 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4970 pGipCpu->cErrors++;
4971 return;
4972 }
4973
4974 /*
4975 * Update the data.
4976 */
4977 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4978
4979 /*
4980 * Complete transaction.
4981 */
4982 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4983 }
4984}
4985
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette