VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp@ 91792

Last change on this file since 91792 was 91789, checked in by vboxsync, 3 years ago

SUPDrv,IPRT,VBoxGuest: Don't export ellipsis functions from SUPDrv, as that makes switching back to the kernel stack unsafe. Exports have changed (added and removed), but not bumping the major IOC version as that was done a few hours ago already and it's Sunday. bugref:10124

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 277.1 KB
1/* $Id: SUPDrv.cpp 91789 2021-10-17 18:16:11Z vboxsync $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_SUP_DRV
32#define SUPDRV_AGNOSTIC
33#include "SUPDrvInternal.h"
34#ifndef PAGE_SHIFT
35# include <iprt/param.h>
36#endif
37#include <iprt/asm.h>
38#include <iprt/asm-amd64-x86.h>
39#include <iprt/asm-math.h>
40#include <iprt/cpuset.h>
41#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
42# include <iprt/dbg.h>
43#endif
44#include <iprt/handletable.h>
45#include <iprt/mem.h>
46#include <iprt/mp.h>
47#include <iprt/power.h>
48#include <iprt/process.h>
49#include <iprt/semaphore.h>
50#include <iprt/spinlock.h>
51#include <iprt/thread.h>
52#include <iprt/uuid.h>
53#include <iprt/net.h>
54#include <iprt/crc.h>
55#include <iprt/string.h>
56#include <iprt/timer.h>
57#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
58# include <iprt/rand.h>
59# include <iprt/path.h>
60#endif
61#include <iprt/uint128.h>
62#include <iprt/x86.h>
63
64#include <VBox/param.h>
65#include <VBox/log.h>
66#include <VBox/err.h>
67#include <VBox/vmm/hm_vmx.h>
68
69#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
70# include "dtrace/SUPDrv.h"
71#else
72# define VBOXDRV_SESSION_CREATE(pvSession, fUser) do { } while (0)
73# define VBOXDRV_SESSION_CLOSE(pvSession) do { } while (0)
74# define VBOXDRV_IOCTL_ENTRY(pvSession, uIOCtl, pvReqHdr) do { } while (0)
75# define VBOXDRV_IOCTL_RETURN(pvSession, uIOCtl, pvReqHdr, rcRet, rcReq) do { } while (0)
76#endif
77
78/*
79 * Logging assignments:
80 * Log - useful stuff, like failures.
81 * LogFlow - program flow, except the really noisy bits.
82 * Log2 - Cleanup.
83 * Log3 - Loader flow noise.
84 * Log4 - Call VMMR0 flow noise.
85 * Log5 - Native yet-to-be-defined noise.
86 * Log6 - Native ioctl flow noise.
87 *
88 * Logging requires KBUILD_TYPE=debug and possibly changes to the logger
89 * instantiation in log-vbox.c(pp).
90 */
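/*
 * Illustrative sketch only (not part of the driver): how the assignments above
 * are typically honoured when instrumenting code in this file.  The Log*
 * macros come from VBox/log.h; the helper names below are hypothetical.
 *
 *     static int supdrvExampleWorker(PSUPDRVDEVEXT pDevExt)
 *     {
 *         LogFlow(("supdrvExampleWorker: pDevExt=%p\n", pDevExt));      // program flow
 *         int rc = supdrvExampleDoWork(pDevExt);                        // hypothetical helper
 *         if (RT_FAILURE(rc))
 *             Log(("supdrvExampleWorker: failed, rc=%Rrc\n", rc));      // failures & useful stuff
 *         Log2(("supdrvExampleWorker: cleaning up\n"));                 // cleanup noise
 *         return rc;
 *     }
 */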
91
92
93/*********************************************************************************************************************************
94* Defined Constants And Macros *
95*********************************************************************************************************************************/
96/** @def VBOX_SVN_REV
97 * The makefile should define this if it can. */
98#ifndef VBOX_SVN_REV
99# define VBOX_SVN_REV 0
100#endif
101
102/** @def SUPDRV_CHECK_SMAP_SETUP
103 * SMAP check setup. */
104/** @def SUPDRV_CHECK_SMAP_CHECK
105 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
106 * will be logged and @a a_BadExpr is executed. */
107#if (defined(RT_OS_DARWIN) || defined(RT_OS_LINUX)) && !defined(VBOX_WITHOUT_EFLAGS_AC_SET_IN_VBOXDRV)
108# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
109# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) \
110 do { \
111 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
112 { \
113 RTCCUINTREG fEfl = ASMGetFlags(); \
114 if (RT_LIKELY(fEfl & X86_EFL_AC)) \
115 { /* likely */ } \
116 else \
117 { \
118 supdrvBadContext(a_pDevExt, "SUPDrv.cpp", __LINE__, "EFLAGS.AC is 0!"); \
119 a_BadExpr; \
120 } \
121 } \
122 } while (0)
123#else
124# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
125# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) NOREF(fKernelFeatures)
126#endif
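/*
 * Sketch of how the two macros above are meant to be used together (mirrors
 * the pattern found in the I/O control paths further down in this file):
 * SUPDRV_CHECK_SMAP_SETUP() declares the fKernelFeatures local that
 * SUPDRV_CHECK_SMAP_CHECK() relies on, and each check verifies EFLAGS.AC
 * whenever the kernel reports SMAP support.  The function names are made up.
 *
 *     static int supdrvExampleSmapAwareWorker(PSUPDRVDEVEXT pDevExt)
 *     {
 *         SUPDRV_CHECK_SMAP_SETUP();
 *         SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);                    // log only, carry on
 *         int rc = supdrvExampleDoWork(pDevExt);                           // hypothetical helper
 *         SUPDRV_CHECK_SMAP_CHECK(pDevExt, return VERR_INVALID_CONTEXT);   // log and bail out
 *         return rc;
 *     }
 */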
127
128
129/*********************************************************************************************************************************
130* Internal Functions *
131*********************************************************************************************************************************/
132static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
133static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
134static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
135static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
136static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
137static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
138static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
139static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt);
140static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
141static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
142static int supdrvLdrAddUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage);
143DECLINLINE(void) supdrvLdrSubtractUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, uint32_t cReference);
144static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
145DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt);
146DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt);
147static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
148static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq);
149static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq);
150static int supdrvIOCtl_ResumeSuspendedKbds(void);
151
152
153/*********************************************************************************************************************************
154* Global Variables *
155*********************************************************************************************************************************/
156/** @def STKBACK
157 * Indicates that the symbol needs to switch back to the kernel stack on darwin.
158 * See @bugref{10124} for details. */
159#ifdef RT_OS_DARWIN
160# define STKBACK(a) "StkBack_" a
161#else
162# define STKBACK(a) a
163#endif
164/** @def STKOKAY
165 * The opposite of STKBACK, just to keep the table nicely aligned. */
166#define STKOKAY(a) a
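/*
 * For illustration: STKBACK() only changes the name string put into the table
 * below.  On darwin an entry such as STKBACK("SUPR0ContAlloc") yields
 * "StkBack_SUPR0ContAlloc" (the variant that handles the switch back to the
 * kernel stack, see @bugref{10124} above), while everywhere else it yields
 * plain "SUPR0ContAlloc".  STKOKAY() never alters the name.
 */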
167
168/**
169 * Array of the R0 SUP API.
170 *
171 * While making changes to these exports, make sure to update the IOC
172 * minor version (SUPDRV_IOC_VERSION).
173 *
174 * @remarks This array is processed by SUPR0-def-pe.sed and SUPR0-def-lx.sed to
175 * produce definition files from which import libraries are generated.
176 * Take care when commenting things and especially with \#ifdef'ing.
177 */
178static SUPFUNC g_aFunctions[] =
179{
180/* SED: START */
181 /* name function */
182 /* Entries with absolute addresses determined at runtime, fixup
183 code makes ugly ASSUMPTIONS about the order here: */
184 { STKOKAY("SUPR0AbsIs64bit"), (void *)0 },
185 { STKOKAY("SUPR0Abs64bitKernelCS"), (void *)0 },
186 { STKOKAY("SUPR0Abs64bitKernelSS"), (void *)0 },
187 { STKOKAY("SUPR0Abs64bitKernelDS"), (void *)0 },
188 { STKOKAY("SUPR0AbsKernelCS"), (void *)0 },
189 { STKOKAY("SUPR0AbsKernelSS"), (void *)0 },
190 { STKOKAY("SUPR0AbsKernelDS"), (void *)0 },
191 { STKOKAY("SUPR0AbsKernelES"), (void *)0 },
192 { STKOKAY("SUPR0AbsKernelFS"), (void *)0 },
193 { STKOKAY("SUPR0AbsKernelGS"), (void *)0 },
194 /* Normal function pointers: */
195 { STKOKAY("g_pSUPGlobalInfoPage"), (void *)&g_pSUPGlobalInfoPage }, /* SED: DATA */
196 { STKOKAY("SUPGetGIP"), (void *)(uintptr_t)SUPGetGIP },
197 { STKBACK("SUPReadTscWithDelta"), (void *)(uintptr_t)SUPReadTscWithDelta },
198 { STKBACK("SUPGetTscDeltaSlow"), (void *)(uintptr_t)SUPGetTscDeltaSlow },
199 { STKBACK("SUPGetCpuHzFromGipForAsyncMode"), (void *)(uintptr_t)SUPGetCpuHzFromGipForAsyncMode },
200 { STKOKAY("SUPIsTscFreqCompatible"), (void *)(uintptr_t)SUPIsTscFreqCompatible },
201 { STKOKAY("SUPIsTscFreqCompatibleEx"), (void *)(uintptr_t)SUPIsTscFreqCompatibleEx },
202 { STKBACK("SUPR0BadContext"), (void *)(uintptr_t)SUPR0BadContext },
203 { STKBACK("SUPR0ComponentDeregisterFactory"), (void *)(uintptr_t)SUPR0ComponentDeregisterFactory },
204 { STKBACK("SUPR0ComponentQueryFactory"), (void *)(uintptr_t)SUPR0ComponentQueryFactory },
205 { STKBACK("SUPR0ComponentRegisterFactory"), (void *)(uintptr_t)SUPR0ComponentRegisterFactory },
206 { STKBACK("SUPR0ContAlloc"), (void *)(uintptr_t)SUPR0ContAlloc },
207 { STKBACK("SUPR0ContFree"), (void *)(uintptr_t)SUPR0ContFree },
208 { STKBACK("SUPR0ChangeCR4"), (void *)(uintptr_t)SUPR0ChangeCR4 },
209 { STKBACK("SUPR0EnableVTx"), (void *)(uintptr_t)SUPR0EnableVTx },
210 { STKBACK("SUPR0SuspendVTxOnCpu"), (void *)(uintptr_t)SUPR0SuspendVTxOnCpu },
211 { STKBACK("SUPR0ResumeVTxOnCpu"), (void *)(uintptr_t)SUPR0ResumeVTxOnCpu },
212 { STKOKAY("SUPR0GetCurrentGdtRw"), (void *)(uintptr_t)SUPR0GetCurrentGdtRw },
213 { STKOKAY("SUPR0GetKernelFeatures"), (void *)(uintptr_t)SUPR0GetKernelFeatures },
214 { STKBACK("SUPR0GetHwvirtMsrs"), (void *)(uintptr_t)SUPR0GetHwvirtMsrs },
215 { STKBACK("SUPR0GetPagingMode"), (void *)(uintptr_t)SUPR0GetPagingMode },
216 { STKBACK("SUPR0GetSvmUsability"), (void *)(uintptr_t)SUPR0GetSvmUsability },
217 { STKBACK("SUPR0GetVTSupport"), (void *)(uintptr_t)SUPR0GetVTSupport },
218 { STKBACK("SUPR0GetVmxUsability"), (void *)(uintptr_t)SUPR0GetVmxUsability },
219 { STKBACK("SUPR0LdrIsLockOwnerByMod"), (void *)(uintptr_t)SUPR0LdrIsLockOwnerByMod },
220 { STKBACK("SUPR0LdrLock"), (void *)(uintptr_t)SUPR0LdrLock },
221 { STKBACK("SUPR0LdrUnlock"), (void *)(uintptr_t)SUPR0LdrUnlock },
222 { STKBACK("SUPR0LdrModByName"), (void *)(uintptr_t)SUPR0LdrModByName },
223 { STKBACK("SUPR0LdrModRelease"), (void *)(uintptr_t)SUPR0LdrModRelease },
224 { STKBACK("SUPR0LdrModRetain"), (void *)(uintptr_t)SUPR0LdrModRetain },
225 { STKBACK("SUPR0LockMem"), (void *)(uintptr_t)SUPR0LockMem },
226 { STKBACK("SUPR0LowAlloc"), (void *)(uintptr_t)SUPR0LowAlloc },
227 { STKBACK("SUPR0LowFree"), (void *)(uintptr_t)SUPR0LowFree },
228 { STKBACK("SUPR0MemAlloc"), (void *)(uintptr_t)SUPR0MemAlloc },
229 { STKBACK("SUPR0MemFree"), (void *)(uintptr_t)SUPR0MemFree },
230 { STKBACK("SUPR0MemGetPhys"), (void *)(uintptr_t)SUPR0MemGetPhys },
231 { STKBACK("SUPR0ObjAddRef"), (void *)(uintptr_t)SUPR0ObjAddRef },
232 { STKBACK("SUPR0ObjAddRefEx"), (void *)(uintptr_t)SUPR0ObjAddRefEx },
233 { STKBACK("SUPR0ObjRegister"), (void *)(uintptr_t)SUPR0ObjRegister },
234 { STKBACK("SUPR0ObjRelease"), (void *)(uintptr_t)SUPR0ObjRelease },
235 { STKBACK("SUPR0ObjVerifyAccess"), (void *)(uintptr_t)SUPR0ObjVerifyAccess },
236 { STKBACK("SUPR0PageAllocEx"), (void *)(uintptr_t)SUPR0PageAllocEx },
237 { STKBACK("SUPR0PageFree"), (void *)(uintptr_t)SUPR0PageFree },
238 { STKBACK("SUPR0PageMapKernel"), (void *)(uintptr_t)SUPR0PageMapKernel },
239 { STKBACK("SUPR0PageProtect"), (void *)(uintptr_t)SUPR0PageProtect },
240#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
241 { STKOKAY("SUPR0HCPhysToVirt"), (void *)(uintptr_t)SUPR0HCPhysToVirt }, /* only-linux, only-solaris, only-freebsd */
242#endif
243 { STKBACK("SUPR0PrintfV"), (void *)(uintptr_t)SUPR0PrintfV },
244 { STKBACK("SUPR0GetSessionGVM"), (void *)(uintptr_t)SUPR0GetSessionGVM },
245 { STKBACK("SUPR0GetSessionVM"), (void *)(uintptr_t)SUPR0GetSessionVM },
246 { STKBACK("SUPR0SetSessionVM"), (void *)(uintptr_t)SUPR0SetSessionVM },
247 { STKBACK("SUPR0TscDeltaMeasureBySetIndex"), (void *)(uintptr_t)SUPR0TscDeltaMeasureBySetIndex },
248 { STKBACK("SUPR0TracerDeregisterDrv"), (void *)(uintptr_t)SUPR0TracerDeregisterDrv },
249 { STKBACK("SUPR0TracerDeregisterImpl"), (void *)(uintptr_t)SUPR0TracerDeregisterImpl },
250 { STKBACK("SUPR0TracerFireProbe"), (void *)(uintptr_t)SUPR0TracerFireProbe },
251 { STKBACK("SUPR0TracerRegisterDrv"), (void *)(uintptr_t)SUPR0TracerRegisterDrv },
252 { STKBACK("SUPR0TracerRegisterImpl"), (void *)(uintptr_t)SUPR0TracerRegisterImpl },
253 { STKBACK("SUPR0TracerRegisterModule"), (void *)(uintptr_t)SUPR0TracerRegisterModule },
254 { STKBACK("SUPR0TracerUmodProbeFire"), (void *)(uintptr_t)SUPR0TracerUmodProbeFire },
255 { STKBACK("SUPR0UnlockMem"), (void *)(uintptr_t)SUPR0UnlockMem },
256#ifdef RT_OS_WINDOWS
257 { STKBACK("SUPR0IoCtlSetupForHandle"), (void *)(uintptr_t)SUPR0IoCtlSetupForHandle }, /* only-windows */
258 { STKBACK("SUPR0IoCtlPerform"), (void *)(uintptr_t)SUPR0IoCtlPerform }, /* only-windows */
259 { STKBACK("SUPR0IoCtlCleanup"), (void *)(uintptr_t)SUPR0IoCtlCleanup }, /* only-windows */
260#endif
261 { STKBACK("SUPSemEventClose"), (void *)(uintptr_t)SUPSemEventClose },
262 { STKBACK("SUPSemEventCreate"), (void *)(uintptr_t)SUPSemEventCreate },
263 { STKBACK("SUPSemEventGetResolution"), (void *)(uintptr_t)SUPSemEventGetResolution },
264 { STKBACK("SUPSemEventMultiClose"), (void *)(uintptr_t)SUPSemEventMultiClose },
265 { STKBACK("SUPSemEventMultiCreate"), (void *)(uintptr_t)SUPSemEventMultiCreate },
266 { STKBACK("SUPSemEventMultiGetResolution"), (void *)(uintptr_t)SUPSemEventMultiGetResolution },
267 { STKBACK("SUPSemEventMultiReset"), (void *)(uintptr_t)SUPSemEventMultiReset },
268 { STKBACK("SUPSemEventMultiSignal"), (void *)(uintptr_t)SUPSemEventMultiSignal },
269 { STKBACK("SUPSemEventMultiWait"), (void *)(uintptr_t)SUPSemEventMultiWait },
270 { STKBACK("SUPSemEventMultiWaitNoResume"), (void *)(uintptr_t)SUPSemEventMultiWaitNoResume },
271 { STKBACK("SUPSemEventMultiWaitNsAbsIntr"), (void *)(uintptr_t)SUPSemEventMultiWaitNsAbsIntr },
272 { STKBACK("SUPSemEventMultiWaitNsRelIntr"), (void *)(uintptr_t)SUPSemEventMultiWaitNsRelIntr },
273 { STKBACK("SUPSemEventSignal"), (void *)(uintptr_t)SUPSemEventSignal },
274 { STKBACK("SUPSemEventWait"), (void *)(uintptr_t)SUPSemEventWait },
275 { STKBACK("SUPSemEventWaitNoResume"), (void *)(uintptr_t)SUPSemEventWaitNoResume },
276 { STKBACK("SUPSemEventWaitNsAbsIntr"), (void *)(uintptr_t)SUPSemEventWaitNsAbsIntr },
277 { STKBACK("SUPSemEventWaitNsRelIntr"), (void *)(uintptr_t)SUPSemEventWaitNsRelIntr },
278
279 { STKBACK("RTAssertAreQuiet"), (void *)(uintptr_t)RTAssertAreQuiet },
280 { STKBACK("RTAssertMayPanic"), (void *)(uintptr_t)RTAssertMayPanic },
281 { STKBACK("RTAssertMsg1"), (void *)(uintptr_t)RTAssertMsg1 },
282 { STKBACK("RTAssertMsg2AddV"), (void *)(uintptr_t)RTAssertMsg2AddV },
283 { STKBACK("RTAssertMsg2V"), (void *)(uintptr_t)RTAssertMsg2V },
284 { STKBACK("RTAssertSetMayPanic"), (void *)(uintptr_t)RTAssertSetMayPanic },
285 { STKBACK("RTAssertSetQuiet"), (void *)(uintptr_t)RTAssertSetQuiet },
286 { STKOKAY("RTCrc32"), (void *)(uintptr_t)RTCrc32 },
287 { STKOKAY("RTCrc32Finish"), (void *)(uintptr_t)RTCrc32Finish },
288 { STKOKAY("RTCrc32Process"), (void *)(uintptr_t)RTCrc32Process },
289 { STKOKAY("RTCrc32Start"), (void *)(uintptr_t)RTCrc32Start },
290 { STKOKAY("RTErrConvertFromErrno"), (void *)(uintptr_t)RTErrConvertFromErrno },
291 { STKOKAY("RTErrConvertToErrno"), (void *)(uintptr_t)RTErrConvertToErrno },
292 { STKBACK("RTHandleTableAllocWithCtx"), (void *)(uintptr_t)RTHandleTableAllocWithCtx },
293 { STKBACK("RTHandleTableCreate"), (void *)(uintptr_t)RTHandleTableCreate },
294 { STKBACK("RTHandleTableCreateEx"), (void *)(uintptr_t)RTHandleTableCreateEx },
295 { STKBACK("RTHandleTableDestroy"), (void *)(uintptr_t)RTHandleTableDestroy },
296 { STKBACK("RTHandleTableFreeWithCtx"), (void *)(uintptr_t)RTHandleTableFreeWithCtx },
297 { STKBACK("RTHandleTableLookupWithCtx"), (void *)(uintptr_t)RTHandleTableLookupWithCtx },
298 { STKBACK("RTLogBulkUpdate"), (void *)(uintptr_t)RTLogBulkUpdate},
299 { STKBACK("RTLogCheckGroupFlags"), (void *)(uintptr_t)RTLogCheckGroupFlags },
300 { STKBACK("RTLogCreateExV"), (void *)(uintptr_t)RTLogCreateExV },
301 { STKBACK("RTLogDestroy"), (void *)(uintptr_t)RTLogDestroy },
302 { STKBACK("RTLogDefaultInstance"), (void *)(uintptr_t)RTLogDefaultInstance },
303 { STKBACK("RTLogDefaultInstanceEx"), (void *)(uintptr_t)RTLogDefaultInstanceEx },
304 { STKBACK("SUPR0DefaultLogInstanceEx"), (void *)(uintptr_t)SUPR0DefaultLogInstanceEx },
305 { STKBACK("RTLogGetDefaultInstance"), (void *)(uintptr_t)RTLogGetDefaultInstance },
306 { STKBACK("RTLogGetDefaultInstanceEx"), (void *)(uintptr_t)RTLogGetDefaultInstanceEx },
307 { STKBACK("SUPR0GetDefaultLogInstanceEx"), (void *)(uintptr_t)SUPR0GetDefaultLogInstanceEx },
308 { STKBACK("RTLogLoggerExV"), (void *)(uintptr_t)RTLogLoggerExV },
309 { STKBACK("RTLogPrintfV"), (void *)(uintptr_t)RTLogPrintfV },
310 { STKBACK("RTLogRelGetDefaultInstance"), (void *)(uintptr_t)RTLogRelGetDefaultInstance },
311 { STKBACK("RTLogRelGetDefaultInstanceEx"), (void *)(uintptr_t)RTLogRelGetDefaultInstanceEx },
312 { STKBACK("SUPR0GetDefaultLogRelInstanceEx"), (void *)(uintptr_t)SUPR0GetDefaultLogRelInstanceEx },
313 { STKBACK("RTLogSetDefaultInstanceThread"), (void *)(uintptr_t)RTLogSetDefaultInstanceThread },
314 { STKBACK("RTLogSetFlushCallback"), (void *)(uintptr_t)RTLogSetFlushCallback },
315 { STKBACK("RTLogSetR0ProgramStart"), (void *)(uintptr_t)RTLogSetR0ProgramStart },
316 { STKBACK("RTLogSetR0ThreadNameV"), (void *)(uintptr_t)RTLogSetR0ThreadNameV },
317 { STKBACK("RTMemAllocExTag"), (void *)(uintptr_t)RTMemAllocExTag },
318 { STKBACK("RTMemAllocTag"), (void *)(uintptr_t)RTMemAllocTag },
319 { STKBACK("RTMemAllocVarTag"), (void *)(uintptr_t)RTMemAllocVarTag },
320 { STKBACK("RTMemAllocZTag"), (void *)(uintptr_t)RTMemAllocZTag },
321 { STKBACK("RTMemAllocZVarTag"), (void *)(uintptr_t)RTMemAllocZVarTag },
322 { STKBACK("RTMemDupExTag"), (void *)(uintptr_t)RTMemDupExTag },
323 { STKBACK("RTMemDupTag"), (void *)(uintptr_t)RTMemDupTag },
324 { STKBACK("RTMemFree"), (void *)(uintptr_t)RTMemFree },
325 { STKBACK("RTMemFreeEx"), (void *)(uintptr_t)RTMemFreeEx },
326 { STKBACK("RTMemReallocTag"), (void *)(uintptr_t)RTMemReallocTag },
327 { STKBACK("RTMpCpuId"), (void *)(uintptr_t)RTMpCpuId },
328 { STKBACK("RTMpCpuIdFromSetIndex"), (void *)(uintptr_t)RTMpCpuIdFromSetIndex },
329 { STKBACK("RTMpCpuIdToSetIndex"), (void *)(uintptr_t)RTMpCpuIdToSetIndex },
330 { STKBACK("RTMpCurSetIndex"), (void *)(uintptr_t)RTMpCurSetIndex },
331 { STKBACK("RTMpCurSetIndexAndId"), (void *)(uintptr_t)RTMpCurSetIndexAndId },
332 { STKBACK("RTMpGetArraySize"), (void *)(uintptr_t)RTMpGetArraySize },
333 { STKBACK("RTMpGetCount"), (void *)(uintptr_t)RTMpGetCount },
334 { STKBACK("RTMpGetMaxCpuId"), (void *)(uintptr_t)RTMpGetMaxCpuId },
335 { STKBACK("RTMpGetOnlineCount"), (void *)(uintptr_t)RTMpGetOnlineCount },
336 { STKBACK("RTMpGetOnlineSet"), (void *)(uintptr_t)RTMpGetOnlineSet },
337 { STKBACK("RTMpGetSet"), (void *)(uintptr_t)RTMpGetSet },
338 { STKBACK("RTMpIsCpuOnline"), (void *)(uintptr_t)RTMpIsCpuOnline },
339 { STKBACK("RTMpIsCpuPossible"), (void *)(uintptr_t)RTMpIsCpuPossible },
340 { STKBACK("RTMpIsCpuWorkPending"), (void *)(uintptr_t)RTMpIsCpuWorkPending },
341 { STKBACK("RTMpNotificationDeregister"), (void *)(uintptr_t)RTMpNotificationDeregister },
342 { STKBACK("RTMpNotificationRegister"), (void *)(uintptr_t)RTMpNotificationRegister },
343 { STKBACK("RTMpOnAll"), (void *)(uintptr_t)RTMpOnAll },
344 { STKBACK("RTMpOnOthers"), (void *)(uintptr_t)RTMpOnOthers },
345 { STKBACK("RTMpOnSpecific"), (void *)(uintptr_t)RTMpOnSpecific },
346 { STKBACK("RTMpPokeCpu"), (void *)(uintptr_t)RTMpPokeCpu },
347 { STKOKAY("RTNetIPv4AddDataChecksum"), (void *)(uintptr_t)RTNetIPv4AddDataChecksum },
348 { STKOKAY("RTNetIPv4AddTCPChecksum"), (void *)(uintptr_t)RTNetIPv4AddTCPChecksum },
349 { STKOKAY("RTNetIPv4AddUDPChecksum"), (void *)(uintptr_t)RTNetIPv4AddUDPChecksum },
350 { STKOKAY("RTNetIPv4FinalizeChecksum"), (void *)(uintptr_t)RTNetIPv4FinalizeChecksum },
351 { STKOKAY("RTNetIPv4HdrChecksum"), (void *)(uintptr_t)RTNetIPv4HdrChecksum },
352 { STKOKAY("RTNetIPv4IsDHCPValid"), (void *)(uintptr_t)RTNetIPv4IsDHCPValid },
353 { STKOKAY("RTNetIPv4IsHdrValid"), (void *)(uintptr_t)RTNetIPv4IsHdrValid },
354 { STKOKAY("RTNetIPv4IsTCPSizeValid"), (void *)(uintptr_t)RTNetIPv4IsTCPSizeValid },
355 { STKOKAY("RTNetIPv4IsTCPValid"), (void *)(uintptr_t)RTNetIPv4IsTCPValid },
356 { STKOKAY("RTNetIPv4IsUDPSizeValid"), (void *)(uintptr_t)RTNetIPv4IsUDPSizeValid },
357 { STKOKAY("RTNetIPv4IsUDPValid"), (void *)(uintptr_t)RTNetIPv4IsUDPValid },
358 { STKOKAY("RTNetIPv4PseudoChecksum"), (void *)(uintptr_t)RTNetIPv4PseudoChecksum },
359 { STKOKAY("RTNetIPv4PseudoChecksumBits"), (void *)(uintptr_t)RTNetIPv4PseudoChecksumBits },
360 { STKOKAY("RTNetIPv4TCPChecksum"), (void *)(uintptr_t)RTNetIPv4TCPChecksum },
361 { STKOKAY("RTNetIPv4UDPChecksum"), (void *)(uintptr_t)RTNetIPv4UDPChecksum },
362 { STKOKAY("RTNetIPv6PseudoChecksum"), (void *)(uintptr_t)RTNetIPv6PseudoChecksum },
363 { STKOKAY("RTNetIPv6PseudoChecksumBits"), (void *)(uintptr_t)RTNetIPv6PseudoChecksumBits },
364 { STKOKAY("RTNetIPv6PseudoChecksumEx"), (void *)(uintptr_t)RTNetIPv6PseudoChecksumEx },
365 { STKOKAY("RTNetTCPChecksum"), (void *)(uintptr_t)RTNetTCPChecksum },
366 { STKOKAY("RTNetUDPChecksum"), (void *)(uintptr_t)RTNetUDPChecksum },
367 { STKBACK("RTPowerNotificationDeregister"), (void *)(uintptr_t)RTPowerNotificationDeregister },
368 { STKBACK("RTPowerNotificationRegister"), (void *)(uintptr_t)RTPowerNotificationRegister },
369 { STKBACK("RTProcSelf"), (void *)(uintptr_t)RTProcSelf },
370 { STKBACK("RTR0AssertPanicSystem"), (void *)(uintptr_t)RTR0AssertPanicSystem },
371#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
372 { STKBACK("RTR0DbgKrnlInfoOpen"), (void *)(uintptr_t)RTR0DbgKrnlInfoOpen }, /* only-darwin, only-solaris, only-windows */
373 { STKBACK("RTR0DbgKrnlInfoQueryMember"), (void *)(uintptr_t)RTR0DbgKrnlInfoQueryMember }, /* only-darwin, only-solaris, only-windows */
374# if defined(RT_OS_SOLARIS)
375 { STKBACK("RTR0DbgKrnlInfoQuerySize"), (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySize }, /* only-solaris */
376# endif
377 { STKBACK("RTR0DbgKrnlInfoQuerySymbol"), (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySymbol }, /* only-darwin, only-solaris, only-windows */
378 { STKBACK("RTR0DbgKrnlInfoRelease"), (void *)(uintptr_t)RTR0DbgKrnlInfoRelease }, /* only-darwin, only-solaris, only-windows */
379 { STKBACK("RTR0DbgKrnlInfoRetain"), (void *)(uintptr_t)RTR0DbgKrnlInfoRetain }, /* only-darwin, only-solaris, only-windows */
380#endif
381 { STKBACK("RTR0MemAreKrnlAndUsrDifferent"), (void *)(uintptr_t)RTR0MemAreKrnlAndUsrDifferent },
382 { STKBACK("RTR0MemKernelIsValidAddr"), (void *)(uintptr_t)RTR0MemKernelIsValidAddr },
383 { STKBACK("RTR0MemKernelCopyFrom"), (void *)(uintptr_t)RTR0MemKernelCopyFrom },
384 { STKBACK("RTR0MemKernelCopyTo"), (void *)(uintptr_t)RTR0MemKernelCopyTo },
385 { STKOKAY("RTR0MemObjAddress"), (void *)(uintptr_t)RTR0MemObjAddress },
386 { STKOKAY("RTR0MemObjAddressR3"), (void *)(uintptr_t)RTR0MemObjAddressR3 },
387 { STKBACK("RTR0MemObjAllocContTag"), (void *)(uintptr_t)RTR0MemObjAllocContTag },
388 { STKBACK("RTR0MemObjAllocLargeTag"), (void *)(uintptr_t)RTR0MemObjAllocLargeTag },
389 { STKBACK("RTR0MemObjAllocLowTag"), (void *)(uintptr_t)RTR0MemObjAllocLowTag },
390 { STKBACK("RTR0MemObjAllocPageTag"), (void *)(uintptr_t)RTR0MemObjAllocPageTag },
391 { STKBACK("RTR0MemObjAllocPhysExTag"), (void *)(uintptr_t)RTR0MemObjAllocPhysExTag },
392 { STKBACK("RTR0MemObjAllocPhysNCTag"), (void *)(uintptr_t)RTR0MemObjAllocPhysNCTag },
393 { STKBACK("RTR0MemObjAllocPhysTag"), (void *)(uintptr_t)RTR0MemObjAllocPhysTag },
394 { STKBACK("RTR0MemObjEnterPhysTag"), (void *)(uintptr_t)RTR0MemObjEnterPhysTag },
395 { STKBACK("RTR0MemObjFree"), (void *)(uintptr_t)RTR0MemObjFree },
396 { STKBACK("RTR0MemObjGetPagePhysAddr"), (void *)(uintptr_t)RTR0MemObjGetPagePhysAddr },
397 { STKOKAY("RTR0MemObjIsMapping"), (void *)(uintptr_t)RTR0MemObjIsMapping },
398 { STKBACK("RTR0MemObjLockUserTag"), (void *)(uintptr_t)RTR0MemObjLockUserTag },
399 { STKBACK("RTR0MemObjLockKernelTag"), (void *)(uintptr_t)RTR0MemObjLockKernelTag },
400 { STKBACK("RTR0MemObjMapKernelExTag"), (void *)(uintptr_t)RTR0MemObjMapKernelExTag },
401 { STKBACK("RTR0MemObjMapKernelTag"), (void *)(uintptr_t)RTR0MemObjMapKernelTag },
402 { STKBACK("RTR0MemObjMapUserTag"), (void *)(uintptr_t)RTR0MemObjMapUserTag },
403 { STKBACK("RTR0MemObjMapUserExTag"), (void *)(uintptr_t)RTR0MemObjMapUserExTag },
404 { STKBACK("RTR0MemObjProtect"), (void *)(uintptr_t)RTR0MemObjProtect },
405 { STKOKAY("RTR0MemObjSize"), (void *)(uintptr_t)RTR0MemObjSize },
406 { STKBACK("RTR0MemUserCopyFrom"), (void *)(uintptr_t)RTR0MemUserCopyFrom },
407 { STKBACK("RTR0MemUserCopyTo"), (void *)(uintptr_t)RTR0MemUserCopyTo },
408 { STKBACK("RTR0MemUserIsValidAddr"), (void *)(uintptr_t)RTR0MemUserIsValidAddr },
409 { STKBACK("RTR0ProcHandleSelf"), (void *)(uintptr_t)RTR0ProcHandleSelf },
410 { STKBACK("RTSemEventCreate"), (void *)(uintptr_t)RTSemEventCreate },
411 { STKBACK("RTSemEventDestroy"), (void *)(uintptr_t)RTSemEventDestroy },
412 { STKBACK("RTSemEventGetResolution"), (void *)(uintptr_t)RTSemEventGetResolution },
413 { STKBACK("RTSemEventIsSignalSafe"), (void *)(uintptr_t)RTSemEventIsSignalSafe },
414 { STKBACK("RTSemEventMultiCreate"), (void *)(uintptr_t)RTSemEventMultiCreate },
415 { STKBACK("RTSemEventMultiDestroy"), (void *)(uintptr_t)RTSemEventMultiDestroy },
416 { STKBACK("RTSemEventMultiGetResolution"), (void *)(uintptr_t)RTSemEventMultiGetResolution },
417 { STKBACK("RTSemEventMultiIsSignalSafe"), (void *)(uintptr_t)RTSemEventMultiIsSignalSafe },
418 { STKBACK("RTSemEventMultiReset"), (void *)(uintptr_t)RTSemEventMultiReset },
419 { STKBACK("RTSemEventMultiSignal"), (void *)(uintptr_t)RTSemEventMultiSignal },
420 { STKBACK("RTSemEventMultiWait"), (void *)(uintptr_t)RTSemEventMultiWait },
421 { STKBACK("RTSemEventMultiWaitEx"), (void *)(uintptr_t)RTSemEventMultiWaitEx },
422 { STKBACK("RTSemEventMultiWaitExDebug"), (void *)(uintptr_t)RTSemEventMultiWaitExDebug },
423 { STKBACK("RTSemEventMultiWaitNoResume"), (void *)(uintptr_t)RTSemEventMultiWaitNoResume },
424 { STKBACK("RTSemEventSignal"), (void *)(uintptr_t)RTSemEventSignal },
425 { STKBACK("RTSemEventWait"), (void *)(uintptr_t)RTSemEventWait },
426 { STKBACK("RTSemEventWaitEx"), (void *)(uintptr_t)RTSemEventWaitEx },
427 { STKBACK("RTSemEventWaitExDebug"), (void *)(uintptr_t)RTSemEventWaitExDebug },
428 { STKBACK("RTSemEventWaitNoResume"), (void *)(uintptr_t)RTSemEventWaitNoResume },
429 { STKBACK("RTSemFastMutexCreate"), (void *)(uintptr_t)RTSemFastMutexCreate },
430 { STKBACK("RTSemFastMutexDestroy"), (void *)(uintptr_t)RTSemFastMutexDestroy },
431 { STKBACK("RTSemFastMutexRelease"), (void *)(uintptr_t)RTSemFastMutexRelease },
432 { STKBACK("RTSemFastMutexRequest"), (void *)(uintptr_t)RTSemFastMutexRequest },
433 { STKBACK("RTSemMutexCreate"), (void *)(uintptr_t)RTSemMutexCreate },
434 { STKBACK("RTSemMutexDestroy"), (void *)(uintptr_t)RTSemMutexDestroy },
435 { STKBACK("RTSemMutexRelease"), (void *)(uintptr_t)RTSemMutexRelease },
436 { STKBACK("RTSemMutexRequest"), (void *)(uintptr_t)RTSemMutexRequest },
437 { STKBACK("RTSemMutexRequestDebug"), (void *)(uintptr_t)RTSemMutexRequestDebug },
438 { STKBACK("RTSemMutexRequestNoResume"), (void *)(uintptr_t)RTSemMutexRequestNoResume },
439 { STKBACK("RTSemMutexRequestNoResumeDebug"), (void *)(uintptr_t)RTSemMutexRequestNoResumeDebug },
440 { STKBACK("RTSpinlockAcquire"), (void *)(uintptr_t)RTSpinlockAcquire },
441 { STKBACK("RTSpinlockCreate"), (void *)(uintptr_t)RTSpinlockCreate },
442 { STKBACK("RTSpinlockDestroy"), (void *)(uintptr_t)RTSpinlockDestroy },
443 { STKBACK("RTSpinlockRelease"), (void *)(uintptr_t)RTSpinlockRelease },
444 { STKOKAY("RTStrCopy"), (void *)(uintptr_t)RTStrCopy },
445 { STKBACK("RTStrDupTag"), (void *)(uintptr_t)RTStrDupTag },
446 { STKBACK("RTStrFormatNumber"), (void *)(uintptr_t)RTStrFormatNumber },
447 { STKBACK("RTStrFormatTypeDeregister"), (void *)(uintptr_t)RTStrFormatTypeDeregister },
448 { STKBACK("RTStrFormatTypeRegister"), (void *)(uintptr_t)RTStrFormatTypeRegister },
449 { STKBACK("RTStrFormatTypeSetUser"), (void *)(uintptr_t)RTStrFormatTypeSetUser },
450 { STKBACK("RTStrFormatV"), (void *)(uintptr_t)RTStrFormatV },
451 { STKBACK("RTStrFree"), (void *)(uintptr_t)RTStrFree },
452 { STKOKAY("RTStrNCmp"), (void *)(uintptr_t)RTStrNCmp },
453 { STKBACK("RTStrPrintfExV"), (void *)(uintptr_t)RTStrPrintfExV },
454 { STKBACK("RTStrPrintfV"), (void *)(uintptr_t)RTStrPrintfV },
455 { STKBACK("RTStrPrintf2ExV"), (void *)(uintptr_t)RTStrPrintf2ExV },
456 { STKBACK("RTStrPrintf2V"), (void *)(uintptr_t)RTStrPrintf2V },
457 { STKBACK("RTThreadCreate"), (void *)(uintptr_t)RTThreadCreate },
458 { STKBACK("RTThreadCtxHookIsEnabled"), (void *)(uintptr_t)RTThreadCtxHookIsEnabled },
459 { STKBACK("RTThreadCtxHookCreate"), (void *)(uintptr_t)RTThreadCtxHookCreate },
460 { STKBACK("RTThreadCtxHookDestroy"), (void *)(uintptr_t)RTThreadCtxHookDestroy },
461 { STKBACK("RTThreadCtxHookDisable"), (void *)(uintptr_t)RTThreadCtxHookDisable },
462 { STKBACK("RTThreadCtxHookEnable"), (void *)(uintptr_t)RTThreadCtxHookEnable },
463 { STKBACK("RTThreadGetName"), (void *)(uintptr_t)RTThreadGetName },
464 { STKBACK("RTThreadGetNative"), (void *)(uintptr_t)RTThreadGetNative },
465 { STKBACK("RTThreadGetType"), (void *)(uintptr_t)RTThreadGetType },
466 { STKBACK("RTThreadIsInInterrupt"), (void *)(uintptr_t)RTThreadIsInInterrupt },
467 { STKBACK("RTThreadNativeSelf"), (void *)(uintptr_t)RTThreadNativeSelf },
468 { STKBACK("RTThreadPreemptDisable"), (void *)(uintptr_t)RTThreadPreemptDisable },
469 { STKBACK("RTThreadPreemptIsEnabled"), (void *)(uintptr_t)RTThreadPreemptIsEnabled },
470 { STKBACK("RTThreadPreemptIsPending"), (void *)(uintptr_t)RTThreadPreemptIsPending },
471 { STKBACK("RTThreadPreemptIsPendingTrusty"), (void *)(uintptr_t)RTThreadPreemptIsPendingTrusty },
472 { STKBACK("RTThreadPreemptIsPossible"), (void *)(uintptr_t)RTThreadPreemptIsPossible },
473 { STKBACK("RTThreadPreemptRestore"), (void *)(uintptr_t)RTThreadPreemptRestore },
474 { STKBACK("RTThreadQueryTerminationStatus"), (void *)(uintptr_t)RTThreadQueryTerminationStatus },
475 { STKBACK("RTThreadSelf"), (void *)(uintptr_t)RTThreadSelf },
476 { STKBACK("RTThreadSelfName"), (void *)(uintptr_t)RTThreadSelfName },
477 { STKBACK("RTThreadSleep"), (void *)(uintptr_t)RTThreadSleep },
478 { STKBACK("RTThreadUserReset"), (void *)(uintptr_t)RTThreadUserReset },
479 { STKBACK("RTThreadUserSignal"), (void *)(uintptr_t)RTThreadUserSignal },
480 { STKBACK("RTThreadUserWait"), (void *)(uintptr_t)RTThreadUserWait },
481 { STKBACK("RTThreadUserWaitNoResume"), (void *)(uintptr_t)RTThreadUserWaitNoResume },
482 { STKBACK("RTThreadWait"), (void *)(uintptr_t)RTThreadWait },
483 { STKBACK("RTThreadWaitNoResume"), (void *)(uintptr_t)RTThreadWaitNoResume },
484 { STKBACK("RTThreadYield"), (void *)(uintptr_t)RTThreadYield },
485 { STKBACK("RTTimeNow"), (void *)(uintptr_t)RTTimeNow },
486 { STKBACK("RTTimerCanDoHighResolution"), (void *)(uintptr_t)RTTimerCanDoHighResolution },
487 { STKBACK("RTTimerChangeInterval"), (void *)(uintptr_t)RTTimerChangeInterval },
488 { STKBACK("RTTimerCreate"), (void *)(uintptr_t)RTTimerCreate },
489 { STKBACK("RTTimerCreateEx"), (void *)(uintptr_t)RTTimerCreateEx },
490 { STKBACK("RTTimerDestroy"), (void *)(uintptr_t)RTTimerDestroy },
491 { STKBACK("RTTimerGetSystemGranularity"), (void *)(uintptr_t)RTTimerGetSystemGranularity },
492 { STKBACK("RTTimerReleaseSystemGranularity"), (void *)(uintptr_t)RTTimerReleaseSystemGranularity },
493 { STKBACK("RTTimerRequestSystemGranularity"), (void *)(uintptr_t)RTTimerRequestSystemGranularity },
494 { STKBACK("RTTimerStart"), (void *)(uintptr_t)RTTimerStart },
495 { STKBACK("RTTimerStop"), (void *)(uintptr_t)RTTimerStop },
496 { STKBACK("RTTimeSystemMilliTS"), (void *)(uintptr_t)RTTimeSystemMilliTS },
497 { STKBACK("RTTimeSystemNanoTS"), (void *)(uintptr_t)RTTimeSystemNanoTS },
498 { STKOKAY("RTUuidCompare"), (void *)(uintptr_t)RTUuidCompare },
499 { STKOKAY("RTUuidCompareStr"), (void *)(uintptr_t)RTUuidCompareStr },
500 { STKOKAY("RTUuidFromStr"), (void *)(uintptr_t)RTUuidFromStr },
501/* SED: END */
502};
503
504#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
505/**
506 * Drag in the rest of IPRT since we share it with the
507 * rest of the kernel modules on darwin.
508 */
509struct CLANG11WERIDNESS { PFNRT pfn; } g_apfnVBoxDrvIPRTDeps[] =
510{
511 /* VBoxNetAdp */
512 { (PFNRT)RTRandBytes },
513 /* VBoxUSB */
514 { (PFNRT)RTPathStripFilename },
515#if !defined(RT_OS_FREEBSD)
516 { (PFNRT)RTHandleTableAlloc },
517 { (PFNRT)RTStrPurgeEncoding },
518#endif
519 { NULL }
520};
521#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
522
523
524
525/**
526 * Initializes the device extension structure.
527 *
528 * @returns IPRT status code.
529 * @param pDevExt The device extension to initialize.
530 * @param cbSession The size of the session structure. The size of
531 * SUPDRVSESSION may be smaller when SUPDRV_AGNOSTIC is
532 * defined because we're skipping the OS specific members
533 * then.
534 */
535int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt, size_t cbSession)
536{
537 int rc;
538
539#ifdef SUPDRV_WITH_RELEASE_LOGGER
540 /*
541 * Create the release log.
542 */
543 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
544 PRTLOGGER pRelLogger;
545 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
546 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
547 if (RT_SUCCESS(rc))
548 RTLogRelSetDefaultInstance(pRelLogger);
549 /** @todo Add native hook for getting logger config parameters and setting
550 * them. On linux we should use the module parameter stuff... */
551#endif
552
553#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
554 /*
555 * Require SSE2 to be present.
556 */
557 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
558 {
559 SUPR0Printf("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1));
560 return VERR_UNSUPPORTED_CPU;
561 }
562#endif
563
564 /*
565 * Initialize it.
566 */
567 memset(pDevExt, 0, sizeof(*pDevExt)); /* Does not wipe OS specific tail section of the structure. */
568 pDevExt->Spinlock = NIL_RTSPINLOCK;
569 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
570 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
571#ifdef SUPDRV_USE_MUTEX_FOR_LDR
572 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
573#else
574 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
575#endif
576#ifdef SUPDRV_USE_MUTEX_FOR_GIP
577 pDevExt->mtxGip = NIL_RTSEMMUTEX;
578 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
579#else
580 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
581 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
582#endif
583
584 rc = RTSpinlockCreate(&pDevExt->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvDevExt");
585 if (RT_SUCCESS(rc))
586 rc = RTSpinlockCreate(&pDevExt->hGipSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvGip");
587 if (RT_SUCCESS(rc))
588 rc = RTSpinlockCreate(&pDevExt->hSessionHashTabSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvSession");
589
590 if (RT_SUCCESS(rc))
591#ifdef SUPDRV_USE_MUTEX_FOR_LDR
592 rc = RTSemMutexCreate(&pDevExt->mtxLdr);
593#else
594 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
595#endif
596 if (RT_SUCCESS(rc))
597#ifdef SUPDRV_USE_MUTEX_FOR_GIP
598 rc = RTSemMutexCreate(&pDevExt->mtxTscDelta);
599#else
600 rc = RTSemFastMutexCreate(&pDevExt->mtxTscDelta);
601#endif
602 if (RT_SUCCESS(rc))
603 {
604 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
605 if (RT_SUCCESS(rc))
606 {
607#ifdef SUPDRV_USE_MUTEX_FOR_GIP
608 rc = RTSemMutexCreate(&pDevExt->mtxGip);
609#else
610 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
611#endif
612 if (RT_SUCCESS(rc))
613 {
614 rc = supdrvGipCreate(pDevExt);
615 if (RT_SUCCESS(rc))
616 {
617 rc = supdrvTracerInit(pDevExt);
618 if (RT_SUCCESS(rc))
619 {
620 pDevExt->pLdrInitImage = NULL;
621 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
622 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
623 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
624 pDevExt->cbSession = (uint32_t)cbSession;
625
626 /*
627 * Fixup the absolute symbols.
628 *
629 * Because of the table indexing assumptions we'll have a little #ifdef orgy
630 * here rather than distributing this to OS specific files. At least for now.
631 */
632#ifdef RT_OS_DARWIN
633# if ARCH_BITS == 32
634 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
635 {
636 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
637 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
638 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
639 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
640 }
641 else
642 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
643 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
644 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
645 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
646 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
647 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
648 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
649# else /* 64-bit darwin: */
650 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
651 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
652 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
653 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
654 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
655 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
656 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
657 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
658 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
659 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
660
661# endif
662#else /* !RT_OS_DARWIN */
663# if ARCH_BITS == 64
664 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
665 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
666 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
667 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
668# else
669 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
670# endif
671 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
672 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
673 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
674 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
675 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
676 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
677#endif /* !RT_OS_DARWIN */
678 return VINF_SUCCESS;
679 }
680
681 supdrvGipDestroy(pDevExt);
682 }
683
684#ifdef SUPDRV_USE_MUTEX_FOR_GIP
685 RTSemMutexDestroy(pDevExt->mtxGip);
686 pDevExt->mtxGip = NIL_RTSEMMUTEX;
687#else
688 RTSemFastMutexDestroy(pDevExt->mtxGip);
689 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
690#endif
691 }
692 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
693 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
694 }
695 }
696
697#ifdef SUPDRV_USE_MUTEX_FOR_GIP
698 RTSemMutexDestroy(pDevExt->mtxTscDelta);
699 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
700#else
701 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
702 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
703#endif
704#ifdef SUPDRV_USE_MUTEX_FOR_LDR
705 RTSemMutexDestroy(pDevExt->mtxLdr);
706 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
707#else
708 RTSemFastMutexDestroy(pDevExt->mtxLdr);
709 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
710#endif
711 RTSpinlockDestroy(pDevExt->Spinlock);
712 pDevExt->Spinlock = NIL_RTSPINLOCK;
713 RTSpinlockDestroy(pDevExt->hGipSpinlock);
714 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
715 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
716 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
717
718#ifdef SUPDRV_WITH_RELEASE_LOGGER
719 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
720 RTLogDestroy(RTLogSetDefaultInstance(NULL));
721#endif
722
723 return rc;
724}
725
726
727/**
728 * Delete the device extension (e.g. cleanup members).
729 *
730 * @param pDevExt The device extension to delete.
731 */
732void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
733{
734 PSUPDRVOBJ pObj;
735 PSUPDRVUSAGE pUsage;
736
737 /*
738 * Kill mutexes and spinlocks.
739 */
740#ifdef SUPDRV_USE_MUTEX_FOR_GIP
741 RTSemMutexDestroy(pDevExt->mtxGip);
742 pDevExt->mtxGip = NIL_RTSEMMUTEX;
743 RTSemMutexDestroy(pDevExt->mtxTscDelta);
744 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
745#else
746 RTSemFastMutexDestroy(pDevExt->mtxGip);
747 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
748 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
749 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
750#endif
751#ifdef SUPDRV_USE_MUTEX_FOR_LDR
752 RTSemMutexDestroy(pDevExt->mtxLdr);
753 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
754#else
755 RTSemFastMutexDestroy(pDevExt->mtxLdr);
756 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
757#endif
758 RTSpinlockDestroy(pDevExt->Spinlock);
759 pDevExt->Spinlock = NIL_RTSPINLOCK;
760 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
761 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
762 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
763 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
764
765 /*
766 * Free lists.
767 */
768 /* objects. */
769 pObj = pDevExt->pObjs;
770 Assert(!pObj); /* (can trigger on forced unloads) */
771 pDevExt->pObjs = NULL;
772 while (pObj)
773 {
774 void *pvFree = pObj;
775 pObj = pObj->pNext;
776 RTMemFree(pvFree);
777 }
778
779 /* usage records. */
780 pUsage = pDevExt->pUsageFree;
781 pDevExt->pUsageFree = NULL;
782 while (pUsage)
783 {
784 void *pvFree = pUsage;
785 pUsage = pUsage->pNext;
786 RTMemFree(pvFree);
787 }
788
789 /* kill the GIP. */
790 supdrvGipDestroy(pDevExt);
791 RTSpinlockDestroy(pDevExt->hGipSpinlock);
792 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
793
794 supdrvTracerTerm(pDevExt);
795
796#ifdef SUPDRV_WITH_RELEASE_LOGGER
797 /* destroy the loggers. */
798 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
799 RTLogDestroy(RTLogSetDefaultInstance(NULL));
800#endif
801}
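/*
 * Sketch of how the OS specific glue is expected to pair the two routines
 * above (hypothetical module init/term, not lifted from any SUPDrv-<os>.c):
 *
 *     static SUPDRVDEVEXT g_ExampleDevExt;             // hypothetical instance
 *
 *     static int exampleModuleInit(void)
 *     {
 *         int rc = supdrvInitDevExt(&g_ExampleDevExt, sizeof(SUPDRVSESSION));
 *         if (RT_FAILURE(rc))
 *             return rc;       // supdrvInitDevExt undoes its own partial work on failure
 *         // ... register the device node with the OS ...
 *         return VINF_SUCCESS;
 *     }
 *
 *     static void exampleModuleTerm(void)
 *     {
 *         supdrvDeleteDevExt(&g_ExampleDevExt);        // counterpart of supdrvInitDevExt
 *     }
 */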
802
803
804/**
805 * Create session.
806 *
807 * @returns IPRT status code.
808 * @param pDevExt Device extension.
809 * @param fUser Flag indicating whether this is a user or kernel
810 * session.
811 * @param fUnrestricted Unrestricted access (system) or restricted access
812 * (user)?
813 * @param ppSession Where to store the pointer to the session data.
814 */
815int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, bool fUnrestricted, PSUPDRVSESSION *ppSession)
816{
817 int rc;
818 PSUPDRVSESSION pSession;
819
820 if (!SUP_IS_DEVEXT_VALID(pDevExt))
821 return VERR_INVALID_PARAMETER;
822
823 /*
824 * Allocate memory for the session data.
825 */
826 pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(pDevExt->cbSession);
827 if (pSession)
828 {
829 /* Initialize session data. */
830 rc = RTSpinlockCreate(&pSession->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "SUPDrvSession");
831 if (!rc)
832 {
833 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
834 RTHANDLETABLE_FLAGS_LOCKED_IRQ_SAFE | RTHANDLETABLE_FLAGS_CONTEXT,
835 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
836 if (RT_SUCCESS(rc))
837 {
838 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
839 pSession->pDevExt = pDevExt;
840 pSession->u32Cookie = BIRD_INV;
841 pSession->fUnrestricted = fUnrestricted;
842 /*pSession->fInHashTable = false; */
843 pSession->cRefs = 1;
844 /*pSession->pCommonNextHash = NULL;
845 pSession->ppOsSessionPtr = NULL; */
846 if (fUser)
847 {
848 pSession->Process = RTProcSelf();
849 pSession->R0Process = RTR0ProcHandleSelf();
850 }
851 else
852 {
853 pSession->Process = NIL_RTPROCESS;
854 pSession->R0Process = NIL_RTR0PROCESS;
855 }
856 /*pSession->pLdrUsage = NULL;
857 pSession->pVM = NULL;
858 pSession->pUsage = NULL;
859 pSession->pGip = NULL;
860 pSession->fGipReferenced = false;
861 pSession->Bundle.cUsed = 0; */
862 pSession->Uid = NIL_RTUID;
863 pSession->Gid = NIL_RTGID;
864 /*pSession->uTracerData = 0;*/
865 pSession->hTracerCaller = NIL_RTNATIVETHREAD;
866 RTListInit(&pSession->TpProviders);
867 /*pSession->cTpProviders = 0;*/
868 /*pSession->cTpProbesFiring = 0;*/
869 RTListInit(&pSession->TpUmods);
870 /*RT_ZERO(pSession->apTpLookupTable);*/
871
872 VBOXDRV_SESSION_CREATE(pSession, fUser);
873 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
874 return VINF_SUCCESS;
875 }
876
877 RTSpinlockDestroy(pSession->Spinlock);
878 }
879 RTMemFree(pSession);
880 *ppSession = NULL;
881 Log(("Failed to create spinlock, rc=%d!\n", rc));
882 }
883 else
884 rc = VERR_NO_MEMORY;
885
886 return rc;
887}
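/*
 * Sketch (hypothetical OS open handler, illustration only): a ring-3 open of
 * the device typically creates its session like this; the OS specific glue
 * then records the caller's credentials (the Uid/Gid members initialized to
 * NIL above) and publishes the session via supdrvSessionHashTabInsert()
 * further down.  The value names are made up.
 *
 *     PSUPDRVSESSION pSession = NULL;
 *     int rc = supdrvCreateSession(pDevExt, true, fUnrestricted, &pSession);  // fUser = true
 *     if (RT_FAILURE(rc))
 *         return rc;
 *     pSession->Uid = UidOfCaller;                     // hypothetical, filled in by OS glue
 *     pSession->Gid = GidOfCaller;
 */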
888
889
890/**
891 * Cleans up the session in the context of the process to which it belongs; the
892 * caller will free the session and the session spinlock.
893 *
894 * This should normally occur when the session is closed or as the process
895 * exits. Careful reference counting in the OS specific code makes sure that
896 * there cannot be any races between process/handle cleanup callbacks and
897 * threads doing I/O control calls.
898 *
899 * @param pDevExt The device extension.
900 * @param pSession Session data.
901 */
902static void supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
903{
904 int rc;
905 PSUPDRVBUNDLE pBundle;
906 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
907
908 Assert(!pSession->fInHashTable);
909 Assert(!pSession->ppOsSessionPtr);
910 AssertLogRelMsg(pSession->R0Process == RTR0ProcHandleSelf() || pSession->R0Process == NIL_RTR0PROCESS,
911 ("R0Process=%p cur=%p; curpid=%u\n",
912 pSession->R0Process, RTR0ProcHandleSelf(), RTProcSelf()));
913
914 /*
915 * Remove logger instances related to this session.
916 */
917 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
918
919 /*
920 * Destroy the handle table.
921 */
922 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
923 AssertRC(rc);
924 pSession->hHandleTable = NIL_RTHANDLETABLE;
925
926 /*
927 * Release object references made in this session.
928 * In theory there should be no one racing us in this session.
929 */
930 Log2(("release objects - start\n"));
931 if (pSession->pUsage)
932 {
933 PSUPDRVUSAGE pUsage;
934 RTSpinlockAcquire(pDevExt->Spinlock);
935
936 while ((pUsage = pSession->pUsage) != NULL)
937 {
938 PSUPDRVOBJ pObj = pUsage->pObj;
939 pSession->pUsage = pUsage->pNext;
940
941 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
942 if (pUsage->cUsage < pObj->cUsage)
943 {
944 pObj->cUsage -= pUsage->cUsage;
945 RTSpinlockRelease(pDevExt->Spinlock);
946 }
947 else
948 {
949 /* Destroy the object and free the record. */
950 if (pDevExt->pObjs == pObj)
951 pDevExt->pObjs = pObj->pNext;
952 else
953 {
954 PSUPDRVOBJ pObjPrev;
955 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
956 if (pObjPrev->pNext == pObj)
957 {
958 pObjPrev->pNext = pObj->pNext;
959 break;
960 }
961 Assert(pObjPrev);
962 }
963 RTSpinlockRelease(pDevExt->Spinlock);
964
965 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
966 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
967 if (pObj->pfnDestructor)
968 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
969 RTMemFree(pObj);
970 }
971
972 /* free it and continue. */
973 RTMemFree(pUsage);
974
975 RTSpinlockAcquire(pDevExt->Spinlock);
976 }
977
978 RTSpinlockRelease(pDevExt->Spinlock);
979 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
980 }
981 Log2(("release objects - done\n"));
982
983 /*
984 * Make sure the associated VM pointers are NULL.
985 */
986 if (pSession->pSessionGVM || pSession->pSessionVM || pSession->pFastIoCtrlVM)
987 {
988 SUPR0Printf("supdrvCleanupSession: VM not disassociated! pSessionGVM=%p pSessionVM=%p pFastIoCtrlVM=%p\n",
989 pSession->pSessionGVM, pSession->pSessionVM, pSession->pFastIoCtrlVM);
990 pSession->pSessionGVM = NULL;
991 pSession->pSessionVM = NULL;
992 pSession->pFastIoCtrlVM = NULL;
993 }
994
995 /*
996 * Do tracer cleanups related to this session.
997 */
998 Log2(("release tracer stuff - start\n"));
999 supdrvTracerCleanupSession(pDevExt, pSession);
1000 Log2(("release tracer stuff - end\n"));
1001
1002 /*
1003 * Release memory allocated in the session.
1004 *
1005 * We do not serialize this as we assume that the application will
1006 * not allocate memory while closing the file handle object.
1007 */
1008 Log2(("freeing memory:\n"));
1009 pBundle = &pSession->Bundle;
1010 while (pBundle)
1011 {
1012 PSUPDRVBUNDLE pToFree;
1013 unsigned i;
1014
1015 /*
1016 * Check and unlock all entries in the bundle.
1017 */
1018 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
1019 {
1020 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
1021 {
1022 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
1023 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
1024 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
1025 {
1026 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
1027 AssertRC(rc); /** @todo figure out how to handle this. */
1028 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
1029 }
1030 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
1031 AssertRC(rc); /** @todo figure out how to handle this. */
1032 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
1033 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
1034 }
1035 }
1036
1037 /*
1038 * Advance and free previous bundle.
1039 */
1040 pToFree = pBundle;
1041 pBundle = pBundle->pNext;
1042
1043 pToFree->pNext = NULL;
1044 pToFree->cUsed = 0;
1045 if (pToFree != &pSession->Bundle)
1046 RTMemFree(pToFree);
1047 }
1048 Log2(("freeing memory - done\n"));
1049
1050 /*
1051 * Deregister component factories.
1052 */
1053 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
1054 Log2(("deregistering component factories:\n"));
1055 if (pDevExt->pComponentFactoryHead)
1056 {
1057 PSUPDRVFACTORYREG pPrev = NULL;
1058 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
1059 while (pCur)
1060 {
1061 if (pCur->pSession == pSession)
1062 {
1063 /* unlink it */
1064 PSUPDRVFACTORYREG pNext = pCur->pNext;
1065 if (pPrev)
1066 pPrev->pNext = pNext;
1067 else
1068 pDevExt->pComponentFactoryHead = pNext;
1069
1070 /* free it */
1071 pCur->pNext = NULL;
1072 pCur->pSession = NULL;
1073 pCur->pFactory = NULL;
1074 RTMemFree(pCur);
1075
1076 /* next */
1077 pCur = pNext;
1078 }
1079 else
1080 {
1081 /* next */
1082 pPrev = pCur;
1083 pCur = pCur->pNext;
1084 }
1085 }
1086 }
1087 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
1088 Log2(("deregistering component factories - done\n"));
1089
1090 /*
1091 * Loaded images need to be dereferenced and possibly freed up.
1092 */
1093 supdrvLdrLock(pDevExt);
1094 Log2(("freeing images:\n"));
1095 if (pSession->pLdrUsage)
1096 {
1097 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
1098 pSession->pLdrUsage = NULL;
1099 while (pUsage)
1100 {
1101 void *pvFree = pUsage;
1102 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
1103 uint32_t cUsage = pUsage->cRing0Usage + pUsage->cRing3Usage;
1104 if (pImage->cImgUsage > cUsage)
1105 supdrvLdrSubtractUsage(pDevExt, pImage, cUsage);
1106 else
1107 supdrvLdrFree(pDevExt, pImage);
1108 pUsage->pImage = NULL;
1109 pUsage = pUsage->pNext;
1110 RTMemFree(pvFree);
1111 }
1112 }
1113 supdrvLdrUnlock(pDevExt);
1114 Log2(("freeing images - done\n"));
1115
1116 /*
1117 * Unmap the GIP.
1118 */
1119 Log2(("unmapping GIP:\n"));
1120 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
1121 {
1122 SUPR0GipUnmap(pSession);
1123 pSession->fGipReferenced = 0;
1124 }
1125 Log2(("unmapping GIP - done\n"));
1126}
1127
1128
1129/**
1130 * Common code for freeing a session when the reference count reaches zero.
1131 *
1132 * @param pDevExt Device extension.
1133 * @param pSession Session data.
1134 * This data will be freed by this routine.
1135 */
1136static void supdrvDestroySession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1137{
1138 VBOXDRV_SESSION_CLOSE(pSession);
1139
1140 /*
1141 * Cleanup the session first.
1142 */
1143 supdrvCleanupSession(pDevExt, pSession);
1144 supdrvOSCleanupSession(pDevExt, pSession);
1145
1146 /*
1147 * Free the rest of the session stuff.
1148 */
1149 RTSpinlockDestroy(pSession->Spinlock);
1150 pSession->Spinlock = NIL_RTSPINLOCK;
1151 pSession->pDevExt = NULL;
1152 RTMemFree(pSession);
1153 LogFlow(("supdrvDestroySession: returns\n"));
1154}
1155
1156
1157/**
1158 * Inserts the session into the global hash table.
1159 *
1160 * @retval VINF_SUCCESS on success.
1161 * @retval VERR_WRONG_ORDER if the session was already inserted (asserted).
1162 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1163 * session (asserted).
1164 * @retval VERR_DUPLICATE if there is already a session for that pid.
1165 *
1166 * @param pDevExt The device extension.
1167 * @param pSession The session.
1168 * @param ppOsSessionPtr Pointer to the OS session pointer, if any is
1169 * available and used. This will be set to point to the
1170 * session while under the protection of the session
1171 * hash table spinlock. It will also be kept in
1172 * PSUPDRVSESSION::ppOsSessionPtr for lookup and
1173 * cleanup use.
1174 * @param pvUser Argument for supdrvOSSessionHashTabInserted.
1175 */
1176int VBOXCALL supdrvSessionHashTabInsert(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVSESSION *ppOsSessionPtr,
1177 void *pvUser)
1178{
1179 PSUPDRVSESSION pCur;
1180 unsigned iHash;
1181
1182 /*
1183 * Validate input.
1184 */
1185 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1186 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1187
1188 /*
1189 * Calculate the hash table index and acquire the spinlock.
1190 */
1191 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1192
1193 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1194
1195 /*
1196 * If there is a collision, we need to carefully check whether we got a
1197 * duplicate. There can only be one open session per process.
1198 */
1199 pCur = pDevExt->apSessionHashTab[iHash];
1200 if (pCur)
1201 {
1202 while (pCur && pCur->Process != pSession->Process)
1203 pCur = pCur->pCommonNextHash;
1204
1205 if (pCur)
1206 {
1207 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1208 if (pCur == pSession)
1209 {
1210 Assert(pSession->fInHashTable);
1211 AssertFailed();
1212 return VERR_WRONG_ORDER;
1213 }
1214 Assert(!pSession->fInHashTable);
1215 if (pCur->R0Process == pSession->R0Process)
1216 return VERR_RESOURCE_IN_USE;
1217 return VERR_DUPLICATE;
1218 }
1219 }
1220 Assert(!pSession->fInHashTable);
1221 Assert(!pSession->ppOsSessionPtr);
1222
1223 /*
1224 * Insert it, doing a callout to the OS specific code in case it has
1225 * anything it wishes to do while we're holding the spinlock.
1226 */
1227 pSession->pCommonNextHash = pDevExt->apSessionHashTab[iHash];
1228 pDevExt->apSessionHashTab[iHash] = pSession;
1229 pSession->fInHashTable = true;
1230 ASMAtomicIncS32(&pDevExt->cSessions);
1231
1232 pSession->ppOsSessionPtr = ppOsSessionPtr;
1233 if (ppOsSessionPtr)
1234 ASMAtomicWritePtr(ppOsSessionPtr, pSession);
1235
1236 supdrvOSSessionHashTabInserted(pDevExt, pSession, pvUser);
1237
1238 /*
1239 * Retain a reference for the pointer in the session table.
1240 */
1241 ASMAtomicIncU32(&pSession->cRefs);
1242
1243 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1244 return VINF_SUCCESS;
1245}
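
/*
 * Illustrative sketch (hypothetical OS glue, not a real call site): how the insert
 * function above could be used once supdrvCreateSession() has produced a ring-3
 * session.  Only the functions and status codes referenced exist in this file; the
 * surrounding control flow is made up.
 */
#if 0
    int rc = supdrvSessionHashTabInsert(pDevExt, pSession, NULL /*ppOsSessionPtr*/, NULL /*pvUser*/);
    if (RT_FAILURE(rc))
    {
        /* VERR_DUPLICATE / VERR_RESOURCE_IN_USE: the process already has an open session. */
        supdrvSessionRelease(pSession);
        return rc;
    }
#endif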
1246
1247
1248/**
1249 * Removes the session from the global hash table.
1250 *
1251 * @retval VINF_SUCCESS on success.
1252 * @retval VERR_NOT_FOUND if the session was already removed (asserted).
1253 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1254 * session (asserted).
1255 *
1256 * @param pDevExt The device extension.
1257 * @param pSession The session. The caller is expected to have a reference
1258 * to this so it won't croak on us when we release the hash
1259 * table reference.
1260 * @param pvUser OS specific context value for the
1261 *                      supdrvOSSessionHashTabRemoved callback.
1262 */
1263int VBOXCALL supdrvSessionHashTabRemove(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, void *pvUser)
1264{
1265 PSUPDRVSESSION pCur;
1266 unsigned iHash;
1267 int32_t cRefs;
1268
1269 /*
1270 * Validate input.
1271 */
1272 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1273 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1274
1275 /*
1276 * Calculate the hash table index and acquire the spinlock.
1277 */
1278 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1279
1280 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1281
1282 /*
1283 * Unlink it.
1284 */
1285 pCur = pDevExt->apSessionHashTab[iHash];
1286 if (pCur == pSession)
1287 pDevExt->apSessionHashTab[iHash] = pSession->pCommonNextHash;
1288 else
1289 {
1290 PSUPDRVSESSION pPrev = pCur;
1291 while (pCur && pCur != pSession)
1292 {
1293 pPrev = pCur;
1294 pCur = pCur->pCommonNextHash;
1295 }
1296 if (pCur)
1297 pPrev->pCommonNextHash = pCur->pCommonNextHash;
1298 else
1299 {
1300 Assert(!pSession->fInHashTable);
1301 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1302 return VERR_NOT_FOUND;
1303 }
1304 }
1305
1306 pSession->pCommonNextHash = NULL;
1307 pSession->fInHashTable = false;
1308
1309 ASMAtomicDecS32(&pDevExt->cSessions);
1310
1311 /*
1312 * Clear OS specific session pointer if available and do the OS callback.
1313 */
1314 if (pSession->ppOsSessionPtr)
1315 {
1316 ASMAtomicCmpXchgPtr(pSession->ppOsSessionPtr, NULL, pSession);
1317 pSession->ppOsSessionPtr = NULL;
1318 }
1319
1320 supdrvOSSessionHashTabRemoved(pDevExt, pSession, pvUser);
1321
1322 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1323
1324 /*
1325 * Drop the reference the hash table had to the session. This shouldn't
1326 * be the last reference!
1327 */
1328 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1329 Assert(cRefs > 0 && cRefs < _1M);
1330 if (cRefs == 0)
1331 supdrvDestroySession(pDevExt, pSession);
1332
1333 return VINF_SUCCESS;
1334}
1335
1336
1337/**
1338 * Looks up the session for the current process in the global hash table or in
1339 * OS specific pointer.
1340 *
1341 * @returns Pointer to the session with a reference that the caller must
1342 * release. If no valid session was found, NULL is returned.
1343 *
1344 * @param pDevExt The device extension.
1345 * @param Process The process ID.
1346 * @param R0Process The ring-0 process handle.
1347 * @param ppOsSessionPtr The OS session pointer if available. If not NULL,
1348 * this is used instead of the hash table. For
1349 * additional safety it must then be equal to the
1350 * SUPDRVSESSION::ppOsSessionPtr member.
1351 * This can be NULL even if the OS has a session
1352 * pointer.
1353 */
1354PSUPDRVSESSION VBOXCALL supdrvSessionHashTabLookup(PSUPDRVDEVEXT pDevExt, RTPROCESS Process, RTR0PROCESS R0Process,
1355 PSUPDRVSESSION *ppOsSessionPtr)
1356{
1357 PSUPDRVSESSION pCur;
1358 unsigned iHash;
1359
1360 /*
1361 * Validate input.
1362 */
1363 AssertReturn(R0Process != NIL_RTR0PROCESS, NULL);
1364
1365 /*
1366 * Calculate the hash table index and acquire the spinlock.
1367 */
1368 iHash = SUPDRV_SESSION_HASH(Process);
1369
1370 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1371
1372 /*
1373 * If an OS session pointer is provided, always use it.
1374 */
1375 if (ppOsSessionPtr)
1376 {
1377 pCur = *ppOsSessionPtr;
1378 if ( pCur
1379 && ( pCur->ppOsSessionPtr != ppOsSessionPtr
1380 || pCur->Process != Process
1381 || pCur->R0Process != R0Process) )
1382 pCur = NULL;
1383 }
1384 else
1385 {
1386 /*
1387 * Otherwise, do the hash table lookup.
1388 */
1389 pCur = pDevExt->apSessionHashTab[iHash];
1390 while ( pCur
1391 && ( pCur->Process != Process
1392 || pCur->R0Process != R0Process) )
1393 pCur = pCur->pCommonNextHash;
1394 }
1395
1396 /*
1397 * Retain the session.
1398 */
1399 if (pCur)
1400 {
1401 uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
1402 NOREF(cRefs);
1403 Assert(cRefs > 1 && cRefs < _1M);
1404 }
1405
1406 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1407
1408 return pCur;
1409}
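
/*
 * Illustrative sketch (hypothetical caller): the lookup above returns a referenced
 * session, so every successful call must be paired with supdrvSessionRelease().
 */
#if 0
    PSUPDRVSESSION pLookedUp = supdrvSessionHashTabLookup(pDevExt, Process, R0Process, NULL /*ppOsSessionPtr*/);
    if (pLookedUp)
    {
        /* ... use the session while the reference is held ... */
        supdrvSessionRelease(pLookedUp);
    }
#endif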
1410
1411
1412/**
1413 * Retain a session to make sure it doesn't go away while it is in use.
1414 *
1415 * @returns New reference count on success, UINT32_MAX on failure.
1416 * @param pSession Session data.
1417 */
1418uint32_t VBOXCALL supdrvSessionRetain(PSUPDRVSESSION pSession)
1419{
1420 uint32_t cRefs;
1421 AssertPtrReturn(pSession, UINT32_MAX);
1422 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1423
1424 cRefs = ASMAtomicIncU32(&pSession->cRefs);
1425 AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1426 return cRefs;
1427}
1428
1429
1430/**
1431 * Releases a given session.
1432 *
1433 * @returns New reference count on success (0 if closed), UINT32_MAX on failure.
1434 * @param pSession Session data.
1435 */
1436uint32_t VBOXCALL supdrvSessionRelease(PSUPDRVSESSION pSession)
1437{
1438 uint32_t cRefs;
1439 AssertPtrReturn(pSession, UINT32_MAX);
1440 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1441
1442 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1443 AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1444 if (cRefs == 0)
1445 supdrvDestroySession(pSession->pDevExt, pSession);
1446 return cRefs;
1447}
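
/*
 * Illustrative sketch (hypothetical caller): keeping a session alive across deferred
 * work by pairing the retain/release helpers above.
 */
#if 0
    if (supdrvSessionRetain(pSession) != UINT32_MAX)
    {
        /* ... queue or hand off work that uses pSession ... */
        /* ... and when that work completes: */
        supdrvSessionRelease(pSession);
    }
#endif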
1448
1449
1450/**
1451 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1452 *
1453 * @returns IPRT status code, see SUPR0ObjAddRef.
1454 * @param hHandleTable The handle table handle. Ignored.
1455 * @param pvObj The object pointer.
1456 * @param pvCtx Context, the handle type. Ignored.
1457 * @param pvUser Session pointer.
1458 */
1459static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
1460{
1461 NOREF(pvCtx);
1462 NOREF(hHandleTable);
1463 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
1464}
1465
1466
1467/**
1468 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1469 *
1470 * @param hHandleTable The handle table handle. Ignored.
1471 * @param h The handle value. Ignored.
1472 * @param pvObj The object pointer.
1473 * @param pvCtx Context, the handle type. Ignored.
1474 * @param pvUser Session pointer.
1475 */
1476static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
1477{
1478 NOREF(pvCtx);
1479 NOREF(h);
1480 NOREF(hHandleTable);
1481 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
1482}
1483
1484
1485/**
1486 * Fast path I/O Control worker.
1487 *
1488 * @returns VBox status code that should be passed down to ring-3 unchanged.
1489 * @param uOperation SUP_VMMR0_DO_XXX (not the I/O control number!).
1490 * @param idCpu VMCPU id.
1491 * @param   pDevExt     Device extension.
1492 * @param pSession Session data.
1493 */
1494int VBOXCALL supdrvIOCtlFast(uintptr_t uOperation, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1495{
1496 /*
1497 * Validate input and check that the VM has a session.
1498 */
1499 if (RT_LIKELY(RT_VALID_PTR(pSession)))
1500 {
1501 PVM pVM = pSession->pSessionVM;
1502 PGVM pGVM = pSession->pSessionGVM;
1503 if (RT_LIKELY( pGVM != NULL
1504 && pVM != NULL
1505 && pVM == pSession->pFastIoCtrlVM))
1506 {
1507 if (RT_LIKELY(pDevExt->pfnVMMR0EntryFast))
1508 {
1509 /*
1510 * Make the call.
1511 */
1512 pDevExt->pfnVMMR0EntryFast(pGVM, pVM, idCpu, uOperation);
1513 return VINF_SUCCESS;
1514 }
1515
1516 SUPR0Printf("supdrvIOCtlFast: pfnVMMR0EntryFast is NULL\n");
1517 }
1518 else
1519 SUPR0Printf("supdrvIOCtlFast: Misconfig session: pGVM=%p pVM=%p pFastIoCtrlVM=%p\n",
1520 pGVM, pVM, pSession->pFastIoCtrlVM);
1521 }
1522 else
1523 SUPR0Printf("supdrvIOCtlFast: Bad session pointer %p\n", pSession);
1524 return VERR_INTERNAL_ERROR;
1525}
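
/*
 * Note on the fast path above: the pVM == pSession->pFastIoCtrlVM check can only
 * succeed after ring-3 has issued SUP_IOCTL_SET_VM_FOR_FAST (handled further down
 * in supdrvIOCtlInnerUnrestricted), which is what sets pSession->pFastIoCtrlVM.
 */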
1526
1527
1528/**
1529 * Helper for supdrvIOCtl used to validate module names passed to SUP_IOCTL_LDR_OPEN.
1530 *
1531 * Check if pszStr contains any character of pszChars. We would use strpbrk
1532 * here if this function would be contained in the RedHat kABI white list, see
1533 * http://www.kerneldrivers.org/RHEL5.
1534 *
1535 * @returns true if fine, false if not.
1536 * @param pszName The module name to check.
1537 */
1538static bool supdrvIsLdrModuleNameValid(const char *pszName)
1539{
1540 int chCur;
1541 while ((chCur = *pszName++) != '\0')
1542 {
1543 static const char s_szInvalidChars[] = ";:()[]{}/\\|&*%#@!~`\"'";
1544 unsigned offInv = RT_ELEMENTS(s_szInvalidChars);
1545 while (offInv-- > 0)
1546 if (s_szInvalidChars[offInv] == chCur)
1547 return false;
1548 }
1549 return true;
1550}
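
/*
 * Illustrative results for the check above (the module names are made-up examples):
 */
#if 0
    Assert( supdrvIsLdrModuleNameValid("VBoxDDR0"));     /* plain name: accepted */
    Assert( supdrvIsLdrModuleNameValid("VMMR0.r0"));     /* '.' is not in s_szInvalidChars */
    Assert(!supdrvIsLdrModuleNameValid("VMMR0;evil"));   /* ';' is rejected */
    Assert(!supdrvIsLdrModuleNameValid("..\\VMMR0"));    /* '\\' is rejected */
#endif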
1551
1552
1553
1554/**
1555 * I/O Control inner worker (tracing reasons).
1556 *
1557 * @returns IPRT status code.
1558 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1559 *
1560 * @param uIOCtl Function number.
1561 * @param   pDevExt     Device extension.
1562 * @param pSession Session data.
1563 * @param pReqHdr The request header.
1564 */
1565static int supdrvIOCtlInnerUnrestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1566{
1567 /*
1568 * Validation macros
1569 */
1570#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1571 do { \
1572 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1573 { \
1574 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1575 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1576 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1577 } \
1578 } while (0)
1579
1580#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1581
1582#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1583 do { \
1584 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1585 { \
1586 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1587 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
1588 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1589 } \
1590 } while (0)
1591
1592#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1593 do { \
1594 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1595 { \
1596 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1597 (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1598 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1599 } \
1600 } while (0)
1601
1602#define REQ_CHECK_EXPR(Name, expr) \
1603 do { \
1604 if (RT_UNLIKELY(!(expr))) \
1605 { \
1606 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1607 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1608 } \
1609 } while (0)
1610
1611#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1612 do { \
1613 if (RT_UNLIKELY(!(expr))) \
1614 { \
1615 OSDBGPRINT( fmt ); \
1616 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1617 } \
1618 } while (0)
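
    /*
     * Illustrative expansion (assuming the usual SUP_IOCTL_XXX_SIZE_IN/_SIZE_OUT
     * defines from SUPDrvIOC.h): REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK) becomes
     * roughly
     *
     *   if (RT_UNLIKELY(   pReqHdr->cbIn  != SUP_IOCTL_PAGE_UNLOCK_SIZE_IN
     *                   || pReqHdr->cbOut != SUP_IOCTL_PAGE_UNLOCK_SIZE_OUT))
     *   {
     *       OSDBGPRINT(("SUP_IOCTL_PAGE_UNLOCK: Invalid input/output sizes. ..."));
     *       return pReqHdr->rc = VERR_INVALID_PARAMETER;
     *   }
     */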
1619
1620 /*
1621 * The switch.
1622 */
1623 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1624 {
1625 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1626 {
1627 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1628 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1629 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1630 {
1631 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1632 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1633 return 0;
1634 }
1635
1636#if 0
1637 /*
1638 * Call out to the OS specific code and let it do permission checks on the
1639 * client process.
1640 */
1641 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1642 {
1643 pReq->u.Out.u32Cookie = 0xffffffff;
1644 pReq->u.Out.u32SessionCookie = 0xffffffff;
1645 pReq->u.Out.u32SessionVersion = 0xffffffff;
1646 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1647 pReq->u.Out.pSession = NULL;
1648 pReq->u.Out.cFunctions = 0;
1649 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1650 return 0;
1651 }
1652#endif
1653
1654 /*
1655 * Match the version.
1656             * The current logic is very simple: match the major interface version.
1657 */
1658 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1659 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1660 {
1661 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1662 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1663 pReq->u.Out.u32Cookie = 0xffffffff;
1664 pReq->u.Out.u32SessionCookie = 0xffffffff;
1665 pReq->u.Out.u32SessionVersion = 0xffffffff;
1666 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1667 pReq->u.Out.pSession = NULL;
1668 pReq->u.Out.cFunctions = 0;
1669 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1670 return 0;
1671 }
1672
1673 /*
1674 * Fill in return data and be gone.
1675             * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1676 * u32SessionVersion <= u32ReqVersion!
1677 */
1678 /** @todo Somehow validate the client and negotiate a secure cookie... */
1679 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1680 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1681 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1682 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1683 pReq->u.Out.pSession = pSession;
1684 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1685 pReq->Hdr.rc = VINF_SUCCESS;
1686 return 0;
1687 }
1688
1689 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1690 {
1691 /* validate */
1692 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1693 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1694
1695 /* execute */
1696 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1697 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1698 pReq->Hdr.rc = VINF_SUCCESS;
1699 return 0;
1700 }
1701
1702 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1703 {
1704 /* validate */
1705 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1706 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1707 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1708 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1709 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1710
1711 /* execute */
1712 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1713 if (RT_FAILURE(pReq->Hdr.rc))
1714 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1715 return 0;
1716 }
1717
1718 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1719 {
1720 /* validate */
1721 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1722 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1723
1724 /* execute */
1725 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1726 return 0;
1727 }
1728
1729 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1730 {
1731 /* validate */
1732 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1733 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1734
1735 /* execute */
1736 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1737 if (RT_FAILURE(pReq->Hdr.rc))
1738 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1739 return 0;
1740 }
1741
1742 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1743 {
1744 /* validate */
1745 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1746 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1747
1748 /* execute */
1749 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1750 return 0;
1751 }
1752
1753 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1754 {
1755 /* validate */
1756 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1757 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1758 if ( pReq->u.In.cbImageWithEverything != 0
1759 || pReq->u.In.cbImageBits != 0)
1760 {
1761 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything > 0);
1762 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything < 16*_1M);
1763 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1764 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithEverything);
1765 }
1766 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1767 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
1768 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, supdrvIsLdrModuleNameValid(pReq->u.In.szName));
1769 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szFilename, sizeof(pReq->u.In.szFilename)));
1770
1771 /* execute */
1772 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1773 return 0;
1774 }
1775
1776 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1777 {
1778 /* validate */
1779 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1780            REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= SUP_IOCTL_LDR_LOAD_SIZE_IN(32));
1781 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithEverything), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1782 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1783 || ( pReq->u.In.cSymbols <= 16384
1784 && pReq->u.In.offSymbols >= pReq->u.In.cbImageBits
1785 && pReq->u.In.offSymbols < pReq->u.In.cbImageWithEverything
1786 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithEverything),
1787 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSymbols,
1788 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithEverything));
1789 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1790 || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithEverything
1791 && pReq->u.In.offStrTab >= pReq->u.In.cbImageBits
1792 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything
1793 && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything),
1794 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offStrTab,
1795 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithEverything));
1796 REQ_CHECK_EXPR_FMT( pReq->u.In.cSegments >= 1
1797 && pReq->u.In.cSegments <= 128
1798 && pReq->u.In.cSegments <= (pReq->u.In.cbImageBits + PAGE_SIZE - 1) / PAGE_SIZE
1799 && pReq->u.In.offSegments >= pReq->u.In.cbImageBits
1800 && pReq->u.In.offSegments < pReq->u.In.cbImageWithEverything
1801 && pReq->u.In.offSegments + pReq->u.In.cSegments * sizeof(SUPLDRSEG) <= pReq->u.In.cbImageWithEverything,
1802 ("SUP_IOCTL_LDR_LOAD: offSegments=%#lx cSegments=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSegments,
1803 (long)pReq->u.In.cSegments, (long)pReq->u.In.cbImageWithEverything));
1804
1805 if (pReq->u.In.cSymbols)
1806 {
1807 uint32_t i;
1808 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.abImage[pReq->u.In.offSymbols];
1809 for (i = 0; i < pReq->u.In.cSymbols; i++)
1810 {
1811 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithEverything,
1812 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithEverything));
1813 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1814 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1815 REQ_CHECK_EXPR_FMT(RTStrEnd((char const *)&pReq->u.In.abImage[pReq->u.In.offStrTab + paSyms[i].offName],
1816 pReq->u.In.cbStrTab - paSyms[i].offName),
1817 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1818 }
1819 }
1820 {
1821 uint32_t i;
1822 uint32_t offPrevEnd = 0;
1823 PSUPLDRSEG paSegs = (PSUPLDRSEG)&pReq->u.In.abImage[pReq->u.In.offSegments];
1824 for (i = 0; i < pReq->u.In.cSegments; i++)
1825 {
1826 REQ_CHECK_EXPR_FMT(paSegs[i].off < pReq->u.In.cbImageBits && !(paSegs[i].off & PAGE_OFFSET_MASK),
1827 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)pReq->u.In.cbImageBits));
1828 REQ_CHECK_EXPR_FMT(paSegs[i].cb <= pReq->u.In.cbImageBits,
1829 ("SUP_IOCTL_LDR_LOAD: seg #%ld: cb %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].cb, (long)pReq->u.In.cbImageBits));
1830 REQ_CHECK_EXPR_FMT(paSegs[i].off + paSegs[i].cb <= pReq->u.In.cbImageBits,
1831 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx = %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb, (long)(paSegs[i].off + paSegs[i].cb), (long)pReq->u.In.cbImageBits));
1832 REQ_CHECK_EXPR_FMT(paSegs[i].fProt != 0,
1833 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb));
1834 REQ_CHECK_EXPR_FMT(paSegs[i].fUnused == 0, ("SUP_IOCTL_LDR_LOAD: seg #%ld: fUnused=1\n", (long)i));
1835 REQ_CHECK_EXPR_FMT(offPrevEnd == paSegs[i].off,
1836 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx offPrevEnd %#lx\n", (long)i, (long)paSegs[i].off, (long)offPrevEnd));
1837 offPrevEnd = paSegs[i].off + paSegs[i].cb;
1838 }
1839 REQ_CHECK_EXPR_FMT(offPrevEnd == pReq->u.In.cbImageBits,
1840 ("SUP_IOCTL_LDR_LOAD: offPrevEnd %#lx cbImageBits %#lx\n", (long)i, (long)offPrevEnd, (long)pReq->u.In.cbImageBits));
1841 }
1842 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fFlags & ~SUPLDRLOAD_F_VALID_MASK),
1843 ("SUP_IOCTL_LDR_LOAD: fFlags=%#x\n", (unsigned)pReq->u.In.fFlags));
1844
1845 /* execute */
1846 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1847 return 0;
1848 }
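
        /*
         * Illustrative layout (derived from the checks above): the request buffer is
         * expected to look like
         *
         *    abImage[0 .. cbImageBits-1]                            the image bits proper
         *    abImage[offSymbols .. +cSymbols*sizeof(SUPLDRSYM)-1]   the symbol table
         *    abImage[offStrTab .. offStrTab+cbStrTab-1]             the symbol string table
         *    abImage[offSegments .. +cSegments*sizeof(SUPLDRSEG)-1] the segment table
         *
         * with all three tables starting at or after cbImageBits and ending no later
         * than cbImageWithEverything.
         */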
1849
1850 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1851 {
1852 /* validate */
1853 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1854 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1855
1856 /* execute */
1857 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1858 return 0;
1859 }
1860
1861 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOCK_DOWN):
1862 {
1863 /* validate */
1864 REQ_CHECK_SIZES(SUP_IOCTL_LDR_LOCK_DOWN);
1865
1866 /* execute */
1867 pReqHdr->rc = supdrvIOCtl_LdrLockDown(pDevExt);
1868 return 0;
1869 }
1870
1871 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1872 {
1873 /* validate */
1874 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1875 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1876 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, RTStrEnd(pReq->u.In.szSymbol, sizeof(pReq->u.In.szSymbol)));
1877
1878 /* execute */
1879 pReq->Hdr.rc = supdrvIOCtl_LdrQuerySymbol(pDevExt, pSession, pReq);
1880 return 0;
1881 }
1882
1883 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_NO_SIZE()):
1884 {
1885 /* validate */
1886 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1887 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1888 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1889
1890 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1891 {
1892 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1893
1894 /* execute */
1895 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1896 {
1897 if (pReq->u.In.pVMR0 == NULL)
1898 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1899 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1900 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1901 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1902 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1903 else
1904 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1905 }
1906 else
1907 pReq->Hdr.rc = VERR_WRONG_ORDER;
1908 }
1909 else
1910 {
1911 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1912 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1913 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1914 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1915 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1916
1917 /* execute */
1918 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1919 {
1920 if (pReq->u.In.pVMR0 == NULL)
1921 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1922 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1923 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1924 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1925 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1926 else
1927 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1928 }
1929 else
1930 pReq->Hdr.rc = VERR_WRONG_ORDER;
1931 }
1932
1933 if ( RT_FAILURE(pReq->Hdr.rc)
1934 && pReq->Hdr.rc != VERR_INTERRUPTED
1935 && pReq->Hdr.rc != VERR_TIMEOUT)
1936 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1937 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1938 else
1939 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1940 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1941 return 0;
1942 }
1943
1944 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_BIG):
1945 {
1946 /* validate */
1947 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1948 PSUPVMMR0REQHDR pVMMReq;
1949 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1950 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1951
1952 pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1953 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR)),
1954 ("SUP_IOCTL_CALL_VMMR0_BIG: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR))));
1955 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0_BIG, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1956 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0_BIG, SUP_IOCTL_CALL_VMMR0_BIG_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_BIG_SIZE_OUT(pVMMReq->cbReq));
1957
1958 /* execute */
1959 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1960 {
1961 if (pReq->u.In.pVMR0 == NULL)
1962 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1963 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1964 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1965 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1966 else
1967 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1968 }
1969 else
1970 pReq->Hdr.rc = VERR_WRONG_ORDER;
1971
1972 if ( RT_FAILURE(pReq->Hdr.rc)
1973 && pReq->Hdr.rc != VERR_INTERRUPTED
1974 && pReq->Hdr.rc != VERR_TIMEOUT)
1975 Log(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1976 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1977 else
1978 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1979 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1980 return 0;
1981 }
1982
1983 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1984 {
1985 /* validate */
1986 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1987 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1988
1989 /* execute */
1990 pReq->Hdr.rc = VINF_SUCCESS;
1991 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1992 return 0;
1993 }
1994
1995 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1996 {
1997 /* validate */
1998 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1999 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
2000 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
2001
2002 /* execute */
2003 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
2004 if (RT_FAILURE(pReq->Hdr.rc))
2005 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2006 return 0;
2007 }
2008
2009 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
2010 {
2011 /* validate */
2012 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
2013 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
2014
2015 /* execute */
2016 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
2017 return 0;
2018 }
2019
2020 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
2021 {
2022 /* validate */
2023 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
2024 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
2025
2026 /* execute */
2027 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
2028 if (RT_SUCCESS(pReq->Hdr.rc))
2029 pReq->u.Out.pGipR0 = pDevExt->pGip;
2030 return 0;
2031 }
2032
2033 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
2034 {
2035 /* validate */
2036 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
2037 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
2038
2039 /* execute */
2040 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
2041 return 0;
2042 }
2043
2044 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
2045 {
2046 /* validate */
2047 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
2048 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
2049 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
2050 || ( RT_VALID_PTR(pReq->u.In.pVMR0)
2051 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
2052 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
2053
2054 /* execute */
2055 RTSpinlockAcquire(pDevExt->Spinlock);
2056 if (pSession->pSessionVM == pReq->u.In.pVMR0)
2057 {
2058 if (pSession->pFastIoCtrlVM == NULL)
2059 {
2060 pSession->pFastIoCtrlVM = pSession->pSessionVM;
2061 RTSpinlockRelease(pDevExt->Spinlock);
2062 pReq->Hdr.rc = VINF_SUCCESS;
2063 }
2064 else
2065 {
2066 RTSpinlockRelease(pDevExt->Spinlock);
2067 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pFastIoCtrlVM=%p! (pVMR0=%p)\n",
2068 pSession->pFastIoCtrlVM, pReq->u.In.pVMR0));
2069 pReq->Hdr.rc = VERR_ALREADY_EXISTS;
2070 }
2071 }
2072 else
2073 {
2074 RTSpinlockRelease(pDevExt->Spinlock);
2075 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pSessionVM=%p vs pVMR0=%p)\n",
2076 pSession->pSessionVM, pReq->u.In.pVMR0));
2077 pReq->Hdr.rc = pSession->pSessionVM ? VERR_ACCESS_DENIED : VERR_WRONG_ORDER;
2078 }
2079 return 0;
2080 }
2081
2082 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
2083 {
2084 /* validate */
2085 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
2086 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
2087 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
2088 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
2089 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
2090 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
2091 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
2092 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
2093 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
2094
2095 /* execute */
2096 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
2097 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
2098 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
2099 &pReq->u.Out.aPages[0]);
2100 if (RT_FAILURE(pReq->Hdr.rc))
2101 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2102 return 0;
2103 }
2104
2105 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
2106 {
2107 /* validate */
2108 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
2109 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
2110 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
2111 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
2112 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2113 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
2114
2115 /* execute */
2116 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
2117 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
2118 if (RT_FAILURE(pReq->Hdr.rc))
2119 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2120 return 0;
2121 }
2122
2123 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
2124 {
2125 /* validate */
2126 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
2127 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
2128 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
2129 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
2130 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
2131 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2132 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
2133
2134 /* execute */
2135 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
2136 return 0;
2137 }
2138
2139 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
2140 {
2141 /* validate */
2142 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
2143 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
2144
2145 /* execute */
2146 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
2147 return 0;
2148 }
2149
2150 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE_NO_SIZE()):
2151 {
2152 /* validate */
2153 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
2154 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2155 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2156
2157 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
2158 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
2159 else
2160 {
2161 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
2162 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
2163 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
2164 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
2165 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
2166 }
2167 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
2168
2169 /* execute */
2170 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
2171 return 0;
2172 }
2173
2174 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS_NO_SIZE()):
2175 {
2176 /* validate */
2177 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
2178 size_t cbStrTab;
2179 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
2180 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
2181 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
2182 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
2183 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
2184 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
2185 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
2186 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
2187 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
2188 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
2189 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
2190
2191 /* execute */
2192 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pReq);
2193 return 0;
2194 }
2195
2196 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP2):
2197 {
2198 /* validate */
2199 PSUPSEMOP2 pReq = (PSUPSEMOP2)pReqHdr;
2200 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP2, SUP_IOCTL_SEM_OP2_SIZE_IN, SUP_IOCTL_SEM_OP2_SIZE_OUT);
2201 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP2, pReq->u.In.uReserved == 0);
2202
2203 /* execute */
2204 switch (pReq->u.In.uType)
2205 {
2206 case SUP_SEM_TYPE_EVENT:
2207 {
2208 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2209 switch (pReq->u.In.uOp)
2210 {
2211 case SUPSEMOP2_WAIT_MS_REL:
2212 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.uArg.cRelMsTimeout);
2213 break;
2214 case SUPSEMOP2_WAIT_NS_ABS:
2215 pReq->Hdr.rc = SUPSemEventWaitNsAbsIntr(pSession, hEvent, pReq->u.In.uArg.uAbsNsTimeout);
2216 break;
2217 case SUPSEMOP2_WAIT_NS_REL:
2218 pReq->Hdr.rc = SUPSemEventWaitNsRelIntr(pSession, hEvent, pReq->u.In.uArg.cRelNsTimeout);
2219 break;
2220 case SUPSEMOP2_SIGNAL:
2221 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
2222 break;
2223 case SUPSEMOP2_CLOSE:
2224 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
2225 break;
2226 case SUPSEMOP2_RESET:
2227 default:
2228 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2229 break;
2230 }
2231 break;
2232 }
2233
2234 case SUP_SEM_TYPE_EVENT_MULTI:
2235 {
2236 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2237 switch (pReq->u.In.uOp)
2238 {
2239 case SUPSEMOP2_WAIT_MS_REL:
2240 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.uArg.cRelMsTimeout);
2241 break;
2242 case SUPSEMOP2_WAIT_NS_ABS:
2243 pReq->Hdr.rc = SUPSemEventMultiWaitNsAbsIntr(pSession, hEventMulti, pReq->u.In.uArg.uAbsNsTimeout);
2244 break;
2245 case SUPSEMOP2_WAIT_NS_REL:
2246 pReq->Hdr.rc = SUPSemEventMultiWaitNsRelIntr(pSession, hEventMulti, pReq->u.In.uArg.cRelNsTimeout);
2247 break;
2248 case SUPSEMOP2_SIGNAL:
2249 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
2250 break;
2251 case SUPSEMOP2_CLOSE:
2252 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
2253 break;
2254 case SUPSEMOP2_RESET:
2255 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
2256 break;
2257 default:
2258 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2259 break;
2260 }
2261 break;
2262 }
2263
2264 default:
2265 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2266 break;
2267 }
2268 return 0;
2269 }
2270
2271 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP3):
2272 {
2273 /* validate */
2274 PSUPSEMOP3 pReq = (PSUPSEMOP3)pReqHdr;
2275 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP3, SUP_IOCTL_SEM_OP3_SIZE_IN, SUP_IOCTL_SEM_OP3_SIZE_OUT);
2276 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, pReq->u.In.u32Reserved == 0 && pReq->u.In.u64Reserved == 0);
2277
2278 /* execute */
2279 switch (pReq->u.In.uType)
2280 {
2281 case SUP_SEM_TYPE_EVENT:
2282 {
2283 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2284 switch (pReq->u.In.uOp)
2285 {
2286 case SUPSEMOP3_CREATE:
2287 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2288 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
2289 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
2290 break;
2291 case SUPSEMOP3_GET_RESOLUTION:
2292 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2293 pReq->Hdr.rc = VINF_SUCCESS;
2294 pReq->Hdr.cbOut = sizeof(*pReq);
2295 pReq->u.Out.cNsResolution = SUPSemEventGetResolution(pSession);
2296 break;
2297 default:
2298 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2299 break;
2300 }
2301 break;
2302 }
2303
2304 case SUP_SEM_TYPE_EVENT_MULTI:
2305 {
2306 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2307 switch (pReq->u.In.uOp)
2308 {
2309 case SUPSEMOP3_CREATE:
2310 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2311 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
2312 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
2313 break;
2314 case SUPSEMOP3_GET_RESOLUTION:
2315 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2316 pReq->Hdr.rc = VINF_SUCCESS;
2317 pReq->u.Out.cNsResolution = SUPSemEventMultiGetResolution(pSession);
2318 break;
2319 default:
2320 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2321 break;
2322 }
2323 break;
2324 }
2325
2326 default:
2327 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2328 break;
2329 }
2330 return 0;
2331 }
2332
2333 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2334 {
2335 /* validate */
2336 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2337 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2338
2339 /* execute */
2340 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2341 if (RT_FAILURE(pReq->Hdr.rc))
2342 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2343 return 0;
2344 }
2345
2346 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_OPEN):
2347 {
2348 /* validate */
2349 PSUPTRACEROPEN pReq = (PSUPTRACEROPEN)pReqHdr;
2350 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_OPEN);
2351
2352 /* execute */
2353 pReq->Hdr.rc = supdrvIOCtl_TracerOpen(pDevExt, pSession, pReq->u.In.uCookie, pReq->u.In.uArg);
2354 return 0;
2355 }
2356
2357 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_CLOSE):
2358 {
2359 /* validate */
2360 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_CLOSE);
2361
2362 /* execute */
2363 pReqHdr->rc = supdrvIOCtl_TracerClose(pDevExt, pSession);
2364 return 0;
2365 }
2366
2367 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_IOCTL):
2368 {
2369 /* validate */
2370 PSUPTRACERIOCTL pReq = (PSUPTRACERIOCTL)pReqHdr;
2371 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_IOCTL);
2372
2373 /* execute */
2374 pReqHdr->rc = supdrvIOCtl_TracerIOCtl(pDevExt, pSession, pReq->u.In.uCmd, pReq->u.In.uArg, &pReq->u.Out.iRetVal);
2375 return 0;
2376 }
2377
2378 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_REG):
2379 {
2380 /* validate */
2381 PSUPTRACERUMODREG pReq = (PSUPTRACERUMODREG)pReqHdr;
2382 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_REG);
2383 if (!RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)))
2384 return VERR_INVALID_PARAMETER;
2385
2386 /* execute */
2387 pReqHdr->rc = supdrvIOCtl_TracerUmodRegister(pDevExt, pSession,
2388 pReq->u.In.R3PtrVtgHdr, pReq->u.In.uVtgHdrAddr,
2389 pReq->u.In.R3PtrStrTab, pReq->u.In.cbStrTab,
2390 pReq->u.In.szName, pReq->u.In.fFlags);
2391 return 0;
2392 }
2393
2394 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_DEREG):
2395 {
2396 /* validate */
2397 PSUPTRACERUMODDEREG pReq = (PSUPTRACERUMODDEREG)pReqHdr;
2398 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_DEREG);
2399
2400 /* execute */
2401 pReqHdr->rc = supdrvIOCtl_TracerUmodDeregister(pDevExt, pSession, pReq->u.In.pVtgHdr);
2402 return 0;
2403 }
2404
2405 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE):
2406 {
2407 /* validate */
2408 PSUPTRACERUMODFIREPROBE pReq = (PSUPTRACERUMODFIREPROBE)pReqHdr;
2409 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE);
2410
2411 supdrvIOCtl_TracerUmodProbeFire(pDevExt, pSession, &pReq->u.In);
2412 pReqHdr->rc = VINF_SUCCESS;
2413 return 0;
2414 }
2415
2416 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_MSR_PROBER):
2417 {
2418 /* validate */
2419 PSUPMSRPROBER pReq = (PSUPMSRPROBER)pReqHdr;
2420 REQ_CHECK_SIZES(SUP_IOCTL_MSR_PROBER);
2421 REQ_CHECK_EXPR(SUP_IOCTL_MSR_PROBER,
2422 pReq->u.In.enmOp > SUPMSRPROBEROP_INVALID && pReq->u.In.enmOp < SUPMSRPROBEROP_END);
2423
2424 pReqHdr->rc = supdrvIOCtl_MsrProber(pDevExt, pReq);
2425 return 0;
2426 }
2427
2428 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_RESUME_SUSPENDED_KBDS):
2429 {
2430 /* validate */
2431 REQ_CHECK_SIZES(SUP_IOCTL_RESUME_SUSPENDED_KBDS);
2432
2433 pReqHdr->rc = supdrvIOCtl_ResumeSuspendedKbds();
2434 return 0;
2435 }
2436
2437 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_DELTA_MEASURE):
2438 {
2439 /* validate */
2440 PSUPTSCDELTAMEASURE pReq = (PSUPTSCDELTAMEASURE)pReqHdr;
2441 REQ_CHECK_SIZES(SUP_IOCTL_TSC_DELTA_MEASURE);
2442
2443 pReqHdr->rc = supdrvIOCtl_TscDeltaMeasure(pDevExt, pSession, pReq);
2444 return 0;
2445 }
2446
2447 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_READ):
2448 {
2449 /* validate */
2450 PSUPTSCREAD pReq = (PSUPTSCREAD)pReqHdr;
2451 REQ_CHECK_SIZES(SUP_IOCTL_TSC_READ);
2452
2453 pReqHdr->rc = supdrvIOCtl_TscRead(pDevExt, pSession, pReq);
2454 return 0;
2455 }
2456
2457 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_SET_FLAGS):
2458 {
2459 /* validate */
2460 PSUPGIPSETFLAGS pReq = (PSUPGIPSETFLAGS)pReqHdr;
2461 REQ_CHECK_SIZES(SUP_IOCTL_GIP_SET_FLAGS);
2462
2463 pReqHdr->rc = supdrvIOCtl_GipSetFlags(pDevExt, pSession, pReq->u.In.fOrMask, pReq->u.In.fAndMask);
2464 return 0;
2465 }
2466
2467 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_UCODE_REV):
2468 {
2469 /* validate */
2470 PSUPUCODEREV pReq = (PSUPUCODEREV)pReqHdr;
2471 REQ_CHECK_SIZES(SUP_IOCTL_UCODE_REV);
2472
2473 /* execute */
2474 pReq->Hdr.rc = SUPR0QueryUcodeRev(pSession, &pReq->u.Out.MicrocodeRev);
2475 if (RT_FAILURE(pReq->Hdr.rc))
2476 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2477 return 0;
2478 }
2479
2480 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_HWVIRT_MSRS):
2481 {
2482 /* validate */
2483 PSUPGETHWVIRTMSRS pReq = (PSUPGETHWVIRTMSRS)pReqHdr;
2484 REQ_CHECK_SIZES(SUP_IOCTL_GET_HWVIRT_MSRS);
2485 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1 && !pReq->u.In.fReserved2,
2486 ("SUP_IOCTL_GET_HWVIRT_MSRS: fReserved0=%d fReserved1=%d fReserved2=%d\n", pReq->u.In.fReserved0,
2487 pReq->u.In.fReserved1, pReq->u.In.fReserved2));
2488
2489 /* execute */
2490 pReq->Hdr.rc = SUPR0GetHwvirtMsrs(&pReq->u.Out.HwvirtMsrs, 0 /* fCaps */, pReq->u.In.fForce);
2491 if (RT_FAILURE(pReq->Hdr.rc))
2492 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2493 return 0;
2494 }
2495
2496 default:
2497 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2498 break;
2499 }
2500 return VERR_GENERAL_FAILURE;
2501}
2502
2503
2504/**
2505 * I/O Control inner worker for the restricted operations.
2506 *
2507 * @returns IPRT status code.
2508 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2509 *
2510 * @param uIOCtl Function number.
2511 * @param   pDevExt     Device extension.
2512 * @param pSession Session data.
2513 * @param pReqHdr The request header.
2514 */
2515static int supdrvIOCtlInnerRestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
2516{
2517 /*
2518 * The switch.
2519 */
2520 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
2521 {
2522 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
2523 {
2524 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
2525 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
2526 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
2527 {
2528 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
2529 pReq->Hdr.rc = VERR_INVALID_MAGIC;
2530 return 0;
2531 }
2532
2533 /*
2534 * Match the version.
2535             * The current logic is very simple: match the major interface version.
2536 */
2537 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
2538 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
2539 {
2540 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2541 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
2542 pReq->u.Out.u32Cookie = 0xffffffff;
2543 pReq->u.Out.u32SessionCookie = 0xffffffff;
2544 pReq->u.Out.u32SessionVersion = 0xffffffff;
2545 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2546 pReq->u.Out.pSession = NULL;
2547 pReq->u.Out.cFunctions = 0;
2548 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2549 return 0;
2550 }
2551
2552 /*
2553 * Fill in return data and be gone.
2554             * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
2555 * u32SessionVersion <= u32ReqVersion!
2556 */
2557 /** @todo Somehow validate the client and negotiate a secure cookie... */
2558 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
2559 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
2560 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
2561 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2562 pReq->u.Out.pSession = pSession;
2563 pReq->u.Out.cFunctions = 0;
2564 pReq->Hdr.rc = VINF_SUCCESS;
2565 return 0;
2566 }
2567
2568 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2569 {
2570 /* validate */
2571 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2572 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2573
2574 /* execute */
2575 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2576 if (RT_FAILURE(pReq->Hdr.rc))
2577 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2578 return 0;
2579 }
2580
2581 default:
2582 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2583 break;
2584 }
2585 return VERR_GENERAL_FAILURE;
2586}
2587
2588
2589/**
2590 * I/O Control worker.
2591 *
2592 * @returns IPRT status code.
2593 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2594 *
2595 * @param uIOCtl Function number.
2596 * @param   pDevExt     Device extension.
2597 * @param pSession Session data.
2598 * @param pReqHdr The request header.
2599 * @param cbReq The size of the request buffer.
2600 */
2601int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr, size_t cbReq)
2602{
2603 int rc;
2604 VBOXDRV_IOCTL_ENTRY(pSession, uIOCtl, pReqHdr);
2605
2606 /*
2607 * Validate the request.
2608 */
2609 if (RT_UNLIKELY(cbReq < sizeof(*pReqHdr)))
2610 {
2611 OSDBGPRINT(("vboxdrv: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
2612 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2613 return VERR_INVALID_PARAMETER;
2614 }
2615 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
2616 || pReqHdr->cbIn < sizeof(*pReqHdr)
2617 || pReqHdr->cbIn > cbReq
2618 || pReqHdr->cbOut < sizeof(*pReqHdr)
2619 || pReqHdr->cbOut > cbReq))
2620 {
2621 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
2622 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
2623 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2624 return VERR_INVALID_PARAMETER;
2625 }
2626 if (RT_UNLIKELY(!RT_VALID_PTR(pSession)))
2627 {
2628 OSDBGPRINT(("vboxdrv: Invalid pSession value %p (ioctl=%p)\n", pSession, (void *)uIOCtl));
2629 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2630 return VERR_INVALID_PARAMETER;
2631 }
2632 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
2633 {
2634 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
2635 {
2636 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
2637 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2638 return VERR_INVALID_PARAMETER;
2639 }
2640 }
2641 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
2642 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
2643 {
2644 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
2645 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2646 return VERR_INVALID_PARAMETER;
2647 }
2648
2649 /*
2650 * Hand it to an inner function to avoid lots of unnecessary return tracepoints.
2651 */
2652 if (pSession->fUnrestricted)
2653 rc = supdrvIOCtlInnerUnrestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2654 else
2655 rc = supdrvIOCtlInnerRestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2656
2657 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, pReqHdr->rc, rc);
2658 return rc;
2659}
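
/*
 * Illustrative sketch (hypothetical ring-3 side): the header fields the validation
 * in supdrvIOCtl() above expects for a request other than SUP_IOCTL_COOKIE.  The
 * request type, the cookie variables and SUPREQHDR_FLAGS_DEFAULT are assumptions
 * about the ring-3 helpers/headers; only the field names come from the checks above.
 */
#if 0
    SUPPAGEFREE Req;
    Req.Hdr.u32Cookie        = u32DriverCookie;   /* value returned by SUP_IOCTL_COOKIE (hypothetical variable) */
    Req.Hdr.u32SessionCookie = u32SessionCookie;  /* ditto */
    Req.Hdr.cbIn             = SUP_IOCTL_PAGE_FREE_SIZE_IN;
    Req.Hdr.cbOut            = SUP_IOCTL_PAGE_FREE_SIZE_OUT;
    Req.Hdr.fFlags           = SUPREQHDR_FLAGS_DEFAULT; /* must satisfy the SUPREQHDR_FLAGS_MAGIC check */
    Req.Hdr.rc               = VERR_INTERNAL_ERROR;
    Req.u.In.pvR3            = pvR3;              /* the allocation to free (hypothetical variable) */
    /* ... hand &Req.Hdr to the OS specific ioctl path, which ends up in supdrvIOCtl() ... */
#endif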
2660
2661
2662/**
2663 * Inter-Driver Communication (IDC) worker.
2664 *
2665 * @returns VBox status code.
2666 * @retval VINF_SUCCESS on success.
2667 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2668 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
2669 *
2670 * @param uReq The request (function) code.
2671 * @param   pDevExt     Device extension.
2672 * @param pSession Session data.
2673 * @param pReqHdr The request header.
2674 */
2675int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
2676{
2677 /*
2678 * The OS specific code has already validated the pSession
2679     * pointer and that the request size is greater than or equal to
2680     * the size of the header.
2681 *
2682 * So, just check that pSession is a kernel context session.
2683 */
2684 if (RT_UNLIKELY( pSession
2685 && pSession->R0Process != NIL_RTR0PROCESS))
2686 return VERR_INVALID_PARAMETER;
2687
2688/*
2689 * Validation macro.
2690 */
2691#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
2692 do { \
2693 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
2694 { \
2695 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
2696 (long)pReqHdr->cb, (long)(cbExpect))); \
2697 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
2698 } \
2699 } while (0)
2700
2701 switch (uReq)
2702 {
2703 case SUPDRV_IDC_REQ_CONNECT:
2704 {
2705 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
2706 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
2707
2708 /*
2709 * Validate the cookie and other input.
2710 */
2711 if (pReq->Hdr.pSession != NULL)
2712 {
2713 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Hdr.pSession=%p expected NULL!\n", pReq->Hdr.pSession));
2714 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2715 }
2716 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
2717 {
2718 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
2719 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
2720 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2721 }
2722 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
2723 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
2724 {
2725 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
2726 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
2727 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2728 }
2729 if (pSession != NULL)
2730 {
2731 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pSession));
2732 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2733 }
2734
2735 /*
2736 * Match the version.
2737             * The current logic is very simple: match the major interface version.
2738 */
2739 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
2740 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
2741 {
2742 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2743 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
2744 pReq->u.Out.pSession = NULL;
2745 pReq->u.Out.uSessionVersion = 0xffffffff;
2746 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2747 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2748 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2749 return VINF_SUCCESS;
2750 }
2751
2752 pReq->u.Out.pSession = NULL;
2753 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
2754 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2755 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2756
2757 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, true /*fUnrestricted*/, &pSession);
2758 if (RT_FAILURE(pReq->Hdr.rc))
2759 {
2760 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
2761 return VINF_SUCCESS;
2762 }
2763
2764 pReq->u.Out.pSession = pSession;
2765 pReq->Hdr.pSession = pSession;
2766
2767 return VINF_SUCCESS;
2768 }
2769
2770 case SUPDRV_IDC_REQ_DISCONNECT:
2771 {
2772 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
2773
2774 supdrvSessionRelease(pSession);
2775 return pReqHdr->rc = VINF_SUCCESS;
2776 }
2777
2778 case SUPDRV_IDC_REQ_GET_SYMBOL:
2779 {
2780 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
2781 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
2782
2783 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
2784 return VINF_SUCCESS;
2785 }
2786
2787 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
2788 {
2789 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
2790 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
2791
2792 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
2793 return VINF_SUCCESS;
2794 }
2795
2796 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
2797 {
2798 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
2799 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
2800
2801 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
2802 return VINF_SUCCESS;
2803 }
2804
2805 default:
2806 Log(("Unknown IDC %#lx\n", (long)uReq));
2807 break;
2808 }
2809
2810#undef REQ_CHECK_IDC_SIZE
2811 return VERR_NOT_SUPPORTED;
2812}
2813
2814
2815/**
2816  * Register an object for reference counting.
2817 * The object is registered with one reference in the specified session.
2818 *
2819 * @returns Unique identifier on success (pointer).
2820  *          All future references must use this identifier.
2821 * @returns NULL on failure.
2822 * @param pSession The caller's session.
2823 * @param enmType The object type.
2824  * @param   pfnDestructor   The destructor function which will be called when the reference count reaches 0.
2825 * @param pvUser1 The first user argument.
2826 * @param pvUser2 The second user argument.
2827 */
2828SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
2829{
2830 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2831 PSUPDRVOBJ pObj;
2832 PSUPDRVUSAGE pUsage;
2833
2834 /*
2835 * Validate the input.
2836 */
2837 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
2838 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
2839 AssertPtrReturn(pfnDestructor, NULL);
2840
2841 /*
2842 * Allocate and initialize the object.
2843 */
2844 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
2845 if (!pObj)
2846 return NULL;
2847 pObj->u32Magic = SUPDRVOBJ_MAGIC;
2848 pObj->enmType = enmType;
2849 pObj->pNext = NULL;
2850 pObj->cUsage = 1;
2851 pObj->pfnDestructor = pfnDestructor;
2852 pObj->pvUser1 = pvUser1;
2853 pObj->pvUser2 = pvUser2;
2854 pObj->CreatorUid = pSession->Uid;
2855 pObj->CreatorGid = pSession->Gid;
2856 pObj->CreatorProcess= pSession->Process;
2857 supdrvOSObjInitCreator(pObj, pSession);
2858
2859 /*
2860 * Allocate the usage record.
2861 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
2862 */
2863 RTSpinlockAcquire(pDevExt->Spinlock);
2864
2865 pUsage = pDevExt->pUsageFree;
2866 if (pUsage)
2867 pDevExt->pUsageFree = pUsage->pNext;
2868 else
2869 {
2870 RTSpinlockRelease(pDevExt->Spinlock);
2871 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
2872 if (!pUsage)
2873 {
2874 RTMemFree(pObj);
2875 return NULL;
2876 }
2877 RTSpinlockAcquire(pDevExt->Spinlock);
2878 }
2879
2880 /*
2881 * Insert the object and create the session usage record.
2882 */
2883 /* The object. */
2884 pObj->pNext = pDevExt->pObjs;
2885 pDevExt->pObjs = pObj;
2886
2887 /* The session record. */
2888 pUsage->cUsage = 1;
2889 pUsage->pObj = pObj;
2890 pUsage->pNext = pSession->pUsage;
2891 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
2892 pSession->pUsage = pUsage;
2893
2894 RTSpinlockRelease(pDevExt->Spinlock);
2895
2896     Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser2=%p)\n", pObj, pvUser1, pvUser2));
2897 return pObj;
2898}
2899SUPR0_EXPORT_SYMBOL(SUPR0ObjRegister);
2900
2901
2902/**
2903 * Increment the reference counter for the object associating the reference
2904 * with the specified session.
2905 *
2906 * @returns IPRT status code.
2907 * @param pvObj The identifier returned by SUPR0ObjRegister().
2908 * @param pSession The session which is referencing the object.
2909 *
2910 * @remarks The caller should not own any spinlocks and must carefully protect
2911 * itself against potential race with the destructor so freed memory
2912 * isn't accessed here.
2913 */
2914SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
2915{
2916 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
2917}
2918SUPR0_EXPORT_SYMBOL(SUPR0ObjAddRef);
2919
2920
2921/**
2922 * Increment the reference counter for the object associating the reference
2923 * with the specified session.
2924 *
2925 * @returns IPRT status code.
2926 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
2927 * couldn't be allocated. (If you see this you're not doing the right
2928 * thing and it won't ever work reliably.)
2929 *
2930 * @param pvObj The identifier returned by SUPR0ObjRegister().
2931 * @param pSession The session which is referencing the object.
2932 * @param fNoBlocking Set if it's not OK to block. Never try to make the
2933 * first reference to an object in a session with this
2934 * argument set.
2935 *
2936 * @remarks The caller should not own any spinlocks and must carefully protect
2937 * itself against potential race with the destructor so freed memory
2938 * isn't accessed here.
2939 */
2940SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
2941{
2942 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2943 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2944 int rc = VINF_SUCCESS;
2945 PSUPDRVUSAGE pUsagePre;
2946 PSUPDRVUSAGE pUsage;
2947
2948 /*
2949 * Validate the input.
2950 * Be ready for the destruction race (someone might be stuck in the
2951      * destructor waiting on a lock we own).
2952 */
2953 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2954 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
2955 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
2956 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
2957 VERR_INVALID_PARAMETER);
2958
2959 RTSpinlockAcquire(pDevExt->Spinlock);
2960
2961 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2962 {
2963 RTSpinlockRelease(pDevExt->Spinlock);
2964
2965 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2966 return VERR_WRONG_ORDER;
2967 }
2968
2969 /*
2970 * Preallocate the usage record if we can.
2971 */
2972 pUsagePre = pDevExt->pUsageFree;
2973 if (pUsagePre)
2974 pDevExt->pUsageFree = pUsagePre->pNext;
2975 else if (!fNoBlocking)
2976 {
2977 RTSpinlockRelease(pDevExt->Spinlock);
2978 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2979 if (!pUsagePre)
2980 return VERR_NO_MEMORY;
2981
2982 RTSpinlockAcquire(pDevExt->Spinlock);
2983 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2984 {
2985 RTSpinlockRelease(pDevExt->Spinlock);
2986
2987 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2988 return VERR_WRONG_ORDER;
2989 }
2990 }
2991
2992 /*
2993 * Reference the object.
2994 */
2995 pObj->cUsage++;
2996
2997 /*
2998 * Look for the session record.
2999 */
3000 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
3001 {
3002 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3003 if (pUsage->pObj == pObj)
3004 break;
3005 }
3006 if (pUsage)
3007 pUsage->cUsage++;
3008 else if (pUsagePre)
3009 {
3010 /* create a new session record. */
3011 pUsagePre->cUsage = 1;
3012 pUsagePre->pObj = pObj;
3013 pUsagePre->pNext = pSession->pUsage;
3014 pSession->pUsage = pUsagePre;
3015 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
3016
3017 pUsagePre = NULL;
3018 }
3019 else
3020 {
3021 pObj->cUsage--;
3022 rc = VERR_TRY_AGAIN;
3023 }
3024
3025 /*
3026      * Put any unused usage record into the free list.
3027 */
3028 if (pUsagePre)
3029 {
3030 pUsagePre->pNext = pDevExt->pUsageFree;
3031 pDevExt->pUsageFree = pUsagePre;
3032 }
3033
3034 RTSpinlockRelease(pDevExt->Spinlock);
3035
3036 return rc;
3037}
3038SUPR0_EXPORT_SYMBOL(SUPR0ObjAddRefEx);
3039
3040
3041/**
3042 * Decrement / destroy a reference counter record for an object.
3043 *
3044 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
3045 *
3046 * @returns IPRT status code.
3047 * @retval VINF_SUCCESS if not destroyed.
3048 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
3049 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
3050  *          strict builds.
3051 *
3052 * @param pvObj The identifier returned by SUPR0ObjRegister().
3053 * @param pSession The session which is referencing the object.
3054 */
3055SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
3056{
3057 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3058 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3059 int rc = VERR_INVALID_PARAMETER;
3060 PSUPDRVUSAGE pUsage;
3061 PSUPDRVUSAGE pUsagePrev;
3062
3063 /*
3064 * Validate the input.
3065 */
3066 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3067 AssertMsgReturn(RT_VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3068 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3069 VERR_INVALID_PARAMETER);
3070
3071 /*
3072 * Acquire the spinlock and look for the usage record.
3073 */
3074 RTSpinlockAcquire(pDevExt->Spinlock);
3075
3076 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
3077 pUsage;
3078 pUsagePrev = pUsage, pUsage = pUsage->pNext)
3079 {
3080 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3081 if (pUsage->pObj == pObj)
3082 {
3083 rc = VINF_SUCCESS;
3084 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
3085 if (pUsage->cUsage > 1)
3086 {
3087 pObj->cUsage--;
3088 pUsage->cUsage--;
3089 }
3090 else
3091 {
3092 /*
3093 * Free the session record.
3094 */
3095 if (pUsagePrev)
3096 pUsagePrev->pNext = pUsage->pNext;
3097 else
3098 pSession->pUsage = pUsage->pNext;
3099 pUsage->pNext = pDevExt->pUsageFree;
3100 pDevExt->pUsageFree = pUsage;
3101
3102 /* What about the object? */
3103 if (pObj->cUsage > 1)
3104 pObj->cUsage--;
3105 else
3106 {
3107 /*
3108 * Object is to be destroyed, unlink it.
3109 */
3110 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
3111 rc = VINF_OBJECT_DESTROYED;
3112 if (pDevExt->pObjs == pObj)
3113 pDevExt->pObjs = pObj->pNext;
3114 else
3115 {
3116 PSUPDRVOBJ pObjPrev;
3117 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
3118 if (pObjPrev->pNext == pObj)
3119 {
3120 pObjPrev->pNext = pObj->pNext;
3121 break;
3122 }
3123 Assert(pObjPrev);
3124 }
3125 }
3126 }
3127 break;
3128 }
3129 }
3130
3131 RTSpinlockRelease(pDevExt->Spinlock);
3132
3133 /*
3134 * Call the destructor and free the object if required.
3135 */
3136 if (rc == VINF_OBJECT_DESTROYED)
3137 {
3138 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
3139 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
3140 if (pObj->pfnDestructor)
3141 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
3142 RTMemFree(pObj);
3143 }
3144
3145 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
3146 return rc;
3147}
3148SUPR0_EXPORT_SYMBOL(SUPR0ObjRelease);
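/*
 * Illustrative caller sketch (an assumption for documentation purposes, not part of
 * the driver): it shows the intended SUPR0ObjRegister / SUPR0ObjAddRef /
 * SUPR0ObjRelease life cycle.  The destructor, the two sessions and the object type
 * value are made up for the example.
 */
#if 0
static DECLCALLBACK(void) exampleObjDtor(void *pvObj, void *pvUser1, void *pvUser2)
{
    /* Called exactly once, when the last reference is released. */
    NOREF(pvObj); NOREF(pvUser1); NOREF(pvUser2);
}

static int exampleObjLifeCycle(PSUPDRVSESSION pSessionA, PSUPDRVSESSION pSessionB)
{
    /* Register: the object starts out with one reference owned by pSessionA. */
    void *pvObj = SUPR0ObjRegister(pSessionA, SUPDRVOBJTYPE_VM /* illustrative type */, exampleObjDtor, NULL, NULL);
    if (!pvObj)
        return VERR_NO_MEMORY;

    /* A second session takes a reference of its own... */
    int rc = SUPR0ObjAddRef(pvObj, pSessionB);
    if (RT_SUCCESS(rc))
        SUPR0ObjRelease(pvObj, pSessionB); /* ...and drops it again. */

    /* Dropping the creator's reference destroys the object (VINF_OBJECT_DESTROYED). */
    SUPR0ObjRelease(pvObj, pSessionA);
    return rc;
}
#endif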
3149
3150
3151/**
3152 * Verifies that the current process can access the specified object.
3153 *
3154 * @returns The following IPRT status code:
3155 * @retval VINF_SUCCESS if access was granted.
3156 * @retval VERR_PERMISSION_DENIED if denied access.
3157 * @retval VERR_INVALID_PARAMETER if invalid parameter.
3158 *
3159 * @param pvObj The identifier returned by SUPR0ObjRegister().
3160 * @param pSession The session which wishes to access the object.
3161 * @param pszObjName Object string name. This is optional and depends on the object type.
3162 *
3163 * @remark The caller is responsible for making sure the object isn't removed while
3164 * we're inside this function. If uncertain about this, just call AddRef before calling us.
3165 */
3166SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
3167{
3168 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3169 int rc;
3170
3171 /*
3172 * Validate the input.
3173 */
3174 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3175 AssertMsgReturn(RT_VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3176                     ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3177 VERR_INVALID_PARAMETER);
3178
3179 /*
3180 * Check access. (returns true if a decision has been made.)
3181 */
3182 rc = VERR_INTERNAL_ERROR;
3183 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
3184 return rc;
3185
3186 /*
3187 * Default policy is to allow the user to access his own
3188 * stuff but nothing else.
3189 */
3190 if (pObj->CreatorUid == pSession->Uid)
3191 return VINF_SUCCESS;
3192 return VERR_PERMISSION_DENIED;
3193}
3194SUPR0_EXPORT_SYMBOL(SUPR0ObjVerifyAccess);
3195
3196
3197/**
3198 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionVM member.
3199 *
3200 * @returns The associated VM pointer.
3201 * @param pSession The session of the current thread.
3202 */
3203SUPR0DECL(PVM) SUPR0GetSessionVM(PSUPDRVSESSION pSession)
3204{
3205 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3206 return pSession->pSessionVM;
3207}
3208SUPR0_EXPORT_SYMBOL(SUPR0GetSessionVM);
3209
3210
3211/**
3212 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionGVM member.
3213 *
3214 * @returns The associated GVM pointer.
3215 * @param pSession The session of the current thread.
3216 */
3217SUPR0DECL(PGVM) SUPR0GetSessionGVM(PSUPDRVSESSION pSession)
3218{
3219 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3220 return pSession->pSessionGVM;
3221}
3222SUPR0_EXPORT_SYMBOL(SUPR0GetSessionGVM);
3223
3224
3225/**
3226 * API for the VMMR0 module to work the SUPDRVSESSION::pSessionVM member.
3227 *
3228 * This will fail if there is already a VM associated with the session and pVM
3229 * isn't NULL.
3230 *
3231 * @retval VINF_SUCCESS
3232 * @retval VERR_ALREADY_EXISTS if there already is a VM associated with the
3233 * session.
3234 * @retval VERR_INVALID_PARAMETER if only one of the parameters are NULL or if
3235 * the session is invalid.
3236 *
3237 * @param pSession The session of the current thread.
3238 * @param pGVM The GVM to associate with the session. Pass NULL to
3239  *                      disassociate.
3240 * @param pVM The VM to associate with the session. Pass NULL to
3241  *                      disassociate.
3242 */
3243SUPR0DECL(int) SUPR0SetSessionVM(PSUPDRVSESSION pSession, PGVM pGVM, PVM pVM)
3244{
3245 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3246 AssertReturn((pGVM != NULL) == (pVM != NULL), VERR_INVALID_PARAMETER);
3247
3248 RTSpinlockAcquire(pSession->pDevExt->Spinlock);
3249 if (pGVM)
3250 {
3251 if (!pSession->pSessionGVM)
3252 {
3253 pSession->pSessionGVM = pGVM;
3254 pSession->pSessionVM = pVM;
3255 pSession->pFastIoCtrlVM = NULL;
3256 }
3257 else
3258 {
3259 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3260             SUPR0Printf("SUPR0SetSessionVM: Unable to associate GVM/VM %p/%p with session %p as it has %p/%p already!\n",
3261 pGVM, pVM, pSession, pSession->pSessionGVM, pSession->pSessionVM);
3262 return VERR_ALREADY_EXISTS;
3263 }
3264 }
3265 else
3266 {
3267 pSession->pSessionGVM = NULL;
3268 pSession->pSessionVM = NULL;
3269 pSession->pFastIoCtrlVM = NULL;
3270 }
3271 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3272 return VINF_SUCCESS;
3273}
3274SUPR0_EXPORT_SYMBOL(SUPR0SetSessionVM);
3275
3276
3277/** @copydoc RTLogDefaultInstanceEx
3278 * @remarks To allow overriding RTLogDefaultInstanceEx locally. */
3279SUPR0DECL(struct RTLOGGER *) SUPR0DefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3280{
3281 return RTLogDefaultInstanceEx(fFlagsAndGroup);
3282}
3283SUPR0_EXPORT_SYMBOL(SUPR0DefaultLogInstanceEx);
3284
3285
3286/** @copydoc RTLogGetDefaultInstanceEx
3287 * @remarks To allow overriding RTLogGetDefaultInstanceEx locally. */
3288SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3289{
3290 return RTLogGetDefaultInstanceEx(fFlagsAndGroup);
3291}
3292SUPR0_EXPORT_SYMBOL(SUPR0GetDefaultLogInstanceEx);
3293
3294
3295/** @copydoc RTLogRelGetDefaultInstanceEx
3296 * @remarks To allow overriding RTLogRelGetDefaultInstanceEx locally. */
3297SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogRelInstanceEx(uint32_t fFlagsAndGroup)
3298{
3299 return RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
3300}
3301SUPR0_EXPORT_SYMBOL(SUPR0GetDefaultLogRelInstanceEx);
3302
3303
3304/**
3305 * Lock pages.
3306 *
3307 * @returns IPRT status code.
3308 * @param pSession Session to which the locked memory should be associated.
3309 * @param pvR3 Start of the memory range to lock.
3310 * This must be page aligned.
3311 * @param cPages Number of pages to lock.
3312 * @param paPages Where to put the physical addresses of locked memory.
3313 */
3314SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
3315{
3316 int rc;
3317 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3318 const size_t cb = (size_t)cPages << PAGE_SHIFT;
3319 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
3320
3321 /*
3322 * Verify input.
3323 */
3324 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3325 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
3326 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
3327 || !pvR3)
3328 {
3329 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
3330 return VERR_INVALID_PARAMETER;
3331 }
3332
3333 /*
3334 * Let IPRT do the job.
3335 */
3336 Mem.eType = MEMREF_TYPE_LOCKED;
3337 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3338 if (RT_SUCCESS(rc))
3339 {
3340 uint32_t iPage = cPages;
3341 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
3342 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
3343
3344 while (iPage-- > 0)
3345 {
3346 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3347 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
3348 {
3349 AssertMsgFailed(("iPage=%d\n", iPage));
3350 rc = VERR_INTERNAL_ERROR;
3351 break;
3352 }
3353 }
3354 if (RT_SUCCESS(rc))
3355 rc = supdrvMemAdd(&Mem, pSession);
3356 if (RT_FAILURE(rc))
3357 {
3358 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
3359 AssertRC(rc2);
3360 }
3361 }
3362
3363 return rc;
3364}
3365SUPR0_EXPORT_SYMBOL(SUPR0LockMem);
3366
3367
3368/**
3369 * Unlocks the memory pointed to by pv.
3370 *
3371 * @returns IPRT status code.
3372 * @param pSession Session to which the memory was locked.
3373 * @param pvR3 Memory to unlock.
3374 */
3375SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3376{
3377 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3378 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3379 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
3380}
3381SUPR0_EXPORT_SYMBOL(SUPR0UnlockMem);
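/*
 * Minimal caller sketch (assumption, not driver code): locking a page aligned
 * ring-3 buffer, retrieving the physical addresses of its pages, and unlocking
 * it again.  The buffer address and page count are hypothetical parameters.
 */
#if 0
static int exampleLockUserPages(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages)
{
    RTHCPHYS aPhys[16];
    AssertReturn(cPages <= RT_ELEMENTS(aPhys), VERR_BUFFER_OVERFLOW);

    /* pvR3 must be page aligned; the pages stay resident until SUPR0UnlockMem(). */
    int rc = SUPR0LockMem(pSession, pvR3, cPages, &aPhys[0]);
    if (RT_SUCCESS(rc))
    {
        /* ... aPhys[0..cPages-1] can now be handed to hardware / the VMM ... */
        rc = SUPR0UnlockMem(pSession, pvR3);
    }
    return rc;
}
#endif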
3382
3383
3384/**
3385 * Allocates a chunk of page aligned memory with contiguous and fixed physical
3386 * backing.
3387 *
3388 * @returns IPRT status code.
3389 * @param pSession Session data.
3390 * @param cPages Number of pages to allocate.
3391 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
3392 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
3393 * @param pHCPhys Where to put the physical address of allocated memory.
3394 */
3395SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
3396{
3397 int rc;
3398 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3399 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
3400
3401 /*
3402 * Validate input.
3403 */
3404 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3405 if (!ppvR3 || !ppvR0 || !pHCPhys)
3406 {
3407 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
3408 pSession, ppvR0, ppvR3, pHCPhys));
3409 return VERR_INVALID_PARAMETER;
3410
3411 }
3412 if (cPages < 1 || cPages >= 256)
3413 {
3414 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3415 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3416 }
3417
3418 /*
3419 * Let IPRT do the job.
3420 */
3421 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
3422 if (RT_SUCCESS(rc))
3423 {
3424 int rc2;
3425 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3426 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3427 if (RT_SUCCESS(rc))
3428 {
3429 Mem.eType = MEMREF_TYPE_CONT;
3430 rc = supdrvMemAdd(&Mem, pSession);
3431 if (!rc)
3432 {
3433 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3434 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3435 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
3436 return 0;
3437 }
3438
3439 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3440 AssertRC(rc2);
3441 }
3442 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3443 AssertRC(rc2);
3444 }
3445
3446 return rc;
3447}
3448SUPR0_EXPORT_SYMBOL(SUPR0ContAlloc);
3449
3450
3451/**
3452 * Frees memory allocated using SUPR0ContAlloc().
3453 *
3454 * @returns IPRT status code.
3455 * @param pSession The session to which the memory was allocated.
3456 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3457 */
3458SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3459{
3460 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3461 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3462 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
3463}
3464SUPR0_EXPORT_SYMBOL(SUPR0ContFree);
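/*
 * Minimal caller sketch (assumption): allocating a physically contiguous buffer
 * with both ring-0 and ring-3 mappings via SUPR0ContAlloc and freeing it again.
 * The page count is arbitrary.
 */
#if 0
static int exampleContAlloc(PSUPDRVSESSION pSession)
{
    RTR0PTR  pvR0;
    RTR3PTR  pvR3;
    RTHCPHYS HCPhys;
    int rc = SUPR0ContAlloc(pSession, 4 /* cPages */, &pvR0, &pvR3, &HCPhys);
    if (RT_SUCCESS(rc))
    {
        /* HCPhys is the physical address of the first page; the rest follow contiguously. */
        rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pvR0); /* the ring-3 address works as well */
    }
    return rc;
}
#endif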
3465
3466
3467/**
3468 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
3469 *
3470 * The memory isn't zeroed.
3471 *
3472 * @returns IPRT status code.
3473 * @param pSession Session data.
3474 * @param cPages Number of pages to allocate.
3475 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
3476 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
3477 * @param paPages Where to put the physical addresses of allocated memory.
3478 */
3479SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
3480{
3481 unsigned iPage;
3482 int rc;
3483 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3484 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
3485
3486 /*
3487 * Validate input.
3488 */
3489 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3490 if (!ppvR3 || !ppvR0 || !paPages)
3491 {
3492 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
3493 pSession, ppvR3, ppvR0, paPages));
3494 return VERR_INVALID_PARAMETER;
3495
3496 }
3497 if (cPages < 1 || cPages >= 256)
3498 {
3499 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3500 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3501 }
3502
3503 /*
3504 * Let IPRT do the work.
3505 */
3506 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
3507 if (RT_SUCCESS(rc))
3508 {
3509 int rc2;
3510 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3511 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3512 if (RT_SUCCESS(rc))
3513 {
3514 Mem.eType = MEMREF_TYPE_LOW;
3515 rc = supdrvMemAdd(&Mem, pSession);
3516 if (!rc)
3517 {
3518 for (iPage = 0; iPage < cPages; iPage++)
3519 {
3520 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3521                     AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
3522 }
3523 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3524 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3525 return 0;
3526 }
3527
3528 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3529 AssertRC(rc2);
3530 }
3531
3532 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3533 AssertRC(rc2);
3534 }
3535
3536 return rc;
3537}
3538SUPR0_EXPORT_SYMBOL(SUPR0LowAlloc);
3539
3540
3541/**
3542 * Frees memory allocated using SUPR0LowAlloc().
3543 *
3544 * @returns IPRT status code.
3545 * @param pSession The session to which the memory was allocated.
3546 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3547 */
3548SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3549{
3550 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3551 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3552 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
3553}
3554SUPR0_EXPORT_SYMBOL(SUPR0LowFree);
3555
3556
3557
3558/**
3559 * Allocates a chunk of memory with both R0 and R3 mappings.
3560 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
3561 *
3562 * @returns IPRT status code.
3563  * @param   pSession    The session to associate the allocation with.
3564 * @param cb Number of bytes to allocate.
3565 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3566 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3567 */
3568SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
3569{
3570 int rc;
3571 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3572 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
3573
3574 /*
3575 * Validate input.
3576 */
3577 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3578 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
3579 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3580 if (cb < 1 || cb >= _4M)
3581 {
3582 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
3583 return VERR_INVALID_PARAMETER;
3584 }
3585
3586 /*
3587 * Let IPRT do the work.
3588 */
3589 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
3590 if (RT_SUCCESS(rc))
3591 {
3592 int rc2;
3593 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3594 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3595 if (RT_SUCCESS(rc))
3596 {
3597 Mem.eType = MEMREF_TYPE_MEM;
3598 rc = supdrvMemAdd(&Mem, pSession);
3599 if (!rc)
3600 {
3601 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3602 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3603 return VINF_SUCCESS;
3604 }
3605
3606 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3607 AssertRC(rc2);
3608 }
3609
3610 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3611 AssertRC(rc2);
3612 }
3613
3614 return rc;
3615}
3616SUPR0_EXPORT_SYMBOL(SUPR0MemAlloc);
3617
3618
3619/**
3620 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
3621 *
3622 * @returns IPRT status code.
3623 * @param pSession The session to which the memory was allocated.
3624 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3625 * @param paPages Where to store the physical addresses.
3626 */
3627SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
3628{
3629 PSUPDRVBUNDLE pBundle;
3630 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
3631
3632 /*
3633 * Validate input.
3634 */
3635 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3636 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
3637 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
3638
3639 /*
3640 * Search for the address.
3641 */
3642 RTSpinlockAcquire(pSession->Spinlock);
3643 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3644 {
3645 if (pBundle->cUsed > 0)
3646 {
3647 unsigned i;
3648 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3649 {
3650 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
3651 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3652 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3653 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3654 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
3655 )
3656 )
3657 {
3658 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
3659 size_t iPage;
3660 for (iPage = 0; iPage < cPages; iPage++)
3661 {
3662 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
3663 paPages[iPage].uReserved = 0;
3664 }
3665 RTSpinlockRelease(pSession->Spinlock);
3666 return VINF_SUCCESS;
3667 }
3668 }
3669 }
3670 }
3671 RTSpinlockRelease(pSession->Spinlock);
3672 Log(("Failed to find %p!!!\n", (void *)uPtr));
3673 return VERR_INVALID_PARAMETER;
3674}
3675SUPR0_EXPORT_SYMBOL(SUPR0MemGetPhys);
3676
3677
3678/**
3679 * Free memory allocated by SUPR0MemAlloc().
3680 *
3681 * @returns IPRT status code.
3682 * @param pSession The session owning the allocation.
3683 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3684 */
3685SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3686{
3687 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3688 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3689 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
3690}
3691SUPR0_EXPORT_SYMBOL(SUPR0MemFree);
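/*
 * Minimal caller sketch (assumption): allocating a dual-mapped chunk with
 * SUPR0MemAlloc, querying the backing pages with SUPR0MemGetPhys, then freeing
 * it.  The 2 page size is illustrative only.
 */
#if 0
static int exampleMemAlloc(PSUPDRVSESSION pSession)
{
    RTR0PTR pvR0;
    RTR3PTR pvR3;
    int rc = SUPR0MemAlloc(pSession, 2 * PAGE_SIZE, &pvR0, &pvR3);
    if (RT_SUCCESS(rc))
    {
        SUPPAGE aPages[2];
        rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
        /* On success aPages[i].Phys holds the physical address of page i. */
        SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
    }
    return rc;
}
#endif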
3692
3693
3694/**
3695 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
3696 *
3697 * The memory is fixed and it's possible to query the physical addresses using
3698 * SUPR0MemGetPhys().
3699 *
3700 * @returns IPRT status code.
3701  * @param   pSession    The session to associate the allocation with.
3702 * @param cPages The number of pages to allocate.
3703 * @param fFlags Flags, reserved for the future. Must be zero.
3704 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3705 * NULL if no ring-3 mapping.
3706 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3707 * NULL if no ring-0 mapping.
3708 * @param paPages Where to store the addresses of the pages. Optional.
3709 */
3710SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
3711{
3712 int rc;
3713 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3714 LogFlow(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
3715
3716 /*
3717 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3718 */
3719 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3720 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
3721 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3722 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
3723 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3724 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
3725 {
3726 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than %uMB (VBOX_MAX_ALLOC_PAGE_COUNT pages).\n", cPages, VBOX_MAX_ALLOC_PAGE_COUNT * (_1M / _4K)));
3727 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3728 }
3729
3730 /*
3731 * Let IPRT do the work.
3732 */
3733 if (ppvR0)
3734 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
3735 else
3736 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
3737 if (RT_SUCCESS(rc))
3738 {
3739 int rc2;
3740 if (ppvR3)
3741 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3742 else
3743 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
3744 if (RT_SUCCESS(rc))
3745 {
3746 Mem.eType = MEMREF_TYPE_PAGE;
3747 rc = supdrvMemAdd(&Mem, pSession);
3748 if (!rc)
3749 {
3750 if (ppvR3)
3751 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3752 if (ppvR0)
3753 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3754 if (paPages)
3755 {
3756 uint32_t iPage = cPages;
3757 while (iPage-- > 0)
3758 {
3759 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
3760 Assert(paPages[iPage] != NIL_RTHCPHYS);
3761 }
3762 }
3763 return VINF_SUCCESS;
3764 }
3765
3766 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3767 AssertRC(rc2);
3768 }
3769
3770 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3771 AssertRC(rc2);
3772 }
3773 return rc;
3774}
3775SUPR0_EXPORT_SYMBOL(SUPR0PageAllocEx);
3776
3777
3778/**
3779 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
3780 * space.
3781 *
3782 * @returns IPRT status code.
3783  * @param   pSession    The session to associate the allocation with.
3784 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3785 * @param offSub Where to start mapping. Must be page aligned.
3786 * @param cbSub How much to map. Must be page aligned.
3787 * @param fFlags Flags, MBZ.
3788 * @param ppvR0 Where to return the address of the ring-0 mapping on
3789 * success.
3790 */
3791SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
3792 uint32_t fFlags, PRTR0PTR ppvR0)
3793{
3794 int rc;
3795 PSUPDRVBUNDLE pBundle;
3796 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
3797 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
3798
3799 /*
3800 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3801 */
3802 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3803 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3804 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3805 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3806 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3807 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3808
3809 /*
3810 * Find the memory object.
3811 */
3812 RTSpinlockAcquire(pSession->Spinlock);
3813 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3814 {
3815 if (pBundle->cUsed > 0)
3816 {
3817 unsigned i;
3818 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3819 {
3820 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3821 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3822 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3823 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
3824 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
3825 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3826 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
3827 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
3828 {
3829 hMemObj = pBundle->aMem[i].MemObj;
3830 break;
3831 }
3832 }
3833 }
3834 }
3835 RTSpinlockRelease(pSession->Spinlock);
3836
3837 rc = VERR_INVALID_PARAMETER;
3838 if (hMemObj != NIL_RTR0MEMOBJ)
3839 {
3840 /*
3841 * Do some further input validations before calling IPRT.
3842 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
3843 */
3844 size_t cbMemObj = RTR0MemObjSize(hMemObj);
3845 if ( offSub < cbMemObj
3846 && cbSub <= cbMemObj
3847 && offSub + cbSub <= cbMemObj)
3848 {
3849 RTR0MEMOBJ hMapObj;
3850 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
3851 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
3852 if (RT_SUCCESS(rc))
3853 *ppvR0 = RTR0MemObjAddress(hMapObj);
3854 }
3855 else
3856 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3857
3858 }
3859 return rc;
3860}
3861SUPR0_EXPORT_SYMBOL(SUPR0PageMapKernel);
3862
3863
3864/**
3865 * Changes the page level protection of one or more pages previously allocated
3866 * by SUPR0PageAllocEx.
3867 *
3868 * @returns IPRT status code.
3869  * @param   pSession    The session to associate the allocation with.
3870 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3871 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
3872 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
3873 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
3874 * @param offSub Where to start changing. Must be page aligned.
3875 * @param cbSub How much to change. Must be page aligned.
3876 * @param fProt The new page level protection, see RTMEM_PROT_*.
3877 */
3878SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
3879{
3880 int rc;
3881 PSUPDRVBUNDLE pBundle;
3882 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
3883 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
3884 LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt-%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
3885
3886 /*
3887 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3888 */
3889 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3890 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
3891 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3892 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3893 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3894
3895 /*
3896 * Find the memory object.
3897 */
3898 RTSpinlockAcquire(pSession->Spinlock);
3899 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3900 {
3901 if (pBundle->cUsed > 0)
3902 {
3903 unsigned i;
3904 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3905 {
3906 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3907 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3908 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3909 || pvR3 == NIL_RTR3PTR)
3910 && ( pvR0 == NIL_RTR0PTR
3911 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
3912 && ( pvR3 == NIL_RTR3PTR
3913 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
3914 {
3915 if (pvR0 != NIL_RTR0PTR)
3916 hMemObjR0 = pBundle->aMem[i].MemObj;
3917 if (pvR3 != NIL_RTR3PTR)
3918 hMemObjR3 = pBundle->aMem[i].MapObjR3;
3919 break;
3920 }
3921 }
3922 }
3923 }
3924 RTSpinlockRelease(pSession->Spinlock);
3925
3926 rc = VERR_INVALID_PARAMETER;
3927 if ( hMemObjR0 != NIL_RTR0MEMOBJ
3928 || hMemObjR3 != NIL_RTR0MEMOBJ)
3929 {
3930 /*
3931 * Do some further input validations before calling IPRT.
3932 */
3933 size_t cbMemObj = hMemObjR0 != NIL_RTR0PTR ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
3934 if ( offSub < cbMemObj
3935 && cbSub <= cbMemObj
3936 && offSub + cbSub <= cbMemObj)
3937 {
3938 rc = VINF_SUCCESS;
3939 if (hMemObjR3 != NIL_RTR0PTR)
3940 rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
3941 if (hMemObjR0 != NIL_RTR0PTR && RT_SUCCESS(rc))
3942 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
3943 }
3944 else
3945             SUPR0Printf("SUPR0PageProtect: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3946
3947 }
3948 return rc;
3949
3950}
3951SUPR0_EXPORT_SYMBOL(SUPR0PageProtect);
3952
3953
3954/**
3955 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
3956 *
3957 * @returns IPRT status code.
3958 * @param pSession The session owning the allocation.
3959 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
3960 * SUPR0PageAllocEx().
3961 */
3962SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3963{
3964 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3965 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3966 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
3967}
3968SUPR0_EXPORT_SYMBOL(SUPR0PageFree);
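/*
 * Minimal caller sketch (assumption): SUPR0PageAllocEx with a ring-3 mapping
 * only, an extra ring-0 view of the first page via SUPR0PageMapKernel, and
 * cleanup through SUPR0PageFree.  Page counts and sub-range sizes are
 * illustrative.
 */
#if 0
static int examplePageAllocEx(PSUPDRVSESSION pSession)
{
    RTR3PTR pvR3;
    int rc = SUPR0PageAllocEx(pSession, 4 /* cPages */, 0 /* fFlags */, &pvR3, NULL /* ppvR0 */, NULL /* paPages */);
    if (RT_SUCCESS(rc))
    {
        RTR0PTR pvR0;
        rc = SUPR0PageMapKernel(pSession, pvR3, 0 /* offSub */, PAGE_SIZE /* cbSub */, 0 /* fFlags */, &pvR0);
        /* On success pvR0 maps the first page in kernel space. */
        SUPR0PageFree(pSession, pvR3); /* frees the allocation including its mappings */
    }
    return rc;
}
#endif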
3969
3970
3971/**
3972  * Reports a bad context, currently that means EFLAGS.AC is 0 instead of 1.
3973 *
3974 * @param pDevExt The device extension.
3975 * @param pszFile The source file where the caller detected the bad
3976 * context.
3977 * @param uLine The line number in @a pszFile.
3978 * @param pszExtra Optional additional message to give further hints.
3979 */
3980void VBOXCALL supdrvBadContext(PSUPDRVDEVEXT pDevExt, const char *pszFile, uint32_t uLine, const char *pszExtra)
3981{
3982 uint32_t cCalls;
3983
3984 /*
3985 * Shorten the filename before displaying the message.
3986 */
3987 for (;;)
3988 {
3989 const char *pszTmp = strchr(pszFile, '/');
3990 if (!pszTmp)
3991 pszTmp = strchr(pszFile, '\\');
3992 if (!pszTmp)
3993 break;
3994 pszFile = pszTmp + 1;
3995 }
3996 if (RT_VALID_PTR(pszExtra) && *pszExtra)
3997 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s: %s\n", uLine, pszFile, pszExtra);
3998 else
3999 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s!\n", uLine, pszFile);
4000
4001 /*
4002 * Record the incident so that we stand a chance of blocking I/O controls
4003      * before panicking the system.
4004 */
4005 cCalls = ASMAtomicIncU32(&pDevExt->cBadContextCalls);
4006 if (cCalls > UINT32_MAX - _1K)
4007 ASMAtomicWriteU32(&pDevExt->cBadContextCalls, UINT32_MAX - _1K);
4008}
4009
4010
4011/**
4012  * Reports a bad context, currently that means EFLAGS.AC is 0 instead of 1.
4013 *
4014 * @param pSession The session of the caller.
4015 * @param pszFile The source file where the caller detected the bad
4016 * context.
4017 * @param uLine The line number in @a pszFile.
4018 * @param pszExtra Optional additional message to give further hints.
4019 */
4020SUPR0DECL(void) SUPR0BadContext(PSUPDRVSESSION pSession, const char *pszFile, uint32_t uLine, const char *pszExtra)
4021{
4022 PSUPDRVDEVEXT pDevExt;
4023
4024 AssertReturnVoid(SUP_IS_SESSION_VALID(pSession));
4025 pDevExt = pSession->pDevExt;
4026
4027 supdrvBadContext(pDevExt, pszFile, uLine, pszExtra);
4028}
4029SUPR0_EXPORT_SYMBOL(SUPR0BadContext);
4030
4031
4032/**
4033 * Gets the paging mode of the current CPU.
4034 *
4035  * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
4036 */
4037SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
4038{
4039 SUPPAGINGMODE enmMode;
4040
4041 RTR0UINTREG cr0 = ASMGetCR0();
4042 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
4043 enmMode = SUPPAGINGMODE_INVALID;
4044 else
4045 {
4046 RTR0UINTREG cr4 = ASMGetCR4();
4047 uint32_t fNXEPlusLMA = 0;
4048 if (cr4 & X86_CR4_PAE)
4049 {
4050 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
4051 if (fExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
4052 {
4053 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
4054 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
4055 fNXEPlusLMA |= RT_BIT(0);
4056 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
4057 fNXEPlusLMA |= RT_BIT(1);
4058 }
4059 }
4060
4061 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
4062 {
4063 case 0:
4064 enmMode = SUPPAGINGMODE_32_BIT;
4065 break;
4066
4067 case X86_CR4_PGE:
4068 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
4069 break;
4070
4071 case X86_CR4_PAE:
4072 enmMode = SUPPAGINGMODE_PAE;
4073 break;
4074
4075 case X86_CR4_PAE | RT_BIT(0):
4076 enmMode = SUPPAGINGMODE_PAE_NX;
4077 break;
4078
4079 case X86_CR4_PAE | X86_CR4_PGE:
4080 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4081 break;
4082
4083 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4084 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4085 break;
4086
4087 case RT_BIT(1) | X86_CR4_PAE:
4088 enmMode = SUPPAGINGMODE_AMD64;
4089 break;
4090
4091 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
4092 enmMode = SUPPAGINGMODE_AMD64_NX;
4093 break;
4094
4095 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
4096 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
4097 break;
4098
4099 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4100 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4101 break;
4102
4103 default:
4104 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4105 enmMode = SUPPAGINGMODE_INVALID;
4106 break;
4107 }
4108 }
4109 return enmMode;
4110}
4111SUPR0_EXPORT_SYMBOL(SUPR0GetPagingMode);
4112
4113
4114/**
4115 * Change CR4 and take care of the kernel CR4 shadow if applicable.
4116 *
4117 * CR4 shadow handling is required for Linux >= 4.0. Calling this function
4118 * instead of ASMSetCR4() is only necessary for semi-permanent CR4 changes
4119 * for code with interrupts enabled.
4120 *
4121 * @returns the old CR4 value.
4122 *
4123 * @param fOrMask bits to be set in CR4.
4124  * @param   fAndMask        Mask of bits to keep; CR4 is ANDed with this before fOrMask is ORed in.
4125 *
4126 * @remarks Must be called with preemption/interrupts disabled.
4127 */
4128SUPR0DECL(RTCCUINTREG) SUPR0ChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
4129{
4130#ifdef RT_OS_LINUX
4131 return supdrvOSChangeCR4(fOrMask, fAndMask);
4132#else
4133 RTCCUINTREG uOld = ASMGetCR4();
4134 RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
4135 if (uNew != uOld)
4136 ASMSetCR4(uNew);
4137 return uOld;
4138#endif
4139}
4140SUPR0_EXPORT_SYMBOL(SUPR0ChangeCR4);
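/*
 * Minimal caller sketch (assumption): setting and restoring CR4.VMXE via
 * SUPR0ChangeCR4.  Because fAndMask is a keep-mask, pass ~0 when only setting
 * bits.  The VMXE toggling itself is hypothetical; only the mask handling is
 * being illustrated.
 */
#if 0
static void exampleToggleVmxe(void)
{
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    RTCCUINTREG const fOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, ~(RTCCUINTREG)0); /* set VMXE, keep everything else */
    /* ... VMXON/VMXOFF work would go here ... */
    if (!(fOldCr4 & X86_CR4_VMXE))
        SUPR0ChangeCR4(0, ~(RTCCUINTREG)X86_CR4_VMXE);                         /* restore: clear VMXE again */

    RTThreadPreemptRestore(&PreemptState);
}
#endif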
4141
4142
4143/**
4144  * Enables or disables hardware virtualization extensions using native OS APIs.
4145 *
4146 * @returns VBox status code.
4147 * @retval VINF_SUCCESS on success.
4148 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4149 *
4150 * @param fEnable Whether to enable or disable.
4151 */
4152SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4153{
4154#ifdef RT_OS_DARWIN
4155 return supdrvOSEnableVTx(fEnable);
4156#else
4157 RT_NOREF1(fEnable);
4158 return VERR_NOT_SUPPORTED;
4159#endif
4160}
4161SUPR0_EXPORT_SYMBOL(SUPR0EnableVTx);
4162
4163
4164/**
4165 * Suspends hardware virtualization extensions using the native OS API.
4166 *
4167 * This is called prior to entering raw-mode context.
4168 *
4169 * @returns @c true if suspended, @c false if not.
4170 */
4171SUPR0DECL(bool) SUPR0SuspendVTxOnCpu(void)
4172{
4173#ifdef RT_OS_DARWIN
4174 return supdrvOSSuspendVTxOnCpu();
4175#else
4176 return false;
4177#endif
4178}
4179SUPR0_EXPORT_SYMBOL(SUPR0SuspendVTxOnCpu);
4180
4181
4182/**
4183 * Resumes hardware virtualization extensions using the native OS API.
4184 *
4185  * This is called after returning from raw-mode context.
4186 *
4187 * @param fSuspended The return value of SUPR0SuspendVTxOnCpu.
4188 */
4189SUPR0DECL(void) SUPR0ResumeVTxOnCpu(bool fSuspended)
4190{
4191#ifdef RT_OS_DARWIN
4192 supdrvOSResumeVTxOnCpu(fSuspended);
4193#else
4194 RT_NOREF1(fSuspended);
4195 Assert(!fSuspended);
4196#endif
4197}
4198SUPR0_EXPORT_SYMBOL(SUPR0ResumeVTxOnCpu);
4199
4200
4201SUPR0DECL(int) SUPR0GetCurrentGdtRw(RTHCUINTPTR *pGdtRw)
4202{
4203#ifdef RT_OS_LINUX
4204 return supdrvOSGetCurrentGdtRw(pGdtRw);
4205#else
4206 NOREF(pGdtRw);
4207 return VERR_NOT_IMPLEMENTED;
4208#endif
4209}
4210SUPR0_EXPORT_SYMBOL(SUPR0GetCurrentGdtRw);
4211
4212
4213/**
4214 * Gets AMD-V and VT-x support for the calling CPU.
4215 *
4216 * @returns VBox status code.
4217 * @param pfCaps Where to store whether VT-x (SUPVTCAPS_VT_X) or AMD-V
4218 * (SUPVTCAPS_AMD_V) is supported.
4219 */
4220SUPR0DECL(int) SUPR0GetVTSupport(uint32_t *pfCaps)
4221{
4222 Assert(pfCaps);
4223 *pfCaps = 0;
4224
4225 /* Check if the CPU even supports CPUID (extremely ancient CPUs). */
4226 if (ASMHasCpuId())
4227 {
4228 /* Check the range of standard CPUID leafs. */
4229 uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
4230 ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
4231 if (ASMIsValidStdRange(uMaxLeaf))
4232 {
4233 /* Query the standard CPUID leaf. */
4234 uint32_t fFeatEcx, fFeatEdx, uDummy;
4235 ASMCpuId(1, &uDummy, &uDummy, &fFeatEcx, &fFeatEdx);
4236
4237 /* Check if the vendor is Intel (or compatible). */
4238 if ( ASMIsIntelCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4239 || ASMIsViaCentaurCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4240 || ASMIsShanghaiCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4241 {
4242 /* Check VT-x support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4243 if ( (fFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
4244 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4245 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4246 {
4247 *pfCaps = SUPVTCAPS_VT_X;
4248 return VINF_SUCCESS;
4249 }
4250 return VERR_VMX_NO_VMX;
4251 }
4252
4253 /* Check if the vendor is AMD (or compatible). */
4254 if ( ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4255 || ASMIsHygonCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4256 {
4257 uint32_t fExtFeatEcx, uExtMaxId;
4258 ASMCpuId(0x80000000, &uExtMaxId, &uDummy, &uDummy, &uDummy);
4259 ASMCpuId(0x80000001, &uDummy, &uDummy, &fExtFeatEcx, &uDummy);
4260
4261 /* Check AMD-V support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4262 if ( ASMIsValidExtRange(uExtMaxId)
4263 && uExtMaxId >= 0x8000000a
4264 && (fExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
4265 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4266 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4267 {
4268 *pfCaps = SUPVTCAPS_AMD_V;
4269 return VINF_SUCCESS;
4270 }
4271 return VERR_SVM_NO_SVM;
4272 }
4273 }
4274 }
4275 return VERR_UNSUPPORTED_CPU;
4276}
4277SUPR0_EXPORT_SYMBOL(SUPR0GetVTSupport);
4278
4279
4280/**
4281 * Checks if Intel VT-x feature is usable on this CPU.
4282 *
4283 * @returns VBox status code.
4284 * @param pfIsSmxModeAmbiguous Where to return whether the SMX mode causes
4285 * ambiguity that makes us unsure whether we
4286 * really can use VT-x or not.
4287 *
4288 * @remarks Must be called with preemption disabled.
4289 * The caller is also expected to check that the CPU is an Intel (or
4290 * VIA/Shanghai) CPU -and- that it supports VT-x. Otherwise, this
4291 * function might throw a \#GP fault as it tries to read/write MSRs
4292 * that may not be present!
4293 */
4294SUPR0DECL(int) SUPR0GetVmxUsability(bool *pfIsSmxModeAmbiguous)
4295{
4296 uint64_t fFeatMsr;
4297 bool fMaybeSmxMode;
4298 bool fMsrLocked;
4299 bool fSmxVmxAllowed;
4300 bool fVmxAllowed;
4301 bool fIsSmxModeAmbiguous;
4302 int rc;
4303
4304 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4305
4306 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4307 fMaybeSmxMode = RT_BOOL(ASMGetCR4() & X86_CR4_SMXE);
4308 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4309 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4310 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4311 fIsSmxModeAmbiguous = false;
4312 rc = VERR_INTERNAL_ERROR_5;
4313
4314 /* Check if the LOCK bit is set but excludes the required VMXON bit. */
4315 if (fMsrLocked)
4316 {
4317 if (fVmxAllowed && fSmxVmxAllowed)
4318 rc = VINF_SUCCESS;
4319 else if (!fVmxAllowed && !fSmxVmxAllowed)
4320 rc = VERR_VMX_MSR_ALL_VMX_DISABLED;
4321 else if (!fMaybeSmxMode)
4322 {
4323 if (fVmxAllowed)
4324 rc = VINF_SUCCESS;
4325 else
4326 rc = VERR_VMX_MSR_VMX_DISABLED;
4327 }
4328 else
4329 {
4330 /*
4331 * CR4.SMXE is set but this doesn't mean the CPU is necessarily in SMX mode. We shall assume
4332 * that it is -not- and that it is a stupid BIOS/OS setting CR4.SMXE for no good reason.
4333 * See @bugref{6873}.
4334 */
4335 Assert(fMaybeSmxMode == true);
4336 fIsSmxModeAmbiguous = true;
4337 rc = VINF_SUCCESS;
4338 }
4339 }
4340 else
4341 {
4342 /*
4343 * MSR is not yet locked; we can change it ourselves here. Once the lock bit is set,
4344 * this MSR can no longer be modified.
4345 *
4346 * Set both the VMX and SMX_VMX bits (if supported) as we can't determine SMX mode
4347 * accurately. See @bugref{6873}.
4348 *
4349 * We need to check for SMX hardware support here, before writing the MSR as
4350 * otherwise we will #GP fault on CPUs that do not support it. Callers do not check
4351 * for it.
4352 */
4353 uint32_t fFeaturesECX, uDummy;
4354#ifdef VBOX_STRICT
4355 /* Callers should have verified these at some point. */
4356 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4357 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4358 Assert(ASMIsValidStdRange(uMaxId));
4359 Assert( ASMIsIntelCpuEx( uVendorEBX, uVendorECX, uVendorEDX)
4360 || ASMIsViaCentaurCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4361 || ASMIsShanghaiCpuEx( uVendorEBX, uVendorECX, uVendorEDX));
4362#endif
4363 ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
4364 bool fSmxVmxHwSupport = false;
4365 if ( (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
4366 && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
4367 fSmxVmxHwSupport = true;
4368
4369 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
4370 | MSR_IA32_FEATURE_CONTROL_VMXON;
4371 if (fSmxVmxHwSupport)
4372 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;
4373
4374 /*
4375 * Commit.
4376 */
4377 ASMWrMsr(MSR_IA32_FEATURE_CONTROL, fFeatMsr);
4378
4379 /*
4380 * Verify.
4381 */
4382 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4383 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4384 if (fMsrLocked)
4385 {
4386 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4387 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4388 if ( fVmxAllowed
4389 && ( !fSmxVmxHwSupport
4390 || fSmxVmxAllowed))
4391 rc = VINF_SUCCESS;
4392 else
4393 rc = !fSmxVmxHwSupport ? VERR_VMX_MSR_VMX_ENABLE_FAILED : VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED;
4394 }
4395 else
4396 rc = VERR_VMX_MSR_LOCKING_FAILED;
4397 }
4398
4399 if (pfIsSmxModeAmbiguous)
4400 *pfIsSmxModeAmbiguous = fIsSmxModeAmbiguous;
4401
4402 return rc;
4403}
4404SUPR0_EXPORT_SYMBOL(SUPR0GetVmxUsability);
4405
4406
4407/**
4408 * Checks if AMD-V SVM feature is usable on this CPU.
4409 *
4410 * @returns VBox status code.
4411 * @param fInitSvm If usable, try to initialize SVM on this CPU.
4412 *
4413 * @remarks Must be called with preemption disabled.
4414 */
4415SUPR0DECL(int) SUPR0GetSvmUsability(bool fInitSvm)
4416{
4417 int rc;
4418 uint64_t fVmCr;
4419 uint64_t fEfer;
4420
4421 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4422 fVmCr = ASMRdMsr(MSR_K8_VM_CR);
4423 if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
4424 {
4425 rc = VINF_SUCCESS;
4426 if (fInitSvm)
4427 {
4428 /* Turn on SVM in the EFER MSR. */
4429 fEfer = ASMRdMsr(MSR_K6_EFER);
4430 if (fEfer & MSR_K6_EFER_SVME)
4431 rc = VERR_SVM_IN_USE;
4432 else
4433 {
4434 ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
4435
4436 /* Paranoia. */
4437 fEfer = ASMRdMsr(MSR_K6_EFER);
4438 if (fEfer & MSR_K6_EFER_SVME)
4439 {
4440 /* Restore previous value. */
4441 ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
4442 }
4443 else
4444 rc = VERR_SVM_ILLEGAL_EFER_MSR;
4445 }
4446 }
4447 }
4448 else
4449 rc = VERR_SVM_DISABLED;
4450 return rc;
4451}
4452SUPR0_EXPORT_SYMBOL(SUPR0GetSvmUsability);
4453
4454
4455/**
4456 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4457 *
4458 * @returns VBox status code.
4459 * @retval VERR_VMX_NO_VMX
4460 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4461 * @retval VERR_VMX_MSR_VMX_DISABLED
4462 * @retval VERR_VMX_MSR_LOCKING_FAILED
4463 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4464 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4465 * @retval VERR_SVM_NO_SVM
4466 * @retval VERR_SVM_DISABLED
4467 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4468 * (centaur)/Shanghai CPU.
4469 *
4470 * @param pfCaps Where to store the capabilities.
4471 */
4472int VBOXCALL supdrvQueryVTCapsInternal(uint32_t *pfCaps)
4473{
4474 int rc = VERR_UNSUPPORTED_CPU;
4475 bool fIsSmxModeAmbiguous = false;
4476 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4477
4478 /*
4479 * Input validation.
4480 */
4481 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4482 *pfCaps = 0;
4483
4484 /* We may modify MSRs and re-read them, disable preemption so we make sure we don't migrate CPUs. */
4485 RTThreadPreemptDisable(&PreemptState);
4486
4487 /* Check if VT-x/AMD-V is supported. */
4488 rc = SUPR0GetVTSupport(pfCaps);
4489 if (RT_SUCCESS(rc))
4490 {
4491 /* Check if VT-x is supported. */
4492 if (*pfCaps & SUPVTCAPS_VT_X)
4493 {
4494 /* Check if VT-x is usable. */
4495 rc = SUPR0GetVmxUsability(&fIsSmxModeAmbiguous);
4496 if (RT_SUCCESS(rc))
4497 {
4498 /* Query some basic VT-x capabilities (mainly required by our GUI). */
4499 VMXCTLSMSR vtCaps;
4500 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4501 if (vtCaps.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4502 {
4503 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4504 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_EPT)
4505 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4506 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
4507 *pfCaps |= SUPVTCAPS_VTX_UNRESTRICTED_GUEST;
4508 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4509 *pfCaps |= SUPVTCAPS_VTX_VMCS_SHADOWING;
4510 }
4511 }
4512 }
4513 /* Check if AMD-V is supported. */
4514 else if (*pfCaps & SUPVTCAPS_AMD_V)
4515 {
4516             /* Check if SVM is usable. */
4517 rc = SUPR0GetSvmUsability(false /* fInitSvm */);
4518 if (RT_SUCCESS(rc))
4519 {
4520 /* Query some basic AMD-V capabilities (mainly required by our GUI). */
4521 uint32_t uDummy, fSvmFeatures;
4522 ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
4523 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
4524 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4525 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD)
4526 *pfCaps |= SUPVTCAPS_AMDV_VIRT_VMSAVE_VMLOAD;
4527 }
4528 }
4529 }
4530
4531 /* Restore preemption. */
4532 RTThreadPreemptRestore(&PreemptState);
4533
4534 /* After restoring preemption, if the SMX mode was ambiguous, print a warning as it's difficult to debug such problems. */
4535 if (fIsSmxModeAmbiguous)
4536 SUPR0Printf("WARNING! CR4 hints SMX mode but your CPU is too secretive. Proceeding anyway... We wish you good luck!\n");
4537
4538 return rc;
4539}
4540
4541
4542/**
4543 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4544 *
4545 * @returns VBox status code.
4546 * @retval VERR_VMX_NO_VMX
4547 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4548 * @retval VERR_VMX_MSR_VMX_DISABLED
4549 * @retval VERR_VMX_MSR_LOCKING_FAILED
4550 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4551 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4552 * @retval VERR_SVM_NO_SVM
4553 * @retval VERR_SVM_DISABLED
4554 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4555 * (centaur)/Shanghai CPU.
4556 *
4557 * @param pSession The session handle.
4558 * @param pfCaps Where to store the capabilities.
4559 */
4560SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
4561{
4562 /*
4563 * Input validation.
4564 */
4565 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4566 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4567
4568 /*
4569 * Call common worker.
4570 */
4571 return supdrvQueryVTCapsInternal(pfCaps);
4572}
4573SUPR0_EXPORT_SYMBOL(SUPR0QueryVTCaps);
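
/*
 * Illustrative usage sketch (not built into the driver): decoding the
 * capability mask returned by SUPR0QueryVTCaps.  'pSession' is assumed to be
 * a valid ring-0 session obtained elsewhere; the SUPVTCAPS_XXX flags are the
 * ones set by supdrvQueryVTCapsInternal above.
 */
#if 0 /* illustrative example only */
static void exampleLogVTCaps(PSUPDRVSESSION pSession)
{
    uint32_t fCaps = 0;
    int rc = SUPR0QueryVTCaps(pSession, &fCaps);
    if (RT_SUCCESS(rc))
        SUPR0Printf("example: %s nested-paging=%d unrestricted-guest=%d\n",
                    (fCaps & SUPVTCAPS_VT_X) ? "VT-x" : "AMD-V",
                    !!(fCaps & SUPVTCAPS_NESTED_PAGING),
                    !!(fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST));
    else
        SUPR0Printf("example: no usable hardware virtualization: %d\n", rc);
}
#endif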
4574
4575
4576/**
4577 * Queries the CPU microcode revision.
4578 *
4579 * @returns VBox status code.
4580 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4581 * readable microcode rev.
4582 *
4583 * @param puRevision Where to store the microcode revision.
4584 */
4585static int VBOXCALL supdrvQueryUcodeRev(uint32_t *puRevision)
4586{
4587 int rc = VERR_UNSUPPORTED_CPU;
4588 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4589
4590 /*
4591 * Input validation.
4592 */
4593 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4594
4595 *puRevision = 0;
4596
4597 /* Disable preemption so we make sure we don't migrate CPUs, just in case. */
4598 /* NB: We assume that there aren't mismatched microcode revs in the system. */
4599 RTThreadPreemptDisable(&PreemptState);
4600
4601 if (ASMHasCpuId())
4602 {
4603 uint32_t uDummy, uTFMSEAX;
4604 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4605
4606 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4607 ASMCpuId(1, &uTFMSEAX, &uDummy, &uDummy, &uDummy);
4608
4609 if (ASMIsValidStdRange(uMaxId))
4610 {
4611 uint64_t uRevMsr;
4612 if (ASMIsIntelCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4613 {
4614 /* Architectural MSR available on Pentium Pro and later. */
4615 if (ASMGetCpuFamily(uTFMSEAX) >= 6)
4616 {
4617 /* Revision is in the high dword. */
4618 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
4619 *puRevision = RT_HIDWORD(uRevMsr);
4620 rc = VINF_SUCCESS;
4621 }
4622 }
4623 else if ( ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4624 || ASMIsHygonCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4625 {
4626 /* Not well documented, but at least all AMD64 CPUs support this. */
4627 if (ASMGetCpuFamily(uTFMSEAX) >= 15)
4628 {
4629 /* Revision is in the low dword. */
4630 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID); /* Same MSR as Intel. */
4631 *puRevision = RT_LODWORD(uRevMsr);
4632 rc = VINF_SUCCESS;
4633 }
4634 }
4635 }
4636 }
4637
4638 RTThreadPreemptRestore(&PreemptState);
4639
4640 return rc;
4641}
4642
4643
4644/**
4645 * Queries the CPU microcode revision.
4646 *
4647 * @returns VBox status code.
4648 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4649 * readable microcode rev.
4650 *
4651 * @param pSession The session handle.
4652 * @param puRevision Where to store the microcode revision.
4653 */
4654SUPR0DECL(int) SUPR0QueryUcodeRev(PSUPDRVSESSION pSession, uint32_t *puRevision)
4655{
4656 /*
4657 * Input validation.
4658 */
4659 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4660 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4661
4662 /*
4663 * Call common worker.
4664 */
4665 return supdrvQueryUcodeRev(puRevision);
4666}
4667SUPR0_EXPORT_SYMBOL(SUPR0QueryUcodeRev);
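
/*
 * Illustrative usage sketch (not built into the driver): fetching the
 * microcode revision for diagnostic logging.  'pSession' is assumed to be a
 * valid ring-0 session.
 */
#if 0 /* illustrative example only */
static void exampleLogUcodeRev(PSUPDRVSESSION pSession)
{
    uint32_t uRev = 0;
    if (RT_SUCCESS(SUPR0QueryUcodeRev(pSession, &uRev)))
        SUPR0Printf("example: CPU microcode revision %#x\n", uRev);
}
#endif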
4668
4669
4670/**
4671 * Gets hardware-virtualization MSRs of the calling CPU.
4672 *
4673 * @returns VBox status code.
4674 * @param pMsrs Where to store the hardware-virtualization MSRs.
4675 * @param fCaps Hardware virtualization capabilities (SUPVTCAPS_XXX). Pass 0
4676 * to explicitly check for the presence of VT-x/AMD-V before
4677 * querying MSRs.
4678 * @param fForce Force querying of MSRs from the hardware.
4679 */
4680SUPR0DECL(int) SUPR0GetHwvirtMsrs(PSUPHWVIRTMSRS pMsrs, uint32_t fCaps, bool fForce)
4681{
4682 NOREF(fForce);
4683
4684 int rc;
4685 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4686
4687 /*
4688 * Input validation.
4689 */
4690 AssertPtrReturn(pMsrs, VERR_INVALID_POINTER);
4691
4692 /*
4693 * Disable preemption so we make sure we don't migrate CPUs and because
4694 * we access global data.
4695 */
4696 RTThreadPreemptDisable(&PreemptState);
4697
4698 /*
4699 * Query the MSRs from the hardware.
4700 */
4701 SUPHWVIRTMSRS Msrs;
4702 RT_ZERO(Msrs);
4703
4704 /* If the caller claims VT-x/AMD-V is supported, we don't need to recheck it. */
4705 if (!(fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V)))
4706 rc = SUPR0GetVTSupport(&fCaps);
4707 else
4708 rc = VINF_SUCCESS;
4709 if (RT_SUCCESS(rc))
4710 {
4711 if (fCaps & SUPVTCAPS_VT_X)
4712 {
4713 Msrs.u.vmx.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4714 Msrs.u.vmx.u64Basic = ASMRdMsr(MSR_IA32_VMX_BASIC);
4715 Msrs.u.vmx.PinCtls.u = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
4716 Msrs.u.vmx.ProcCtls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4717 Msrs.u.vmx.ExitCtls.u = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
4718 Msrs.u.vmx.EntryCtls.u = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
4719 Msrs.u.vmx.u64Misc = ASMRdMsr(MSR_IA32_VMX_MISC);
4720 Msrs.u.vmx.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
4721 Msrs.u.vmx.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
4722 Msrs.u.vmx.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
4723 Msrs.u.vmx.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
4724 Msrs.u.vmx.u64VmcsEnum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
4725
4726 if (RT_BF_GET(Msrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
4727 {
4728 Msrs.u.vmx.TruePinCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
4729 Msrs.u.vmx.TrueProcCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
4730 Msrs.u.vmx.TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
4731 Msrs.u.vmx.TrueExitCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
4732 }
4733
4734 if (Msrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4735 {
4736 Msrs.u.vmx.ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4737
4738 if (Msrs.u.vmx.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
4739 Msrs.u.vmx.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
4740
4741 if (Msrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMFUNC)
4742 Msrs.u.vmx.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
4743 }
4744
4745 if (Msrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
4746 Msrs.u.vmx.u64ProcCtls3 = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS3);
4747 }
4748 else if (fCaps & SUPVTCAPS_AMD_V)
4749 {
4750 Msrs.u.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
4751 Msrs.u.svm.u64MsrSmmAddr = ASMRdMsr(MSR_K7_SMM_ADDR);
4752 Msrs.u.svm.u64MsrSmmMask = ASMRdMsr(MSR_K7_SMM_MASK);
4753 }
4754 else
4755 {
4756 RTThreadPreemptRestore(&PreemptState);
4757 AssertMsgFailedReturn(("SUPR0GetVTSupport returns success but neither VT-x nor AMD-V reported!\n"),
4758 VERR_INTERNAL_ERROR_2);
4759 }
4760
4761 /*
4762 * Copy the MSRs out.
4763 */
4764 memcpy(pMsrs, &Msrs, sizeof(*pMsrs));
4765 }
4766
4767 RTThreadPreemptRestore(&PreemptState);
4768
4769 return rc;
4770}
4771SUPR0_EXPORT_SYMBOL(SUPR0GetHwvirtMsrs);
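
/*
 * Illustrative usage sketch (not built into the driver): snapshotting the
 * VT-x control MSRs.  It assumes an Intel/VT-x capable host; passing the
 * fCaps value returned by SUPR0GetVTSupport avoids the second support check,
 * as described above.
 */
#if 0 /* illustrative example only */
static void exampleDumpVmxBasic(void)
{
    uint32_t      fCaps = 0;
    SUPHWVIRTMSRS Msrs;
    int rc = SUPR0GetVTSupport(&fCaps);
    if (RT_SUCCESS(rc) && (fCaps & SUPVTCAPS_VT_X))
    {
        rc = SUPR0GetHwvirtMsrs(&Msrs, fCaps, false /* fForce */);
        if (RT_SUCCESS(rc))
            SUPR0Printf("example: VMX_BASIC=%#RX64 true-ctls=%u\n", Msrs.u.vmx.u64Basic,
                        (unsigned)RT_BF_GET(Msrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS));
    }
}
#endif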
4772
4773
4774/**
4775 * Register a component factory with the support driver.
4776 *
4777 * This is currently restricted to kernel sessions only.
4778 *
4779 * @returns VBox status code.
4780 * @retval VINF_SUCCESS on success.
4781 * @retval VERR_NO_MEMORY if we're out of memory.
4782 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
4783 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4784 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4785 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4786 *
4787 * @param pSession The SUPDRV session (must be a ring-0 session).
4788 * @param pFactory Pointer to the component factory registration structure.
4789 *
4790 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
4791 */
4792SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4793{
4794 PSUPDRVFACTORYREG pNewReg;
4795 const char *psz;
4796 int rc;
4797
4798 /*
4799 * Validate parameters.
4800 */
4801 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4802 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4803 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4804 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
4805 psz = RTStrEnd(pFactory->szName, sizeof(pFactory->szName));
4806 AssertReturn(psz, VERR_INVALID_PARAMETER);
4807
4808 /*
4809 * Allocate and initialize a new registration structure.
4810 */
4811 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
4812 if (pNewReg)
4813 {
4814 pNewReg->pNext = NULL;
4815 pNewReg->pFactory = pFactory;
4816 pNewReg->pSession = pSession;
4817 pNewReg->cchName = psz - &pFactory->szName[0];
4818
4819 /*
4820 * Add it to the tail of the list after checking for prior registration.
4821 */
4822 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4823 if (RT_SUCCESS(rc))
4824 {
4825 PSUPDRVFACTORYREG pPrev = NULL;
4826 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4827 while (pCur && pCur->pFactory != pFactory)
4828 {
4829 pPrev = pCur;
4830 pCur = pCur->pNext;
4831 }
4832 if (!pCur)
4833 {
4834 if (pPrev)
4835 pPrev->pNext = pNewReg;
4836 else
4837 pSession->pDevExt->pComponentFactoryHead = pNewReg;
4838 rc = VINF_SUCCESS;
4839 }
4840 else
4841 rc = VERR_ALREADY_EXISTS;
4842
4843 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4844 }
4845
4846 if (RT_FAILURE(rc))
4847 RTMemFree(pNewReg);
4848 }
4849 else
4850 rc = VERR_NO_MEMORY;
4851 return rc;
4852}
4853SUPR0_EXPORT_SYMBOL(SUPR0ComponentRegisterFactory);
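
/*
 * Illustrative sketch (not part of the driver) of what a minimal component
 * factory registered by a kernel session might look like.  The factory name,
 * interface table and UUID string are made-up placeholders; the SUPDRVFACTORY
 * members and the query callback shape follow the pfnQueryFactoryInterface
 * invocation in SUPR0ComponentQueryFactory below.
 */
#if 0 /* illustrative example only */
static struct { uint32_t uVersion; } g_ExampleFactoryIf = { 1 }; /* hypothetical interface table */

static DECLCALLBACK(void *) exampleQueryFactoryIf(PCSUPDRVFACTORY pSupDrvFactory, PSUPDRVSESSION pSession,
                                                  const char *pszInterfaceUuid)
{
    RT_NOREF(pSupDrvFactory, pSession);
    if (!strcmp(pszInterfaceUuid, "00000000-0000-0000-0000-000000000000" /* placeholder UUID */))
        return &g_ExampleFactoryIf;
    return NULL;
}

static SUPDRVFACTORY g_ExampleFactory =
{
    .szName                   = "example",
    .pfnQueryFactoryInterface = exampleQueryFactoryIf
};

/* A ring-0 (kernel) session would then call
 *      rc = SUPR0ComponentRegisterFactory(pSession, &g_ExampleFactory);
 * and undo it with SUPR0ComponentDeregisterFactory() before unloading. */
#endif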
4854
4855
4856/**
4857 * Deregister a component factory.
4858 *
4859 * @returns VBox status code.
4860 * @retval VINF_SUCCESS on success.
4861 * @retval VERR_NOT_FOUND if the factory wasn't registered.
4862 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4863 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4864 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4865 *
4866 * @param pSession The SUPDRV session (must be a ring-0 session).
4867 * @param pFactory Pointer to the component factory registration structure
4868 * previously passed to SUPR0ComponentRegisterFactory().
4869 *
4870 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
4871 */
4872SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4873{
4874 int rc;
4875
4876 /*
4877 * Validate parameters.
4878 */
4879 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4880 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4881 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4882
4883 /*
4884 * Take the lock and look for the registration record.
4885 */
4886 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4887 if (RT_SUCCESS(rc))
4888 {
4889 PSUPDRVFACTORYREG pPrev = NULL;
4890 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4891 while (pCur && pCur->pFactory != pFactory)
4892 {
4893 pPrev = pCur;
4894 pCur = pCur->pNext;
4895 }
4896 if (pCur)
4897 {
4898 if (!pPrev)
4899 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
4900 else
4901 pPrev->pNext = pCur->pNext;
4902
4903 pCur->pNext = NULL;
4904 pCur->pFactory = NULL;
4905 pCur->pSession = NULL;
4906 rc = VINF_SUCCESS;
4907 }
4908 else
4909 rc = VERR_NOT_FOUND;
4910
4911 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4912
4913 RTMemFree(pCur);
4914 }
4915 return rc;
4916}
4917SUPR0_EXPORT_SYMBOL(SUPR0ComponentDeregisterFactory);
4918
4919
4920/**
4921 * Queries a component factory.
4922 *
4923 * @returns VBox status code.
4924 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4925 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4926 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
4927 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
4928 *
4929 * @param pSession The SUPDRV session.
4930 * @param pszName The name of the component factory.
4931 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
4932 * @param ppvFactoryIf Where to store the factory interface.
4933 */
4934SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
4935{
4936 const char *pszEnd;
4937 size_t cchName;
4938 int rc;
4939
4940 /*
4941 * Validate parameters.
4942 */
4943 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4944
4945 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
4946 pszEnd = RTStrEnd(pszName, RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
4947 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4948 cchName = pszEnd - pszName;
4949
4950 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
4951 pszEnd = RTStrEnd(pszInterfaceUuid, RTUUID_STR_LENGTH);
4952 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4953
4954 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
4955 *ppvFactoryIf = NULL;
4956
4957 /*
4958 * Take the lock and try all factories by this name.
4959 */
4960 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4961 if (RT_SUCCESS(rc))
4962 {
4963 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4964 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
4965 while (pCur)
4966 {
4967 if ( pCur->cchName == cchName
4968 && !memcmp(pCur->pFactory->szName, pszName, cchName))
4969 {
4970 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
4971 if (pvFactory)
4972 {
4973 *ppvFactoryIf = pvFactory;
4974 rc = VINF_SUCCESS;
4975 break;
4976 }
4977 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
4978 }
4979
4980 /* next */
4981 pCur = pCur->pNext;
4982 }
4983
4984 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4985 }
4986 return rc;
4987}
4988SUPR0_EXPORT_SYMBOL(SUPR0ComponentQueryFactory);
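
/*
 * Illustrative sketch (not part of the driver): looking up the factory from
 * the previous example and asking it for a (placeholder) interface UUID.
 */
#if 0 /* illustrative example only */
static void *exampleQueryExampleIf(PSUPDRVSESSION pSession)
{
    void *pvIf = NULL;
    int rc = SUPR0ComponentQueryFactory(pSession, "example",
                                        "00000000-0000-0000-0000-000000000000" /* placeholder UUID */,
                                        &pvIf);
    return RT_SUCCESS(rc) ? pvIf : NULL;
}
#endif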
4989
4990
4991/**
4992 * Adds a memory object to the session.
4993 *
4994 * @returns IPRT status code.
4995 * @param pMem Memory tracking structure containing the
4996 * information to track.
4997 * @param pSession The session.
4998 */
4999static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
5000{
5001 PSUPDRVBUNDLE pBundle;
5002
5003 /*
5004 * Find free entry and record the allocation.
5005 */
5006 RTSpinlockAcquire(pSession->Spinlock);
5007 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
5008 {
5009 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
5010 {
5011 unsigned i;
5012 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
5013 {
5014 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
5015 {
5016 pBundle->cUsed++;
5017 pBundle->aMem[i] = *pMem;
5018 RTSpinlockRelease(pSession->Spinlock);
5019 return VINF_SUCCESS;
5020 }
5021 }
5022 AssertFailed(); /* !!this can't be happening!!! */
5023 }
5024 }
5025 RTSpinlockRelease(pSession->Spinlock);
5026
5027 /*
5028 * Need to allocate a new bundle.
5029 * Insert into the last entry in the bundle.
5030 */
5031 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
5032 if (!pBundle)
5033 return VERR_NO_MEMORY;
5034
5035 /* take last entry. */
5036 pBundle->cUsed++;
5037 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
5038
5039 /* insert into list. */
5040 RTSpinlockAcquire(pSession->Spinlock);
5041 pBundle->pNext = pSession->Bundle.pNext;
5042 pSession->Bundle.pNext = pBundle;
5043 RTSpinlockRelease(pSession->Spinlock);
5044
5045 return VINF_SUCCESS;
5046}
5047
5048
5049/**
5050 * Releases a memory object referenced by pointer and type.
5051 *
5052 * @returns IPRT status code.
5053 * @param pSession Session data.
5054 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
5055 * @param eType Memory type.
5056 */
5057static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
5058{
5059 PSUPDRVBUNDLE pBundle;
5060
5061 /*
5062 * Validate input.
5063 */
5064 if (!uPtr)
5065 {
5066 Log(("Illegal address %p\n", (void *)uPtr));
5067 return VERR_INVALID_PARAMETER;
5068 }
5069
5070 /*
5071 * Search for the address.
5072 */
5073 RTSpinlockAcquire(pSession->Spinlock);
5074 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
5075 {
5076 if (pBundle->cUsed > 0)
5077 {
5078 unsigned i;
5079 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
5080 {
5081 if ( pBundle->aMem[i].eType == eType
5082 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
5083 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
5084 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
5085 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
5086 )
5087 {
5088 /* Make a copy of it and release it outside the spinlock. */
5089 SUPDRVMEMREF Mem = pBundle->aMem[i];
5090 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
5091 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
5092 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
5093 RTSpinlockRelease(pSession->Spinlock);
5094
5095 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
5096 {
5097 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
5098 AssertRC(rc); /** @todo figure out how to handle this. */
5099 }
5100 if (Mem.MemObj != NIL_RTR0MEMOBJ)
5101 {
5102 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
5103 AssertRC(rc); /** @todo figure out how to handle this. */
5104 }
5105 return VINF_SUCCESS;
5106 }
5107 }
5108 }
5109 }
5110 RTSpinlockRelease(pSession->Spinlock);
5111 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
5112 return VERR_INVALID_PARAMETER;
5113}
5114
5115
5116/**
5117 * Opens an image. If it's the first time it's opened, the caller must upload
5118 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
5119 *
5120 * This is the 1st step of the loading.
5121 *
5122 * @returns IPRT status code.
5123 * @param pDevExt Device globals.
5124 * @param pSession Session data.
5125 * @param pReq The open request.
5126 */
5127static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
5128{
5129 int rc;
5130 PSUPDRVLDRIMAGE pImage;
5131 void *pv;
5132 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
5133 SUPDRV_CHECK_SMAP_SETUP();
5134 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5135 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithEverything=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithEverything));
5136
5137 /*
5138 * Check if we got an instance of the image already.
5139 */
5140 supdrvLdrLock(pDevExt);
5141 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5142 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5143 {
5144 if ( pImage->szName[cchName] == '\0'
5145 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
5146 {
5147 /** @todo Add an _1M (or something) per session reference. */
5148 if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
5149 {
5150 /** @todo check cbImageBits and cbImageWithEverything here; if they differ,
5151 * that indicates that the images are different. */
5152 pReq->u.Out.pvImageBase = pImage->pvImage;
5153 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
5154 pReq->u.Out.fNativeLoader = pImage->fNative;
5155 supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
5156 supdrvLdrUnlock(pDevExt);
5157 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5158 return VINF_SUCCESS;
5159 }
5160 supdrvLdrUnlock(pDevExt);
5161 Log(("supdrvIOCtl_LdrOpen: Too many existing references to '%s'!\n", pReq->u.In.szName));
5162 return VERR_TOO_MANY_REFERENCES;
5163 }
5164 }
5165 /* (not found - add it!) */
5166
5167 /* If the loader interface is locked down, make userland fail early */
5168 if (pDevExt->fLdrLockedDown)
5169 {
5170 supdrvLdrUnlock(pDevExt);
5171 Log(("supdrvIOCtl_LdrOpen: Not adding '%s' to image list, loader interface is locked down!\n", pReq->u.In.szName));
5172 return VERR_PERMISSION_DENIED;
5173 }
5174
5175 /* Stop if the caller doesn't wish to prepare for loading the image. */
5176 if (!pReq->u.In.cbImageBits)
5177 {
5178 supdrvLdrUnlock(pDevExt);
5179 Log(("supdrvIOCtl_LdrOpen: Returning VERR_MODULE_NOT_FOUND for '%s'!\n", pReq->u.In.szName));
5180 return VERR_MODULE_NOT_FOUND;
5181 }
5182
5183 /*
5184 * Allocate memory.
5185 */
5186 Assert(cchName < sizeof(pImage->szName));
5187 pv = RTMemAllocZ(sizeof(SUPDRVLDRIMAGE));
5188 if (!pv)
5189 {
5190 supdrvLdrUnlock(pDevExt);
5191 Log(("supdrvIOCtl_LdrOpen: RTMemAllocZ() failed\n"));
5192 return VERR_NO_MEMORY;
5193 }
5194 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5195
5196 /*
5197 * Setup and link in the LDR stuff.
5198 */
5199 pImage = (PSUPDRVLDRIMAGE)pv;
5200 pImage->pvImage = NULL;
5201#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5202 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
5203#else
5204 pImage->pvImageAlloc = NULL;
5205#endif
5206 pImage->cbImageWithEverything = pReq->u.In.cbImageWithEverything;
5207 pImage->cbImageBits = pReq->u.In.cbImageBits;
5208 pImage->cSymbols = 0;
5209 pImage->paSymbols = NULL;
5210 pImage->pachStrTab = NULL;
5211 pImage->cbStrTab = 0;
5212 pImage->cSegments = 0;
5213 pImage->paSegments = NULL;
5214 pImage->pfnModuleInit = NULL;
5215 pImage->pfnModuleTerm = NULL;
5216 pImage->pfnServiceReqHandler = NULL;
5217 pImage->uState = SUP_IOCTL_LDR_OPEN;
5218 pImage->cImgUsage = 0; /* Increased by supdrvLdrAddUsage later */
5219 pImage->pDevExt = pDevExt;
5220 pImage->pImageImport = NULL;
5221 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC;
5222 pImage->pWrappedModInfo = NULL;
5223 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
5224
5225 /*
5226 * Try loading it using the native loader; if that isn't supported, fall back
5227 * on the older method.
5228 */
5229 pImage->fNative = true;
5230 rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
5231 if (rc == VERR_NOT_SUPPORTED)
5232 {
5233#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5234 rc = RTR0MemObjAllocPage(&pImage->hMemObjImage, pImage->cbImageBits, true /*fExecutable*/);
5235 if (RT_SUCCESS(rc))
5236 {
5237 pImage->pvImage = RTR0MemObjAddress(pImage->hMemObjImage);
5238 pImage->fNative = false;
5239 }
5240#else
5241 pImage->pvImageAlloc = RTMemExecAlloc(pImage->cbImageBits + 31);
5242 pImage->pvImage = RT_ALIGN_P(pImage->pvImageAlloc, 32);
5243 pImage->fNative = false;
5244 rc = pImage->pvImageAlloc ? VINF_SUCCESS : VERR_NO_EXEC_MEMORY;
5245#endif
5246 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5247 }
5248 if (RT_SUCCESS(rc))
5249 rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
5250 if (RT_FAILURE(rc))
5251 {
5252 supdrvLdrUnlock(pDevExt);
5253 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
5254 RTMemFree(pImage);
5255 Log(("supdrvIOCtl_LdrOpen(%s): failed - %Rrc\n", pReq->u.In.szName, rc));
5256 return rc;
5257 }
5258 Assert(RT_VALID_PTR(pImage->pvImage) || RT_FAILURE(rc));
5259
5260 /*
5261 * Link it.
5262 */
5263 pImage->pNext = pDevExt->pLdrImages;
5264 pDevExt->pLdrImages = pImage;
5265
5266 pReq->u.Out.pvImageBase = pImage->pvImage;
5267 pReq->u.Out.fNeedsLoading = true;
5268 pReq->u.Out.fNativeLoader = pImage->fNative;
5269 supdrvOSLdrNotifyOpened(pDevExt, pImage, pReq->u.In.szFilename);
5270
5271 supdrvLdrUnlock(pDevExt);
5272 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5273 return VINF_SUCCESS;
5274}
5275
5276
5277/**
5278 * Formats a load error message.
5279 *
5280 * @returns @a rc
5281 * @param rc Return code.
5282 * @param pReq The request.
5283 * @param pszFormat The error message format string.
5284 * @param ... Arguments to the format string.
5285 */
5286int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
5287{
5288 va_list va;
5289 va_start(va, pszFormat);
5290 pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
5291 RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
5292 va_end(va);
5293 Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
5294 return rc;
5295}
5296
5297
5298/**
5299 * Worker that validates a pointer to an image entrypoint.
5300 *
5301 * Calls supdrvLdrLoadError on error.
5302 *
5303 * @returns IPRT status code.
5304 * @param pDevExt The device globals.
5305 * @param pImage The loader image.
5306 * @param pv The pointer into the image.
5307 * @param fMayBeNull Whether it may be NULL.
5308 * @param pbImageBits The image bits prepared by ring-3.
5309 * @param pszSymbol The entrypoint name or log name. If the symbol is
5310 * capitalized it signifies a specific symbol, otherwise it is
5311 * only for logging.
5312 * @param pReq The request for passing to supdrvLdrLoadError.
5313 *
5314 * @note Will leave the loader lock on failure!
5315 */
5316static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv, bool fMayBeNull,
5317 const uint8_t *pbImageBits, const char *pszSymbol, PSUPLDRLOAD pReq)
5318{
5319 if (!fMayBeNull || pv)
5320 {
5321 uint32_t iSeg;
5322
5323 /* Must be within the image bits: */
5324 uintptr_t const uRva = (uintptr_t)pv - (uintptr_t)pImage->pvImage;
5325 if (uRva >= pImage->cbImageBits)
5326 {
5327 supdrvLdrUnlock(pDevExt);
5328 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5329 "Invalid entry point address %p given for %s: RVA %#zx, image size %#zx",
5330 pv, pszSymbol, uRva, pImage->cbImageBits);
5331 }
5332
5333 /* Must be in an executable segment: */
5334 for (iSeg = 0; iSeg < pImage->cSegments; iSeg++)
5335 if (uRva - pImage->paSegments[iSeg].off < (uintptr_t)pImage->paSegments[iSeg].cb)
5336 {
5337 if (pImage->paSegments[iSeg].fProt & SUPLDR_PROT_EXEC)
5338 break;
5339 supdrvLdrUnlock(pDevExt);
5340 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5341 "Bad entry point %p given for %s: not executable (seg #%u: %#RX32 LB %#RX32 prot %#x)",
5342 pv, pszSymbol, iSeg, pImage->paSegments[iSeg].off, pImage->paSegments[iSeg].cb,
5343 pImage->paSegments[iSeg].fProt);
5344 }
5345 if (iSeg >= pImage->cSegments)
5346 {
5347 supdrvLdrUnlock(pDevExt);
5348 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5349 "Bad entry point %p given for %s: no matching segment found (RVA %#zx)!",
5350 pv, pszSymbol, uRva);
5351 }
5352
5353 if (pImage->fNative)
5354 {
5355 /** @todo pass pReq along to the native code. */
5356 int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits, pszSymbol);
5357 if (RT_FAILURE(rc))
5358 {
5359 supdrvLdrUnlock(pDevExt);
5360 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5361 "Bad entry point address %p for %s: rc=%Rrc\n", pv, pszSymbol, rc);
5362 }
5363 }
5364 }
5365 return VINF_SUCCESS;
5366}
5367
5368
5369/**
5370 * Loads the image bits.
5371 *
5372 * This is the 2nd step of the loading.
5373 *
5374 * @returns IPRT status code.
5375 * @param pDevExt Device globals.
5376 * @param pSession Session data.
5377 * @param pReq The request.
5378 */
5379static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
5380{
5381 PSUPDRVLDRUSAGE pUsage;
5382 PSUPDRVLDRIMAGE pImage;
5383 PSUPDRVLDRIMAGE pImageImport;
5384 int rc;
5385 SUPDRV_CHECK_SMAP_SETUP();
5386 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithEverything=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithEverything));
5387 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5388
5389 /*
5390 * Find the ldr image.
5391 */
5392 supdrvLdrLock(pDevExt);
5393 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5394
5395 pUsage = pSession->pLdrUsage;
5396 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5397 pUsage = pUsage->pNext;
5398 if (!pUsage)
5399 {
5400 supdrvLdrUnlock(pDevExt);
5401 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image not found");
5402 }
5403 pImage = pUsage->pImage;
5404
5405 /*
5406 * Validate input.
5407 */
5408 if ( pImage->cbImageWithEverything != pReq->u.In.cbImageWithEverything
5409 || pImage->cbImageBits != pReq->u.In.cbImageBits)
5410 {
5411 supdrvLdrUnlock(pDevExt);
5412 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %u(prep) != %u(load) or %u != %u",
5413 pImage->cbImageWithEverything, pReq->u.In.cbImageWithEverything, pImage->cbImageBits, pReq->u.In.cbImageBits);
5414 }
5415
5416 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
5417 {
5418 unsigned uState = pImage->uState;
5419 supdrvLdrUnlock(pDevExt);
5420 if (uState != SUP_IOCTL_LDR_LOAD)
5421 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
5422 pReq->u.Out.uErrorMagic = 0;
5423 return VERR_ALREADY_LOADED;
5424 }
5425
5426 /* If the loader interface is locked down, don't load new images */
5427 if (pDevExt->fLdrLockedDown)
5428 {
5429 supdrvLdrUnlock(pDevExt);
5430 return supdrvLdrLoadError(VERR_PERMISSION_DENIED, pReq, "Loader is locked down");
5431 }
5432
5433 /*
5434 * If the new image is a dependent of VMMR0.r0, resolve it via the
5435 * caller's usage list and make sure it's in the ready state.
5436 */
5437 pImageImport = NULL;
5438 if (pReq->u.In.fFlags & SUPLDRLOAD_F_DEP_VMMR0)
5439 {
5440 PSUPDRVLDRUSAGE pUsageDependency = pSession->pLdrUsage;
5441 while (pUsageDependency && pUsageDependency->pImage->pvImage != pDevExt->pvVMMR0)
5442 pUsageDependency = pUsageDependency->pNext;
5443 if (!pUsageDependency || !pDevExt->pvVMMR0)
5444 {
5445 supdrvLdrUnlock(pDevExt);
5446 return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 not loaded by session");
5447 }
5448 pImageImport = pUsageDependency->pImage;
5449 if (pImageImport->uState != SUP_IOCTL_LDR_LOAD)
5450 {
5451 supdrvLdrUnlock(pDevExt);
5452 return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 is not ready (state %#x)", pImageImport->uState);
5453 }
5454 }
5455
5456 /*
5457 * Copy the segments before we start using supdrvLdrValidatePointer for entrypoint validation.
5458 */
5459 pImage->cSegments = pReq->u.In.cSegments;
5460 {
5461 size_t cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
5462 pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSegments], cbSegments);
5463 if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
5464 pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
5465 else
5466 {
5467 supdrvLdrUnlock(pDevExt);
5468 return supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
5469 }
5470 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5471 }
5472
5473 /*
5474 * Validate entrypoints.
5475 */
5476 switch (pReq->u.In.eEPType)
5477 {
5478 case SUPLDRLOADEP_NOTHING:
5479 break;
5480
5481 case SUPLDRLOADEP_VMMR0:
5482 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, pReq->u.In.abImage, "VMMR0EntryFast", pReq);
5483 if (RT_FAILURE(rc))
5484 return rc;
5485 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, pReq->u.In.abImage, "VMMR0EntryEx", pReq);
5486 if (RT_FAILURE(rc))
5487 return rc;
5488
5489 /* Fail here if there is already a VMMR0 module. */
5490 if (pDevExt->pvVMMR0 != NULL)
5491 {
5492 supdrvLdrUnlock(pDevExt);
5493 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "There is already a VMMR0 module loaded (%p)", pDevExt->pvVMMR0);
5494 }
5495 break;
5496
5497 case SUPLDRLOADEP_SERVICE:
5498 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, pReq->u.In.abImage, "pfnServiceReq", pReq);
5499 if (RT_FAILURE(rc))
5500 return rc;
5501 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
5502 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
5503 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
5504 {
5505 supdrvLdrUnlock(pDevExt);
5506 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "apvReserved={%p,%p,%p} MBZ!",
5507 pReq->u.In.EP.Service.apvReserved[0], pReq->u.In.EP.Service.apvReserved[1],
5508 pReq->u.In.EP.Service.apvReserved[2]);
5509 }
5510 break;
5511
5512 default:
5513 supdrvLdrUnlock(pDevExt);
5514 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid eEPType=%d", pReq->u.In.eEPType);
5515 }
5516
5517 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, pReq->u.In.abImage, "ModuleInit", pReq);
5518 if (RT_FAILURE(rc))
5519 return rc;
5520 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, pReq->u.In.abImage, "ModuleTerm", pReq);
5521 if (RT_FAILURE(rc))
5522 return rc;
5523 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5524
5525 /*
5526 * Allocate and copy the tables if non-native.
5527 * (No need to do try/except as this is a buffered request.)
5528 */
5529 if (!pImage->fNative)
5530 {
5531 pImage->cbStrTab = pReq->u.In.cbStrTab;
5532 if (pImage->cbStrTab)
5533 {
5534 pImage->pachStrTab = (char *)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offStrTab], pImage->cbStrTab);
5535 if (!pImage->pachStrTab)
5536 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for string table: %#x", pImage->cbStrTab);
5537 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5538 }
5539
5540 pImage->cSymbols = pReq->u.In.cSymbols;
5541 if (RT_SUCCESS(rc) && pImage->cSymbols)
5542 {
5543 size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
5544 pImage->paSymbols = (PSUPLDRSYM)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSymbols], cbSymbols);
5545 if (!pImage->paSymbols)
5546 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
5547 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5548 }
5549 }
5550
5551 /*
5552 * Copy the bits and apply permissions / complete native loading.
5553 */
5554 if (RT_SUCCESS(rc))
5555 {
5556 pImage->uState = SUP_IOCTL_LDR_LOAD;
5557 pImage->pfnModuleInit = (PFNR0MODULEINIT)(uintptr_t)pReq->u.In.pfnModuleInit;
5558 pImage->pfnModuleTerm = (PFNR0MODULETERM)(uintptr_t)pReq->u.In.pfnModuleTerm;
5559
5560 if (pImage->fNative)
5561 rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.abImage, pReq);
5562 else
5563 {
5564#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5565 uint32_t i;
5566 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5567
5568 for (i = 0; i < pImage->cSegments; i++)
5569 {
5570 rc = RTR0MemObjProtect(pImage->hMemObjImage, pImage->paSegments[i].off, pImage->paSegments[i].cb,
5571 pImage->paSegments[i].fProt);
5572 if (RT_SUCCESS(rc))
5573 continue;
5574 if (rc == VERR_NOT_SUPPORTED)
5575 rc = VINF_SUCCESS;
5576 else
5577 rc = supdrvLdrLoadError(rc, pReq, "RTR0MemObjProtect failed on seg#%u %#RX32 LB %#RX32 fProt=%#x",
5578 i, pImage->paSegments[i].off, pImage->paSegments[i].cb, pImage->paSegments[i].fProt);
5579 break;
5580 }
5581#else
5582 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5583#endif
5584 Log(("vboxdrv: Loaded '%s' at %p\n", pImage->szName, pImage->pvImage));
5585 }
5586 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5587 }
5588
5589 /*
5590 * On success call the module initialization.
5591 */
5592 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
5593 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
5594 {
5595 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
5596 pDevExt->pLdrInitImage = pImage;
5597 pDevExt->hLdrInitThread = RTThreadNativeSelf();
5598 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5599 rc = pImage->pfnModuleInit(pImage);
5600 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5601 pDevExt->pLdrInitImage = NULL;
5602 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
5603 if (RT_FAILURE(rc))
5604 supdrvLdrLoadError(rc, pReq, "ModuleInit failed: %Rrc", rc);
5605 }
5606 if (RT_SUCCESS(rc))
5607 {
5608 /*
5609 * Publish any standard entry points.
5610 */
5611 switch (pReq->u.In.eEPType)
5612 {
5613 case SUPLDRLOADEP_VMMR0:
5614 Assert(!pDevExt->pvVMMR0);
5615 Assert(!pDevExt->pfnVMMR0EntryFast);
5616 Assert(!pDevExt->pfnVMMR0EntryEx);
5617 ASMAtomicWritePtrVoid(&pDevExt->pvVMMR0, pImage->pvImage);
5618 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryFast,
5619 (void *)(uintptr_t) pReq->u.In.EP.VMMR0.pvVMMR0EntryFast);
5620 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryEx,
5621 (void *)(uintptr_t) pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
5622 break;
5623 case SUPLDRLOADEP_SERVICE:
5624 pImage->pfnServiceReqHandler = (PFNSUPR0SERVICEREQHANDLER)(uintptr_t)pReq->u.In.EP.Service.pfnServiceReq;
5625 break;
5626 default:
5627 break;
5628 }
5629
5630 /*
5631 * Increase the usage counter of any imported image.
5632 */
5633 if (pImageImport)
5634 {
5635 pImageImport->cImgUsage++;
5636 if (pImageImport->cImgUsage == 2 && pImageImport->pWrappedModInfo)
5637 supdrvOSLdrRetainWrapperModule(pDevExt, pImageImport);
5638 pImage->pImageImport = pImageImport;
5639 }
5640
5641 /*
5642 * Done!
5643 */
5644 SUPR0Printf("vboxdrv: %RKv %s\n", pImage->pvImage, pImage->szName);
5645 pReq->u.Out.uErrorMagic = 0;
5646 pReq->u.Out.szError[0] = '\0';
5647 }
5648 else
5649 {
5650 /* Inform the tracing component in case ModuleInit registered TPs. */
5651 supdrvTracerModuleUnloading(pDevExt, pImage);
5652
5653 pImage->uState = SUP_IOCTL_LDR_OPEN;
5654 pImage->pfnModuleInit = NULL;
5655 pImage->pfnModuleTerm = NULL;
5656 pImage->pfnServiceReqHandler = NULL;
5657 pImage->cbStrTab = 0;
5658 RTMemFree(pImage->pachStrTab);
5659 pImage->pachStrTab = NULL;
5660 RTMemFree(pImage->paSymbols);
5661 pImage->paSymbols = NULL;
5662 pImage->cSymbols = 0;
5663 }
5664
5665 supdrvLdrUnlock(pDevExt);
5666 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5667 return rc;
5668}
5669
5670
5671/**
5672 * Registers a .r0 module wrapped in a native one and manually loaded.
5673 *
5674 * @returns VINF_SUCCESS or error code (no info statuses).
5675 * @param pDevExt Device globals.
5676 * @param pWrappedModInfo The wrapped module info.
5677 * @param pvNative OS specific information.
5678 * @param phMod Where to store the module handle.
5679 */
5680int VBOXCALL supdrvLdrRegisterWrappedModule(PSUPDRVDEVEXT pDevExt, PCSUPLDRWRAPPEDMODULE pWrappedModInfo,
5681 void *pvNative, void **phMod)
5682{
5683 size_t cchName;
5684 PSUPDRVLDRIMAGE pImage;
5685 PCSUPLDRWRAPMODSYMBOL paSymbols;
5686 uint16_t idx;
5687 const char *pszPrevSymbol;
5688 int rc;
5689 SUPDRV_CHECK_SMAP_SETUP();
5690 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5691
5692 /*
5693 * Validate input.
5694 */
5695 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
5696 *phMod = NULL;
5697 AssertPtrReturn(pDevExt, VERR_INTERNAL_ERROR_2);
5698
5699 AssertPtrReturn(pWrappedModInfo, VERR_INVALID_POINTER);
5700 AssertMsgReturn(pWrappedModInfo->uMagic == SUPLDRWRAPPEDMODULE_MAGIC,
5701 ("uMagic=%#x, expected %#x\n", pWrappedModInfo->uMagic, SUPLDRWRAPPEDMODULE_MAGIC),
5702 VERR_INVALID_MAGIC);
5703 AssertMsgReturn(pWrappedModInfo->uVersion == SUPLDRWRAPPEDMODULE_VERSION,
5704 ("Unsupported uVersion=%#x, current version %#x\n", pWrappedModInfo->uVersion, SUPLDRWRAPPEDMODULE_VERSION),
5705 VERR_VERSION_MISMATCH);
5706 AssertMsgReturn(pWrappedModInfo->uEndMagic == SUPLDRWRAPPEDMODULE_MAGIC,
5707 ("uEndMagic=%#x, expected %#x\n", pWrappedModInfo->uEndMagic, SUPLDRWRAPPEDMODULE_MAGIC),
5708 VERR_INVALID_MAGIC);
5709 AssertMsgReturn(pWrappedModInfo->fFlags <= SUPLDRWRAPPEDMODULE_F_VMMR0, ("Unknown flags in: %#x\n", pWrappedModInfo->fFlags),
5710 VERR_INVALID_FLAGS);
5711
5712 /* szName: */
5713 AssertReturn(RTStrEnd(pWrappedModInfo->szName, sizeof(pWrappedModInfo->szName)) != NULL, VERR_INVALID_NAME);
5714 AssertReturn(supdrvIsLdrModuleNameValid(pWrappedModInfo->szName), VERR_INVALID_NAME);
5715 AssertCompile(sizeof(pImage->szName) == sizeof(pWrappedModInfo->szName));
5716 cchName = strlen(pWrappedModInfo->szName);
5717
5718 /* Image range: */
5719 AssertPtrReturn(pWrappedModInfo->pvImageStart, VERR_INVALID_POINTER);
5720 AssertPtrReturn(pWrappedModInfo->pvImageEnd, VERR_INVALID_POINTER);
5721 AssertReturn((uintptr_t)pWrappedModInfo->pvImageEnd > (uintptr_t)pWrappedModInfo->pvImageStart, VERR_INVALID_PARAMETER);
5722
5723 /* Symbol table: */
5724 AssertMsgReturn(pWrappedModInfo->cSymbols <= _8K, ("Too many symbols: %u, max 8192\n", pWrappedModInfo->cSymbols),
5725 VERR_TOO_MANY_SYMLINKS);
5726 pszPrevSymbol = "\x7f";
5727 paSymbols = pWrappedModInfo->paSymbols;
5728 idx = pWrappedModInfo->cSymbols;
5729 while (idx-- > 0)
5730 {
5731 const char *pszSymbol = paSymbols[idx].pszSymbol;
5732 AssertMsgReturn(RT_VALID_PTR(pszSymbol) && RT_VALID_PTR(paSymbols[idx].pfnValue),
5733 ("paSymbols[%u]: %p/%p\n", idx, pszSymbol, paSymbols[idx].pfnValue),
5734 VERR_INVALID_POINTER);
5735 AssertReturn(*pszSymbol != '\0', VERR_EMPTY_STRING);
5736 AssertMsgReturn(strcmp(pszSymbol, pszPrevSymbol) < 0,
5737 ("symbol table out of order at index %u: '%s' vs '%s'\n", idx, pszSymbol, pszPrevSymbol),
5738 VERR_WRONG_ORDER);
5739 pszPrevSymbol = pszSymbol;
5740 }
5741
5742 /* Standard entry points: */
5743 AssertPtrNullReturn(pWrappedModInfo->pfnModuleInit, VERR_INVALID_POINTER);
5744 AssertPtrNullReturn(pWrappedModInfo->pfnModuleTerm, VERR_INVALID_POINTER);
5745 AssertReturn((uintptr_t)pWrappedModInfo->pfnModuleInit != (uintptr_t)pWrappedModInfo->pfnModuleTerm || pWrappedModInfo->pfnModuleInit == NULL,
5746 VERR_INVALID_PARAMETER);
5747 if (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0)
5748 {
5749 AssertReturn(pWrappedModInfo->pfnServiceReqHandler == NULL, VERR_INVALID_PARAMETER);
5750 AssertPtrReturn(pWrappedModInfo->pfnVMMR0EntryFast, VERR_INVALID_POINTER);
5751 AssertPtrReturn(pWrappedModInfo->pfnVMMR0EntryEx, VERR_INVALID_POINTER);
5752 AssertReturn(pWrappedModInfo->pfnVMMR0EntryFast != pWrappedModInfo->pfnVMMR0EntryEx, VERR_INVALID_PARAMETER);
5753 }
5754 else
5755 {
5756 AssertPtrNullReturn(pWrappedModInfo->pfnServiceReqHandler, VERR_INVALID_POINTER);
5757 AssertReturn(pWrappedModInfo->pfnVMMR0EntryFast == NULL, VERR_INVALID_PARAMETER);
5758 AssertReturn(pWrappedModInfo->pfnVMMR0EntryEx == NULL, VERR_INVALID_PARAMETER);
5759 }
5760
5761 /*
5762 * Check if we got an instance of the image already.
5763 */
5764 supdrvLdrLock(pDevExt);
5765 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5766 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5767 {
5768 if ( pImage->szName[cchName] == '\0'
5769 && !memcmp(pImage->szName, pWrappedModInfo->szName, cchName))
5770 {
5771 supdrvLdrUnlock(pDevExt);
5772 Log(("supdrvLdrRegisterWrappedModule: '%s' already loaded!\n", pWrappedModInfo->szName));
5773 return VERR_ALREADY_LOADED;
5774 }
5775 }
5776 /* (not found - add it!) */
5777
5778 /* If the loader interface is locked down, make userland fail early */
5779 if (pDevExt->fLdrLockedDown)
5780 {
5781 supdrvLdrUnlock(pDevExt);
5782 Log(("supdrvLdrRegisterWrappedModule: Not adding '%s' to image list, loader interface is locked down!\n", pWrappedModInfo->szName));
5783 return VERR_PERMISSION_DENIED;
5784 }
5785
5786 /* Only one VMMR0: */
5787 if ( pDevExt->pvVMMR0 != NULL
5788 && (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0))
5789 {
5790 supdrvLdrUnlock(pDevExt);
5791 Log(("supdrvLdrRegisterWrappedModule: Rejecting '%s' as we already got a VMMR0 module!\n", pWrappedModInfo->szName));
5792 return VERR_ALREADY_EXISTS;
5793 }
5794
5795 /*
5796 * Allocate memory.
5797 */
5798 Assert(cchName < sizeof(pImage->szName));
5799 pImage = (PSUPDRVLDRIMAGE)RTMemAllocZ(sizeof(SUPDRVLDRIMAGE));
5800 if (!pImage)
5801 {
5802 supdrvLdrUnlock(pDevExt);
5803 Log(("supdrvLdrRegisterWrappedModule: RTMemAllocZ() failed\n"));
5804 return VERR_NO_MEMORY;
5805 }
5806 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5807
5808 /*
5809 * Setup and link in the LDR stuff.
5810 */
5811 pImage->pvImage = (void *)pWrappedModInfo->pvImageStart;
5812#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5813 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
5814#else
5815 pImage->pvImageAlloc = NULL;
5816#endif
5817 pImage->cbImageWithEverything
5818 = pImage->cbImageBits = (uintptr_t)pWrappedModInfo->pvImageEnd - (uintptr_t)pWrappedModInfo->pvImageStart;
5819 pImage->cSymbols = 0;
5820 pImage->paSymbols = NULL;
5821 pImage->pachStrTab = NULL;
5822 pImage->cbStrTab = 0;
5823 pImage->cSegments = 0;
5824 pImage->paSegments = NULL;
5825 pImage->pfnModuleInit = pWrappedModInfo->pfnModuleInit;
5826 pImage->pfnModuleTerm = pWrappedModInfo->pfnModuleTerm;
5827 pImage->pfnServiceReqHandler = NULL; /* Only setting this after module init */
5828 pImage->uState = SUP_IOCTL_LDR_LOAD;
5829 pImage->cImgUsage = 1; /* Held by the wrapper module till unload. */
5830 pImage->pDevExt = pDevExt;
5831 pImage->pImageImport = NULL;
5832 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC;
5833 pImage->pWrappedModInfo = pWrappedModInfo;
5834 pImage->pvWrappedNative = pvNative;
5835 pImage->fNative = true;
5836 memcpy(pImage->szName, pWrappedModInfo->szName, cchName + 1);
5837
5838 /*
5839 * Link it.
5840 */
5841 pImage->pNext = pDevExt->pLdrImages;
5842 pDevExt->pLdrImages = pImage;
5843
5844 /*
5845 * Call module init function if found.
5846 */
5847 rc = VINF_SUCCESS;
5848 if (pImage->pfnModuleInit)
5849 {
5850 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
5851 pDevExt->pLdrInitImage = pImage;
5852 pDevExt->hLdrInitThread = RTThreadNativeSelf();
5853 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5854 rc = pImage->pfnModuleInit(pImage);
5855 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5856 pDevExt->pLdrInitImage = NULL;
5857 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
5858 }
5859 if (RT_SUCCESS(rc))
5860 {
5861 /*
5862 * Update entry points.
5863 */
5864 if (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0)
5865 {
5866 Assert(!pDevExt->pvVMMR0);
5867 Assert(!pDevExt->pfnVMMR0EntryFast);
5868 Assert(!pDevExt->pfnVMMR0EntryEx);
5869 ASMAtomicWritePtrVoid(&pDevExt->pvVMMR0, pImage->pvImage);
5870 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryFast,
5871 (void *)(uintptr_t) pWrappedModInfo->pfnVMMR0EntryFast);
5872 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryEx,
5873 (void *)(uintptr_t) pWrappedModInfo->pfnVMMR0EntryEx);
5874 }
5875 else
5876 pImage->pfnServiceReqHandler = pWrappedModInfo->pfnServiceReqHandler;
5877#ifdef IN_RING3
5878# error "WTF?"
5879#endif
5880 *phMod = pImage;
5881 }
5882 else
5883 {
5884 /*
5885 * Module init failed - bail, no module term callout.
5886 */
5887 SUPR0Printf("ModuleInit failed for '%s': %Rrc\n", pImage->szName, rc);
5888
5889 pImage->pfnModuleTerm = NULL;
5890 pImage->uState = SUP_IOCTL_LDR_OPEN;
5891 supdrvLdrFree(pDevExt, pImage);
5892 }
5893
5894 supdrvLdrUnlock(pDevExt);
5895 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5896 return rc; /* VINF_SUCCESS unless pfnModuleInit failed. */
5897}
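
/*
 * Illustrative sketch (not part of the driver): the symbol table passed in
 * SUPLDRWRAPPEDMODULE::paSymbols must be sorted in strictly ascending strcmp()
 * order -- the validation loop above walks it backwards and rejects
 * out-of-order or duplicate names, and supdrvLdrQuerySymbolWorker below
 * binary-searches it.  The exported function is a placeholder and the exact
 * SUPLDRWRAPMODSYMBOL layout and pfnValue type are assumptions; only the
 * pszSymbol and pfnValue member names are taken from the code in this file.
 */
#if 0 /* illustrative example only */
static DECLCALLBACK(int) exampleWrappedExport(void) { return 0; } /* placeholder export */

static SUPLDRWRAPMODSYMBOL const g_aExampleWrappedSymbols[] =
{
    { .pszSymbol = "exampleWrappedExportA", .pfnValue = (PFNRT)exampleWrappedExport },
    { .pszSymbol = "exampleWrappedExportB", .pfnValue = (PFNRT)exampleWrappedExport },
    { .pszSymbol = "exampleWrappedExportC", .pfnValue = (PFNRT)exampleWrappedExport },
};
#endif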
5898
5899
5900/**
5901 * Decrements SUPDRVLDRIMAGE::cImgUsage when two or greater.
5902 *
5903 * @param pDevExt Device globals.
5904 * @param pImage The image.
5905 * @param cReference Number of references being removed.
5906 */
5907DECLINLINE(void) supdrvLdrSubtractUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, uint32_t cReference)
5908{
5909 Assert(cReference > 0);
5910 Assert(pImage->cImgUsage > cReference);
5911 pImage->cImgUsage -= cReference;
5912 if (pImage->cImgUsage == 1 && pImage->pWrappedModInfo)
5913 supdrvOSLdrReleaseWrapperModule(pDevExt, pImage);
5914}
5915
5916
5917/**
5918 * Frees a previously loaded (prep'ed) image.
5919 *
5920 * @returns IPRT status code.
5921 * @param pDevExt Device globals.
5922 * @param pSession Session data.
5923 * @param pReq The request.
5924 */
5925static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
5926{
5927 int rc;
5928 PSUPDRVLDRUSAGE pUsagePrev;
5929 PSUPDRVLDRUSAGE pUsage;
5930 PSUPDRVLDRIMAGE pImage;
5931 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
5932
5933 /*
5934 * Find the ldr image.
5935 */
5936 supdrvLdrLock(pDevExt);
5937 pUsagePrev = NULL;
5938 pUsage = pSession->pLdrUsage;
5939 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5940 {
5941 pUsagePrev = pUsage;
5942 pUsage = pUsage->pNext;
5943 }
5944 if (!pUsage)
5945 {
5946 supdrvLdrUnlock(pDevExt);
5947 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
5948 return VERR_INVALID_HANDLE;
5949 }
5950 if (pUsage->cRing3Usage == 0)
5951 {
5952 supdrvLdrUnlock(pDevExt);
5953 Log(("SUP_IOCTL_LDR_FREE: No ring-3 reference to the image!\n"));
5954 return VERR_CALLER_NO_REFERENCE;
5955 }
5956
5957 /*
5958 * Check if we can remove anything.
5959 */
5960 rc = VINF_SUCCESS;
5961 pImage = pUsage->pImage;
5962 Log(("SUP_IOCTL_LDR_FREE: pImage=%p %s cImgUsage=%d r3=%d r0=%u\n",
5963 pImage, pImage->szName, pImage->cImgUsage, pUsage->cRing3Usage, pUsage->cRing0Usage));
5964 if (pImage->cImgUsage <= 1 || pUsage->cRing3Usage + pUsage->cRing0Usage <= 1)
5965 {
5966 /*
5967 * Check if there are any objects with destructors in the image; if
5968 * so, leave it for the session cleanup routine so we get a chance to
5969 * clean things up in the right order and not leave them all dangling.
5970 */
5971 RTSpinlockAcquire(pDevExt->Spinlock);
5972 if (pImage->cImgUsage <= 1)
5973 {
5974 PSUPDRVOBJ pObj;
5975 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
5976 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5977 {
5978 rc = VERR_DANGLING_OBJECTS;
5979 break;
5980 }
5981 }
5982 else
5983 {
5984 PSUPDRVUSAGE pGenUsage;
5985 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
5986 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5987 {
5988 rc = VERR_DANGLING_OBJECTS;
5989 break;
5990 }
5991 }
5992 RTSpinlockRelease(pDevExt->Spinlock);
5993 if (rc == VINF_SUCCESS)
5994 {
5995 /* unlink it */
5996 if (pUsagePrev)
5997 pUsagePrev->pNext = pUsage->pNext;
5998 else
5999 pSession->pLdrUsage = pUsage->pNext;
6000
6001 /* free it */
6002 pUsage->pImage = NULL;
6003 pUsage->pNext = NULL;
6004 RTMemFree(pUsage);
6005
6006 /*
6007 * Dereference the image.
6008 */
6009 if (pImage->cImgUsage <= 1)
6010 supdrvLdrFree(pDevExt, pImage);
6011 else
6012 supdrvLdrSubtractUsage(pDevExt, pImage, 1);
6013 }
6014 else
6015 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
6016 }
6017 else
6018 {
6019 /*
6020 * Dereference both image and usage.
6021 */
6022 pUsage->cRing3Usage--;
6023 supdrvLdrSubtractUsage(pDevExt, pImage, 1);
6024 }
6025
6026 supdrvLdrUnlock(pDevExt);
6027 return rc;
6028}
6029
6030
6031/**
6032 * Deregisters a wrapped .r0 module.
6033 *
6034 * @param pDevExt Device globals.
6035 * @param pWrappedModInfo The wrapped module info.
6036 * @param phMod Where the module handle is stored (NIL'ed on
6037 * success).
6038 */
6039int VBOXCALL supdrvLdrDeregisterWrappedModule(PSUPDRVDEVEXT pDevExt, PCSUPLDRWRAPPEDMODULE pWrappedModInfo, void **phMod)
6040{
6041 PSUPDRVLDRIMAGE pImage;
6042 uint32_t cSleeps;
6043
6044 /*
6045 * Validate input.
6046 */
6047 AssertPtrReturn(pWrappedModInfo, VERR_INVALID_POINTER);
6048 AssertMsgReturn(pWrappedModInfo->uMagic == SUPLDRWRAPPEDMODULE_MAGIC,
6049 ("uMagic=%#x, expected %#x\n", pWrappedModInfo->uMagic, SUPLDRWRAPPEDMODULE_MAGIC),
6050 VERR_INVALID_MAGIC);
6051 AssertMsgReturn(pWrappedModInfo->uEndMagic == SUPLDRWRAPPEDMODULE_MAGIC,
6052 ("uEndMagic=%#x, expected %#x\n", pWrappedModInfo->uEndMagic, SUPLDRWRAPPEDMODULE_MAGIC),
6053 VERR_INVALID_MAGIC);
6054
6055 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
6056 pImage = *(PSUPDRVLDRIMAGE *)phMod;
6057 if (!pImage)
6058 return VINF_SUCCESS;
6059 AssertPtrReturn(pImage, VERR_INVALID_POINTER);
6060 AssertMsgReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, ("pImage=%p uMagic=%#x\n", pImage, pImage->uMagic),
6061 VERR_INVALID_MAGIC);
6062 AssertMsgReturn(pImage->pvImage == pWrappedModInfo->pvImageStart,
6063 ("pWrappedModInfo(%p)->pvImageStart=%p vs. pImage(=%p)->pvImage=%p\n",
6064 pWrappedModInfo, pWrappedModInfo->pvImageStart, pImage, pImage->pvImage),
6065 VERR_MISMATCH);
6066
6067 AssertPtrReturn(pDevExt, VERR_INVALID_POINTER);
6068
6069 /*
6070 * Try to free it, but first we have to wait for its usage count to reach 1 (ours).
6071 */
6072 supdrvLdrLock(pDevExt);
6073 for (cSleeps = 0; ; cSleeps++)
6074 {
6075 PSUPDRVLDRIMAGE pCur;
6076
6077 /* Check that the image is in the list. */
6078 for (pCur = pDevExt->pLdrImages; pCur; pCur = pCur->pNext)
6079 if (pCur == pImage)
6080 break;
6081 AssertBreak(pCur == pImage);
6082
6083 /* Anyone still using it? */
6084 if (pImage->cImgUsage <= 1)
6085 break;
6086
6087 /* Someone is using it, wait and check again. */
6088 if (!(cSleeps % 60))
6089 SUPR0Printf("supdrvLdrUnregisterWrappedModule: Still %u users of wrapped image '%s' ...\n",
6090 pImage->cImgUsage, pImage->szName);
6091 supdrvLdrUnlock(pDevExt);
6092 RTThreadSleep(1000);
6093 supdrvLdrLock(pDevExt);
6094 }
6095
6096 /* We're the last 'user', free it. */
6097 supdrvLdrFree(pDevExt, pImage);
6098
6099 supdrvLdrUnlock(pDevExt);
6100
6101 *phMod = NULL;
6102 return VINF_SUCCESS;
6103}
6104
6105
6106/**
6107 * Locks down the image loader interface.
6108 *
6109 * @returns IPRT status code.
6110 * @param pDevExt Device globals.
6111 */
6112static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt)
6113{
6114 LogFlow(("supdrvIOCtl_LdrLockDown:\n"));
6115
6116 supdrvLdrLock(pDevExt);
6117 if (!pDevExt->fLdrLockedDown)
6118 {
6119 pDevExt->fLdrLockedDown = true;
6120 Log(("supdrvIOCtl_LdrLockDown: Image loader interface locked down\n"));
6121 }
6122 supdrvLdrUnlock(pDevExt);
6123
6124 return VINF_SUCCESS;
6125}
6126
6127
6128/**
6129 * Worker for getting the address of a symbol in an image.
6130 *
6131 * @returns IPRT status code.
6132 * @param pDevExt Device globals.
6133 * @param pImage The image to search.
6134 * @param pszSymbol The symbol name.
6135 * @param cchSymbol The length of the symbol name.
6136 * @param ppvValue Where to return the symbol address.
6137 * @note Caller owns the loader lock.
6138 */
6139static int supdrvLdrQuerySymbolWorker(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage,
6140 const char *pszSymbol, size_t cchSymbol, void **ppvValue)
6141{
6142 int rc = VERR_SYMBOL_NOT_FOUND;
6143 if (pImage->fNative && !pImage->pWrappedModInfo)
6144 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pszSymbol, cchSymbol, ppvValue);
6145 else if (pImage->fNative && pImage->pWrappedModInfo)
6146 {
6147 PCSUPLDRWRAPMODSYMBOL paSymbols = pImage->pWrappedModInfo->paSymbols;
6148 uint32_t iEnd = pImage->pWrappedModInfo->cSymbols;
6149 uint32_t iStart = 0;
6150 while (iStart < iEnd)
6151 {
6152 uint32_t const i = iStart + (iEnd - iStart) / 2;
6153 int const iDiff = strcmp(paSymbols[i].pszSymbol, pszSymbol);
6154 if (iDiff < 0)
6155 iStart = i + 1;
6156 else if (iDiff > 0)
6157 iEnd = i;
6158 else
6159 {
6160 *ppvValue = (void *)(uintptr_t)paSymbols[i].pfnValue;
6161 rc = VINF_SUCCESS;
6162 break;
6163 }
6164 }
6165#ifdef VBOX_STRICT
6166 if (rc != VINF_SUCCESS)
6167 for (iStart = 0, iEnd = pImage->pWrappedModInfo->cSymbols; iStart < iEnd; iStart++)
6168 Assert(strcmp(paSymbols[iStart].pszSymbol, pszSymbol));
6169#endif
6170 }
6171 else
6172 {
6173 const char *pchStrings = pImage->pachStrTab;
6174 PSUPLDRSYM paSyms = pImage->paSymbols;
6175 uint32_t i;
6176 Assert(!pImage->pWrappedModInfo);
6177 for (i = 0; i < pImage->cSymbols; i++)
6178 {
6179 if ( paSyms[i].offName + cchSymbol + 1 <= pImage->cbStrTab
6180 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cchSymbol + 1))
6181 {
6182 /*
6183 * Note! The int32_t is for native loading on solaris where the data
6184 * and text segments are in very different places.
6185 */
6186 *ppvValue = (uint8_t *)pImage->pvImage + (int32_t)paSyms[i].offSymbol;
6187 rc = VINF_SUCCESS;
6188 break;
6189 }
6190 }
6191 }
6192 return rc;
6193}
6194
6195
6196/**
6197 * Queries the address of a symbol in an open image.
6198 *
6199 * @returns IPRT status code.
6200 * @param pDevExt Device globals.
6201 * @param pSession Session data.
6202 * @param pReq The request buffer.
6203 */
6204static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
6205{
6206 PSUPDRVLDRIMAGE pImage;
6207 PSUPDRVLDRUSAGE pUsage;
6208 const size_t cchSymbol = strlen(pReq->u.In.szSymbol);
6209 void *pvSymbol = NULL;
6210 int rc;
6211 Log3(("supdrvIOCtl_LdrQuerySymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
6212
6213 /*
6214 * Find the ldr image.
6215 */
6216 supdrvLdrLock(pDevExt);
6217
6218 pUsage = pSession->pLdrUsage;
6219 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
6220 pUsage = pUsage->pNext;
6221 if (pUsage)
6222 {
6223 pImage = pUsage->pImage;
6224 if (pImage->uState == SUP_IOCTL_LDR_LOAD)
6225 {
6226 /*
6227 * Search the image exports / symbol strings.
6228 */
6229 rc = supdrvLdrQuerySymbolWorker(pDevExt, pImage, pReq->u.In.szSymbol, cchSymbol, &pvSymbol);
6230 }
6231 else
6232 {
6233 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", pImage->uState, pImage->uState));
6234 rc = VERR_WRONG_ORDER;
6235 }
6236 }
6237 else
6238 {
6239 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
6240 rc = VERR_INVALID_HANDLE;
6241 }
6242
6243 supdrvLdrUnlock(pDevExt);
6244
6245 pReq->u.Out.pvSymbol = pvSymbol;
6246 return rc;
6247}
6248
6249
6250/**
6251 * Gets the address of a symbol in an open image or the support driver.
6252 *
6253 * @returns VBox status code.
6254 * @param pDevExt Device globals.
6255 * @param pSession Session data.
6256 * @param pReq The request buffer.
6257 */
6258static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
6259{
6260 const char *pszSymbol = pReq->u.In.pszSymbol;
6261 const char *pszModule = pReq->u.In.pszModule;
6262 size_t cchSymbol;
6263 char const *pszEnd;
6264 uint32_t i;
6265 int rc;
6266
6267 /*
6268 * Input validation.
6269 */
6270 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
6271 pszEnd = RTStrEnd(pszSymbol, 512);
6272 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
6273 cchSymbol = pszEnd - pszSymbol;
6274
6275 if (pszModule)
6276 {
6277 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
6278 pszEnd = RTStrEnd(pszModule, 64);
6279 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
6280 }
6281 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
6282
6283 if ( !pszModule
6284 || !strcmp(pszModule, "SupDrv"))
6285 {
6286 /*
6287 * Search the support driver export table.
6288 */
6289 rc = VERR_SYMBOL_NOT_FOUND;
6290 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
6291 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
6292 {
6293 pReq->u.Out.pfnSymbol = (PFNRT)(uintptr_t)g_aFunctions[i].pfn;
6294 rc = VINF_SUCCESS;
6295 break;
6296 }
6297 }
6298 else
6299 {
6300 /*
6301 * Find the loader image.
6302 */
6303 PSUPDRVLDRIMAGE pImage;
6304
6305 supdrvLdrLock(pDevExt);
6306
6307 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
6308 if (!strcmp(pImage->szName, pszModule))
6309 break;
6310 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
6311 {
6312 /*
6313 * Search the image exports / symbol strings. Do usage counting on the session.
6314 */
6315 rc = supdrvLdrQuerySymbolWorker(pDevExt, pImage, pszSymbol, cchSymbol, (void **)&pReq->u.Out.pfnSymbol);
6316 if (RT_SUCCESS(rc))
6317 rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
6318 }
6319 else
6320 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
6321
6322 supdrvLdrUnlock(pDevExt);
6323 }
6324 return rc;
6325}
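/*
 * A minimal, illustrative sketch of how an IDC client might fill in this request.
 * Only the fields consumed above are shown; the symbol name is purely an example
 * and the submission path (the host specific IDC channel) is outside this file.
 *
 * @code
 *      SUPDRVIDCREQGETSYM Req;
 *      RT_ZERO(Req);
 *      Req.u.In.pszModule = NULL;              // NULL or "SupDrv" searches the g_aFunctions export table
 *      Req.u.In.pszSymbol = "SUPR0LdrLock";    // illustrative symbol name
 *      // ... submit the request via the host specific IDC channel ...
 *      // On success, Req.u.Out.pfnSymbol holds the symbol address.
 * @endcode
 */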
6326
6327
6328/**
6329 * Looks up a symbol in g_aFunctions (also handling the special g_SUPGlobalInfoPage export).
6330 *
6331 * @returns VINF_SUCCESS on success, VERR_SYMBOL_NOT_FOUND on failure.
6332 * @param pszSymbol The symbol to look up.
6333 * @param puValue Where to return the value.
6334 */
6335int VBOXCALL supdrvLdrGetExportedSymbol(const char *pszSymbol, uintptr_t *puValue)
6336{
6337 uint32_t i;
6338 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
6339 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
6340 {
6341 *puValue = (uintptr_t)g_aFunctions[i].pfn;
6342 return VINF_SUCCESS;
6343 }
6344
6345 if (!strcmp(pszSymbol, "g_SUPGlobalInfoPage"))
6346 {
6347 *puValue = (uintptr_t)g_pSUPGlobalInfoPage;
6348 return VINF_SUCCESS;
6349 }
6350
6351 return VERR_SYMBOL_NOT_FOUND;
6352}
6353
6354
6355/**
6356 * Adds a usage reference for an image to the specified session.
6357 *
6358 * Called while owning the loader semaphore.
6359 *
6360 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
6361 * @param pDevExt Pointer to device extension.
6362 * @param pSession Session in question.
6363 * @param pImage Image which the session is using.
6364 * @param fRing3Usage Set if it's ring-3 usage, clear if ring-0.
6365 */
6366static int supdrvLdrAddUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage)
6367{
6368 PSUPDRVLDRUSAGE pUsage;
6369 LogFlow(("supdrvLdrAddUsage: pImage=%p %d\n", pImage, fRing3Usage));
6370
6371 /*
6372 * Referenced it already?
6373 */
6374 pUsage = pSession->pLdrUsage;
6375 while (pUsage)
6376 {
6377 if (pUsage->pImage == pImage)
6378 {
6379 if (fRing3Usage)
6380 pUsage->cRing3Usage++;
6381 else
6382 pUsage->cRing0Usage++;
6383 Assert(pImage->cImgUsage > 1 || !pImage->pWrappedModInfo);
6384 pImage->cImgUsage++;
6385 return VINF_SUCCESS;
6386 }
6387 pUsage = pUsage->pNext;
6388 }
6389
6390 /*
6391 * Allocate new usage record.
6392 */
6393 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
6394 AssertReturn(pUsage, VERR_NO_MEMORY);
6395 pUsage->cRing3Usage = fRing3Usage ? 1 : 0;
6396 pUsage->cRing0Usage = fRing3Usage ? 0 : 1;
6397 pUsage->pImage = pImage;
6398 pUsage->pNext = pSession->pLdrUsage;
6399 pSession->pLdrUsage = pUsage;
6400
6401 /*
6402     * Wrapped modules need to retain a native module reference.
6403 */
6404 pImage->cImgUsage++;
6405 if (pImage->cImgUsage == 2 && pImage->pWrappedModInfo)
6406 supdrvOSLdrRetainWrapperModule(pDevExt, pImage);
6407
6408 return VINF_SUCCESS;
6409}
6410
6411
6412/**
6413 * Frees a load image.
6414 *
6415 * @param pDevExt Pointer to device extension.
6416 * @param   pImage      Pointer to the image we're going to free.
6417 *                      This image must exist!
6418 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
6419 */
6420static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
6421{
6422 unsigned cLoops;
6423 for (cLoops = 0; ; cLoops++)
6424 {
6425 PSUPDRVLDRIMAGE pImagePrev;
6426 PSUPDRVLDRIMAGE pImageImport;
6427 LogFlow(("supdrvLdrFree: pImage=%p %s [loop %u]\n", pImage, pImage->szName, cLoops));
6428 AssertBreak(cLoops < 2);
6429
6430 /*
6431 * Warn if we're releasing images while the image loader interface is
6432 * locked down -- we won't be able to reload them!
6433 */
6434 if (pDevExt->fLdrLockedDown)
6435 Log(("supdrvLdrFree: Warning: unloading '%s' image, while loader interface is locked down!\n", pImage->szName));
6436
6437        /* find it - argh, we should've used a doubly linked list. */
6438 Assert(pDevExt->pLdrImages);
6439 pImagePrev = NULL;
6440 if (pDevExt->pLdrImages != pImage)
6441 {
6442 pImagePrev = pDevExt->pLdrImages;
6443 while (pImagePrev->pNext != pImage)
6444 pImagePrev = pImagePrev->pNext;
6445 Assert(pImagePrev->pNext == pImage);
6446 }
6447
6448 /* unlink */
6449 if (pImagePrev)
6450 pImagePrev->pNext = pImage->pNext;
6451 else
6452 pDevExt->pLdrImages = pImage->pNext;
6453
6454        /* check if this is VMMR0.r0 and, if so, unset its entry point pointers. */
6455 if (pDevExt->pvVMMR0 == pImage->pvImage)
6456 {
6457 pDevExt->pvVMMR0 = NULL;
6458 pDevExt->pfnVMMR0EntryFast = NULL;
6459 pDevExt->pfnVMMR0EntryEx = NULL;
6460 }
6461
6462 /* check for objects with destructors in this image. (Shouldn't happen.) */
6463 if (pDevExt->pObjs)
6464 {
6465 unsigned cObjs = 0;
6466 PSUPDRVOBJ pObj;
6467 RTSpinlockAcquire(pDevExt->Spinlock);
6468 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
6469 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
6470 {
6471 pObj->pfnDestructor = NULL;
6472 cObjs++;
6473 }
6474 RTSpinlockRelease(pDevExt->Spinlock);
6475 if (cObjs)
6476 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
6477 }
6478
6479 /* call termination function if fully loaded. */
6480 if ( pImage->pfnModuleTerm
6481 && pImage->uState == SUP_IOCTL_LDR_LOAD)
6482 {
6483            LogFlow(("supdrvLdrFree: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
6484 pDevExt->hLdrTermThread = RTThreadNativeSelf();
6485 pImage->pfnModuleTerm(pImage);
6486 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
6487 }
6488
6489 /* Inform the tracing component. */
6490 supdrvTracerModuleUnloading(pDevExt, pImage);
6491
6492        /* Do native unload if appropriate, then inform the native code about the
6493           unloading (mainly for the non-native loading case). */
6494 if (pImage->fNative)
6495 supdrvOSLdrUnload(pDevExt, pImage);
6496 supdrvOSLdrNotifyUnloaded(pDevExt, pImage);
6497
6498 /* free the image */
6499 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
6500 pImage->cImgUsage = 0;
6501 pImage->pDevExt = NULL;
6502 pImage->pNext = NULL;
6503 pImage->uState = SUP_IOCTL_LDR_FREE;
6504#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
6505 RTR0MemObjFree(pImage->hMemObjImage, true /*fMappings*/);
6506 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
6507#else
6508 RTMemExecFree(pImage->pvImageAlloc, pImage->cbImageBits + 31);
6509 pImage->pvImageAlloc = NULL;
6510#endif
6511 pImage->pvImage = NULL;
6512 RTMemFree(pImage->pachStrTab);
6513 pImage->pachStrTab = NULL;
6514 RTMemFree(pImage->paSymbols);
6515 pImage->paSymbols = NULL;
6516 RTMemFree(pImage->paSegments);
6517 pImage->paSegments = NULL;
6518
6519 pImageImport = pImage->pImageImport;
6520 pImage->pImageImport = NULL;
6521
6522 RTMemFree(pImage);
6523
6524 /*
6525 * Deal with any import image.
6526 */
6527 if (!pImageImport)
6528 break;
6529 if (pImageImport->cImgUsage > 1)
6530 {
6531 supdrvLdrSubtractUsage(pDevExt, pImageImport, 1);
6532 break;
6533 }
6534 pImage = pImageImport;
6535 }
6536}
6537
6538
6539/**
6540 * Acquires the loader lock.
6541 *
6542 * @returns IPRT status code.
6543 * @param pDevExt The device extension.
6544 * @note Not recursive on all platforms yet.
6545 */
6546DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt)
6547{
6548#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6549 int rc = RTSemMutexRequest(pDevExt->mtxLdr, RT_INDEFINITE_WAIT);
6550#else
6551 int rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
6552#endif
6553 AssertRC(rc);
6554 return rc;
6555}
6556
6557
6558/**
6559 * Releases the loader lock.
6560 *
6561 * @returns IPRT status code.
6562 * @param pDevExt The device extension.
6563 */
6564DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt)
6565{
6566#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6567 return RTSemMutexRelease(pDevExt->mtxLdr);
6568#else
6569 return RTSemFastMutexRelease(pDevExt->mtxLdr);
6570#endif
6571}
6572
6573
6574/**
6575 * Acquires the global loader lock.
6576 *
6577 * This can be useful when accessing structures being modified by the ModuleInit
6578 * and ModuleTerm callbacks.  Use SUPR0LdrUnlock() to unlock.
6579 *
6580 * @returns VBox status code.
6581 * @param pSession The session doing the locking.
6582 *
6583 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6584 */
6585SUPR0DECL(int) SUPR0LdrLock(PSUPDRVSESSION pSession)
6586{
6587 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6588 return supdrvLdrLock(pSession->pDevExt);
6589}
6590SUPR0_EXPORT_SYMBOL(SUPR0LdrLock);
6591
6592
6593/**
6594 * Releases the global loader lock.
6595 *
6596 * Must correspond to a SUPR0LdrLock call!
6597 *
6598 * @returns VBox status code.
6599 * @param   pSession    The session doing the unlocking.
6600 *
6601 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6602 */
6603SUPR0DECL(int) SUPR0LdrUnlock(PSUPDRVSESSION pSession)
6604{
6605 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6606 return supdrvLdrUnlock(pSession->pDevExt);
6607}
6608SUPR0_EXPORT_SYMBOL(SUPR0LdrUnlock);
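/*
 * Illustrative usage sketch for SUPR0LdrLock/SUPR0LdrUnlock (assumes a valid
 * pSession obtained elsewhere; not usable from ModuleInit/ModuleTerm, see the
 * notes above):
 *
 * @code
 *      int rc = SUPR0LdrLock(pSession);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... access structures that ModuleInit/ModuleTerm may modify ...
 *          SUPR0LdrUnlock(pSession);
 *      }
 * @endcode
 */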
6609
6610
6611/**
6612 * For checking lock ownership in Assert() statements during ModuleInit and
6613 * ModuleTerm.
6614 *
6615 * @returns Whether we own the loader lock or not.
6616 * @param hMod The module in question.
6617 * @param fWantToHear For hosts where it is difficult to know who owns the
6618 * lock, this will be returned instead.
6619 */
6620SUPR0DECL(bool) SUPR0LdrIsLockOwnerByMod(void *hMod, bool fWantToHear)
6621{
6622 PSUPDRVDEVEXT pDevExt;
6623 RTNATIVETHREAD hOwner;
6624
6625 PSUPDRVLDRIMAGE pImage = (PSUPDRVLDRIMAGE)hMod;
6626 AssertPtrReturn(pImage, fWantToHear);
6627 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, fWantToHear);
6628
6629 pDevExt = pImage->pDevExt;
6630 AssertPtrReturn(pDevExt, fWantToHear);
6631
6632 /*
6633 * Expecting this to be called at init/term time only, so this will be sufficient.
6634 */
6635 hOwner = pDevExt->hLdrInitThread;
6636 if (hOwner == NIL_RTNATIVETHREAD)
6637 hOwner = pDevExt->hLdrTermThread;
6638 if (hOwner != NIL_RTNATIVETHREAD)
6639 return hOwner == RTThreadNativeSelf();
6640
6641 /*
6642 * Neither of the two semaphore variants currently offers very good
6643 * introspection, so we wing it for now. This API is VBOX_STRICT only.
6644 */
6645#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6646 return RTSemMutexIsOwned(pDevExt->mtxLdr) && fWantToHear;
6647#else
6648 return fWantToHear;
6649#endif
6650}
6651SUPR0_EXPORT_SYMBOL(SUPR0LdrIsLockOwnerByMod);
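/*
 * Illustrative use in an assertion from ModuleInit/ModuleTerm code, where hMod
 * is the module handle those callbacks receive (hypothetical call site):
 *
 * @code
 *      Assert(SUPR0LdrIsLockOwnerByMod(hMod, true /*fWantToHear*/));
 * @endcode
 */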
6652
6653
6654/**
6655 * Locates and retains the given module for ring-0 usage.
6656 *
6657 * @returns VBox status code.
6658 * @param pSession The session to associate the module reference with.
6659 * @param pszName The module name (no path).
6660 * @param phMod Where to return the module handle. The module is
6661 * referenced and a call to SUPR0LdrModRelease() is
6662 * necessary when done with it.
6663 */
6664SUPR0DECL(int) SUPR0LdrModByName(PSUPDRVSESSION pSession, const char *pszName, void **phMod)
6665{
6666 int rc;
6667 size_t cchName;
6668 PSUPDRVDEVEXT pDevExt;
6669
6670 /*
6671 * Validate input.
6672 */
6673 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
6674 *phMod = NULL;
6675 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6676 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
6677 cchName = strlen(pszName);
6678 AssertReturn(cchName > 0, VERR_EMPTY_STRING);
6679 AssertReturn(cchName < RT_SIZEOFMEMB(SUPDRVLDRIMAGE, szName), VERR_MODULE_NOT_FOUND);
6680
6681 /*
6682 * Do the lookup.
6683 */
6684 pDevExt = pSession->pDevExt;
6685 rc = supdrvLdrLock(pDevExt);
6686 if (RT_SUCCESS(rc))
6687 {
6688 PSUPDRVLDRIMAGE pImage;
6689 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
6690 {
6691 if ( pImage->szName[cchName] == '\0'
6692 && !memcmp(pImage->szName, pszName, cchName))
6693 {
6694 /*
6695                 * Check the state and make sure we don't overflow the reference counter before returning it.
6696 */
6697 uint32_t uState = pImage->uState;
6698 if (uState == SUP_IOCTL_LDR_LOAD)
6699 {
6700 if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
6701 {
6702 supdrvLdrAddUsage(pDevExt, pSession, pImage, false /*fRing3Usage*/);
6703 *phMod = pImage;
6704 supdrvLdrUnlock(pDevExt);
6705 return VINF_SUCCESS;
6706 }
6707 supdrvLdrUnlock(pDevExt);
6708 Log(("SUPR0LdrModByName: Too many existing references to '%s'!\n", pszName));
6709 return VERR_TOO_MANY_REFERENCES;
6710 }
6711 supdrvLdrUnlock(pDevExt);
6712 Log(("SUPR0LdrModByName: Module '%s' is not in the loaded state (%d)!\n", pszName, uState));
6713 return VERR_INVALID_STATE;
6714 }
6715 }
6716 supdrvLdrUnlock(pDevExt);
6717 Log(("SUPR0LdrModByName: Module '%s' not found!\n", pszName));
6718 rc = VERR_MODULE_NOT_FOUND;
6719 }
6720 return rc;
6721}
6722SUPR0_EXPORT_SYMBOL(SUPR0LdrModByName);
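/*
 * Illustrative usage sketch (the module name is chosen for illustration only):
 *
 * @code
 *      void *hMod = NULL;
 *      int   rc   = SUPR0LdrModByName(pSession, "VMMR0.r0", &hMod);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... use the module (e.g. resolve symbols, keep hMod for later calls) ...
 *          SUPR0LdrModRelease(pSession, hMod);
 *      }
 * @endcode
 */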
6723
6724
6725/**
6726 * Retains a ring-0 module reference.
6727 *
6728 * Release reference when done by calling SUPR0LdrModRelease().
6729 *
6730 * @returns VBox status code.
6731 * @param pSession The session to reference the module in. A usage
6732 * record is added if needed.
6733 * @param hMod The handle to the module to retain.
6734 */
6735SUPR0DECL(int) SUPR0LdrModRetain(PSUPDRVSESSION pSession, void *hMod)
6736{
6737 PSUPDRVDEVEXT pDevExt;
6738 PSUPDRVLDRIMAGE pImage;
6739 int rc;
6740
6741 /* Validate input a little. */
6742 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6743 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6744 pImage = (PSUPDRVLDRIMAGE)hMod;
6745 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6746
6747 /* Reference the module: */
6748 pDevExt = pSession->pDevExt;
6749 rc = supdrvLdrLock(pDevExt);
6750 if (RT_SUCCESS(rc))
6751 {
6752 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6753 {
6754 if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
6755 rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, false /*fRing3Usage*/);
6756 else
6757 AssertFailedStmt(rc = VERR_TOO_MANY_REFERENCES);
6758 }
6759 else
6760 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6761 supdrvLdrUnlock(pDevExt);
6762 }
6763 return rc;
6764}
6765SUPR0_EXPORT_SYMBOL(SUPR0LdrModRetain);
6766
6767
6768/**
6769 * Releases a ring-0 module reference retained by SUPR0LdrModByName() or
6770 * SUPR0LdrModRetain().
6771 *
6772 * @returns VBox status code.
6773 * @param pSession The session that the module was retained in.
6774 * @param hMod The module handle. NULL is silently ignored.
6775 */
6776SUPR0DECL(int) SUPR0LdrModRelease(PSUPDRVSESSION pSession, void *hMod)
6777{
6778 PSUPDRVDEVEXT pDevExt;
6779 PSUPDRVLDRIMAGE pImage;
6780 int rc;
6781
6782 /*
6783 * Validate input.
6784 */
6785 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6786 if (!hMod)
6787 return VINF_SUCCESS;
6788 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6789 pImage = (PSUPDRVLDRIMAGE)hMod;
6790 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6791
6792 /*
6793 * Take the loader lock and revalidate the module:
6794 */
6795 pDevExt = pSession->pDevExt;
6796 rc = supdrvLdrLock(pDevExt);
6797 if (RT_SUCCESS(rc))
6798 {
6799 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6800 {
6801 /*
6802 * Find the usage record for the module:
6803 */
6804 PSUPDRVLDRUSAGE pPrevUsage = NULL;
6805 PSUPDRVLDRUSAGE pUsage;
6806
6807 rc = VERR_MODULE_NOT_FOUND;
6808 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6809 {
6810 if (pUsage->pImage == pImage)
6811 {
6812 /*
6813 * Drop a ring-0 reference:
6814 */
6815 Assert(pImage->cImgUsage >= pUsage->cRing0Usage + pUsage->cRing3Usage);
6816 if (pUsage->cRing0Usage > 0)
6817 {
6818 if (pImage->cImgUsage > 1)
6819 {
6820 pUsage->cRing0Usage -= 1;
6821 supdrvLdrSubtractUsage(pDevExt, pImage, 1);
6822 rc = VINF_SUCCESS;
6823 }
6824 else
6825 {
6826 Assert(!pImage->pWrappedModInfo /* (The wrapper kmod has the last reference.) */);
6827 supdrvLdrFree(pDevExt, pImage);
6828
6829 if (pPrevUsage)
6830 pPrevUsage->pNext = pUsage->pNext;
6831 else
6832 pSession->pLdrUsage = pUsage->pNext;
6833 pUsage->pNext = NULL;
6834 pUsage->pImage = NULL;
6835 pUsage->cRing0Usage = 0;
6836 pUsage->cRing3Usage = 0;
6837 RTMemFree(pUsage);
6838
6839 rc = VINF_OBJECT_DESTROYED;
6840 }
6841 }
6842 else
6843 AssertFailedStmt(rc = VERR_CALLER_NO_REFERENCE);
6844 break;
6845 }
6846 pPrevUsage = pUsage;
6847 }
6848 }
6849 else
6850 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6851 supdrvLdrUnlock(pDevExt);
6852 }
6853 return rc;
6854
6855}
6856SUPR0_EXPORT_SYMBOL(SUPR0LdrModRelease);
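/*
 * Illustrative retain/release pairing (hMod obtained via SUPR0LdrModByName or
 * handed in by other ring-0 code):
 *
 * @code
 *      int rc = SUPR0LdrModRetain(pSession, hMod);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... the extra reference keeps the image loaded across this scope ...
 *          rc = SUPR0LdrModRelease(pSession, hMod); // VINF_SUCCESS, or VINF_OBJECT_DESTROYED on the last reference
 *      }
 * @endcode
 */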
6857
6858
6859/**
6860 * Implements the service call request.
6861 *
6862 * @returns VBox status code.
6863 * @param pDevExt The device extension.
6864 * @param pSession The calling session.
6865 * @param pReq The request packet, valid.
6866 */
6867static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
6868{
6869#if !defined(RT_OS_WINDOWS) || defined(RT_ARCH_AMD64) || defined(DEBUG)
6870 int rc;
6871
6872 /*
6873     * Find the module.  Only modules referenced by the calling session are searched.
6874 */
6875 rc = supdrvLdrLock(pDevExt);
6876 if (RT_SUCCESS(rc))
6877 {
6878 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
6879 PSUPDRVLDRUSAGE pUsage;
6880
6881 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6882 if ( pUsage->pImage->pfnServiceReqHandler
6883 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
6884 {
6885 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
6886 break;
6887 }
6888 supdrvLdrUnlock(pDevExt);
6889
6890 if (pfnServiceReqHandler)
6891 {
6892 /*
6893 * Call it.
6894 */
6895 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
6896 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
6897 else
6898 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
6899 }
6900 else
6901 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
6902 }
6903
6904 /* log it */
6905 if ( RT_FAILURE(rc)
6906 && rc != VERR_INTERRUPTED
6907 && rc != VERR_TIMEOUT)
6908 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6909 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6910 else
6911 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6912 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6913 return rc;
6914#else /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6915 RT_NOREF3(pDevExt, pSession, pReq);
6916 return VERR_NOT_IMPLEMENTED;
6917#endif /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6918}
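/*
 * Sketch of a module side service request handler as it is invoked above.  The
 * signature is inferred from the two call sites in this function; the handler
 * name and the operation handling are hypothetical:
 *
 * @code
 *      static DECLCALLBACK(int) ExampleSrvReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
 *                                                    uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
 *      {
 *          if (!pReqHdr)
 *              return uOperation == 0 ? VINF_SUCCESS : VERR_NOT_SUPPORTED; // argument-less operation
 *          // ... validate the request header and dispatch on uOperation ...
 *          RT_NOREF(pSession, u64Arg);
 *          return VERR_NOT_SUPPORTED;
 *      }
 * @endcode
 */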
6919
6920
6921/**
6922 * Implements the logger settings request.
6923 *
6924 * @returns VBox status code.
6925 * @param pReq The request.
6926 */
6927static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq)
6928{
6929 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
6930 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
6931 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
6932 PRTLOGGER pLogger = NULL;
6933 int rc;
6934
6935 /*
6936 * Some further validation.
6937 */
6938 switch (pReq->u.In.fWhat)
6939 {
6940 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6941 case SUPLOGGERSETTINGS_WHAT_CREATE:
6942 break;
6943
6944 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6945 if (*pszGroup || *pszFlags || *pszDest)
6946 return VERR_INVALID_PARAMETER;
6947 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
6948 return VERR_ACCESS_DENIED;
6949 break;
6950
6951 default:
6952 return VERR_INTERNAL_ERROR;
6953 }
6954
6955 /*
6956 * Get the logger.
6957 */
6958 switch (pReq->u.In.fWhich)
6959 {
6960 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6961 pLogger = RTLogGetDefaultInstance();
6962 break;
6963
6964 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6965 pLogger = RTLogRelGetDefaultInstance();
6966 break;
6967
6968 default:
6969 return VERR_INTERNAL_ERROR;
6970 }
6971
6972 /*
6973 * Do the job.
6974 */
6975 switch (pReq->u.In.fWhat)
6976 {
6977 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6978 if (pLogger)
6979 {
6980 rc = RTLogFlags(pLogger, pszFlags);
6981 if (RT_SUCCESS(rc))
6982 rc = RTLogGroupSettings(pLogger, pszGroup);
6983 NOREF(pszDest);
6984 }
6985 else
6986 rc = VERR_NOT_FOUND;
6987 break;
6988
6989 case SUPLOGGERSETTINGS_WHAT_CREATE:
6990 {
6991 if (pLogger)
6992 rc = VERR_ALREADY_EXISTS;
6993 else
6994 {
6995 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
6996
6997 rc = RTLogCreate(&pLogger,
6998 0 /* fFlags */,
6999 pszGroup,
7000 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
7001 ? "VBOX_LOG"
7002 : "VBOX_RELEASE_LOG",
7003 RT_ELEMENTS(s_apszGroups),
7004 s_apszGroups,
7005 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
7006 NULL);
7007 if (RT_SUCCESS(rc))
7008 {
7009 rc = RTLogFlags(pLogger, pszFlags);
7010 NOREF(pszDest);
7011 if (RT_SUCCESS(rc))
7012 {
7013 switch (pReq->u.In.fWhich)
7014 {
7015 case SUPLOGGERSETTINGS_WHICH_DEBUG:
7016 pLogger = RTLogSetDefaultInstance(pLogger);
7017 break;
7018 case SUPLOGGERSETTINGS_WHICH_RELEASE:
7019 pLogger = RTLogRelSetDefaultInstance(pLogger);
7020 break;
7021 }
7022 }
7023 RTLogDestroy(pLogger);
7024 }
7025 }
7026 break;
7027 }
7028
7029 case SUPLOGGERSETTINGS_WHAT_DESTROY:
7030 switch (pReq->u.In.fWhich)
7031 {
7032 case SUPLOGGERSETTINGS_WHICH_DEBUG:
7033 pLogger = RTLogSetDefaultInstance(NULL);
7034 break;
7035 case SUPLOGGERSETTINGS_WHICH_RELEASE:
7036 pLogger = RTLogRelSetDefaultInstance(NULL);
7037 break;
7038 }
7039 rc = RTLogDestroy(pLogger);
7040 break;
7041
7042 default:
7043 {
7044 rc = VERR_INTERNAL_ERROR;
7045 break;
7046 }
7047 }
7048
7049 return rc;
7050}
7051
7052
7053/**
7054 * Implements the MSR prober operations.
7055 *
7056 * @returns VBox status code.
7057 * @param pDevExt The device extension.
7058 * @param pReq The request.
7059 */
7060static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq)
7061{
7062#ifdef SUPDRV_WITH_MSR_PROBER
7063 RTCPUID const idCpu = pReq->u.In.idCpu == UINT32_MAX ? NIL_RTCPUID : pReq->u.In.idCpu;
7064 int rc;
7065
7066 switch (pReq->u.In.enmOp)
7067 {
7068 case SUPMSRPROBEROP_READ:
7069 {
7070 uint64_t uValue;
7071 rc = supdrvOSMsrProberRead(pReq->u.In.uMsr, idCpu, &uValue);
7072 if (RT_SUCCESS(rc))
7073 {
7074 pReq->u.Out.uResults.Read.uValue = uValue;
7075 pReq->u.Out.uResults.Read.fGp = false;
7076 }
7077 else if (rc == VERR_ACCESS_DENIED)
7078 {
7079 pReq->u.Out.uResults.Read.uValue = 0;
7080 pReq->u.Out.uResults.Read.fGp = true;
7081 rc = VINF_SUCCESS;
7082 }
7083 break;
7084 }
7085
7086 case SUPMSRPROBEROP_WRITE:
7087 rc = supdrvOSMsrProberWrite(pReq->u.In.uMsr, idCpu, pReq->u.In.uArgs.Write.uToWrite);
7088 if (RT_SUCCESS(rc))
7089 pReq->u.Out.uResults.Write.fGp = false;
7090 else if (rc == VERR_ACCESS_DENIED)
7091 {
7092 pReq->u.Out.uResults.Write.fGp = true;
7093 rc = VINF_SUCCESS;
7094 }
7095 break;
7096
7097 case SUPMSRPROBEROP_MODIFY:
7098 case SUPMSRPROBEROP_MODIFY_FASTER:
7099 rc = supdrvOSMsrProberModify(idCpu, pReq);
7100 break;
7101
7102 default:
7103 return VERR_INVALID_FUNCTION;
7104 }
7105 RT_NOREF1(pDevExt);
7106 return rc;
7107#else
7108 RT_NOREF2(pDevExt, pReq);
7109 return VERR_NOT_IMPLEMENTED;
7110#endif
7111}
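/*
 * How a read result is interpreted (fields as produced above; illustrative
 * caller-side check): fGp set means the MSR access raised a #GP and uValue is
 * not meaningful.
 *
 * @code
 *      if (RT_SUCCESS(rc) && !pReq->u.Out.uResults.Read.fGp)
 *          uValue = pReq->u.Out.uResults.Read.uValue;
 * @endcode
 */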
7112
7113
7114/**
7115 * Resumes the built-in keyboard on MacBook Air and MacBook Pro hosts.
7116 * If there is no built-in keyboard device, success is returned anyway.
7117 *
7118 * @returns 0 on the Mac OS X platform, VERR_NOT_IMPLEMENTED on the other platforms.
7119 */
7120static int supdrvIOCtl_ResumeSuspendedKbds(void)
7121{
7122#if defined(RT_OS_DARWIN)
7123 return supdrvDarwinResumeSuspendedKbds();
7124#else
7125 return VERR_NOT_IMPLEMENTED;
7126#endif
7127}
7128