VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp@ 106625

Last change on this file since 106625 was 106625, checked in by vboxsync, 5 weeks ago

SUPDrv: Making it build on win.arm64... jiraref:VBP-1253

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 268.7 KB
Line 
1/* $Id: SUPDrv.cpp 106625 2024-10-23 15:45:04Z vboxsync $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#define LOG_GROUP LOG_GROUP_SUP_DRV
42#define SUPDRV_AGNOSTIC
43#include "SUPDrvInternal.h"
44#ifndef PAGE_SHIFT
45# include <iprt/param.h>
46#endif
47#include <iprt/asm.h>
48#include <iprt/asm-math.h>
49#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
50# include <iprt/asm-amd64-x86.h>
51#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
52# include <iprt/asm-arm.h>
53#else
54# error "Port me!"
55#endif
56#include <iprt/cpuset.h>
57#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
58# include <iprt/dbg.h>
59#endif
60#include <iprt/handletable.h>
61#include <iprt/mem.h>
62#include <iprt/mp.h>
63#include <iprt/power.h>
64#include <iprt/process.h>
65#include <iprt/semaphore.h>
66#include <iprt/spinlock.h>
67#include <iprt/thread.h>
68#include <iprt/uuid.h>
69#include <iprt/net.h>
70#include <iprt/crc.h>
71#include <iprt/string.h>
72#include <iprt/timer.h>
73#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
74# include <iprt/rand.h>
75# include <iprt/path.h>
76#endif
77#include <iprt/uint128.h>
78#include <iprt/x86.h>
79
80#include <VBox/param.h>
81#include <VBox/log.h>
82#include <VBox/err.h>
83#include <VBox/vmm/hm_vmx.h>
84
85#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
86# include "dtrace/SUPDrv.h"
87#else
88# define VBOXDRV_SESSION_CREATE(pvSession, fUser) do { } while (0)
89# define VBOXDRV_SESSION_CLOSE(pvSession) do { } while (0)
90# define VBOXDRV_IOCTL_ENTRY(pvSession, uIOCtl, pvReqHdr) do { } while (0)
91# define VBOXDRV_IOCTL_RETURN(pvSession, uIOCtl, pvReqHdr, rcRet, rcReq) do { } while (0)
92#endif
93
94#ifdef __cplusplus
95# if __cplusplus >= 201100 || RT_MSC_PREREQ(RT_MSC_VER_VS2019)
96# define SUPDRV_CAN_COUNT_FUNCTION_ARGS
97# ifdef _MSC_VER
98# pragma warning(push)
99# pragma warning(disable:4577)
100# include <type_traits>
101# pragma warning(pop)
102
103# elif defined(RT_OS_DARWIN)
104# define _LIBCPP_CSTDDEF
105# include <__nullptr>
106# include <type_traits>
107
108# else
109# include <type_traits>
110# endif
111# endif
112#endif
113
114
115/*
116 * Logging assignments:
117 * Log - useful stuff, like failures.
118 * LogFlow - program flow, except the really noisy bits.
119 * Log2 - Cleanup.
120 * Log3 - Loader flow noise.
121 * Log4 - Call VMMR0 flow noise.
122 * Log5 - Native yet-to-be-defined noise.
123 * Log6 - Native ioctl flow noise.
124 *
125 * Logging requires KBUILD_TYPE=debug and possibly changes to the logger
126 * instantiation in log-vbox.c(pp).
127 */
128
129
130/*********************************************************************************************************************************
131* Defined Constants And Macros *
132*********************************************************************************************************************************/
/** @def VBOX_SVN_REV
 * The SVN revision of the build; the makefile should define this if it can.
 * Falls back to 0 when not provided. */
#ifndef VBOX_SVN_REV
# define VBOX_SVN_REV 0
#endif

/** @def SUPDRV_CHECK_SMAP_SETUP
 * SMAP check setup.  Declares and initializes the fKernelFeatures local
 * variable consumed by SUPDRV_CHECK_SMAP_CHECK in the same function. */
/** @def SUPDRV_CHECK_SMAP_CHECK
 * Checks that the AC flag is set if SMAP is enabled.  If AC is not set, it
 * will be logged and @a a_BadExpr is executed. */
#if (defined(RT_OS_DARWIN) || defined(RT_OS_LINUX)) && !defined(VBOX_WITHOUT_EFLAGS_AC_SET_IN_VBOXDRV)
# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) \
    do { \
        if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
        { \
            RTCCUINTREG fEfl = ASMGetFlags(); \
            if (RT_LIKELY(fEfl & X86_EFL_AC)) \
            { /* likely */ } \
            else \
            { \
                supdrvBadContext(a_pDevExt, "SUPDrv.cpp", __LINE__, "EFLAGS.AC is 0!"); \
                a_BadExpr; \
            } \
        } \
    } while (0)
#else
# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) NOREF(fKernelFeatures)
#endif
164
165
166/*********************************************************************************************************************************
167* Internal Functions *
168*********************************************************************************************************************************/
/* Session object handle-table callbacks and per-session memory tracking: */
static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
/* Image loader (SUPLDR* ioctl and IDC) workers and helpers: */
static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt);
static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
static int supdrvLdrAddUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage);
DECLINLINE(void) supdrvLdrSubtractUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, uint32_t cReference);
static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt);
DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt);
/* Miscellaneous ioctl workers: */
static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq);
static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq);
static int supdrvIOCtl_ResumeSuspendedKbds(void);
188
189
190/*********************************************************************************************************************************
191* Global Variables *
192*********************************************************************************************************************************/
/** @def SUPEXP_CHECK_ARGS
 * This is for checking the argument count of the function in the entry,
 * just to make sure we don't accidentally export something the wrapper
 * can't deal with.
 *
 * Using some C++11 magic to do the counting.
 *
 * The error is reported by overflowing the SUPFUNC::cArgs field, so the
 * warnings can probably be a little mysterious.
 *
 * @note Doesn't work for CLANG 11. Works for Visual C++, unless there
 *       are function pointers in the argument list.
 */
#if defined(SUPDRV_CAN_COUNT_FUNCTION_ARGS) && RT_CLANG_PREREQ(99, 0)
/* NOTE(review): RT_CLANG_PREREQ(99, 0) appears to keep this path disabled for
   all current clang versions -- presumably deliberate given the CLANG 11 note
   above; confirm before expecting the check to fire with clang. */
template <typename RetType, typename ... Types>
constexpr std::integral_constant<unsigned, sizeof ...(Types)>
CountFunctionArguments(RetType(RTCALL *)(Types ...))
{
    return std::integral_constant<unsigned, sizeof ...(Types)>{};
}
/** Evaluates to a_cArgs when it is at least the real parameter count of
 * a_Name, otherwise to 1023 so the SUPFUNC::cArgs field overflows and the
 * compiler emits a (somewhat cryptic) warning/error. */
# define SUPEXP_CHECK_ARGS(a_cArgs, a_Name) \
    ((a_cArgs) >= decltype(CountFunctionArguments(a_Name))::value ? (uint8_t)(a_cArgs) : 1023)

#else
# define SUPEXP_CHECK_ARGS(a_cArgs, a_Name) a_cArgs
#endif

/** @name Function table entry macros.
 * @note The SUPEXP_STK_BACKF macro is because VC++ has trouble with functions
 *       with function pointer arguments (probably noexcept related).
 * @{ */
#define SUPEXP_CUSTOM(a_cArgs, a_Name, a_Value) { #a_Name, a_cArgs, (void *)(uintptr_t)(a_Value) }
#define SUPEXP_STK_OKAY(a_cArgs, a_Name) { #a_Name, SUPEXP_CHECK_ARGS(a_cArgs, a_Name), (void *)(uintptr_t)a_Name }
#if 0
# define SUPEXP_STK_BACK(a_cArgs, a_Name) { "StkBack_" #a_Name, SUPEXP_CHECK_ARGS(a_cArgs, a_Name), (void *)(uintptr_t)a_Name }
# define SUPEXP_STK_BACKF(a_cArgs, a_Name) { "StkBack_" #a_Name, SUPEXP_CHECK_ARGS(a_cArgs, a_Name), (void *)(uintptr_t)a_Name }
#else
# define SUPEXP_STK_BACK(a_cArgs, a_Name) { #a_Name, SUPEXP_CHECK_ARGS(a_cArgs, a_Name), (void *)(uintptr_t)a_Name }
# ifdef _MSC_VER
/* VC++: skip the argument-count check for function-pointer-taking exports. */
# define SUPEXP_STK_BACKF(a_cArgs, a_Name) { #a_Name, a_cArgs, (void *)(uintptr_t)a_Name }
# else
# define SUPEXP_STK_BACKF(a_cArgs, a_Name) { #a_Name, SUPEXP_CHECK_ARGS(a_cArgs, a_Name), (void *)(uintptr_t)a_Name }
# endif
#endif
/** @} */
238
/**
 * Array of the R0 SUP API.
 *
 * While making changes to these exports, make sure to update the IOC
 * minor version (SUPDRV_IOC_VERSION).
 *
 * @remarks This array is processed by SUPR0-def-pe.sed and SUPR0-def-lx.sed to
 *          produce definition files from which import libraries are generated.
 *          Take care when commenting things and especially with \#ifdef'ing.
 *
 * @remarks The absolute-symbol fixup code in supdrvInitDevExt() ASSUMES the
 *          ten SUPEXP_CUSTOM entries for SUPR0Abs* come first and in exactly
 *          this order (indexes 0 thru 9).
 */
static SUPFUNC g_aFunctions[] =
{
/* SED: START */
    /* name function */
    /* Entries with absolute addresses determined at runtime, fixup
       code makes ugly ASSUMPTIONS about the order here: */
    SUPEXP_CUSTOM( 0, SUPR0AbsIs64bit, 0), /* only-amd64, only-x86 */
    SUPEXP_CUSTOM( 0, SUPR0Abs64bitKernelCS, 0), /* only-amd64, only-x86 */
    SUPEXP_CUSTOM( 0, SUPR0Abs64bitKernelSS, 0), /* only-amd64, only-x86 */
    SUPEXP_CUSTOM( 0, SUPR0Abs64bitKernelDS, 0), /* only-amd64, only-x86 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelCS, 0), /* only-amd64, only-x86 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelSS, 0), /* only-amd64, only-x86 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelDS, 0), /* only-amd64, only-x86 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelES, 0), /* only-amd64, only-x86 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelFS, 0), /* only-amd64, only-x86 */
    SUPEXP_CUSTOM( 0, SUPR0AbsKernelGS, 0), /* only-amd64, only-x86 */
    /* Normal function & data pointers: */
    SUPEXP_CUSTOM( 0, g_pSUPGlobalInfoPage, &g_pSUPGlobalInfoPage), /* SED: DATA */
    SUPEXP_STK_OKAY( 0, SUPGetGIP),
    SUPEXP_STK_BACK( 1, SUPReadTscWithDelta),
    SUPEXP_STK_BACK( 1, SUPGetTscDeltaSlow),
    SUPEXP_STK_BACK( 1, SUPGetCpuHzFromGipForAsyncMode),
    SUPEXP_STK_OKAY( 3, SUPIsTscFreqCompatible),
    SUPEXP_STK_OKAY( 3, SUPIsTscFreqCompatibleEx),
    SUPEXP_STK_BACK( 4, SUPR0BadContext),
    SUPEXP_STK_BACK( 2, SUPR0ComponentDeregisterFactory),
    SUPEXP_STK_BACK( 4, SUPR0ComponentQueryFactory),
    SUPEXP_STK_BACK( 2, SUPR0ComponentRegisterFactory),
    SUPEXP_STK_BACK( 5, SUPR0ContAlloc),
    SUPEXP_STK_BACK( 2, SUPR0ContFree),
    SUPEXP_STK_BACK( 2, SUPR0ChangeCR4), /* only-amd64, only-x86 */
    SUPEXP_STK_BACK( 1, SUPR0EnableVTx), /* only-amd64, only-x86 */
    SUPEXP_STK_OKAY( 1, SUPR0FpuBegin),
    SUPEXP_STK_OKAY( 1, SUPR0FpuEnd),
    SUPEXP_STK_BACK( 0, SUPR0SuspendVTxOnCpu), /* only-amd64, only-x86 */
    SUPEXP_STK_BACK( 1, SUPR0ResumeVTxOnCpu), /* only-amd64, only-x86 */
    SUPEXP_STK_OKAY( 1, SUPR0GetCurrentGdtRw), /* only-amd64, only-x86 */
    SUPEXP_STK_OKAY( 0, SUPR0GetKernelFeatures),
    SUPEXP_STK_BACK( 3, SUPR0GetHwvirtMsrs), /* only-amd64, only-x86 */
    SUPEXP_STK_BACK( 0, SUPR0GetPagingMode),
    SUPEXP_STK_BACK( 1, SUPR0GetSvmUsability), /* only-amd64, only-x86 */
    SUPEXP_STK_BACK( 1, SUPR0GetVTSupport), /* only-amd64, only-x86 */
    SUPEXP_STK_BACK( 1, SUPR0GetVmxUsability), /* only-amd64, only-x86 */
    SUPEXP_STK_BACK( 2, SUPR0LdrIsLockOwnerByMod),
    SUPEXP_STK_BACK( 1, SUPR0LdrLock),
    SUPEXP_STK_BACK( 1, SUPR0LdrUnlock),
    SUPEXP_STK_BACK( 3, SUPR0LdrModByName),
    SUPEXP_STK_BACK( 2, SUPR0LdrModRelease),
    SUPEXP_STK_BACK( 2, SUPR0LdrModRetain),
    SUPEXP_STK_BACK( 4, SUPR0LockMem),
    SUPEXP_STK_BACK( 5, SUPR0LowAlloc),
    SUPEXP_STK_BACK( 2, SUPR0LowFree),
    SUPEXP_STK_BACK( 4, SUPR0MemAlloc),
    SUPEXP_STK_BACK( 2, SUPR0MemFree),
    SUPEXP_STK_BACK( 3, SUPR0MemGetPhys),
    SUPEXP_STK_BACK( 2, SUPR0ObjAddRef),
    SUPEXP_STK_BACK( 3, SUPR0ObjAddRefEx),
    SUPEXP_STK_BACKF( 5, SUPR0ObjRegister),
    SUPEXP_STK_BACK( 2, SUPR0ObjRelease),
    SUPEXP_STK_BACK( 3, SUPR0ObjVerifyAccess),
    SUPEXP_STK_BACK( 6, SUPR0PageAllocEx),
    SUPEXP_STK_BACK( 2, SUPR0PageFree),
    SUPEXP_STK_BACK( 6, SUPR0PageMapKernel),
    SUPEXP_STK_BACK( 6, SUPR0PageProtect),
#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
    SUPEXP_STK_OKAY( 2, SUPR0HCPhysToVirt), /* only-linux, only-solaris, only-freebsd */
#endif
    SUPEXP_STK_BACK( 2, SUPR0PrintfV),
    SUPEXP_STK_BACK( 1, SUPR0GetSessionGVM),
    SUPEXP_STK_BACK( 1, SUPR0GetSessionVM),
    SUPEXP_STK_BACK( 3, SUPR0SetSessionVM),
    SUPEXP_STK_BACK( 1, SUPR0GetSessionUid),
    SUPEXP_STK_BACK( 6, SUPR0TscDeltaMeasureBySetIndex),
    SUPEXP_STK_BACK( 1, SUPR0TracerDeregisterDrv),
    SUPEXP_STK_BACK( 2, SUPR0TracerDeregisterImpl),
    SUPEXP_STK_BACK( 6, SUPR0TracerFireProbe),
    SUPEXP_STK_BACK( 3, SUPR0TracerRegisterDrv),
    SUPEXP_STK_BACK( 4, SUPR0TracerRegisterImpl),
    SUPEXP_STK_BACK( 2, SUPR0TracerRegisterModule),
    SUPEXP_STK_BACK( 2, SUPR0TracerUmodProbeFire),
    SUPEXP_STK_BACK( 2, SUPR0UnlockMem),
#ifdef RT_OS_WINDOWS
    SUPEXP_STK_BACK( 4, SUPR0IoCtlSetupForHandle), /* only-windows */
    SUPEXP_STK_BACK( 9, SUPR0IoCtlPerform), /* only-windows */
    SUPEXP_STK_BACK( 1, SUPR0IoCtlCleanup), /* only-windows */
#endif
    SUPEXP_STK_BACK( 2, SUPSemEventClose),
    SUPEXP_STK_BACK( 2, SUPSemEventCreate),
    SUPEXP_STK_BACK( 1, SUPSemEventGetResolution),
    SUPEXP_STK_BACK( 2, SUPSemEventMultiClose),
    SUPEXP_STK_BACK( 2, SUPSemEventMultiCreate),
    SUPEXP_STK_BACK( 1, SUPSemEventMultiGetResolution),
    SUPEXP_STK_BACK( 2, SUPSemEventMultiReset),
    SUPEXP_STK_BACK( 2, SUPSemEventMultiSignal),
    SUPEXP_STK_BACK( 3, SUPSemEventMultiWait),
    SUPEXP_STK_BACK( 3, SUPSemEventMultiWaitNoResume),
    SUPEXP_STK_BACK( 3, SUPSemEventMultiWaitNsAbsIntr),
    SUPEXP_STK_BACK( 3, SUPSemEventMultiWaitNsRelIntr),
    SUPEXP_STK_BACK( 2, SUPSemEventSignal),
    SUPEXP_STK_BACK( 3, SUPSemEventWait),
    SUPEXP_STK_BACK( 3, SUPSemEventWaitNoResume),
    SUPEXP_STK_BACK( 3, SUPSemEventWaitNsAbsIntr),
    SUPEXP_STK_BACK( 3, SUPSemEventWaitNsRelIntr),

    SUPEXP_STK_BACK( 0, RTAssertAreQuiet),
    SUPEXP_STK_BACK( 0, RTAssertMayPanic),
    SUPEXP_STK_BACK( 4, RTAssertMsg1),
    SUPEXP_STK_BACK( 2, RTAssertMsg2AddV),
    SUPEXP_STK_BACK( 2, RTAssertMsg2V),
    SUPEXP_STK_BACK( 1, RTAssertSetMayPanic),
    SUPEXP_STK_BACK( 1, RTAssertSetQuiet),
    SUPEXP_STK_OKAY( 2, RTCrc32),
    SUPEXP_STK_OKAY( 1, RTCrc32Finish),
    SUPEXP_STK_OKAY( 3, RTCrc32Process),
    SUPEXP_STK_OKAY( 0, RTCrc32Start),
    SUPEXP_STK_OKAY( 1, RTErrConvertFromErrno),
    SUPEXP_STK_OKAY( 1, RTErrConvertToErrno),
    SUPEXP_STK_BACK( 4, RTHandleTableAllocWithCtx),
    SUPEXP_STK_BACK( 1, RTHandleTableCreate),
    SUPEXP_STK_BACKF( 6, RTHandleTableCreateEx),
    SUPEXP_STK_BACKF( 3, RTHandleTableDestroy),
    SUPEXP_STK_BACK( 3, RTHandleTableFreeWithCtx),
    SUPEXP_STK_BACK( 3, RTHandleTableLookupWithCtx),
    SUPEXP_STK_BACK( 4, RTLogBulkNestedWrite),
    SUPEXP_STK_BACK( 5, RTLogBulkUpdate),
    SUPEXP_STK_BACK( 2, RTLogCheckGroupFlags),
    SUPEXP_STK_BACKF( 17, RTLogCreateExV),
    SUPEXP_STK_BACK( 1, RTLogDestroy),
    SUPEXP_STK_BACK( 0, RTLogDefaultInstance),
    SUPEXP_STK_BACK( 1, RTLogDefaultInstanceEx),
    SUPEXP_STK_BACK( 1, SUPR0DefaultLogInstanceEx),
    SUPEXP_STK_BACK( 0, RTLogGetDefaultInstance),
    SUPEXP_STK_BACK( 1, RTLogGetDefaultInstanceEx),
    SUPEXP_STK_BACK( 1, SUPR0GetDefaultLogInstanceEx),
    SUPEXP_STK_BACK( 5, RTLogLoggerExV),
    SUPEXP_STK_BACK( 2, RTLogPrintfV),
    SUPEXP_STK_BACK( 0, RTLogRelGetDefaultInstance),
    SUPEXP_STK_BACK( 1, RTLogRelGetDefaultInstanceEx),
    SUPEXP_STK_BACK( 1, SUPR0GetDefaultLogRelInstanceEx),
    SUPEXP_STK_BACK( 2, RTLogSetDefaultInstanceThread),
    SUPEXP_STK_BACKF( 2, RTLogSetFlushCallback),
    SUPEXP_STK_BACK( 2, RTLogSetR0ProgramStart),
    SUPEXP_STK_BACK( 3, RTLogSetR0ThreadNameV),
    SUPEXP_STK_BACK( 5, RTMemAllocExTag),
    SUPEXP_STK_BACK( 2, RTMemAllocTag),
    SUPEXP_STK_BACK( 2, RTMemAllocVarTag),
    SUPEXP_STK_BACK( 2, RTMemAllocZTag),
    SUPEXP_STK_BACK( 2, RTMemAllocZVarTag),
    SUPEXP_STK_BACK( 4, RTMemDupExTag),
    SUPEXP_STK_BACK( 3, RTMemDupTag),
    SUPEXP_STK_BACK( 1, RTMemFree),
    SUPEXP_STK_BACK( 2, RTMemFreeEx),
    SUPEXP_STK_BACK( 3, RTMemReallocTag),
    SUPEXP_STK_BACK( 0, RTMpCpuId),
    SUPEXP_STK_BACK( 1, RTMpCpuIdFromSetIndex),
    SUPEXP_STK_BACK( 1, RTMpCpuIdToSetIndex),
    SUPEXP_STK_BACK( 0, RTMpCurSetIndex),
    SUPEXP_STK_BACK( 1, RTMpCurSetIndexAndId),
    SUPEXP_STK_BACK( 0, RTMpGetArraySize),
    SUPEXP_STK_BACK( 0, RTMpGetCount),
    SUPEXP_STK_BACK( 0, RTMpGetMaxCpuId),
    SUPEXP_STK_BACK( 0, RTMpGetOnlineCount),
    SUPEXP_STK_BACK( 1, RTMpGetOnlineSet),
    SUPEXP_STK_BACK( 1, RTMpGetSet),
    SUPEXP_STK_BACK( 1, RTMpIsCpuOnline),
    SUPEXP_STK_BACK( 1, RTMpIsCpuPossible),
    SUPEXP_STK_BACK( 0, RTMpIsCpuWorkPending),
    SUPEXP_STK_BACKF( 2, RTMpNotificationDeregister),
    SUPEXP_STK_BACKF( 2, RTMpNotificationRegister),
    SUPEXP_STK_BACKF( 3, RTMpOnAll),
    SUPEXP_STK_BACKF( 3, RTMpOnOthers),
    SUPEXP_STK_BACKF( 4, RTMpOnSpecific),
    SUPEXP_STK_BACK( 1, RTMpPokeCpu),
    SUPEXP_STK_OKAY( 4, RTNetIPv4AddDataChecksum),
    SUPEXP_STK_OKAY( 2, RTNetIPv4AddTCPChecksum),
    SUPEXP_STK_OKAY( 2, RTNetIPv4AddUDPChecksum),
    SUPEXP_STK_OKAY( 1, RTNetIPv4FinalizeChecksum),
    SUPEXP_STK_OKAY( 1, RTNetIPv4HdrChecksum),
    SUPEXP_STK_OKAY( 4, RTNetIPv4IsDHCPValid),
    SUPEXP_STK_OKAY( 4, RTNetIPv4IsHdrValid),
    SUPEXP_STK_OKAY( 4, RTNetIPv4IsTCPSizeValid),
    SUPEXP_STK_OKAY( 6, RTNetIPv4IsTCPValid),
    SUPEXP_STK_OKAY( 3, RTNetIPv4IsUDPSizeValid),
    SUPEXP_STK_OKAY( 5, RTNetIPv4IsUDPValid),
    SUPEXP_STK_OKAY( 1, RTNetIPv4PseudoChecksum),
    SUPEXP_STK_OKAY( 4, RTNetIPv4PseudoChecksumBits),
    SUPEXP_STK_OKAY( 3, RTNetIPv4TCPChecksum),
    SUPEXP_STK_OKAY( 3, RTNetIPv4UDPChecksum),
    SUPEXP_STK_OKAY( 1, RTNetIPv6PseudoChecksum),
    SUPEXP_STK_OKAY( 4, RTNetIPv6PseudoChecksumBits),
    SUPEXP_STK_OKAY( 3, RTNetIPv6PseudoChecksumEx),
    SUPEXP_STK_OKAY( 4, RTNetTCPChecksum),
    SUPEXP_STK_OKAY( 2, RTNetUDPChecksum),
    SUPEXP_STK_BACKF( 2, RTPowerNotificationDeregister),
    SUPEXP_STK_BACKF( 2, RTPowerNotificationRegister),
    SUPEXP_STK_BACK( 0, RTProcSelf),
    SUPEXP_STK_BACK( 0, RTR0AssertPanicSystem),
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
    SUPEXP_STK_BACK( 2, RTR0DbgKrnlInfoOpen), /* only-darwin, only-solaris, only-windows */
    SUPEXP_STK_BACK( 5, RTR0DbgKrnlInfoQueryMember), /* only-darwin, only-solaris, only-windows */
# if defined(RT_OS_SOLARIS)
    SUPEXP_STK_BACK( 4, RTR0DbgKrnlInfoQuerySize), /* only-solaris */
# endif
    SUPEXP_STK_BACK( 4, RTR0DbgKrnlInfoQuerySymbol), /* only-darwin, only-solaris, only-windows */
    SUPEXP_STK_BACK( 1, RTR0DbgKrnlInfoRelease), /* only-darwin, only-solaris, only-windows */
    SUPEXP_STK_BACK( 1, RTR0DbgKrnlInfoRetain), /* only-darwin, only-solaris, only-windows */
#endif
    SUPEXP_STK_BACK( 0, RTR0MemAreKrnlAndUsrDifferent),
    SUPEXP_STK_BACK( 1, RTR0MemKernelIsValidAddr),
    SUPEXP_STK_BACK( 3, RTR0MemKernelCopyFrom),
    SUPEXP_STK_BACK( 3, RTR0MemKernelCopyTo),
    SUPEXP_STK_OKAY( 1, RTR0MemObjAddress),
    SUPEXP_STK_OKAY( 1, RTR0MemObjAddressR3),
    SUPEXP_STK_BACK( 5, RTR0MemObjAllocContTag),
    SUPEXP_STK_BACK( 5, RTR0MemObjAllocLargeTag),
    SUPEXP_STK_BACK( 4, RTR0MemObjAllocLowTag),
    SUPEXP_STK_BACK( 4, RTR0MemObjAllocPageTag),
    SUPEXP_STK_BACK( 5, RTR0MemObjAllocPhysExTag),
    SUPEXP_STK_BACK( 4, RTR0MemObjAllocPhysNCTag),
    SUPEXP_STK_BACK( 4, RTR0MemObjAllocPhysTag),
    SUPEXP_STK_BACK( 5, RTR0MemObjEnterPhysTag),
    SUPEXP_STK_BACK( 2, RTR0MemObjFree),
    SUPEXP_STK_BACK( 2, RTR0MemObjGetPagePhysAddr),
    SUPEXP_STK_OKAY( 1, RTR0MemObjIsMapping),
    SUPEXP_STK_BACK( 6, RTR0MemObjLockUserTag),
    SUPEXP_STK_BACK( 5, RTR0MemObjLockKernelTag),
    SUPEXP_STK_BACK( 8, RTR0MemObjMapKernelExTag),
    SUPEXP_STK_BACK( 6, RTR0MemObjMapKernelTag),
    SUPEXP_STK_BACK( 9, RTR0MemObjMapUserExTag),
    SUPEXP_STK_BACK( 7, RTR0MemObjMapUserTag),
    SUPEXP_STK_BACK( 4, RTR0MemObjProtect),
    SUPEXP_STK_OKAY( 1, RTR0MemObjSize),
    SUPEXP_STK_OKAY( 1, RTR0MemObjWasZeroInitialized),
    SUPEXP_STK_OKAY( 2, RTR0MemObjZeroInitialize),
    SUPEXP_STK_BACK( 3, RTR0MemUserCopyFrom),
    SUPEXP_STK_BACK( 3, RTR0MemUserCopyTo),
    SUPEXP_STK_BACK( 1, RTR0MemUserIsValidAddr),
    SUPEXP_STK_BACK( 0, RTR0ProcHandleSelf),
    SUPEXP_STK_BACK( 1, RTSemEventCreate),
    SUPEXP_STK_BACK( 1, RTSemEventDestroy),
    SUPEXP_STK_BACK( 0, RTSemEventGetResolution),
    SUPEXP_STK_BACK( 0, RTSemEventIsSignalSafe),
    SUPEXP_STK_BACK( 1, RTSemEventMultiCreate),
    SUPEXP_STK_BACK( 1, RTSemEventMultiDestroy),
    SUPEXP_STK_BACK( 0, RTSemEventMultiGetResolution),
    SUPEXP_STK_BACK( 0, RTSemEventMultiIsSignalSafe),
    SUPEXP_STK_BACK( 1, RTSemEventMultiReset),
    SUPEXP_STK_BACK( 1, RTSemEventMultiSignal),
    SUPEXP_STK_BACK( 2, RTSemEventMultiWait),
    SUPEXP_STK_BACK( 3, RTSemEventMultiWaitEx),
    SUPEXP_STK_BACK( 7, RTSemEventMultiWaitExDebug),
    SUPEXP_STK_BACK( 2, RTSemEventMultiWaitNoResume),
    SUPEXP_STK_BACK( 1, RTSemEventSignal),
    SUPEXP_STK_BACK( 2, RTSemEventWait),
    SUPEXP_STK_BACK( 3, RTSemEventWaitEx),
    SUPEXP_STK_BACK( 7, RTSemEventWaitExDebug),
    SUPEXP_STK_BACK( 2, RTSemEventWaitNoResume),
    SUPEXP_STK_BACK( 1, RTSemFastMutexCreate),
    SUPEXP_STK_BACK( 1, RTSemFastMutexDestroy),
    SUPEXP_STK_BACK( 1, RTSemFastMutexRelease),
    SUPEXP_STK_BACK( 1, RTSemFastMutexRequest),
    SUPEXP_STK_BACK( 1, RTSemMutexCreate),
    SUPEXP_STK_BACK( 1, RTSemMutexDestroy),
    SUPEXP_STK_BACK( 1, RTSemMutexRelease),
    SUPEXP_STK_BACK( 2, RTSemMutexRequest),
    SUPEXP_STK_BACK( 6, RTSemMutexRequestDebug),
    SUPEXP_STK_BACK( 2, RTSemMutexRequestNoResume),
    SUPEXP_STK_BACK( 6, RTSemMutexRequestNoResumeDebug),
    SUPEXP_STK_BACK( 1, RTSpinlockAcquire),
    SUPEXP_STK_BACK( 3, RTSpinlockCreate),
    SUPEXP_STK_BACK( 1, RTSpinlockDestroy),
    SUPEXP_STK_BACK( 1, RTSpinlockRelease),
    SUPEXP_STK_OKAY( 3, RTStrCopy),
    SUPEXP_STK_BACK( 2, RTStrDupTag),
    SUPEXP_STK_BACK( 6, RTStrFormatNumber),
    SUPEXP_STK_BACK( 1, RTStrFormatTypeDeregister),
    SUPEXP_STK_BACKF( 3, RTStrFormatTypeRegister),
    SUPEXP_STK_BACKF( 2, RTStrFormatTypeSetUser),
    SUPEXP_STK_BACKF( 6, RTStrFormatV),
    SUPEXP_STK_BACK( 1, RTStrFree),
    SUPEXP_STK_OKAY( 3, RTStrNCmp),
    SUPEXP_STK_BACKF( 6, RTStrPrintfExV),
    SUPEXP_STK_BACK( 4, RTStrPrintfV),
    SUPEXP_STK_BACKF( 6, RTStrPrintf2ExV),
    SUPEXP_STK_BACK( 4, RTStrPrintf2V),
    SUPEXP_STK_BACKF( 7, RTThreadCreate),
    SUPEXP_STK_BACK( 1, RTThreadCtxHookIsEnabled),
    SUPEXP_STK_BACKF( 4, RTThreadCtxHookCreate),
    SUPEXP_STK_BACK( 1, RTThreadCtxHookDestroy),
    SUPEXP_STK_BACK( 1, RTThreadCtxHookDisable),
    SUPEXP_STK_BACK( 1, RTThreadCtxHookEnable),
    SUPEXP_STK_BACK( 1, RTThreadGetName),
    SUPEXP_STK_BACK( 1, RTThreadGetNative),
    SUPEXP_STK_BACK( 1, RTThreadGetType),
    SUPEXP_STK_BACK( 1, RTThreadIsInInterrupt),
    SUPEXP_STK_BACK( 0, RTThreadNativeSelf),
    SUPEXP_STK_BACK( 1, RTThreadPreemptDisable),
    SUPEXP_STK_BACK( 1, RTThreadPreemptIsEnabled),
    SUPEXP_STK_BACK( 1, RTThreadPreemptIsPending),
    SUPEXP_STK_BACK( 0, RTThreadPreemptIsPendingTrusty),
    SUPEXP_STK_BACK( 0, RTThreadPreemptIsPossible),
    SUPEXP_STK_BACK( 1, RTThreadPreemptRestore),
    SUPEXP_STK_BACK( 1, RTThreadQueryTerminationStatus),
    SUPEXP_STK_BACK( 0, RTThreadSelf),
    SUPEXP_STK_BACK( 0, RTThreadSelfName),
    SUPEXP_STK_BACK( 1, RTThreadSleep),
    SUPEXP_STK_BACK( 1, RTThreadUserReset),
    SUPEXP_STK_BACK( 1, RTThreadUserSignal),
    SUPEXP_STK_BACK( 2, RTThreadUserWait),
    SUPEXP_STK_BACK( 2, RTThreadUserWaitNoResume),
    SUPEXP_STK_BACK( 3, RTThreadWait),
    SUPEXP_STK_BACK( 3, RTThreadWaitNoResume),
    SUPEXP_STK_BACK( 0, RTThreadYield),
    SUPEXP_STK_BACK( 1, RTTimeNow),
    SUPEXP_STK_BACK( 0, RTTimerCanDoHighResolution),
    SUPEXP_STK_BACK( 2, RTTimerChangeInterval),
    SUPEXP_STK_BACKF( 4, RTTimerCreate),
    SUPEXP_STK_BACKF( 5, RTTimerCreateEx),
    SUPEXP_STK_BACK( 1, RTTimerDestroy),
    SUPEXP_STK_BACK( 0, RTTimerGetSystemGranularity),
    SUPEXP_STK_BACK( 1, RTTimerReleaseSystemGranularity),
    SUPEXP_STK_BACK( 2, RTTimerRequestSystemGranularity),
    SUPEXP_STK_BACK( 2, RTTimerStart),
    SUPEXP_STK_BACK( 1, RTTimerStop),
    SUPEXP_STK_BACK( 0, RTTimeSystemMilliTS),
    SUPEXP_STK_BACK( 0, RTTimeSystemNanoTS),
    SUPEXP_STK_OKAY( 2, RTUuidCompare),
    SUPEXP_STK_OKAY( 2, RTUuidCompareStr),
    SUPEXP_STK_OKAY( 2, RTUuidFromStr),
/* SED: END */
};
580
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on darwin.
 */
struct CLANG11WERIDNESS { PFNRT pfn; } g_apfnVBoxDrvIPRTDeps[] =
{
    /* VBoxNetAdp */
    { (PFNRT)RTRandBytes },
    /* VBoxUSB */
    { (PFNRT)RTPathStripFilename },
#if !defined(RT_OS_FREEBSD)
    { (PFNRT)RTHandleTableAlloc },
    { (PFNRT)RTStrPurgeEncoding },
#endif
    { NULL } /* terminator */
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
599
600
601
/**
 * Initializes the device extension structure.
 *
 * @returns IPRT status code.
 * @param pDevExt The device extension to initialize.
 * @param cbSession The size of the session structure. The size of
 * SUPDRVSESSION may be smaller when SUPDRV_AGNOSTIC is
 * defined because we're skipping the OS specific members
 * then.
 */
612int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt, size_t cbSession)
613{
614 int rc;
615
616#ifdef SUPDRV_WITH_RELEASE_LOGGER
617 /*
618 * Create the release log.
619 */
620 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
621 PRTLOGGER pRelLogger;
622 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
623 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
624 if (RT_SUCCESS(rc))
625 RTLogRelSetDefaultInstance(pRelLogger);
626 /** @todo Add native hook for getting logger config parameters and setting
627 * them. On linux we should use the module parameter stuff... */
628#endif
629
630#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
631 /*
632 * Require SSE2 to be present.
633 */
634 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
635 {
636 SUPR0Printf("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1));
637 return VERR_UNSUPPORTED_CPU;
638 }
639#endif
640
641 /*
642 * Initialize it.
643 */
644 memset(pDevExt, 0, sizeof(*pDevExt)); /* Does not wipe OS specific tail section of the structure. */
645 pDevExt->Spinlock = NIL_RTSPINLOCK;
646 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
647 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
648#ifdef SUPDRV_USE_MUTEX_FOR_LDR
649 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
650#else
651 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
652#endif
653#ifdef SUPDRV_USE_MUTEX_FOR_GIP
654 pDevExt->mtxGip = NIL_RTSEMMUTEX;
655 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
656#else
657 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
658 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
659#endif
660
661 rc = RTSpinlockCreate(&pDevExt->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvDevExt");
662 if (RT_SUCCESS(rc))
663 rc = RTSpinlockCreate(&pDevExt->hGipSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvGip");
664 if (RT_SUCCESS(rc))
665 rc = RTSpinlockCreate(&pDevExt->hSessionHashTabSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvSession");
666
667 if (RT_SUCCESS(rc))
668#ifdef SUPDRV_USE_MUTEX_FOR_LDR
669 rc = RTSemMutexCreate(&pDevExt->mtxLdr);
670#else
671 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
672#endif
673 if (RT_SUCCESS(rc))
674#ifdef SUPDRV_USE_MUTEX_FOR_GIP
675 rc = RTSemMutexCreate(&pDevExt->mtxTscDelta);
676#else
677 rc = RTSemFastMutexCreate(&pDevExt->mtxTscDelta);
678#endif
679 if (RT_SUCCESS(rc))
680 {
681 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
682 if (RT_SUCCESS(rc))
683 {
684#ifdef SUPDRV_USE_MUTEX_FOR_GIP
685 rc = RTSemMutexCreate(&pDevExt->mtxGip);
686#else
687 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
688#endif
689 if (RT_SUCCESS(rc))
690 {
691 rc = supdrvGipCreate(pDevExt);
692 if (RT_SUCCESS(rc))
693 {
694 rc = supdrvTracerInit(pDevExt);
695 if (RT_SUCCESS(rc))
696 {
697 pDevExt->pLdrInitImage = NULL;
698 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
699 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
700 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
701 pDevExt->cbSession = (uint32_t)cbSession;
702
703 /*
704 * Fixup the absolute symbols.
705 *
706 * Because of the table indexing assumptions we'll have a little #ifdef orgy
707 * here rather than distributing this to OS specific files. At least for now.
708 */
709#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
710# ifdef RT_OS_DARWIN
711# if ARCH_BITS == 32
712 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
713 {
714 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
715 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
716 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
717 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
718 }
719 else
720 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
721 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
722 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
723 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
724 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
725 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
726 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
727# else /* 64-bit darwin: */
728 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
729 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
730 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
731 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
732 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
733 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
734 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
735 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
736 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
737 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
738
739# endif
740# else /* !RT_OS_DARWIN */
741# if ARCH_BITS == 64
742 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
743 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
744 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
745 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
746# else
747 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
748# endif
749 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
750 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
751 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
752 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
753 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
754 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
755# endif /* !RT_OS_DARWIN */
756#endif /* AMD64 || X86 */
757 return VINF_SUCCESS;
758 }
759
760 supdrvGipDestroy(pDevExt);
761 }
762
763#ifdef SUPDRV_USE_MUTEX_FOR_GIP
764 RTSemMutexDestroy(pDevExt->mtxGip);
765 pDevExt->mtxGip = NIL_RTSEMMUTEX;
766#else
767 RTSemFastMutexDestroy(pDevExt->mtxGip);
768 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
769#endif
770 }
771 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
772 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
773 }
774 }
775
776#ifdef SUPDRV_USE_MUTEX_FOR_GIP
777 RTSemMutexDestroy(pDevExt->mtxTscDelta);
778 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
779#else
780 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
781 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
782#endif
783#ifdef SUPDRV_USE_MUTEX_FOR_LDR
784 RTSemMutexDestroy(pDevExt->mtxLdr);
785 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
786#else
787 RTSemFastMutexDestroy(pDevExt->mtxLdr);
788 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
789#endif
790 RTSpinlockDestroy(pDevExt->Spinlock);
791 pDevExt->Spinlock = NIL_RTSPINLOCK;
792 RTSpinlockDestroy(pDevExt->hGipSpinlock);
793 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
794 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
795 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
796
797#ifdef SUPDRV_WITH_RELEASE_LOGGER
798 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
799 RTLogDestroy(RTLogSetDefaultInstance(NULL));
800#endif
801
802 return rc;
803}
804
805
806/**
807 * Delete the device extension (e.g. cleanup members).
808 *
809 * @param pDevExt The device extension to delete.
810 */
811void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
812{
813 PSUPDRVOBJ pObj;
814 PSUPDRVUSAGE pUsage;
815
816 /*
817 * Kill mutexes and spinlocks.
818 */
819#ifdef SUPDRV_USE_MUTEX_FOR_GIP
820 RTSemMutexDestroy(pDevExt->mtxGip);
821 pDevExt->mtxGip = NIL_RTSEMMUTEX;
822 RTSemMutexDestroy(pDevExt->mtxTscDelta);
823 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
824#else
825 RTSemFastMutexDestroy(pDevExt->mtxGip);
826 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
827 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
828 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
829#endif
830#ifdef SUPDRV_USE_MUTEX_FOR_LDR
831 RTSemMutexDestroy(pDevExt->mtxLdr);
832 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
833#else
834 RTSemFastMutexDestroy(pDevExt->mtxLdr);
835 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
836#endif
837 RTSpinlockDestroy(pDevExt->Spinlock);
838 pDevExt->Spinlock = NIL_RTSPINLOCK;
839 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
840 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
841 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
842 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
843
844 /*
845 * Free lists.
846 */
847 /* objects. */
848 pObj = pDevExt->pObjs;
849 Assert(!pObj); /* (can trigger on forced unloads) */
850 pDevExt->pObjs = NULL;
851 while (pObj)
852 {
853 void *pvFree = pObj;
854 pObj = pObj->pNext;
855 RTMemFree(pvFree);
856 }
857
858 /* usage records. */
859 pUsage = pDevExt->pUsageFree;
860 pDevExt->pUsageFree = NULL;
861 while (pUsage)
862 {
863 void *pvFree = pUsage;
864 pUsage = pUsage->pNext;
865 RTMemFree(pvFree);
866 }
867
868 /* kill the GIP. */
869 supdrvGipDestroy(pDevExt);
870 RTSpinlockDestroy(pDevExt->hGipSpinlock);
871 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
872
873 supdrvTracerTerm(pDevExt);
874
875#ifdef SUPDRV_WITH_RELEASE_LOGGER
876 /* destroy the loggers. */
877 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
878 RTLogDestroy(RTLogSetDefaultInstance(NULL));
879#endif
880}
881
882
883/**
884 * Create session.
885 *
886 * @returns IPRT status code.
887 * @param pDevExt Device extension.
888 * @param fUser Flag indicating whether this is a user or kernel
889 * session.
890 * @param fUnrestricted Unrestricted access (system) or restricted access
891 * (user)?
892 * @param ppSession Where to store the pointer to the session data.
893 */
894int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, bool fUnrestricted, PSUPDRVSESSION *ppSession)
895{
896 int rc;
897 PSUPDRVSESSION pSession;
898
899 if (!SUP_IS_DEVEXT_VALID(pDevExt))
900 return VERR_INVALID_PARAMETER;
901
902 /*
903 * Allocate memory for the session data.
904 */
905 pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(pDevExt->cbSession);
906 if (pSession)
907 {
908 /* Initialize session data. */
909 rc = RTSpinlockCreate(&pSession->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "SUPDrvSession");
910 if (!rc)
911 {
912 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
913 RTHANDLETABLE_FLAGS_LOCKED_IRQ_SAFE | RTHANDLETABLE_FLAGS_CONTEXT,
914 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
915 if (RT_SUCCESS(rc))
916 {
917 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
918 pSession->pDevExt = pDevExt;
919 pSession->u32Cookie = BIRD_INV;
920 pSession->fUnrestricted = fUnrestricted;
921 /*pSession->fInHashTable = false; */
922 pSession->cRefs = 1;
923 /*pSession->pCommonNextHash = NULL;
924 pSession->ppOsSessionPtr = NULL; */
925 if (fUser)
926 {
927 pSession->Process = RTProcSelf();
928 pSession->R0Process = RTR0ProcHandleSelf();
929 }
930 else
931 {
932 pSession->Process = NIL_RTPROCESS;
933 pSession->R0Process = NIL_RTR0PROCESS;
934 }
935 /*pSession->pLdrUsage = NULL;
936 pSession->pVM = NULL;
937 pSession->pUsage = NULL;
938 pSession->pGip = NULL;
939 pSession->fGipReferenced = false;
940 pSession->Bundle.cUsed = 0; */
941 pSession->Uid = NIL_RTUID;
942 pSession->Gid = NIL_RTGID;
943 /*pSession->uTracerData = 0;*/
944 pSession->hTracerCaller = NIL_RTNATIVETHREAD;
945 RTListInit(&pSession->TpProviders);
946 /*pSession->cTpProviders = 0;*/
947 /*pSession->cTpProbesFiring = 0;*/
948 RTListInit(&pSession->TpUmods);
949 /*RT_ZERO(pSession->apTpLookupTable);*/
950
951 VBOXDRV_SESSION_CREATE(pSession, fUser);
952 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
953 return VINF_SUCCESS;
954 }
955
956 RTSpinlockDestroy(pSession->Spinlock);
957 }
958 RTMemFree(pSession);
959 *ppSession = NULL;
960 Log(("Failed to create spinlock, rc=%d!\n", rc));
961 }
962 else
963 rc = VERR_NO_MEMORY;
964
965 return rc;
966}
967
968
969/**
970 * Cleans up the session in the context of the process to which it belongs, the
971 * caller will free the session and the session spinlock.
972 *
973 * This should normally occur when the session is closed or as the process
974 * exits. Careful reference counting in the OS specfic code makes sure that
975 * there cannot be any races between process/handle cleanup callbacks and
976 * threads doing I/O control calls.
977 *
978 * @param pDevExt The device extension.
979 * @param pSession Session data.
980 */
981static void supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
982{
983 int rc;
984 PSUPDRVBUNDLE pBundle;
985 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
986
987 Assert(!pSession->fInHashTable);
988 Assert(!pSession->ppOsSessionPtr);
989 AssertLogRelMsg(pSession->R0Process == RTR0ProcHandleSelf() || pSession->R0Process == NIL_RTR0PROCESS,
990 ("R0Process=%p cur=%p; curpid=%u\n",
991 pSession->R0Process, RTR0ProcHandleSelf(), RTProcSelf()));
992
993 /*
994 * Remove logger instances related to this session.
995 */
996 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
997
998 /*
999 * Destroy the handle table.
1000 */
1001 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
1002 AssertRC(rc);
1003 pSession->hHandleTable = NIL_RTHANDLETABLE;
1004
1005 /*
1006 * Release object references made in this session.
1007 * In theory there should be noone racing us in this session.
1008 */
1009 Log2(("release objects - start\n"));
1010 if (pSession->pUsage)
1011 {
1012 PSUPDRVUSAGE pUsage;
1013 RTSpinlockAcquire(pDevExt->Spinlock);
1014
1015 while ((pUsage = pSession->pUsage) != NULL)
1016 {
1017 PSUPDRVOBJ pObj = pUsage->pObj;
1018 pSession->pUsage = pUsage->pNext;
1019
1020 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
1021 if (pUsage->cUsage < pObj->cUsage)
1022 {
1023 pObj->cUsage -= pUsage->cUsage;
1024 RTSpinlockRelease(pDevExt->Spinlock);
1025 }
1026 else
1027 {
1028 /* Destroy the object and free the record. */
1029 if (pDevExt->pObjs == pObj)
1030 pDevExt->pObjs = pObj->pNext;
1031 else
1032 {
1033 PSUPDRVOBJ pObjPrev;
1034 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
1035 if (pObjPrev->pNext == pObj)
1036 {
1037 pObjPrev->pNext = pObj->pNext;
1038 break;
1039 }
1040 Assert(pObjPrev);
1041 }
1042 RTSpinlockRelease(pDevExt->Spinlock);
1043
1044 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
1045 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
1046 if (pObj->pfnDestructor)
1047 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
1048 RTMemFree(pObj);
1049 }
1050
1051 /* free it and continue. */
1052 RTMemFree(pUsage);
1053
1054 RTSpinlockAcquire(pDevExt->Spinlock);
1055 }
1056
1057 RTSpinlockRelease(pDevExt->Spinlock);
1058 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during desturction!\n"));
1059 }
1060 Log2(("release objects - done\n"));
1061
1062 /*
1063 * Make sure the associated VM pointers are NULL.
1064 */
1065 if (pSession->pSessionGVM || pSession->pSessionVM || pSession->pFastIoCtrlVM)
1066 {
1067 SUPR0Printf("supdrvCleanupSession: VM not disassociated! pSessionGVM=%p pSessionVM=%p pFastIoCtrlVM=%p\n",
1068 pSession->pSessionGVM, pSession->pSessionVM, pSession->pFastIoCtrlVM);
1069 pSession->pSessionGVM = NULL;
1070 pSession->pSessionVM = NULL;
1071 pSession->pFastIoCtrlVM = NULL;
1072 }
1073
1074 /*
1075 * Do tracer cleanups related to this session.
1076 */
1077 Log2(("release tracer stuff - start\n"));
1078 supdrvTracerCleanupSession(pDevExt, pSession);
1079 Log2(("release tracer stuff - end\n"));
1080
1081 /*
1082 * Release memory allocated in the session.
1083 *
1084 * We do not serialize this as we assume that the application will
1085 * not allocated memory while closing the file handle object.
1086 */
1087 Log2(("freeing memory:\n"));
1088 pBundle = &pSession->Bundle;
1089 while (pBundle)
1090 {
1091 PSUPDRVBUNDLE pToFree;
1092 unsigned i;
1093
1094 /*
1095 * Check and unlock all entries in the bundle.
1096 */
1097 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
1098 {
1099 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
1100 {
1101 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
1102 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
1103 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
1104 {
1105 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
1106 AssertRC(rc); /** @todo figure out how to handle this. */
1107 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
1108 }
1109 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
1110 AssertRC(rc); /** @todo figure out how to handle this. */
1111 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
1112 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
1113 }
1114 }
1115
1116 /*
1117 * Advance and free previous bundle.
1118 */
1119 pToFree = pBundle;
1120 pBundle = pBundle->pNext;
1121
1122 pToFree->pNext = NULL;
1123 pToFree->cUsed = 0;
1124 if (pToFree != &pSession->Bundle)
1125 RTMemFree(pToFree);
1126 }
1127 Log2(("freeing memory - done\n"));
1128
1129 /*
1130 * Deregister component factories.
1131 */
1132 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
1133 Log2(("deregistering component factories:\n"));
1134 if (pDevExt->pComponentFactoryHead)
1135 {
1136 PSUPDRVFACTORYREG pPrev = NULL;
1137 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
1138 while (pCur)
1139 {
1140 if (pCur->pSession == pSession)
1141 {
1142 /* unlink it */
1143 PSUPDRVFACTORYREG pNext = pCur->pNext;
1144 if (pPrev)
1145 pPrev->pNext = pNext;
1146 else
1147 pDevExt->pComponentFactoryHead = pNext;
1148
1149 /* free it */
1150 pCur->pNext = NULL;
1151 pCur->pSession = NULL;
1152 pCur->pFactory = NULL;
1153 RTMemFree(pCur);
1154
1155 /* next */
1156 pCur = pNext;
1157 }
1158 else
1159 {
1160 /* next */
1161 pPrev = pCur;
1162 pCur = pCur->pNext;
1163 }
1164 }
1165 }
1166 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
1167 Log2(("deregistering component factories - done\n"));
1168
1169 /*
1170 * Loaded images needs to be dereferenced and possibly freed up.
1171 */
1172 supdrvLdrLock(pDevExt);
1173 Log2(("freeing images:\n"));
1174 if (pSession->pLdrUsage)
1175 {
1176 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
1177 pSession->pLdrUsage = NULL;
1178 while (pUsage)
1179 {
1180 void *pvFree = pUsage;
1181 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
1182 uint32_t cUsage = pUsage->cRing0Usage + pUsage->cRing3Usage;
1183 if (pImage->cImgUsage > cUsage)
1184 supdrvLdrSubtractUsage(pDevExt, pImage, cUsage);
1185 else
1186 supdrvLdrFree(pDevExt, pImage);
1187 pUsage->pImage = NULL;
1188 pUsage = pUsage->pNext;
1189 RTMemFree(pvFree);
1190 }
1191 }
1192 supdrvLdrUnlock(pDevExt);
1193 Log2(("freeing images - done\n"));
1194
1195 /*
1196 * Unmap the GIP.
1197 */
1198 Log2(("umapping GIP:\n"));
1199 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
1200 {
1201 SUPR0GipUnmap(pSession);
1202 pSession->fGipReferenced = 0;
1203 }
1204 Log2(("umapping GIP - done\n"));
1205}
1206
1207
1208/**
1209 * Common code for freeing a session when the reference count reaches zero.
1210 *
1211 * @param pDevExt Device extension.
1212 * @param pSession Session data.
1213 * This data will be freed by this routine.
1214 */
1215static void supdrvDestroySession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1216{
1217 VBOXDRV_SESSION_CLOSE(pSession);
1218
1219 /*
1220 * Cleanup the session first.
1221 */
1222 supdrvCleanupSession(pDevExt, pSession);
1223 supdrvOSCleanupSession(pDevExt, pSession);
1224
1225 /*
1226 * Free the rest of the session stuff.
1227 */
1228 RTSpinlockDestroy(pSession->Spinlock);
1229 pSession->Spinlock = NIL_RTSPINLOCK;
1230 pSession->pDevExt = NULL;
1231 RTMemFree(pSession);
1232 LogFlow(("supdrvDestroySession: returns\n"));
1233}
1234
1235
1236/**
1237 * Inserts the session into the global hash table.
1238 *
1239 * @retval VINF_SUCCESS on success.
1240 * @retval VERR_WRONG_ORDER if the session was already inserted (asserted).
1241 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1242 * session (asserted).
1243 * @retval VERR_DUPLICATE if there is already a session for that pid.
1244 *
1245 * @param pDevExt The device extension.
1246 * @param pSession The session.
1247 * @param ppOsSessionPtr Pointer to the OS session pointer, if any is
1248 * available and used. This will set to point to the
1249 * session while under the protection of the session
1250 * hash table spinlock. It will also be kept in
1251 * PSUPDRVSESSION::ppOsSessionPtr for lookup and
1252 * cleanup use.
1253 * @param pvUser Argument for supdrvOSSessionHashTabInserted.
1254 */
1255int VBOXCALL supdrvSessionHashTabInsert(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVSESSION *ppOsSessionPtr,
1256 void *pvUser)
1257{
1258 PSUPDRVSESSION pCur;
1259 unsigned iHash;
1260
1261 /*
1262 * Validate input.
1263 */
1264 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1265 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1266
1267 /*
1268 * Calculate the hash table index and acquire the spinlock.
1269 */
1270 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1271
1272 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1273
1274 /*
1275 * If there are a collisions, we need to carefully check if we got a
1276 * duplicate. There can only be one open session per process.
1277 */
1278 pCur = pDevExt->apSessionHashTab[iHash];
1279 if (pCur)
1280 {
1281 while (pCur && pCur->Process != pSession->Process)
1282 pCur = pCur->pCommonNextHash;
1283
1284 if (pCur)
1285 {
1286 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1287 if (pCur == pSession)
1288 {
1289 Assert(pSession->fInHashTable);
1290 AssertFailed();
1291 return VERR_WRONG_ORDER;
1292 }
1293 Assert(!pSession->fInHashTable);
1294 if (pCur->R0Process == pSession->R0Process)
1295 return VERR_RESOURCE_IN_USE;
1296 return VERR_DUPLICATE;
1297 }
1298 }
1299 Assert(!pSession->fInHashTable);
1300 Assert(!pSession->ppOsSessionPtr);
1301
1302 /*
1303 * Insert it, doing a callout to the OS specific code in case it has
1304 * anything it wishes to do while we're holding the spinlock.
1305 */
1306 pSession->pCommonNextHash = pDevExt->apSessionHashTab[iHash];
1307 pDevExt->apSessionHashTab[iHash] = pSession;
1308 pSession->fInHashTable = true;
1309 ASMAtomicIncS32(&pDevExt->cSessions);
1310
1311 pSession->ppOsSessionPtr = ppOsSessionPtr;
1312 if (ppOsSessionPtr)
1313 ASMAtomicWritePtr(ppOsSessionPtr, pSession);
1314
1315 supdrvOSSessionHashTabInserted(pDevExt, pSession, pvUser);
1316
1317 /*
1318 * Retain a reference for the pointer in the session table.
1319 */
1320 ASMAtomicIncU32(&pSession->cRefs);
1321
1322 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1323 return VINF_SUCCESS;
1324}
1325
1326
1327/**
1328 * Removes the session from the global hash table.
1329 *
1330 * @retval VINF_SUCCESS on success.
1331 * @retval VERR_NOT_FOUND if the session was already removed (asserted).
1332 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1333 * session (asserted).
1334 *
1335 * @param pDevExt The device extension.
1336 * @param pSession The session. The caller is expected to have a reference
1337 * to this so it won't croak on us when we release the hash
1338 * table reference.
1339 * @param pvUser OS specific context value for the
1340 * supdrvOSSessionHashTabInserted callback.
1341 */
1342int VBOXCALL supdrvSessionHashTabRemove(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, void *pvUser)
1343{
1344 PSUPDRVSESSION pCur;
1345 unsigned iHash;
1346 int32_t cRefs;
1347
1348 /*
1349 * Validate input.
1350 */
1351 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1352 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1353
1354 /*
1355 * Calculate the hash table index and acquire the spinlock.
1356 */
1357 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1358
1359 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1360
1361 /*
1362 * Unlink it.
1363 */
1364 pCur = pDevExt->apSessionHashTab[iHash];
1365 if (pCur == pSession)
1366 pDevExt->apSessionHashTab[iHash] = pSession->pCommonNextHash;
1367 else
1368 {
1369 PSUPDRVSESSION pPrev = pCur;
1370 while (pCur && pCur != pSession)
1371 {
1372 pPrev = pCur;
1373 pCur = pCur->pCommonNextHash;
1374 }
1375 if (pCur)
1376 pPrev->pCommonNextHash = pCur->pCommonNextHash;
1377 else
1378 {
1379 Assert(!pSession->fInHashTable);
1380 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1381 return VERR_NOT_FOUND;
1382 }
1383 }
1384
1385 pSession->pCommonNextHash = NULL;
1386 pSession->fInHashTable = false;
1387
1388 ASMAtomicDecS32(&pDevExt->cSessions);
1389
1390 /*
1391 * Clear OS specific session pointer if available and do the OS callback.
1392 */
1393 if (pSession->ppOsSessionPtr)
1394 {
1395 ASMAtomicCmpXchgPtr(pSession->ppOsSessionPtr, NULL, pSession);
1396 pSession->ppOsSessionPtr = NULL;
1397 }
1398
1399 supdrvOSSessionHashTabRemoved(pDevExt, pSession, pvUser);
1400
1401 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1402
1403 /*
1404 * Drop the reference the hash table had to the session. This shouldn't
1405 * be the last reference!
1406 */
1407 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1408 Assert(cRefs > 0 && cRefs < _1M);
1409 if (cRefs == 0)
1410 supdrvDestroySession(pDevExt, pSession);
1411
1412 return VINF_SUCCESS;
1413}
1414
1415
1416/**
1417 * Looks up the session for the current process in the global hash table or in
1418 * OS specific pointer.
1419 *
1420 * @returns Pointer to the session with a reference that the caller must
1421 * release. If no valid session was found, NULL is returned.
1422 *
1423 * @param pDevExt The device extension.
1424 * @param Process The process ID.
1425 * @param R0Process The ring-0 process handle.
1426 * @param ppOsSessionPtr The OS session pointer if available. If not NULL,
1427 * this is used instead of the hash table. For
1428 * additional safety it must then be equal to the
1429 * SUPDRVSESSION::ppOsSessionPtr member.
1430 * This can be NULL even if the OS has a session
1431 * pointer.
1432 */
1433PSUPDRVSESSION VBOXCALL supdrvSessionHashTabLookup(PSUPDRVDEVEXT pDevExt, RTPROCESS Process, RTR0PROCESS R0Process,
1434 PSUPDRVSESSION *ppOsSessionPtr)
1435{
1436 PSUPDRVSESSION pCur;
1437 unsigned iHash;
1438
1439 /*
1440 * Validate input.
1441 */
1442 AssertReturn(R0Process != NIL_RTR0PROCESS, NULL);
1443
1444 /*
1445 * Calculate the hash table index and acquire the spinlock.
1446 */
1447 iHash = SUPDRV_SESSION_HASH(Process);
1448
1449 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1450
1451 /*
1452 * If an OS session pointer is provided, always use it.
1453 */
1454 if (ppOsSessionPtr)
1455 {
1456 pCur = *ppOsSessionPtr;
1457 if ( pCur
1458 && ( pCur->ppOsSessionPtr != ppOsSessionPtr
1459 || pCur->Process != Process
1460 || pCur->R0Process != R0Process) )
1461 pCur = NULL;
1462 }
1463 else
1464 {
1465 /*
1466 * Otherwise, do the hash table lookup.
1467 */
1468 pCur = pDevExt->apSessionHashTab[iHash];
1469 while ( pCur
1470 && ( pCur->Process != Process
1471 || pCur->R0Process != R0Process) )
1472 pCur = pCur->pCommonNextHash;
1473 }
1474
1475 /*
1476 * Retain the session.
1477 */
1478 if (pCur)
1479 {
1480 uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
1481 NOREF(cRefs);
1482 Assert(cRefs > 1 && cRefs < _1M);
1483 }
1484
1485 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1486
1487 return pCur;
1488}
1489
1490
1491/**
1492 * Retain a session to make sure it doesn't go away while it is in use.
1493 *
1494 * @returns New reference count on success, UINT32_MAX on failure.
1495 * @param pSession Session data.
1496 */
1497uint32_t VBOXCALL supdrvSessionRetain(PSUPDRVSESSION pSession)
1498{
1499 uint32_t cRefs;
1500 AssertPtrReturn(pSession, UINT32_MAX);
1501 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1502
1503 cRefs = ASMAtomicIncU32(&pSession->cRefs);
1504 AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1505 return cRefs;
1506}
1507
1508
1509/**
1510 * Releases a given session.
1511 *
1512 * @returns New reference count on success (0 if closed), UINT32_MAX on failure.
1513 * @param pSession Session data.
1514 */
1515uint32_t VBOXCALL supdrvSessionRelease(PSUPDRVSESSION pSession)
1516{
1517 uint32_t cRefs;
1518 AssertPtrReturn(pSession, UINT32_MAX);
1519 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1520
1521 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1522 AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1523 if (cRefs == 0)
1524 supdrvDestroySession(pSession->pDevExt, pSession);
1525 return cRefs;
1526}
1527
1528
1529/**
1530 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1531 *
1532 * @returns IPRT status code, see SUPR0ObjAddRef.
1533 * @param hHandleTable The handle table handle. Ignored.
1534 * @param pvObj The object pointer.
1535 * @param pvCtx Context, the handle type. Ignored.
1536 * @param pvUser Session pointer.
1537 */
1538static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
1539{
1540 NOREF(pvCtx);
1541 NOREF(hHandleTable);
1542 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
1543}
1544
1545
1546/**
1547 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1548 *
1549 * @param hHandleTable The handle table handle. Ignored.
1550 * @param h The handle value. Ignored.
1551 * @param pvObj The object pointer.
1552 * @param pvCtx Context, the handle type. Ignored.
1553 * @param pvUser Session pointer.
1554 */
1555static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
1556{
1557 NOREF(pvCtx);
1558 NOREF(h);
1559 NOREF(hHandleTable);
1560 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
1561}
1562
1563
1564/**
1565 * Fast path I/O Control worker.
1566 *
1567 * @returns VBox status code that should be passed down to ring-3 unchanged.
1568 * @param uOperation SUP_VMMR0_DO_XXX (not the I/O control number!).
1569 * @param idCpu VMCPU id.
1570 * @param pDevExt Device extention.
1571 * @param pSession Session data.
1572 */
1573int VBOXCALL supdrvIOCtlFast(uintptr_t uOperation, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1574{
1575 /*
1576 * Validate input and check that the VM has a session.
1577 */
1578 if (RT_LIKELY(RT_VALID_PTR(pSession)))
1579 {
1580 PVM pVM = pSession->pSessionVM;
1581 PGVM pGVM = pSession->pSessionGVM;
1582 if (RT_LIKELY( pGVM != NULL
1583 && pVM != NULL
1584 && pVM == pSession->pFastIoCtrlVM))
1585 {
1586 if (RT_LIKELY(pDevExt->pfnVMMR0EntryFast))
1587 {
1588 /*
1589 * Make the call.
1590 */
1591 pDevExt->pfnVMMR0EntryFast(pGVM, pVM, idCpu, uOperation);
1592 return VINF_SUCCESS;
1593 }
1594
1595 SUPR0Printf("supdrvIOCtlFast: pfnVMMR0EntryFast is NULL\n");
1596 }
1597 else
1598 SUPR0Printf("supdrvIOCtlFast: Misconfig session: pGVM=%p pVM=%p pFastIoCtrlVM=%p\n",
1599 pGVM, pVM, pSession->pFastIoCtrlVM);
1600 }
1601 else
1602 SUPR0Printf("supdrvIOCtlFast: Bad session pointer %p\n", pSession);
1603 return VERR_INTERNAL_ERROR;
1604}
1605
1606
1607/**
1608 * Helper for supdrvIOCtl used to validate module names passed to SUP_IOCTL_LDR_OPEN.
1609 *
1610 * Check if pszStr contains any character of pszChars. We would use strpbrk
1611 * here if this function would be contained in the RedHat kABI white list, see
1612 * http://www.kerneldrivers.org/RHEL5.
1613 *
1614 * @returns true if fine, false if not.
1615 * @param pszName The module name to check.
1616 */
1617static bool supdrvIsLdrModuleNameValid(const char *pszName)
1618{
1619 int chCur;
1620 while ((chCur = *pszName++) != '\0')
1621 {
1622 static const char s_szInvalidChars[] = ";:()[]{}/\\|&*%#@!~`\"'";
1623 unsigned offInv = RT_ELEMENTS(s_szInvalidChars);
1624 while (offInv-- > 0)
1625 if (s_szInvalidChars[offInv] == chCur)
1626 return false;
1627 }
1628 return true;
1629}
1630
1631
1632
1633/**
1634 * I/O Control inner worker (tracing reasons).
1635 *
1636 * @returns IPRT status code.
1637 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1638 *
1639 * @param uIOCtl Function number.
1640 * @param pDevExt Device extention.
1641 * @param pSession Session data.
1642 * @param pReqHdr The request header.
1643 */
1644static int supdrvIOCtlInnerUnrestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1645{
1646 /*
1647 * Validation macros
1648 */
1649#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1650 do { \
1651 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1652 { \
1653 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1654 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1655 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1656 } \
1657 } while (0)
1658
1659#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1660
1661#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1662 do { \
1663 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1664 { \
1665 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1666 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
1667 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1668 } \
1669 } while (0)
1670
1671#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1672 do { \
1673 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1674 { \
1675 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1676 (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1677 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1678 } \
1679 } while (0)
1680
1681#define REQ_CHECK_EXPR(Name, expr) \
1682 do { \
1683 if (RT_UNLIKELY(!(expr))) \
1684 { \
1685 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1686 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1687 } \
1688 } while (0)
1689
1690#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1691 do { \
1692 if (RT_UNLIKELY(!(expr))) \
1693 { \
1694 OSDBGPRINT( fmt ); \
1695 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1696 } \
1697 } while (0)
1698
1699 /*
1700 * The switch.
1701 */
1702 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1703 {
1704 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1705 {
1706 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1707 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1708 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1709 {
1710 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1711 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1712 return 0;
1713 }
1714
1715#if 0
1716 /*
1717 * Call out to the OS specific code and let it do permission checks on the
1718 * client process.
1719 */
1720 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1721 {
1722 pReq->u.Out.u32Cookie = 0xffffffff;
1723 pReq->u.Out.u32SessionCookie = 0xffffffff;
1724 pReq->u.Out.u32SessionVersion = 0xffffffff;
1725 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1726 pReq->u.Out.pSession = NULL;
1727 pReq->u.Out.cFunctions = 0;
1728 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1729 return 0;
1730 }
1731#endif
1732
1733 /*
1734 * Match the version.
1735 * The current logic is very simple, match the major interface version.
1736 */
1737 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1738 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1739 {
1740 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1741 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1742 pReq->u.Out.u32Cookie = 0xffffffff;
1743 pReq->u.Out.u32SessionCookie = 0xffffffff;
1744 pReq->u.Out.u32SessionVersion = 0xffffffff;
1745 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1746 pReq->u.Out.pSession = NULL;
1747 pReq->u.Out.cFunctions = 0;
1748 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1749 return 0;
1750 }
1751
1752 /*
1753 * Fill in return data and be gone.
 1754 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1755 * u32SessionVersion <= u32ReqVersion!
1756 */
1757 /** @todo Somehow validate the client and negotiate a secure cookie... */
1758 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1759 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1760 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1761 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1762 pReq->u.Out.pSession = pSession;
1763 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1764 pReq->Hdr.rc = VINF_SUCCESS;
1765 return 0;
1766 }
1767
1768 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1769 {
1770 /* validate */
1771 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1772 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1773
1774 /* execute */
1775 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1776 RT_BCOPY_UNFORTIFIED(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1777 pReq->Hdr.rc = VINF_SUCCESS;
1778 return 0;
1779 }
1780
1781 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1782 {
1783 /* validate */
1784 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1785 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1786 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1787 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1788 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1789
1790 /* execute */
1791 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1792 if (RT_FAILURE(pReq->Hdr.rc))
1793 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1794 return 0;
1795 }
1796
1797 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1798 {
1799 /* validate */
1800 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1801 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1802
1803 /* execute */
1804 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1805 return 0;
1806 }
1807
1808 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1809 {
1810 /* validate */
1811 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1812 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1813
1814 /* execute */
1815 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1816 if (RT_FAILURE(pReq->Hdr.rc))
1817 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1818 return 0;
1819 }
1820
1821 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1822 {
1823 /* validate */
1824 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1825 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1826
1827 /* execute */
1828 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1829 return 0;
1830 }
1831
1832 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1833 {
1834 /* validate */
1835 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1836 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1837 if ( pReq->u.In.cbImageWithEverything != 0
1838 || pReq->u.In.cbImageBits != 0)
1839 {
1840 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything > 0);
1841 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything < 16*_1M);
1842 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1843 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithEverything);
1844 }
1845 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1846 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
1847 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, supdrvIsLdrModuleNameValid(pReq->u.In.szName));
1848 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szFilename, sizeof(pReq->u.In.szFilename)));
1849
1850 /* execute */
1851 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1852 return 0;
1853 }
1854
1855 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1856 {
1857 /* validate */
1858 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1859 uint8_t const * const pbSrcImage = pReq->u.In.abImage;
1860 REQ_CHECK_EXPR(Name, pReq->Hdr.cbIn >= SUP_IOCTL_LDR_LOAD_SIZE_IN(32));
1861 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithEverything), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1862 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1863 || ( pReq->u.In.cSymbols <= 16384
1864 && pReq->u.In.offSymbols >= pReq->u.In.cbImageBits
1865 && pReq->u.In.offSymbols < pReq->u.In.cbImageWithEverything
1866 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithEverything),
1867 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSymbols,
1868 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithEverything));
1869 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1870 || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithEverything
1871 && pReq->u.In.offStrTab >= pReq->u.In.cbImageBits
1872 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything
1873 && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything),
1874 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offStrTab,
1875 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithEverything));
1876 REQ_CHECK_EXPR_FMT( pReq->u.In.cSegments >= 1
1877 && pReq->u.In.cSegments <= 128
1878 && pReq->u.In.cSegments <= (pReq->u.In.cbImageBits + PAGE_SIZE - 1) / PAGE_SIZE
1879 && pReq->u.In.offSegments >= pReq->u.In.cbImageBits
1880 && pReq->u.In.offSegments < pReq->u.In.cbImageWithEverything
1881 && pReq->u.In.offSegments + pReq->u.In.cSegments * sizeof(SUPLDRSEG) <= pReq->u.In.cbImageWithEverything,
1882 ("SUP_IOCTL_LDR_LOAD: offSegments=%#lx cSegments=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSegments,
1883 (long)pReq->u.In.cSegments, (long)pReq->u.In.cbImageWithEverything));
1884
1885 if (pReq->u.In.cSymbols)
1886 {
1887 uint32_t i;
1888 PSUPLDRSYM paSyms = (PSUPLDRSYM)(&pbSrcImage[pReq->u.In.offSymbols]);
1889 for (i = 0; i < pReq->u.In.cSymbols; i++)
1890 {
1891 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithEverything,
1892 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithEverything));
1893 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1894 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1895 REQ_CHECK_EXPR_FMT(RTStrEnd((char const *)(&pbSrcImage[pReq->u.In.offStrTab + paSyms[i].offName]),
1896 pReq->u.In.cbStrTab - paSyms[i].offName),
1897 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1898 }
1899 }
1900 {
1901 uint32_t i;
1902 uint32_t offPrevEnd = 0;
1903 PSUPLDRSEG paSegs = (PSUPLDRSEG)(&pbSrcImage[pReq->u.In.offSegments]);
1904 for (i = 0; i < pReq->u.In.cSegments; i++)
1905 {
1906 REQ_CHECK_EXPR_FMT(paSegs[i].off < pReq->u.In.cbImageBits && !(paSegs[i].off & PAGE_OFFSET_MASK),
1907 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)pReq->u.In.cbImageBits));
1908 REQ_CHECK_EXPR_FMT(paSegs[i].cb <= pReq->u.In.cbImageBits,
1909 ("SUP_IOCTL_LDR_LOAD: seg #%ld: cb %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].cb, (long)pReq->u.In.cbImageBits));
1910 REQ_CHECK_EXPR_FMT(paSegs[i].off + paSegs[i].cb <= pReq->u.In.cbImageBits,
1911 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx = %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb, (long)(paSegs[i].off + paSegs[i].cb), (long)pReq->u.In.cbImageBits));
1912 REQ_CHECK_EXPR_FMT(paSegs[i].fProt != 0,
1913 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb));
1914 REQ_CHECK_EXPR_FMT(paSegs[i].fUnused == 0, ("SUP_IOCTL_LDR_LOAD: seg #%ld: fUnused=1\n", (long)i));
1915 REQ_CHECK_EXPR_FMT(offPrevEnd == paSegs[i].off,
1916 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx offPrevEnd %#lx\n", (long)i, (long)paSegs[i].off, (long)offPrevEnd));
1917 offPrevEnd = paSegs[i].off + paSegs[i].cb;
1918 }
1919 REQ_CHECK_EXPR_FMT(offPrevEnd == pReq->u.In.cbImageBits,
1920 ("SUP_IOCTL_LDR_LOAD: offPrevEnd %#lx cbImageBits %#lx\n", (long)i, (long)offPrevEnd, (long)pReq->u.In.cbImageBits));
1921 }
1922 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fFlags & ~SUPLDRLOAD_F_VALID_MASK),
1923 ("SUP_IOCTL_LDR_LOAD: fFlags=%#x\n", (unsigned)pReq->u.In.fFlags));
1924
1925 /* execute */
1926 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1927 return 0;
1928 }
1929
1930 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1931 {
1932 /* validate */
1933 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1934 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1935
1936 /* execute */
1937 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1938 return 0;
1939 }
1940
1941 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOCK_DOWN):
1942 {
1943 /* validate */
1944 REQ_CHECK_SIZES(SUP_IOCTL_LDR_LOCK_DOWN);
1945
1946 /* execute */
1947 pReqHdr->rc = supdrvIOCtl_LdrLockDown(pDevExt);
1948 return 0;
1949 }
1950
1951 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1952 {
1953 /* validate */
1954 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1955 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1956 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, RTStrEnd(pReq->u.In.szSymbol, sizeof(pReq->u.In.szSymbol)));
1957
1958 /* execute */
1959 pReq->Hdr.rc = supdrvIOCtl_LdrQuerySymbol(pDevExt, pSession, pReq);
1960 return 0;
1961 }
1962
1963 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_NO_SIZE()):
1964 {
1965 /* validate */
1966 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1967 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1968 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1969
1970 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1971 {
1972 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1973
1974 /* execute */
1975 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1976 {
1977 if (pReq->u.In.pVMR0 == NULL)
1978 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1979 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1980 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1981 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1982 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1983 else
1984 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1985 }
1986 else
1987 pReq->Hdr.rc = VERR_WRONG_ORDER;
1988 }
1989 else
1990 {
1991 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1992 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1993 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1994 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1995 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1996
1997 /* execute */
1998 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1999 {
2000 if (pReq->u.In.pVMR0 == NULL)
2001 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
2002 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
2003 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
2004 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
2005 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
2006 else
2007 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
2008 }
2009 else
2010 pReq->Hdr.rc = VERR_WRONG_ORDER;
2011 }
2012
2013 if ( RT_FAILURE(pReq->Hdr.rc)
2014 && pReq->Hdr.rc != VERR_INTERRUPTED
2015 && pReq->Hdr.rc != VERR_TIMEOUT)
2016 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2017 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2018 else
2019 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2020 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2021 return 0;
2022 }
2023
2024 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_BIG):
2025 {
2026 /* validate */
2027 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
2028 PSUPVMMR0REQHDR pVMMReq;
2029 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2030 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2031
2032 pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
2033 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR)),
2034 ("SUP_IOCTL_CALL_VMMR0_BIG: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR))));
2035 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0_BIG, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
2036 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0_BIG, SUP_IOCTL_CALL_VMMR0_BIG_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_BIG_SIZE_OUT(pVMMReq->cbReq));
2037
2038 /* execute */
2039 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
2040 {
2041 if (pReq->u.In.pVMR0 == NULL)
2042 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
2043 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
2044 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
2045 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
2046 else
2047 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
2048 }
2049 else
2050 pReq->Hdr.rc = VERR_WRONG_ORDER;
2051
2052 if ( RT_FAILURE(pReq->Hdr.rc)
2053 && pReq->Hdr.rc != VERR_INTERRUPTED
2054 && pReq->Hdr.rc != VERR_TIMEOUT)
2055 Log(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2056 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2057 else
2058 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2059 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2060 return 0;
2061 }
2062
2063 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
2064 {
2065 /* validate */
2066 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
2067 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
2068
2069 /* execute */
2070 pReq->Hdr.rc = VINF_SUCCESS;
2071 pReq->u.Out.enmMode = SUPR0GetPagingMode();
2072 return 0;
2073 }
2074
2075 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
2076 {
2077 /* validate */
2078 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
2079 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
2080 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
2081
2082 /* execute */
2083 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
2084 if (RT_FAILURE(pReq->Hdr.rc))
2085 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2086 return 0;
2087 }
2088
2089 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
2090 {
2091 /* validate */
2092 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
2093 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
2094
2095 /* execute */
2096 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
2097 return 0;
2098 }
2099
2100 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
2101 {
2102 /* validate */
2103 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
2104 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
2105
2106 /* execute */
2107 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
2108 if (RT_SUCCESS(pReq->Hdr.rc))
2109 pReq->u.Out.pGipR0 = pDevExt->pGip;
2110 return 0;
2111 }
2112
2113 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
2114 {
2115 /* validate */
2116 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
2117 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
2118
2119 /* execute */
2120 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
2121 return 0;
2122 }
2123
2124 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
2125 {
2126 /* validate */
2127 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
2128 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
2129 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
2130 || ( RT_VALID_PTR(pReq->u.In.pVMR0)
2131 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
2132 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
2133
2134 /* execute */
2135 RTSpinlockAcquire(pDevExt->Spinlock);
2136 if (pSession->pSessionVM == pReq->u.In.pVMR0)
2137 {
2138 if (pSession->pFastIoCtrlVM == NULL)
2139 {
2140 pSession->pFastIoCtrlVM = pSession->pSessionVM;
2141 RTSpinlockRelease(pDevExt->Spinlock);
2142 pReq->Hdr.rc = VINF_SUCCESS;
2143 }
2144 else
2145 {
2146 RTSpinlockRelease(pDevExt->Spinlock);
2147 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pFastIoCtrlVM=%p! (pVMR0=%p)\n",
2148 pSession->pFastIoCtrlVM, pReq->u.In.pVMR0));
2149 pReq->Hdr.rc = VERR_ALREADY_EXISTS;
2150 }
2151 }
2152 else
2153 {
2154 RTSpinlockRelease(pDevExt->Spinlock);
2155 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pSessionVM=%p vs pVMR0=%p)\n",
2156 pSession->pSessionVM, pReq->u.In.pVMR0));
2157 pReq->Hdr.rc = pSession->pSessionVM ? VERR_ACCESS_DENIED : VERR_WRONG_ORDER;
2158 }
2159 return 0;
2160 }
2161
2162 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
2163 {
2164 /* validate */
2165 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
2166 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
2167 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
2168 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
2169 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
2170 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
2171 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
2172 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
2173 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
2174
2175 /* execute */
2176 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
2177 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
2178 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
2179 &pReq->u.Out.aPages[0]);
2180 if (RT_FAILURE(pReq->Hdr.rc))
2181 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2182 return 0;
2183 }
2184
2185 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
2186 {
2187 /* validate */
2188 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
2189 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
2190 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
2191 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
2192 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2193 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
2194
2195 /* execute */
2196 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
2197 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
2198 if (RT_FAILURE(pReq->Hdr.rc))
2199 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2200 return 0;
2201 }
2202
2203 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
2204 {
2205 /* validate */
2206 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
2207 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
2208 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
2209 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
2210 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
2211 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2212 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
2213
2214 /* execute */
2215 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
2216 return 0;
2217 }
2218
2219 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
2220 {
2221 /* validate */
2222 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
2223 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
2224
2225 /* execute */
2226 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
2227 return 0;
2228 }
2229
2230 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE_NO_SIZE()):
2231 {
2232 /* validate */
2233 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
2234 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2235 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2236
2237 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
2238 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
2239 else
2240 {
2241 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
2242 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
2243 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
2244 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
2245 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
2246 }
2247 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
2248
2249 /* execute */
2250 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
2251 return 0;
2252 }
2253
2254 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS_NO_SIZE()):
2255 {
2256 /* validate */
2257 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
2258 size_t cbStrTab;
2259 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
2260 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
2261 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
2262 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
2263 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
2264 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
2265 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
2266 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
2267 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
2268 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
2269 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
2270
2271 /* execute */
2272 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pReq);
2273 return 0;
2274 }
2275
2276 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP2):
2277 {
2278 /* validate */
2279 PSUPSEMOP2 pReq = (PSUPSEMOP2)pReqHdr;
2280 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP2, SUP_IOCTL_SEM_OP2_SIZE_IN, SUP_IOCTL_SEM_OP2_SIZE_OUT);
2281 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP2, pReq->u.In.uReserved == 0);
2282
2283 /* execute */
2284 switch (pReq->u.In.uType)
2285 {
2286 case SUP_SEM_TYPE_EVENT:
2287 {
2288 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2289 switch (pReq->u.In.uOp)
2290 {
2291 case SUPSEMOP2_WAIT_MS_REL:
2292 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.uArg.cRelMsTimeout);
2293 break;
2294 case SUPSEMOP2_WAIT_NS_ABS:
2295 pReq->Hdr.rc = SUPSemEventWaitNsAbsIntr(pSession, hEvent, pReq->u.In.uArg.uAbsNsTimeout);
2296 break;
2297 case SUPSEMOP2_WAIT_NS_REL:
2298 pReq->Hdr.rc = SUPSemEventWaitNsRelIntr(pSession, hEvent, pReq->u.In.uArg.cRelNsTimeout);
2299 break;
2300 case SUPSEMOP2_SIGNAL:
2301 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
2302 break;
2303 case SUPSEMOP2_CLOSE:
2304 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
2305 break;
2306 case SUPSEMOP2_RESET:
2307 default:
2308 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2309 break;
2310 }
2311 break;
2312 }
2313
2314 case SUP_SEM_TYPE_EVENT_MULTI:
2315 {
2316 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2317 switch (pReq->u.In.uOp)
2318 {
2319 case SUPSEMOP2_WAIT_MS_REL:
2320 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.uArg.cRelMsTimeout);
2321 break;
2322 case SUPSEMOP2_WAIT_NS_ABS:
2323 pReq->Hdr.rc = SUPSemEventMultiWaitNsAbsIntr(pSession, hEventMulti, pReq->u.In.uArg.uAbsNsTimeout);
2324 break;
2325 case SUPSEMOP2_WAIT_NS_REL:
2326 pReq->Hdr.rc = SUPSemEventMultiWaitNsRelIntr(pSession, hEventMulti, pReq->u.In.uArg.cRelNsTimeout);
2327 break;
2328 case SUPSEMOP2_SIGNAL:
2329 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
2330 break;
2331 case SUPSEMOP2_CLOSE:
2332 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
2333 break;
2334 case SUPSEMOP2_RESET:
2335 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
2336 break;
2337 default:
2338 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2339 break;
2340 }
2341 break;
2342 }
2343
2344 default:
2345 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2346 break;
2347 }
2348 return 0;
2349 }
2350
2351 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP3):
2352 {
2353 /* validate */
2354 PSUPSEMOP3 pReq = (PSUPSEMOP3)pReqHdr;
2355 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP3, SUP_IOCTL_SEM_OP3_SIZE_IN, SUP_IOCTL_SEM_OP3_SIZE_OUT);
2356 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, pReq->u.In.u32Reserved == 0 && pReq->u.In.u64Reserved == 0);
2357
2358 /* execute */
2359 switch (pReq->u.In.uType)
2360 {
2361 case SUP_SEM_TYPE_EVENT:
2362 {
2363 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2364 switch (pReq->u.In.uOp)
2365 {
2366 case SUPSEMOP3_CREATE:
2367 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2368 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
2369 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
2370 break;
2371 case SUPSEMOP3_GET_RESOLUTION:
2372 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2373 pReq->Hdr.rc = VINF_SUCCESS;
2374 pReq->Hdr.cbOut = sizeof(*pReq);
2375 pReq->u.Out.cNsResolution = SUPSemEventGetResolution(pSession);
2376 break;
2377 default:
2378 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2379 break;
2380 }
2381 break;
2382 }
2383
2384 case SUP_SEM_TYPE_EVENT_MULTI:
2385 {
2386 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2387 switch (pReq->u.In.uOp)
2388 {
2389 case SUPSEMOP3_CREATE:
2390 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2391 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
2392 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
2393 break;
2394 case SUPSEMOP3_GET_RESOLUTION:
2395 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2396 pReq->Hdr.rc = VINF_SUCCESS;
2397 pReq->u.Out.cNsResolution = SUPSemEventMultiGetResolution(pSession);
2398 break;
2399 default:
2400 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2401 break;
2402 }
2403 break;
2404 }
2405
2406 default:
2407 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2408 break;
2409 }
2410 return 0;
2411 }
2412
2413 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2414 {
2415 /* validate */
2416 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2417 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2418
2419 /* execute */
2420 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2421 if (RT_FAILURE(pReq->Hdr.rc))
2422 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2423 return 0;
2424 }
2425
2426 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_OPEN):
2427 {
2428 /* validate */
2429 PSUPTRACEROPEN pReq = (PSUPTRACEROPEN)pReqHdr;
2430 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_OPEN);
2431
2432 /* execute */
2433 pReq->Hdr.rc = supdrvIOCtl_TracerOpen(pDevExt, pSession, pReq->u.In.uCookie, pReq->u.In.uArg);
2434 return 0;
2435 }
2436
2437 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_CLOSE):
2438 {
2439 /* validate */
2440 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_CLOSE);
2441
2442 /* execute */
2443 pReqHdr->rc = supdrvIOCtl_TracerClose(pDevExt, pSession);
2444 return 0;
2445 }
2446
2447 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_IOCTL):
2448 {
2449 /* validate */
2450 PSUPTRACERIOCTL pReq = (PSUPTRACERIOCTL)pReqHdr;
2451 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_IOCTL);
2452
2453 /* execute */
2454 pReqHdr->rc = supdrvIOCtl_TracerIOCtl(pDevExt, pSession, pReq->u.In.uCmd, pReq->u.In.uArg, &pReq->u.Out.iRetVal);
2455 return 0;
2456 }
2457
2458 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_REG):
2459 {
2460 /* validate */
2461 PSUPTRACERUMODREG pReq = (PSUPTRACERUMODREG)pReqHdr;
2462 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_REG);
2463 if (!RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)))
2464 return VERR_INVALID_PARAMETER;
2465
2466 /* execute */
2467 pReqHdr->rc = supdrvIOCtl_TracerUmodRegister(pDevExt, pSession,
2468 pReq->u.In.R3PtrVtgHdr, pReq->u.In.uVtgHdrAddr,
2469 pReq->u.In.R3PtrStrTab, pReq->u.In.cbStrTab,
2470 pReq->u.In.szName, pReq->u.In.fFlags);
2471 return 0;
2472 }
2473
2474 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_DEREG):
2475 {
2476 /* validate */
2477 PSUPTRACERUMODDEREG pReq = (PSUPTRACERUMODDEREG)pReqHdr;
2478 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_DEREG);
2479
2480 /* execute */
2481 pReqHdr->rc = supdrvIOCtl_TracerUmodDeregister(pDevExt, pSession, pReq->u.In.pVtgHdr);
2482 return 0;
2483 }
2484
2485 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE):
2486 {
2487 /* validate */
2488 PSUPTRACERUMODFIREPROBE pReq = (PSUPTRACERUMODFIREPROBE)pReqHdr;
2489 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE);
2490
2491 supdrvIOCtl_TracerUmodProbeFire(pDevExt, pSession, &pReq->u.In);
2492 pReqHdr->rc = VINF_SUCCESS;
2493 return 0;
2494 }
2495
2496 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_MSR_PROBER):
2497 {
2498 /* validate */
2499 PSUPMSRPROBER pReq = (PSUPMSRPROBER)pReqHdr;
2500 REQ_CHECK_SIZES(SUP_IOCTL_MSR_PROBER);
2501 REQ_CHECK_EXPR(SUP_IOCTL_MSR_PROBER,
2502 pReq->u.In.enmOp > SUPMSRPROBEROP_INVALID && pReq->u.In.enmOp < SUPMSRPROBEROP_END);
2503
2504 pReqHdr->rc = supdrvIOCtl_MsrProber(pDevExt, pReq);
2505 return 0;
2506 }
2507
2508 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_RESUME_SUSPENDED_KBDS):
2509 {
2510 /* validate */
2511 REQ_CHECK_SIZES(SUP_IOCTL_RESUME_SUSPENDED_KBDS);
2512
2513 pReqHdr->rc = supdrvIOCtl_ResumeSuspendedKbds();
2514 return 0;
2515 }
2516
2517 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_DELTA_MEASURE):
2518 {
2519 /* validate */
2520 PSUPTSCDELTAMEASURE pReq = (PSUPTSCDELTAMEASURE)pReqHdr;
2521 REQ_CHECK_SIZES(SUP_IOCTL_TSC_DELTA_MEASURE);
2522
2523 pReqHdr->rc = supdrvIOCtl_TscDeltaMeasure(pDevExt, pSession, pReq);
2524 return 0;
2525 }
2526
2527 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_READ):
2528 {
2529 /* validate */
2530 PSUPTSCREAD pReq = (PSUPTSCREAD)pReqHdr;
2531 REQ_CHECK_SIZES(SUP_IOCTL_TSC_READ);
2532
2533 pReqHdr->rc = supdrvIOCtl_TscRead(pDevExt, pSession, pReq);
2534 return 0;
2535 }
2536
2537 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_SET_FLAGS):
2538 {
2539 /* validate */
2540 PSUPGIPSETFLAGS pReq = (PSUPGIPSETFLAGS)pReqHdr;
2541 REQ_CHECK_SIZES(SUP_IOCTL_GIP_SET_FLAGS);
2542
2543 pReqHdr->rc = supdrvIOCtl_GipSetFlags(pDevExt, pSession, pReq->u.In.fOrMask, pReq->u.In.fAndMask);
2544 return 0;
2545 }
2546
2547 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_UCODE_REV):
2548 {
2549 /* validate */
2550 PSUPUCODEREV pReq = (PSUPUCODEREV)pReqHdr;
2551 REQ_CHECK_SIZES(SUP_IOCTL_UCODE_REV);
2552
2553 /* execute */
2554 pReq->Hdr.rc = SUPR0QueryUcodeRev(pSession, &pReq->u.Out.MicrocodeRev);
2555 if (RT_FAILURE(pReq->Hdr.rc))
2556 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2557 return 0;
2558 }
2559
2560 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_HWVIRT_MSRS):
2561 {
2562 /* validate */
2563 PSUPGETHWVIRTMSRS pReq = (PSUPGETHWVIRTMSRS)pReqHdr;
2564 REQ_CHECK_SIZES(SUP_IOCTL_GET_HWVIRT_MSRS);
2565 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1 && !pReq->u.In.fReserved2,
2566 ("SUP_IOCTL_GET_HWVIRT_MSRS: fReserved0=%d fReserved1=%d fReserved2=%d\n", pReq->u.In.fReserved0,
2567 pReq->u.In.fReserved1, pReq->u.In.fReserved2));
2568
2569 /* execute */
2570 pReq->Hdr.rc = SUPR0GetHwvirtMsrs(&pReq->u.Out.HwvirtMsrs, 0 /* fCaps */, pReq->u.In.fForce);
2571 if (RT_FAILURE(pReq->Hdr.rc))
2572 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2573 return 0;
2574 }
2575
2576 default:
2577 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2578 break;
2579 }
2580 return VERR_GENERAL_FAILURE;
2581}
2582
2583
2584/**
2585 * I/O Control inner worker for the restricted operations.
2586 *
2587 * @returns IPRT status code.
2588 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2589 *
2590 * @param uIOCtl Function number.
2591 * @param pDevExt Device extention.
2592 * @param pSession Session data.
2593 * @param pReqHdr The request header.
2594 */
2595static int supdrvIOCtlInnerRestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
2596{
2597 /*
2598 * The switch.
2599 */
2600 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
2601 {
2602 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
2603 {
2604 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
2605 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
2606 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
2607 {
2608 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
2609 pReq->Hdr.rc = VERR_INVALID_MAGIC;
2610 return 0;
2611 }
2612
2613 /*
2614 * Match the version.
2615 * The current logic is very simple, match the major interface version.
2616 */
2617 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
2618 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
2619 {
2620 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2621 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
2622 pReq->u.Out.u32Cookie = 0xffffffff;
2623 pReq->u.Out.u32SessionCookie = 0xffffffff;
2624 pReq->u.Out.u32SessionVersion = 0xffffffff;
2625 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2626 pReq->u.Out.pSession = NULL;
2627 pReq->u.Out.cFunctions = 0;
2628 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2629 return 0;
2630 }
2631
2632 /*
2633 * Fill in return data and be gone.
2634 * N.B. The first one to change SUPDRV_IOC_VERSION shall makes sure that
2635 * u32SessionVersion <= u32ReqVersion!
2636 */
2637 /** @todo Somehow validate the client and negotiate a secure cookie... */
2638 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
2639 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
2640 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
2641 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2642 pReq->u.Out.pSession = NULL;
2643 pReq->u.Out.cFunctions = 0;
2644 pReq->Hdr.rc = VINF_SUCCESS;
2645 return 0;
2646 }
2647
2648 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2649 {
2650 /* validate */
2651 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2652 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2653
2654 /* execute */
2655 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2656 if (RT_FAILURE(pReq->Hdr.rc))
2657 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2658 return 0;
2659 }
2660
2661 default:
2662 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2663 break;
2664 }
2665 return VERR_GENERAL_FAILURE;
2666}
2667
2668
2669/**
2670 * I/O Control worker.
2671 *
2672 * @returns IPRT status code.
2673 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2674 *
2675 * @param uIOCtl Function number.
2676 * @param pDevExt Device extention.
2677 * @param pSession Session data.
2678 * @param pReqHdr The request header.
2679 * @param cbReq The size of the request buffer.
2680 */
2681int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr, size_t cbReq)
2682{
2683 int rc;
2684 VBOXDRV_IOCTL_ENTRY(pSession, uIOCtl, pReqHdr);
2685
2686 /*
2687 * Validate the request.
2688 */
2689 if (RT_UNLIKELY(cbReq < sizeof(*pReqHdr)))
2690 {
2691 OSDBGPRINT(("vboxdrv: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
2692 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2693 return VERR_INVALID_PARAMETER;
2694 }
2695 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
2696 || pReqHdr->cbIn < sizeof(*pReqHdr)
2697 || pReqHdr->cbIn > cbReq
2698 || pReqHdr->cbOut < sizeof(*pReqHdr)
2699 || pReqHdr->cbOut > cbReq))
2700 {
2701 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
2702 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
2703 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2704 return VERR_INVALID_PARAMETER;
2705 }
2706 if (RT_UNLIKELY(!RT_VALID_PTR(pSession)))
2707 {
2708 OSDBGPRINT(("vboxdrv: Invalid pSession value %p (ioctl=%p)\n", pSession, (void *)uIOCtl));
2709 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2710 return VERR_INVALID_PARAMETER;
2711 }
2712 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
2713 {
2714 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
2715 {
2716 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
2717 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2718 return VERR_INVALID_PARAMETER;
2719 }
2720 }
2721 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
2722 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
2723 {
2724 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
2725 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2726 return VERR_INVALID_PARAMETER;
2727 }
2728
2729 /*
2730 * Hand it to an inner function to avoid lots of unnecessary return tracepoints.
2731 */
2732 if (pSession->fUnrestricted)
2733 rc = supdrvIOCtlInnerUnrestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2734 else
2735 rc = supdrvIOCtlInnerRestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2736
2737 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, pReqHdr->rc, rc);
2738 return rc;
2739}
2740
2741
2742/**
2743 * Inter-Driver Communication (IDC) worker.
2744 *
2745 * @returns VBox status code.
2746 * @retval VINF_SUCCESS on success.
2747 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2748 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
2749 *
2750 * @param uReq The request (function) code.
2751 * @param pDevExt Device extention.
2752 * @param pSession Session data.
2753 * @param pReqHdr The request header.
2754 */
2755int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
2756{
2757 /*
2758 * The OS specific code has already validated the pSession
2759 * pointer, and the request size being greater or equal to
2760 * size of the header.
2761 *
2762 * So, just check that pSession is a kernel context session.
2763 */
2764 if (RT_UNLIKELY( pSession
2765 && pSession->R0Process != NIL_RTR0PROCESS))
2766 return VERR_INVALID_PARAMETER;
2767
2768/*
2769 * Validation macro.
2770 */
2771#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
2772 do { \
2773 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
2774 { \
2775 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
2776 (long)pReqHdr->cb, (long)(cbExpect))); \
2777 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
2778 } \
2779 } while (0)
2780
2781 switch (uReq)
2782 {
2783 case SUPDRV_IDC_REQ_CONNECT:
2784 {
2785 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
2786 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
2787
2788 /*
2789 * Validate the cookie and other input.
2790 */
2791 if (pReq->Hdr.pSession != NULL)
2792 {
2793 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Hdr.pSession=%p expected NULL!\n", pReq->Hdr.pSession));
2794 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2795 }
2796 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
2797 {
2798 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
2799 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
2800 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2801 }
2802 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
2803 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
2804 {
2805 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
2806 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
2807 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2808 }
2809 if (pSession != NULL)
2810 {
2811 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pSession));
2812 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2813 }
2814
2815 /*
2816 * Match the version.
2817 * The current logic is very simple, match the major interface version.
2818 */
2819 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
2820 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
2821 {
2822 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2823 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
2824 pReq->u.Out.pSession = NULL;
2825 pReq->u.Out.uSessionVersion = 0xffffffff;
2826 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2827 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2828 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2829 return VINF_SUCCESS;
2830 }
2831
2832 pReq->u.Out.pSession = NULL;
2833 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
2834 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2835 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2836
2837 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, true /*fUnrestricted*/, &pSession);
2838 if (RT_FAILURE(pReq->Hdr.rc))
2839 {
2840 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
2841 return VINF_SUCCESS;
2842 }
2843
2844 pReq->u.Out.pSession = pSession;
2845 pReq->Hdr.pSession = pSession;
2846
2847 return VINF_SUCCESS;
2848 }
2849
2850 case SUPDRV_IDC_REQ_DISCONNECT:
2851 {
2852 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
2853
2854 supdrvSessionRelease(pSession);
2855 return pReqHdr->rc = VINF_SUCCESS;
2856 }
2857
2858 case SUPDRV_IDC_REQ_GET_SYMBOL:
2859 {
2860 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
2861 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
2862
2863 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
2864 return VINF_SUCCESS;
2865 }
2866
2867 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
2868 {
2869 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
2870 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
2871
2872 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
2873 return VINF_SUCCESS;
2874 }
2875
2876 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
2877 {
2878 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
2879 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
2880
2881 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
2882 return VINF_SUCCESS;
2883 }
2884
2885 default:
2886 Log(("Unknown IDC %#lx\n", (long)uReq));
2887 break;
2888 }
2889
2890#undef REQ_CHECK_IDC_SIZE
2891 return VERR_NOT_SUPPORTED;
2892}
2893
2894
2895/**
2896 * Register a object for reference counting.
2897 * The object is registered with one reference in the specified session.
2898 *
2899 * @returns Unique identifier on success (pointer).
2900 * All future reference must use this identifier.
2901 * @returns NULL on failure.
2902 * @param pSession The caller's session.
2903 * @param enmType The object type.
2904 * @param pfnDestructor The destructore function which will be called when the reference count reaches 0.
2905 * @param pvUser1 The first user argument.
2906 * @param pvUser2 The second user argument.
2907 */
2908SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
2909{
2910 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2911 PSUPDRVOBJ pObj;
2912 PSUPDRVUSAGE pUsage;
2913
2914 /*
2915 * Validate the input.
2916 */
2917 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
2918 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
2919 AssertPtrReturn(pfnDestructor, NULL);
2920
2921 /*
2922 * Allocate and initialize the object.
2923 */
2924 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
2925 if (!pObj)
2926 return NULL;
2927 pObj->u32Magic = SUPDRVOBJ_MAGIC;
2928 pObj->enmType = enmType;
2929 pObj->pNext = NULL;
2930 pObj->cUsage = 1;
2931 pObj->pfnDestructor = pfnDestructor;
2932 pObj->pvUser1 = pvUser1;
2933 pObj->pvUser2 = pvUser2;
2934 pObj->CreatorUid = pSession->Uid;
2935 pObj->CreatorGid = pSession->Gid;
2936 pObj->CreatorProcess= pSession->Process;
2937 supdrvOSObjInitCreator(pObj, pSession);
2938
2939 /*
2940 * Allocate the usage record.
2941 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
2942 */
2943 RTSpinlockAcquire(pDevExt->Spinlock);
2944
2945 pUsage = pDevExt->pUsageFree;
2946 if (pUsage)
2947 pDevExt->pUsageFree = pUsage->pNext;
2948 else
2949 {
2950 RTSpinlockRelease(pDevExt->Spinlock);
2951 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
2952 if (!pUsage)
2953 {
2954 RTMemFree(pObj);
2955 return NULL;
2956 }
2957 RTSpinlockAcquire(pDevExt->Spinlock);
2958 }
2959
2960 /*
2961 * Insert the object and create the session usage record.
2962 */
2963 /* The object. */
2964 pObj->pNext = pDevExt->pObjs;
2965 pDevExt->pObjs = pObj;
2966
2967 /* The session record. */
2968 pUsage->cUsage = 1;
2969 pUsage->pObj = pObj;
2970 pUsage->pNext = pSession->pUsage;
2971 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
2972 pSession->pUsage = pUsage;
2973
2974 RTSpinlockRelease(pDevExt->Spinlock);
2975
2976 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
2977 return pObj;
2978}
2979SUPR0_EXPORT_SYMBOL(SUPR0ObjRegister);
2980
2981
2982/**
2983 * Increment the reference counter for the object associating the reference
2984 * with the specified session.
2985 *
2986 * @returns IPRT status code.
2987 * @param pvObj The identifier returned by SUPR0ObjRegister().
2988 * @param pSession The session which is referencing the object.
2989 *
2990 * @remarks The caller should not own any spinlocks and must carefully protect
2991 * itself against potential race with the destructor so freed memory
2992 * isn't accessed here.
2993 */
2994SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
2995{
2996 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
2997}
2998SUPR0_EXPORT_SYMBOL(SUPR0ObjAddRef);
2999
3000
3001/**
3002 * Increment the reference counter for the object associating the reference
3003 * with the specified session.
3004 *
3005 * @returns IPRT status code.
3006 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
3007 * couldn't be allocated. (If you see this you're not doing the right
3008 * thing and it won't ever work reliably.)
3009 *
3010 * @param pvObj The identifier returned by SUPR0ObjRegister().
3011 * @param pSession The session which is referencing the object.
3012 * @param fNoBlocking Set if it's not OK to block. Never try to make the
3013 * first reference to an object in a session with this
3014 * argument set.
3015 *
3016 * @remarks The caller should not own any spinlocks and must carefully protect
3017 * itself against potential race with the destructor so freed memory
3018 * isn't accessed here.
3019 */
3020SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
3021{
3022 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3023 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3024 int rc = VINF_SUCCESS;
3025 PSUPDRVUSAGE pUsagePre;
3026 PSUPDRVUSAGE pUsage;
3027
3028 /*
3029 * Validate the input.
3030 * Be ready for the destruction race (someone might be stuck in the
3031 * destructor waiting a lock we own).
3032 */
3033 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3034 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
3035 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
3036 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
3037 VERR_INVALID_PARAMETER);
3038
3039 RTSpinlockAcquire(pDevExt->Spinlock);
3040
3041 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
3042 {
3043 RTSpinlockRelease(pDevExt->Spinlock);
3044
3045 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
3046 return VERR_WRONG_ORDER;
3047 }
3048
3049 /*
3050 * Preallocate the usage record if we can.
3051 */
3052 pUsagePre = pDevExt->pUsageFree;
3053 if (pUsagePre)
3054 pDevExt->pUsageFree = pUsagePre->pNext;
3055 else if (!fNoBlocking)
3056 {
3057 RTSpinlockRelease(pDevExt->Spinlock);
3058 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
3059 if (!pUsagePre)
3060 return VERR_NO_MEMORY;
3061
3062 RTSpinlockAcquire(pDevExt->Spinlock);
3063 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
3064 {
3065 RTSpinlockRelease(pDevExt->Spinlock);
3066
3067 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
3068 return VERR_WRONG_ORDER;
3069 }
3070 }
3071
3072 /*
3073 * Reference the object.
3074 */
3075 pObj->cUsage++;
3076
3077 /*
3078 * Look for the session record.
3079 */
3080 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
3081 {
3082 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3083 if (pUsage->pObj == pObj)
3084 break;
3085 }
3086 if (pUsage)
3087 pUsage->cUsage++;
3088 else if (pUsagePre)
3089 {
3090 /* create a new session record. */
3091 pUsagePre->cUsage = 1;
3092 pUsagePre->pObj = pObj;
3093 pUsagePre->pNext = pSession->pUsage;
3094 pSession->pUsage = pUsagePre;
3095 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
3096
3097 pUsagePre = NULL;
3098 }
3099 else
3100 {
3101 pObj->cUsage--;
3102 rc = VERR_TRY_AGAIN;
3103 }
3104
3105 /*
3106 * Put any unused usage record into the free list..
3107 */
3108 if (pUsagePre)
3109 {
3110 pUsagePre->pNext = pDevExt->pUsageFree;
3111 pDevExt->pUsageFree = pUsagePre;
3112 }
3113
3114 RTSpinlockRelease(pDevExt->Spinlock);
3115
3116 return rc;
3117}
3118SUPR0_EXPORT_SYMBOL(SUPR0ObjAddRefEx);
3119
3120
3121/**
3122 * Decrement / destroy a reference counter record for an object.
3123 *
3124 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
3125 *
3126 * @returns IPRT status code.
3127 * @retval VINF_SUCCESS if not destroyed.
3128 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
3129 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
3130 * string builds.
3131 *
3132 * @param pvObj The identifier returned by SUPR0ObjRegister().
3133 * @param pSession The session which is referencing the object.
3134 */
3135SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
3136{
3137 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3138 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3139 int rc = VERR_INVALID_PARAMETER;
3140 PSUPDRVUSAGE pUsage;
3141 PSUPDRVUSAGE pUsagePrev;
3142
3143 /*
3144 * Validate the input.
3145 */
3146 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3147 AssertMsgReturn(RT_VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3148 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3149 VERR_INVALID_PARAMETER);
3150
3151 /*
3152 * Acquire the spinlock and look for the usage record.
3153 */
3154 RTSpinlockAcquire(pDevExt->Spinlock);
3155
3156 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
3157 pUsage;
3158 pUsagePrev = pUsage, pUsage = pUsage->pNext)
3159 {
3160 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3161 if (pUsage->pObj == pObj)
3162 {
3163 rc = VINF_SUCCESS;
3164 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
3165 if (pUsage->cUsage > 1)
3166 {
3167 pObj->cUsage--;
3168 pUsage->cUsage--;
3169 }
3170 else
3171 {
3172 /*
3173 * Free the session record.
3174 */
3175 if (pUsagePrev)
3176 pUsagePrev->pNext = pUsage->pNext;
3177 else
3178 pSession->pUsage = pUsage->pNext;
3179 pUsage->pNext = pDevExt->pUsageFree;
3180 pDevExt->pUsageFree = pUsage;
3181
3182 /* What about the object? */
3183 if (pObj->cUsage > 1)
3184 pObj->cUsage--;
3185 else
3186 {
3187 /*
3188 * Object is to be destroyed, unlink it.
3189 */
3190 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
3191 rc = VINF_OBJECT_DESTROYED;
3192 if (pDevExt->pObjs == pObj)
3193 pDevExt->pObjs = pObj->pNext;
3194 else
3195 {
3196 PSUPDRVOBJ pObjPrev;
3197 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
3198 if (pObjPrev->pNext == pObj)
3199 {
3200 pObjPrev->pNext = pObj->pNext;
3201 break;
3202 }
3203 Assert(pObjPrev);
3204 }
3205 }
3206 }
3207 break;
3208 }
3209 }
3210
3211 RTSpinlockRelease(pDevExt->Spinlock);
3212
3213 /*
3214 * Call the destructor and free the object if required.
3215 */
3216 if (rc == VINF_OBJECT_DESTROYED)
3217 {
3218 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
3219 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
3220 if (pObj->pfnDestructor)
3221 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
3222 RTMemFree(pObj);
3223 }
3224
3225 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
3226 return rc;
3227}
3228SUPR0_EXPORT_SYMBOL(SUPR0ObjRelease);
3229
3230
3231/**
3232 * Verifies that the current process can access the specified object.
3233 *
3234 * @returns The following IPRT status code:
3235 * @retval VINF_SUCCESS if access was granted.
3236 * @retval VERR_PERMISSION_DENIED if denied access.
3237 * @retval VERR_INVALID_PARAMETER if invalid parameter.
3238 *
3239 * @param pvObj The identifier returned by SUPR0ObjRegister().
3240 * @param pSession The session which wishes to access the object.
3241 * @param pszObjName Object string name. This is optional and depends on the object type.
3242 *
3243 * @remark The caller is responsible for making sure the object isn't removed while
3244 * we're inside this function. If uncertain about this, just call AddRef before calling us.
3245 */
3246SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
3247{
3248 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3249 int rc;
3250
3251 /*
3252 * Validate the input.
3253 */
3254 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3255 AssertMsgReturn(RT_VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3256 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3257 VERR_INVALID_PARAMETER);
3258
3259 /*
3260 * Check access. (returns true if a decision has been made.)
3261 */
3262 rc = VERR_INTERNAL_ERROR;
3263 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
3264 return rc;
3265
3266 /*
3267 * Default policy is to allow the user to access his own
3268 * stuff but nothing else.
3269 */
3270 if (pObj->CreatorUid == pSession->Uid)
3271 return VINF_SUCCESS;
3272 return VERR_PERMISSION_DENIED;
3273}
3274SUPR0_EXPORT_SYMBOL(SUPR0ObjVerifyAccess);
3275
3276
3277/**
3278 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionVM member.
3279 *
3280 * @returns The associated VM pointer.
3281 * @param pSession The session of the current thread.
3282 */
3283SUPR0DECL(PVM) SUPR0GetSessionVM(PSUPDRVSESSION pSession)
3284{
3285 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3286 return pSession->pSessionVM;
3287}
3288SUPR0_EXPORT_SYMBOL(SUPR0GetSessionVM);
3289
3290
3291/**
3292 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionGVM member.
3293 *
3294 * @returns The associated GVM pointer.
3295 * @param pSession The session of the current thread.
3296 */
3297SUPR0DECL(PGVM) SUPR0GetSessionGVM(PSUPDRVSESSION pSession)
3298{
3299 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3300 return pSession->pSessionGVM;
3301}
3302SUPR0_EXPORT_SYMBOL(SUPR0GetSessionGVM);
3303
3304
3305/**
3306 * API for the VMMR0 module to work the SUPDRVSESSION::pSessionVM member.
3307 *
3308 * This will fail if there is already a VM associated with the session and pVM
3309 * isn't NULL.
3310 *
3311 * @retval VINF_SUCCESS
3312 * @retval VERR_ALREADY_EXISTS if there already is a VM associated with the
3313 * session.
3314 * @retval VERR_INVALID_PARAMETER if only one of the parameters are NULL or if
3315 * the session is invalid.
3316 *
3317 * @param pSession The session of the current thread.
3318 * @param pGVM The GVM to associate with the session. Pass NULL to
3319 * dissassociate.
3320 * @param pVM The VM to associate with the session. Pass NULL to
3321 * dissassociate.
3322 */
3323SUPR0DECL(int) SUPR0SetSessionVM(PSUPDRVSESSION pSession, PGVM pGVM, PVM pVM)
3324{
3325 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3326 AssertReturn((pGVM != NULL) == (pVM != NULL), VERR_INVALID_PARAMETER);
3327
3328 RTSpinlockAcquire(pSession->pDevExt->Spinlock);
3329 if (pGVM)
3330 {
3331 if (!pSession->pSessionGVM)
3332 {
3333 pSession->pSessionGVM = pGVM;
3334 pSession->pSessionVM = pVM;
3335 pSession->pFastIoCtrlVM = NULL;
3336 }
3337 else
3338 {
3339 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3340 SUPR0Printf("SUPR0SetSessionVM: Unable to associated GVM/VM %p/%p with session %p as it has %p/%p already!\n",
3341 pGVM, pVM, pSession, pSession->pSessionGVM, pSession->pSessionVM);
3342 return VERR_ALREADY_EXISTS;
3343 }
3344 }
3345 else
3346 {
3347 pSession->pSessionGVM = NULL;
3348 pSession->pSessionVM = NULL;
3349 pSession->pFastIoCtrlVM = NULL;
3350 }
3351 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3352 return VINF_SUCCESS;
3353}
3354SUPR0_EXPORT_SYMBOL(SUPR0SetSessionVM);
3355
3356
3357/**
3358 * For getting SUPDRVSESSION::Uid.
3359 *
3360 * @returns The session UID. NIL_RTUID if invalid pointer or not successfully
3361 * set by the host code.
3362 * @param pSession The session of the current thread.
3363 */
3364SUPR0DECL(RTUID) SUPR0GetSessionUid(PSUPDRVSESSION pSession)
3365{
3366 AssertReturn(SUP_IS_SESSION_VALID(pSession), NIL_RTUID);
3367 return pSession->Uid;
3368}
3369SUPR0_EXPORT_SYMBOL(SUPR0GetSessionUid);
3370
3371
3372/** @copydoc RTLogDefaultInstanceEx
3373 * @remarks To allow overriding RTLogDefaultInstanceEx locally. */
3374SUPR0DECL(struct RTLOGGER *) SUPR0DefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3375{
3376 return RTLogDefaultInstanceEx(fFlagsAndGroup);
3377}
3378SUPR0_EXPORT_SYMBOL(SUPR0DefaultLogInstanceEx);
3379
3380
3381/** @copydoc RTLogGetDefaultInstanceEx
3382 * @remarks To allow overriding RTLogGetDefaultInstanceEx locally. */
3383SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3384{
3385 return RTLogGetDefaultInstanceEx(fFlagsAndGroup);
3386}
3387SUPR0_EXPORT_SYMBOL(SUPR0GetDefaultLogInstanceEx);
3388
3389
3390/** @copydoc RTLogRelGetDefaultInstanceEx
3391 * @remarks To allow overriding RTLogRelGetDefaultInstanceEx locally. */
3392SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogRelInstanceEx(uint32_t fFlagsAndGroup)
3393{
3394 return RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
3395}
3396SUPR0_EXPORT_SYMBOL(SUPR0GetDefaultLogRelInstanceEx);
3397
3398
3399/**
3400 * Lock pages.
3401 *
3402 * @returns IPRT status code.
3403 * @param pSession Session to which the locked memory should be associated.
3404 * @param pvR3 Start of the memory range to lock.
3405 * This must be page aligned.
3406 * @param cPages Number of pages to lock.
3407 * @param paPages Where to put the physical addresses of locked memory.
3408 */
3409SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
3410{
3411 int rc;
3412 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3413 const size_t cb = (size_t)cPages << PAGE_SHIFT;
3414 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
3415
3416 /*
3417 * Verify input.
3418 */
3419 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3420 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
3421 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
3422 || !pvR3)
3423 {
3424 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
3425 return VERR_INVALID_PARAMETER;
3426 }
3427
3428 /*
3429 * Let IPRT do the job.
3430 */
3431 Mem.eType = MEMREF_TYPE_LOCKED;
3432 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3433 if (RT_SUCCESS(rc))
3434 {
3435 uint32_t iPage = cPages;
3436 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
3437 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
3438
3439 while (iPage-- > 0)
3440 {
3441 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3442 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
3443 {
3444 AssertMsgFailed(("iPage=%d\n", iPage));
3445 rc = VERR_INTERNAL_ERROR;
3446 break;
3447 }
3448 }
3449 if (RT_SUCCESS(rc))
3450 rc = supdrvMemAdd(&Mem, pSession);
3451 if (RT_FAILURE(rc))
3452 {
3453 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
3454 AssertRC(rc2);
3455 }
3456 }
3457
3458 return rc;
3459}
3460SUPR0_EXPORT_SYMBOL(SUPR0LockMem);
3461
3462
3463/**
3464 * Unlocks the memory pointed to by pv.
3465 *
3466 * @returns IPRT status code.
3467 * @param pSession Session to which the memory was locked.
3468 * @param pvR3 Memory to unlock.
3469 */
3470SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3471{
3472 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3473 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3474 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
3475}
3476SUPR0_EXPORT_SYMBOL(SUPR0UnlockMem);
3477
3478
3479/**
3480 * Allocates a chunk of page aligned memory with contiguous and fixed physical
3481 * backing.
3482 *
3483 * @returns IPRT status code.
3484 * @param pSession Session data.
3485 * @param cPages Number of pages to allocate.
3486 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
3487 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
3488 * @param pHCPhys Where to put the physical address of allocated memory.
3489 */
3490SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
3491{
3492 int rc;
3493 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3494 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
3495
3496 /*
3497 * Validate input.
3498 */
3499 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3500 if (!ppvR3 || !ppvR0 || !pHCPhys)
3501 {
3502 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
3503 pSession, ppvR0, ppvR3, pHCPhys));
3504 return VERR_INVALID_PARAMETER;
3505
3506 }
3507 if (cPages < 1 || cPages >= 256)
3508 {
3509 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3510 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3511 }
3512
3513 /*
3514 * Let IPRT do the job.
3515 */
3516 /** @todo Is the 4GiB requirement actually necessray? */
3517 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, _4G-1 /*PhysHighest*/, true /* executable R0 mapping */);
3518 if (RT_SUCCESS(rc))
3519 {
3520 int rc2;
3521 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3522 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3523 if (RT_SUCCESS(rc))
3524 {
3525 Mem.eType = MEMREF_TYPE_CONT;
3526 rc = supdrvMemAdd(&Mem, pSession);
3527 if (!rc)
3528 {
3529 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3530 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3531 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
3532 return 0;
3533 }
3534
3535 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3536 AssertRC(rc2);
3537 }
3538 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3539 AssertRC(rc2);
3540 }
3541
3542 return rc;
3543}
3544SUPR0_EXPORT_SYMBOL(SUPR0ContAlloc);
3545
3546
3547/**
3548 * Frees memory allocated using SUPR0ContAlloc().
3549 *
3550 * @returns IPRT status code.
3551 * @param pSession The session to which the memory was allocated.
3552 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3553 */
3554SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3555{
3556 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3557 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3558 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
3559}
3560SUPR0_EXPORT_SYMBOL(SUPR0ContFree);
3561
3562
3563/**
3564 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
3565 *
3566 * The memory isn't zeroed.
3567 *
3568 * @returns IPRT status code.
3569 * @param pSession Session data.
3570 * @param cPages Number of pages to allocate.
3571 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
3572 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
3573 * @param paPages Where to put the physical addresses of allocated memory.
3574 */
3575SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
3576{
3577 unsigned iPage;
3578 int rc;
3579 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3580 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
3581
3582 /*
3583 * Validate input.
3584 */
3585 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3586 if (!ppvR3 || !ppvR0 || !paPages)
3587 {
3588 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
3589 pSession, ppvR3, ppvR0, paPages));
3590 return VERR_INVALID_PARAMETER;
3591
3592 }
3593 if (cPages < 1 || cPages >= 256)
3594 {
3595 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3596 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3597 }
3598
3599 /*
3600 * Let IPRT do the work.
3601 */
3602 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
3603 if (RT_SUCCESS(rc))
3604 {
3605 int rc2;
3606 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3607 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3608 if (RT_SUCCESS(rc))
3609 {
3610 Mem.eType = MEMREF_TYPE_LOW;
3611 rc = supdrvMemAdd(&Mem, pSession);
3612 if (!rc)
3613 {
3614 for (iPage = 0; iPage < cPages; iPage++)
3615 {
3616 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3617 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", paPages[iPage]));
3618 }
3619 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3620 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3621 return 0;
3622 }
3623
3624 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3625 AssertRC(rc2);
3626 }
3627
3628 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3629 AssertRC(rc2);
3630 }
3631
3632 return rc;
3633}
3634SUPR0_EXPORT_SYMBOL(SUPR0LowAlloc);
3635
3636
3637/**
3638 * Frees memory allocated using SUPR0LowAlloc().
3639 *
3640 * @returns IPRT status code.
3641 * @param pSession The session to which the memory was allocated.
3642 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3643 */
3644SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3645{
3646 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3647 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3648 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
3649}
3650SUPR0_EXPORT_SYMBOL(SUPR0LowFree);
3651
3652
3653
3654/**
3655 * Allocates a chunk of memory with both R0 and R3 mappings.
3656 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
3657 *
3658 * @returns IPRT status code.
3659 * @param pSession The session to associated the allocation with.
3660 * @param cb Number of bytes to allocate.
3661 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3662 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3663 */
3664SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
3665{
3666 int rc;
3667 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3668 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
3669
3670 /*
3671 * Validate input.
3672 */
3673 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3674 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
3675 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3676 if (cb < 1 || cb >= _4M)
3677 {
3678 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
3679 return VERR_INVALID_PARAMETER;
3680 }
3681
3682 /*
3683 * Let IPRT do the work.
3684 */
3685 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
3686 if (RT_SUCCESS(rc))
3687 {
3688 int rc2;
3689 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3690 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3691 if (RT_SUCCESS(rc))
3692 {
3693 Mem.eType = MEMREF_TYPE_MEM;
3694 rc = supdrvMemAdd(&Mem, pSession);
3695 if (!rc)
3696 {
3697 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3698 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3699 return VINF_SUCCESS;
3700 }
3701
3702 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3703 AssertRC(rc2);
3704 }
3705
3706 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3707 AssertRC(rc2);
3708 }
3709
3710 return rc;
3711}
3712SUPR0_EXPORT_SYMBOL(SUPR0MemAlloc);
3713
3714
3715/**
3716 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
3717 *
3718 * @returns IPRT status code.
3719 * @param pSession The session to which the memory was allocated.
3720 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3721 * @param paPages Where to store the physical addresses.
3722 */
3723SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
3724{
3725 PSUPDRVBUNDLE pBundle;
3726 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
3727
3728 /*
3729 * Validate input.
3730 */
3731 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3732 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
3733 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
3734
3735 /*
3736 * Search for the address.
3737 */
3738 RTSpinlockAcquire(pSession->Spinlock);
3739 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3740 {
3741 if (pBundle->cUsed > 0)
3742 {
3743 unsigned i;
3744 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3745 {
3746 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
3747 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3748 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3749 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3750 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
3751 )
3752 )
3753 {
3754 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
3755 size_t iPage;
3756 for (iPage = 0; iPage < cPages; iPage++)
3757 {
3758 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
3759 paPages[iPage].uReserved = 0;
3760 }
3761 RTSpinlockRelease(pSession->Spinlock);
3762 return VINF_SUCCESS;
3763 }
3764 }
3765 }
3766 }
3767 RTSpinlockRelease(pSession->Spinlock);
3768 Log(("Failed to find %p!!!\n", (void *)uPtr));
3769 return VERR_INVALID_PARAMETER;
3770}
3771SUPR0_EXPORT_SYMBOL(SUPR0MemGetPhys);
3772
3773
3774/**
3775 * Free memory allocated by SUPR0MemAlloc().
3776 *
3777 * @returns IPRT status code.
3778 * @param pSession The session owning the allocation.
3779 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3780 */
3781SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3782{
3783 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3784 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3785 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
3786}
3787SUPR0_EXPORT_SYMBOL(SUPR0MemFree);
3788
3789
3790/**
3791 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
3792 *
3793 * The memory is fixed and it's possible to query the physical addresses using
3794 * SUPR0MemGetPhys().
3795 *
3796 * @returns IPRT status code.
3797 * @param pSession The session to associated the allocation with.
3798 * @param cPages The number of pages to allocate.
3799 * @param fFlags Flags, reserved for the future. Must be zero.
3800 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3801 * NULL if no ring-3 mapping.
3802 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3803 * NULL if no ring-0 mapping.
3804 * @param paPages Where to store the addresses of the pages. Optional.
3805 */
3806SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
3807{
3808 int rc;
3809 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3810 LogFlow(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
3811
3812 /*
3813 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3814 */
3815 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3816 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
3817 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3818 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
3819 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3820 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
3821 {
3822 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than %uMB (VBOX_MAX_ALLOC_PAGE_COUNT pages).\n", cPages, VBOX_MAX_ALLOC_PAGE_COUNT * (_1M / _4K)));
3823 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3824 }
3825
3826 /*
3827 * Let IPRT do the work.
3828 */
3829 if (ppvR0)
3830 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, false /*fExecutable*/);
3831 else
3832 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
3833 if (RT_SUCCESS(rc))
3834 {
3835 int rc2;
3836 if (ppvR3)
3837 {
3838 /* Make sure memory mapped into ring-3 is zero initialized if we can: */
3839 if ( ppvR0
3840 && !RTR0MemObjWasZeroInitialized(Mem.MemObj))
3841 {
3842 void *pv = RTR0MemObjAddress(Mem.MemObj);
3843 Assert(pv || !ppvR0);
3844 if (pv)
3845 RT_BZERO(pv, (size_t)cPages * PAGE_SIZE);
3846 }
3847
3848 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3849 }
3850 else
3851 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
3852 if (RT_SUCCESS(rc))
3853 {
3854 Mem.eType = MEMREF_TYPE_PAGE;
3855 rc = supdrvMemAdd(&Mem, pSession);
3856 if (!rc)
3857 {
3858 if (ppvR3)
3859 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3860 if (ppvR0)
3861 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3862 if (paPages)
3863 {
3864 uint32_t iPage = cPages;
3865 while (iPage-- > 0)
3866 {
3867 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
3868 Assert(paPages[iPage] != NIL_RTHCPHYS);
3869 }
3870 }
3871 return VINF_SUCCESS;
3872 }
3873
3874 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3875 AssertRC(rc2);
3876 }
3877
3878 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3879 AssertRC(rc2);
3880 }
3881 return rc;
3882}
3883SUPR0_EXPORT_SYMBOL(SUPR0PageAllocEx);
3884
3885
3886/**
3887 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
3888 * space.
3889 *
3890 * @returns IPRT status code.
3891 * @param pSession The session to associated the allocation with.
3892 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3893 * @param offSub Where to start mapping. Must be page aligned.
3894 * @param cbSub How much to map. Must be page aligned.
3895 * @param fFlags Flags, MBZ.
3896 * @param ppvR0 Where to return the address of the ring-0 mapping on
3897 * success.
3898 */
3899SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
3900 uint32_t fFlags, PRTR0PTR ppvR0)
3901{
3902 int rc;
3903 PSUPDRVBUNDLE pBundle;
3904 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
3905 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
3906
3907 /*
3908 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3909 */
3910 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3911 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3912 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3913 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3914 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3915 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3916
3917 /*
3918 * Find the memory object.
3919 */
3920 RTSpinlockAcquire(pSession->Spinlock);
3921 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3922 {
3923 if (pBundle->cUsed > 0)
3924 {
3925 unsigned i;
3926 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3927 {
3928 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3929 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3930 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3931 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
3932 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
3933 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3934 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
3935 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
3936 {
3937 hMemObj = pBundle->aMem[i].MemObj;
3938 break;
3939 }
3940 }
3941 }
3942 }
3943 RTSpinlockRelease(pSession->Spinlock);
3944
3945 rc = VERR_INVALID_PARAMETER;
3946 if (hMemObj != NIL_RTR0MEMOBJ)
3947 {
3948 /*
3949 * Do some further input validations before calling IPRT.
3950 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
3951 */
3952 size_t cbMemObj = RTR0MemObjSize(hMemObj);
3953 if ( offSub < cbMemObj
3954 && cbSub <= cbMemObj
3955 && offSub + cbSub <= cbMemObj)
3956 {
3957 RTR0MEMOBJ hMapObj;
3958 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
3959 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
3960 if (RT_SUCCESS(rc))
3961 *ppvR0 = RTR0MemObjAddress(hMapObj);
3962 }
3963 else
3964 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3965
3966 }
3967 return rc;
3968}
3969SUPR0_EXPORT_SYMBOL(SUPR0PageMapKernel);
3970
3971
3972/**
3973 * Changes the page level protection of one or more pages previously allocated
3974 * by SUPR0PageAllocEx.
3975 *
3976 * @returns IPRT status code.
3977 * @param pSession The session to associated the allocation with.
3978 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3979 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
3980 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
3981 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
3982 * @param offSub Where to start changing. Must be page aligned.
3983 * @param cbSub How much to change. Must be page aligned.
3984 * @param fProt The new page level protection, see RTMEM_PROT_*.
3985 */
3986SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
3987{
3988 int rc;
3989 PSUPDRVBUNDLE pBundle;
3990 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
3991 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
3992 LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt-%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
3993
3994 /*
3995 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3996 */
3997 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3998 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
3999 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
4000 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
4001 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
4002
4003 /*
4004 * Find the memory object.
4005 */
4006 RTSpinlockAcquire(pSession->Spinlock);
4007 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4008 {
4009 if (pBundle->cUsed > 0)
4010 {
4011 unsigned i;
4012 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
4013 {
4014 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
4015 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
4016 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
4017 || pvR3 == NIL_RTR3PTR)
4018 && ( pvR0 == NIL_RTR0PTR
4019 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
4020 && ( pvR3 == NIL_RTR3PTR
4021 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
4022 {
4023 if (pvR0 != NIL_RTR0PTR)
4024 hMemObjR0 = pBundle->aMem[i].MemObj;
4025 if (pvR3 != NIL_RTR3PTR)
4026 hMemObjR3 = pBundle->aMem[i].MapObjR3;
4027 break;
4028 }
4029 }
4030 }
4031 }
4032 RTSpinlockRelease(pSession->Spinlock);
4033
4034 rc = VERR_INVALID_PARAMETER;
4035 if ( hMemObjR0 != NIL_RTR0MEMOBJ
4036 || hMemObjR3 != NIL_RTR0MEMOBJ)
4037 {
4038 /*
4039 * Do some further input validations before calling IPRT.
4040 */
4041 size_t cbMemObj = hMemObjR0 != NIL_RTR0PTR ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
4042 if ( offSub < cbMemObj
4043 && cbSub <= cbMemObj
4044 && offSub + cbSub <= cbMemObj)
4045 {
4046 rc = VINF_SUCCESS;
4047 if (hMemObjR3 != NIL_RTR0PTR)
4048 rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
4049 if (hMemObjR0 != NIL_RTR0PTR && RT_SUCCESS(rc))
4050 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
4051 }
4052 else
4053 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
4054
4055 }
4056 return rc;
4057
4058}
4059SUPR0_EXPORT_SYMBOL(SUPR0PageProtect);
4060
4061
4062/**
4063 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
4064 *
4065 * @returns IPRT status code.
4066 * @param pSession The session owning the allocation.
4067 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
4068 * SUPR0PageAllocEx().
4069 */
4070SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
4071{
4072 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
4073 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4074 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
4075}
4076SUPR0_EXPORT_SYMBOL(SUPR0PageFree);
4077
4078
4079/**
4080 * Reports a bad context, currenctly that means EFLAGS.AC is 0 instead of 1.
4081 *
4082 * @param pDevExt The device extension.
4083 * @param pszFile The source file where the caller detected the bad
4084 * context.
4085 * @param uLine The line number in @a pszFile.
4086 * @param pszExtra Optional additional message to give further hints.
4087 */
4088void VBOXCALL supdrvBadContext(PSUPDRVDEVEXT pDevExt, const char *pszFile, uint32_t uLine, const char *pszExtra)
4089{
4090 uint32_t cCalls;
4091
4092 /*
4093 * Shorten the filename before displaying the message.
4094 */
4095 for (;;)
4096 {
4097 const char *pszTmp = strchr(pszFile, '/');
4098 if (!pszTmp)
4099 pszTmp = strchr(pszFile, '\\');
4100 if (!pszTmp)
4101 break;
4102 pszFile = pszTmp + 1;
4103 }
4104 if (RT_VALID_PTR(pszExtra) && *pszExtra)
4105 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s: %s\n", uLine, pszFile, pszExtra);
4106 else
4107 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s!\n", uLine, pszFile);
4108
4109 /*
4110 * Record the incident so that we stand a chance of blocking I/O controls
4111 * before panicing the system.
4112 */
4113 cCalls = ASMAtomicIncU32(&pDevExt->cBadContextCalls);
4114 if (cCalls > UINT32_MAX - _1K)
4115 ASMAtomicWriteU32(&pDevExt->cBadContextCalls, UINT32_MAX - _1K);
4116}
4117
4118
4119/**
4120 * Reports a bad context, currenctly that means EFLAGS.AC is 0 instead of 1.
4121 *
4122 * @param pSession The session of the caller.
4123 * @param pszFile The source file where the caller detected the bad
4124 * context.
4125 * @param uLine The line number in @a pszFile.
4126 * @param pszExtra Optional additional message to give further hints.
4127 */
4128SUPR0DECL(void) SUPR0BadContext(PSUPDRVSESSION pSession, const char *pszFile, uint32_t uLine, const char *pszExtra)
4129{
4130 PSUPDRVDEVEXT pDevExt;
4131
4132 AssertReturnVoid(SUP_IS_SESSION_VALID(pSession));
4133 pDevExt = pSession->pDevExt;
4134
4135 supdrvBadContext(pDevExt, pszFile, uLine, pszExtra);
4136}
4137SUPR0_EXPORT_SYMBOL(SUPR0BadContext);
4138
4139
4140/**
4141 * Gets the paging mode of the current CPU.
4142 *
4143 * @returns Paging mode, SUPPAGEINGMODE_INVALID on error.
4144 */
4145SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
4146{
4147#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
4148 SUPPAGINGMODE enmMode;
4149
4150 RTR0UINTREG cr0 = ASMGetCR0();
4151 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
4152 enmMode = SUPPAGINGMODE_INVALID;
4153 else
4154 {
4155 RTR0UINTREG cr4 = ASMGetCR4();
4156 uint32_t fNXEPlusLMA = 0;
4157 if (cr4 & X86_CR4_PAE)
4158 {
4159 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
4160 if (fExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
4161 {
4162 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
4163 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
4164 fNXEPlusLMA |= RT_BIT(0);
4165 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
4166 fNXEPlusLMA |= RT_BIT(1);
4167 }
4168 }
4169
4170 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
4171 {
4172 case 0:
4173 enmMode = SUPPAGINGMODE_32_BIT;
4174 break;
4175
4176 case X86_CR4_PGE:
4177 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
4178 break;
4179
4180 case X86_CR4_PAE:
4181 enmMode = SUPPAGINGMODE_PAE;
4182 break;
4183
4184 case X86_CR4_PAE | RT_BIT(0):
4185 enmMode = SUPPAGINGMODE_PAE_NX;
4186 break;
4187
4188 case X86_CR4_PAE | X86_CR4_PGE:
4189 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4190 break;
4191
4192 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4193 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4194 break;
4195
4196 case RT_BIT(1) | X86_CR4_PAE:
4197 enmMode = SUPPAGINGMODE_AMD64;
4198 break;
4199
4200 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
4201 enmMode = SUPPAGINGMODE_AMD64_NX;
4202 break;
4203
4204 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
4205 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
4206 break;
4207
4208 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4209 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4210 break;
4211
4212 default:
4213 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4214 enmMode = SUPPAGINGMODE_INVALID;
4215 break;
4216 }
4217 }
4218 return enmMode;
4219#else
4220 /** @todo portme? */
4221 return SUPPAGINGMODE_INVALID;
4222#endif
4223}
4224SUPR0_EXPORT_SYMBOL(SUPR0GetPagingMode);
4225
4226
4227/**
4228 * Change CR4 and take care of the kernel CR4 shadow if applicable.
4229 *
4230 * CR4 shadow handling is required for Linux >= 4.0. Calling this function
4231 * instead of ASMSetCR4() is only necessary for semi-permanent CR4 changes
4232 * for code with interrupts enabled.
4233 *
4234 * @returns the old CR4 value.
4235 *
4236 * @param fOrMask bits to be set in CR4.
4237 * @param fAndMask bits to be cleard in CR4.
4238 *
4239 * @remarks Must be called with preemption/interrupts disabled.
4240 */
4241SUPR0DECL(RTCCUINTREG) SUPR0ChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
4242{
4243#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
4244# ifdef RT_OS_LINUX
4245 return supdrvOSChangeCR4(fOrMask, fAndMask);
4246# else
4247 RTCCUINTREG uOld = ASMGetCR4();
4248 RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
4249 if (uNew != uOld)
4250 ASMSetCR4(uNew);
4251 return uOld;
4252# endif
4253#else
4254 RT_NOREF(fOrMask, fAndMask);
4255 return RTCCUINTREG_MAX;
4256#endif
4257}
4258SUPR0_EXPORT_SYMBOL(SUPR0ChangeCR4);
4259
4260
4261/**
4262 * Enables or disabled hardware virtualization extensions using native OS APIs.
4263 *
4264 * @returns VBox status code.
4265 * @retval VINF_SUCCESS on success.
4266 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4267 *
4268 * @param fEnable Whether to enable or disable.
4269 */
4270SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4271{
4272#if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
4273 return supdrvOSEnableVTx(fEnable);
4274#else
4275 RT_NOREF1(fEnable);
4276 return VERR_NOT_SUPPORTED;
4277#endif
4278}
4279SUPR0_EXPORT_SYMBOL(SUPR0EnableVTx);
4280
4281
4282/**
4283 * Suspends hardware virtualization extensions using the native OS API.
4284 *
4285 * This is called prior to entering raw-mode context.
4286 *
4287 * @returns @c true if suspended, @c false if not.
4288 */
4289SUPR0DECL(bool) SUPR0SuspendVTxOnCpu(void)
4290{
4291#if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
4292 return supdrvOSSuspendVTxOnCpu();
4293#else
4294 return false;
4295#endif
4296}
4297SUPR0_EXPORT_SYMBOL(SUPR0SuspendVTxOnCpu);
4298
4299
4300/**
4301 * Resumes hardware virtualization extensions using the native OS API.
4302 *
4303 * This is called after to entering raw-mode context.
4304 *
4305 * @param fSuspended The return value of SUPR0SuspendVTxOnCpu.
4306 */
4307SUPR0DECL(void) SUPR0ResumeVTxOnCpu(bool fSuspended)
4308{
4309#if defined(RT_OS_DARWIN) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
4310 supdrvOSResumeVTxOnCpu(fSuspended);
4311#else
4312 RT_NOREF1(fSuspended);
4313 Assert(!fSuspended);
4314#endif
4315}
4316SUPR0_EXPORT_SYMBOL(SUPR0ResumeVTxOnCpu);
4317
4318
/**
 * Gets a read-write alias of the current CPU's GDT.
 *
 * Only implemented for Linux on x86/amd64 (where the kernel maps the GDT
 * read-only since 4.12); other hosts return VERR_NOT_IMPLEMENTED.
 *
 * @returns VBox status code.
 * @param   pGdtRw      Where to return the writable GDT address.
 *                      NOTE(review): exact semantics depend on
 *                      supdrvOSGetCurrentGdtRw - confirm in SUPDrv-linux.c.
 */
SUPR0DECL(int) SUPR0GetCurrentGdtRw(RTHCUINTPTR *pGdtRw)
{
#if defined(RT_OS_LINUX) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
    return supdrvOSGetCurrentGdtRw(pGdtRw);
#else
    NOREF(pGdtRw);
    return VERR_NOT_IMPLEMENTED;
#endif
}
SUPR0_EXPORT_SYMBOL(SUPR0GetCurrentGdtRw);
4329
4330
4331/**
4332 * Gets AMD-V and VT-x support for the calling CPU.
4333 *
4334 * @returns VBox status code.
4335 * @param pfCaps Where to store whether VT-x (SUPVTCAPS_VT_X) or AMD-V
4336 * (SUPVTCAPS_AMD_V) is supported.
4337 */
4338SUPR0DECL(int) SUPR0GetVTSupport(uint32_t *pfCaps)
4339{
4340 Assert(pfCaps);
4341 *pfCaps = 0;
4342
4343#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
4344 /* Check if the CPU even supports CPUID (extremely ancient CPUs). */
4345 if (ASMHasCpuId())
4346 {
4347 /* Check the range of standard CPUID leafs. */
4348 uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
4349 ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
4350 if (RTX86IsValidStdRange(uMaxLeaf))
4351 {
4352 /* Query the standard CPUID leaf. */
4353 uint32_t fFeatEcx, fFeatEdx, uDummy;
4354 ASMCpuId(1, &uDummy, &uDummy, &fFeatEcx, &fFeatEdx);
4355
4356 /* Check if the vendor is Intel (or compatible). */
4357 if ( RTX86IsIntelCpu(uVendorEbx, uVendorEcx, uVendorEdx)
4358 || RTX86IsViaCentaurCpu(uVendorEbx, uVendorEcx, uVendorEdx)
4359 || RTX86IsShanghaiCpu(uVendorEbx, uVendorEcx, uVendorEdx))
4360 {
4361 /* Check VT-x support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4362 if ( (fFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
4363 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4364 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4365 {
4366 *pfCaps = SUPVTCAPS_VT_X;
4367 return VINF_SUCCESS;
4368 }
4369 return VERR_VMX_NO_VMX;
4370 }
4371
4372 /* Check if the vendor is AMD (or compatible). */
4373 if ( RTX86IsAmdCpu(uVendorEbx, uVendorEcx, uVendorEdx)
4374 || RTX86IsHygonCpu(uVendorEbx, uVendorEcx, uVendorEdx))
4375 {
4376 uint32_t fExtFeatEcx, uExtMaxId;
4377 ASMCpuId(0x80000000, &uExtMaxId, &uDummy, &uDummy, &uDummy);
4378 ASMCpuId(0x80000001, &uDummy, &uDummy, &fExtFeatEcx, &uDummy);
4379
4380 /* Check AMD-V support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4381 if ( RTX86IsValidExtRange(uExtMaxId)
4382 && uExtMaxId >= 0x8000000a
4383 && (fExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
4384 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4385 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4386 {
4387 *pfCaps = SUPVTCAPS_AMD_V;
4388 return VINF_SUCCESS;
4389 }
4390 return VERR_SVM_NO_SVM;
4391 }
4392 }
4393 }
4394#endif
4395 return VERR_UNSUPPORTED_CPU;
4396}
4397SUPR0_EXPORT_SYMBOL(SUPR0GetVTSupport);
4398
4399
4400/**
4401 * Checks if Intel VT-x feature is usable on this CPU.
4402 *
4403 * @returns VBox status code.
4404 * @param pfIsSmxModeAmbiguous Where to return whether the SMX mode causes
4405 * ambiguity that makes us unsure whether we
4406 * really can use VT-x or not.
4407 *
4408 * @remarks Must be called with preemption disabled.
4409 * The caller is also expected to check that the CPU is an Intel (or
4410 * VIA/Shanghai) CPU -and- that it supports VT-x. Otherwise, this
4411 * function might throw a \#GP fault as it tries to read/write MSRs
4412 * that may not be present!
4413 */
SUPR0DECL(int) SUPR0GetVmxUsability(bool *pfIsSmxModeAmbiguous)
{
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    uint64_t fFeatMsr;
    bool     fMaybeSmxMode;
    bool     fMsrLocked;
    bool     fSmxVmxAllowed;
    bool     fVmxAllowed;
    bool     fIsSmxModeAmbiguous;
    int      rc;

    /* We read and possibly write a per-CPU MSR below; the thread must not migrate. */
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    /* Snapshot the feature control MSR and the CR4.SMXE hint for this CPU. */
    fFeatMsr            = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    fMaybeSmxMode       = RT_BOOL(ASMGetCR4() & X86_CR4_SMXE);
    fMsrLocked          = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
    fSmxVmxAllowed      = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
    fVmxAllowed         = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
    fIsSmxModeAmbiguous = false;
    rc                  = VERR_INTERNAL_ERROR_5; /* Paranoia; every branch below overwrites this. */

    /* Check if the LOCK bit is set but excludes the required VMXON bit. */
    if (fMsrLocked)
    {
        if (fVmxAllowed && fSmxVmxAllowed)
            rc = VINF_SUCCESS;                  /* VMXON permitted both inside and outside SMX operation. */
        else if (!fVmxAllowed && !fSmxVmxAllowed)
            rc = VERR_VMX_MSR_ALL_VMX_DISABLED; /* The firmware disabled VMX entirely. */
        else if (!fMaybeSmxMode)
        {
            /* CR4.SMXE is clear, so we cannot be in SMX operation; only the plain VMXON bit matters. */
            if (fVmxAllowed)
                rc = VINF_SUCCESS;
            else
                rc = VERR_VMX_MSR_VMX_DISABLED;
        }
        else
        {
            /*
             * CR4.SMXE is set but this doesn't mean the CPU is necessarily in SMX mode. We shall assume
             * that it is -not- and that it is a stupid BIOS/OS setting CR4.SMXE for no good reason.
             * See @bugref{6873}.
             */
            Assert(fMaybeSmxMode == true);
            fIsSmxModeAmbiguous = true;
            rc = VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * MSR is not yet locked; we can change it ourselves here. Once the lock bit is set,
         * this MSR can no longer be modified.
         *
         * Set both the VMX and SMX_VMX bits (if supported) as we can't determine SMX mode
         * accurately. See @bugref{6873}.
         *
         * We need to check for SMX hardware support here, before writing the MSR as
         * otherwise we will #GP fault on CPUs that do not support it. Callers do not check
         * for it.
         */
        uint32_t fFeaturesECX, uDummy;
#ifdef VBOX_STRICT
        /* Callers should have verified these at some point. */
        uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
        ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
        Assert(RTX86IsValidStdRange(uMaxId));
        Assert(   RTX86IsIntelCpu(     uVendorEBX, uVendorECX, uVendorEDX)
               || RTX86IsViaCentaurCpu(uVendorEBX, uVendorECX, uVendorEDX)
               || RTX86IsShanghaiCpu(  uVendorEBX, uVendorECX, uVendorEDX));
#endif
        /* CPUID leaf 1 ECX carries both the VMX and SMX feature flags. */
        ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
        bool fSmxVmxHwSupport = false;
        if (   (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
            && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
            fSmxVmxHwSupport = true;

        /* Enable VMXON (and SMX VMXON when the hardware has SMX) and lock the MSR in one go. */
        fFeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
                  | MSR_IA32_FEATURE_CONTROL_VMXON;
        if (fSmxVmxHwSupport)
            fFeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;

        /*
         * Commit.
         */
        ASMWrMsr(MSR_IA32_FEATURE_CONTROL, fFeatMsr);

        /*
         * Verify.
         */
        fFeatMsr   = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
        fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
        if (fMsrLocked)
        {
            fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
            fVmxAllowed    = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
            if (   fVmxAllowed
                && (   !fSmxVmxHwSupport
                    || fSmxVmxAllowed))
                rc = VINF_SUCCESS;
            else
                rc = !fSmxVmxHwSupport ? VERR_VMX_MSR_VMX_ENABLE_FAILED : VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED;
        }
        else
            rc = VERR_VMX_MSR_LOCKING_FAILED; /* Our write did not stick. */
    }

    if (pfIsSmxModeAmbiguous)
        *pfIsSmxModeAmbiguous = fIsSmxModeAmbiguous;

    return rc;

#else  /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
    /* VT-x is an x86/AMD64-only feature. */
    if (pfIsSmxModeAmbiguous)
        *pfIsSmxModeAmbiguous = false;
    return VERR_UNSUPPORTED_CPU;
#endif /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
}
SUPR0_EXPORT_SYMBOL(SUPR0GetVmxUsability);
4532
4533
/**
 * Checks if AMD-V SVM feature is usable on this CPU.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if SVM is usable.
 * @retval  VERR_SVM_DISABLED if the firmware disabled SVM via the VM_CR MSR.
 * @retval  VERR_SVM_IN_USE if EFER.SVME is already set (somebody else owns SVM).
 * @retval  VERR_SVM_ILLEGAL_EFER_MSR if EFER.SVME could not be set.
 *
 * @param   fInitSvm    If usable, try to initialize SVM on this CPU.
 *
 * @remarks Must be called with preemption disabled.
 */
SUPR0DECL(int) SUPR0GetSvmUsability(bool fInitSvm)
{
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    int      rc;
    uint64_t fVmCr;
    uint64_t fEfer;

    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    /* The firmware can disable SVM via the SVMDIS bit in the VM_CR MSR. */
    fVmCr = ASMRdMsr(MSR_K8_VM_CR);
    if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
    {
        rc = VINF_SUCCESS;
        if (fInitSvm)
        {
            /* Turn on SVM in the EFER MSR. */
            fEfer = ASMRdMsr(MSR_K6_EFER);
            if (fEfer & MSR_K6_EFER_SVME)
                rc = VERR_SVM_IN_USE; /* Another agent already enabled SVM on this CPU. */
            else
            {
                /* Probe: set SVME, check that it sticks, then restore the original value. */
                ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);

                /* Paranoia. */
                fEfer = ASMRdMsr(MSR_K6_EFER);
                if (fEfer & MSR_K6_EFER_SVME)
                {
                    /* Restore previous value. */
                    ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
                }
                else
                    rc = VERR_SVM_ILLEGAL_EFER_MSR; /* The SVME write did not stick. */
            }
        }
    }
    else
        rc = VERR_SVM_DISABLED;
    return rc;

#else  /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
    /* AMD-V is an x86/AMD64-only feature. */
    RT_NOREF(fInitSvm);
    return VERR_UNSUPPORTED_CPU;
#endif /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
}
SUPR0_EXPORT_SYMBOL(SUPR0GetSvmUsability);
4586
4587
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
/**
 * Queries the AMD-V and VT-x capabilities of the calling CPU.
 *
 * @returns VBox status code.
 * @retval  VERR_VMX_NO_VMX
 * @retval  VERR_VMX_MSR_ALL_VMX_DISABLED
 * @retval  VERR_VMX_MSR_VMX_DISABLED
 * @retval  VERR_VMX_MSR_LOCKING_FAILED
 * @retval  VERR_VMX_MSR_VMX_ENABLE_FAILED
 * @retval  VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
 * @retval  VERR_SVM_NO_SVM
 * @retval  VERR_SVM_DISABLED
 * @retval  VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
 *          (centaur)/Shanghai CPU.
 *
 * @param   pfCaps      Where to store the capabilities.
 *
 * @remarks Disables preemption internally so all checks run on the same CPU.
 */
int VBOXCALL supdrvQueryVTCapsInternal(uint32_t *pfCaps)
{
    int rc = VERR_UNSUPPORTED_CPU;
    bool fIsSmxModeAmbiguous = false;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    /*
     * Input validation.
     */
    AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
    *pfCaps = 0;

    /* We may modify MSRs and re-read them, disable preemption so we make sure we don't migrate CPUs. */
    RTThreadPreemptDisable(&PreemptState);

    /* Check if VT-x/AMD-V is supported. */
    rc = SUPR0GetVTSupport(pfCaps);
    if (RT_SUCCESS(rc))
    {
        /* Check if VT-x is supported. */
        if (*pfCaps & SUPVTCAPS_VT_X)
        {
            /* Check if VT-x is usable. */
            rc = SUPR0GetVmxUsability(&fIsSmxModeAmbiguous);
            if (RT_SUCCESS(rc))
            {
                /* Query some basic VT-x capabilities (mainly required by our GUI). */
                VMXCTLSMSR vtCaps;
                vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
                if (vtCaps.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
                {
                    /* EPT, unrestricted guest and VMCS shadowing are all secondary controls. */
                    vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
                    if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_EPT)
                        *pfCaps |= SUPVTCAPS_NESTED_PAGING;
                    if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
                        *pfCaps |= SUPVTCAPS_VTX_UNRESTRICTED_GUEST;
                    if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING)
                        *pfCaps |= SUPVTCAPS_VTX_VMCS_SHADOWING;
                }
            }
        }
        /* Check if AMD-V is supported. */
        else if (*pfCaps & SUPVTCAPS_AMD_V)
        {
            /* Check is SVM is usable. */
            rc = SUPR0GetSvmUsability(false /* fInitSvm */);
            if (RT_SUCCESS(rc))
            {
                /* Query some basic AMD-V capabilities (mainly required by our GUI). */
                uint32_t uDummy, fSvmFeatures;
                ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
                if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
                    *pfCaps |= SUPVTCAPS_NESTED_PAGING;
                if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD)
                    *pfCaps |= SUPVTCAPS_AMDV_VIRT_VMSAVE_VMLOAD;
            }
        }
    }

    /* Restore preemption. */
    RTThreadPreemptRestore(&PreemptState);

    /* After restoring preemption, if we may be in SMX mode, print a warning as it's difficult to debug such problems. */
    if (fIsSmxModeAmbiguous)
        SUPR0Printf(("WARNING! CR4 hints SMX mode but your CPU is too secretive. Proceeding anyway... We wish you good luck!\n"));

    return rc;
}
#endif /* RT_ARCH_AMD64 || RT_ARCH_X86 */
4675
4676
4677/**
4678 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4679 *
4680 * @returns VBox status code.
4681 * @retval VERR_VMX_NO_VMX
4682 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4683 * @retval VERR_VMX_MSR_VMX_DISABLED
4684 * @retval VERR_VMX_MSR_LOCKING_FAILED
4685 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4686 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4687 * @retval VERR_SVM_NO_SVM
4688 * @retval VERR_SVM_DISABLED
4689 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4690 * (centaur)/Shanghai CPU.
4691 *
4692 * @param pSession The session handle.
4693 * @param pfCaps Where to store the capabilities.
4694 */
4695SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
4696{
4697 /*
4698 * Input validation.
4699 */
4700 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4701 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4702
4703#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
4704 /*
4705 * Call common worker.
4706 */
4707 return supdrvQueryVTCapsInternal(pfCaps);
4708#else
4709 return VERR_UNSUPPORTED_CPU;
4710#endif
4711}
4712SUPR0_EXPORT_SYMBOL(SUPR0QueryVTCaps);
4713
4714
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
/**
 * Queries the CPU microcode revision.
 *
 * @returns VBox status code.
 * @retval  VERR_UNSUPPORTED_CPU if not identifiable as a processor with
 *          readable microcode rev.
 *
 * @param   puRevision      Where to store the microcode revision.
 *
 * @remarks Disables preemption internally so the CPUID/MSR reads all happen
 *          on the same CPU.
 */
static int VBOXCALL supdrvQueryUcodeRev(uint32_t *puRevision)
{
    int rc = VERR_UNSUPPORTED_CPU;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    /*
     * Input validation.
     */
    AssertPtrReturn(puRevision, VERR_INVALID_POINTER);

    *puRevision = 0;

    /* Disable preemption so we make sure we don't migrate CPUs, just in case. */
    /* NB: We assume that there aren't mismatched microcode revs in the system. */
    RTThreadPreemptDisable(&PreemptState);

    if (ASMHasCpuId())
    {
        uint32_t uDummy, uTFMSEAX;
        uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;

        /* Leaf 0: vendor + max standard leaf; leaf 1 EAX: type/family/model/stepping. */
        ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
        ASMCpuId(1, &uTFMSEAX, &uDummy, &uDummy, &uDummy);

        if (RTX86IsValidStdRange(uMaxId))
        {
            uint64_t uRevMsr;
            if (RTX86IsIntelCpu(uVendorEBX, uVendorECX, uVendorEDX))
            {
                /* Architectural MSR available on Pentium Pro and later. */
                if (RTX86GetCpuFamily(uTFMSEAX) >= 6)
                {
                    /* Revision is in the high dword. */
                    uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
                    *puRevision = RT_HIDWORD(uRevMsr);
                    rc = VINF_SUCCESS;
                }
            }
            else if (   RTX86IsAmdCpu(uVendorEBX, uVendorECX, uVendorEDX)
                     || RTX86IsHygonCpu(uVendorEBX, uVendorECX, uVendorEDX))
            {
                /* Not well documented, but at least all AMD64 CPUs support this. */
                if (RTX86GetCpuFamily(uTFMSEAX) >= 15)
                {
                    /* Revision is in the low dword. */
                    uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID); /* Same MSR as Intel. */
                    *puRevision = RT_LODWORD(uRevMsr);
                    rc = VINF_SUCCESS;
                }
            }
        }
    }

    RTThreadPreemptRestore(&PreemptState);

    return rc;
}
#endif /* RT_ARCH_AMD64 || RT_ARCH_X86 */
4783
4784
4785/**
4786 * Queries the CPU microcode revision.
4787 *
4788 * @returns VBox status code.
4789 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4790 * readable microcode rev.
4791 *
4792 * @param pSession The session handle.
4793 * @param puRevision Where to store the microcode revision.
4794 */
4795SUPR0DECL(int) SUPR0QueryUcodeRev(PSUPDRVSESSION pSession, uint32_t *puRevision)
4796{
4797 /*
4798 * Input validation.
4799 */
4800 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4801 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4802
4803 /*
4804 * Call common worker.
4805 */
4806#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
4807 return supdrvQueryUcodeRev(puRevision);
4808#else
4809 return VERR_UNSUPPORTED_CPU;
4810#endif
4811}
4812SUPR0_EXPORT_SYMBOL(SUPR0QueryUcodeRev);
4813
4814
/**
 * Gets hardware-virtualization MSRs of the calling CPU.
 *
 * @returns VBox status code.
 * @param   pMsrs       Where to store the hardware-virtualization MSRs.
 * @param   fCaps       Hardware virtualization capabilities (SUPVTCAPS_XXX). Pass 0
 *                      to explicitly check for the presence of VT-x/AMD-V before
 *                      querying MSRs.
 * @param   fForce      Force querying of MSRs from the hardware.
 */
SUPR0DECL(int) SUPR0GetHwvirtMsrs(PSUPHWVIRTMSRS pMsrs, uint32_t fCaps, bool fForce)
{
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    int rc;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    NOREF(fForce); /* Currently unused; the MSRs are always read from the hardware. */

    /*
     * Input validation.
     */
    AssertPtrReturn(pMsrs, VERR_INVALID_POINTER);

    /*
     * Disable preemption so we make sure we don't migrate CPUs and because
     * we access global data.
     */
    RTThreadPreemptDisable(&PreemptState);

    /*
     * Query the MSRs from the hardware.  Assembled in a zeroed local copy
     * first; only copied out to the caller on success.
     */
    SUPHWVIRTMSRS Msrs;
    RT_ZERO(Msrs);

    /* If the caller claims VT-x/AMD-V is supported, don't need to recheck it. */
    if (!(fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V)))
        rc = SUPR0GetVTSupport(&fCaps);
    else
        rc = VINF_SUCCESS;
    if (RT_SUCCESS(rc))
    {
        if (fCaps & SUPVTCAPS_VT_X)
        {
            /* Basic VMX capability/control MSRs, present on all VT-x capable CPUs. */
            Msrs.u.vmx.u64FeatCtrl  = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
            Msrs.u.vmx.u64Basic     = ASMRdMsr(MSR_IA32_VMX_BASIC);
            Msrs.u.vmx.PinCtls.u    = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
            Msrs.u.vmx.ProcCtls.u   = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
            Msrs.u.vmx.ExitCtls.u   = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
            Msrs.u.vmx.EntryCtls.u  = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
            Msrs.u.vmx.u64Misc      = ASMRdMsr(MSR_IA32_VMX_MISC);
            Msrs.u.vmx.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
            Msrs.u.vmx.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
            Msrs.u.vmx.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
            Msrs.u.vmx.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
            Msrs.u.vmx.u64VmcsEnum  = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);

            /* The TRUE_* control MSRs only exist when the basic MSR says so. */
            if (RT_BF_GET(Msrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
            {
                Msrs.u.vmx.TruePinCtls.u   = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
                Msrs.u.vmx.TrueProcCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
                Msrs.u.vmx.TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
                Msrs.u.vmx.TrueExitCtls.u  = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
            }

            /* Secondary controls and their dependents (EPT/VPID caps, VMFUNC). */
            if (Msrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
            {
                Msrs.u.vmx.ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);

                if (Msrs.u.vmx.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
                    Msrs.u.vmx.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);

                if (Msrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMFUNC)
                    Msrs.u.vmx.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
            }

            /* Tertiary processor-based and secondary exit controls, when advertised. */
            if (Msrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
                Msrs.u.vmx.u64ProcCtls3 = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS3);

            if (Msrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_USE_SECONDARY_CTLS)
                Msrs.u.vmx.u64ExitCtls2 = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS2);
        }
        else if (fCaps & SUPVTCAPS_AMD_V)
        {
            Msrs.u.svm.u64MsrHwcr    = ASMRdMsr(MSR_K8_HWCR);
            Msrs.u.svm.u64MsrSmmAddr = ASMRdMsr(MSR_K7_SMM_ADDR);
            Msrs.u.svm.u64MsrSmmMask = ASMRdMsr(MSR_K7_SMM_MASK);
        }
        else
        {
            /* SUPR0GetVTSupport succeeded, so one of the two bits must have been set. */
            RTThreadPreemptRestore(&PreemptState);
            AssertMsgFailedReturn(("SUPR0GetVTSupport returns success but neither VT-x nor AMD-V reported!\n"),
                                  VERR_INTERNAL_ERROR_2);
        }

        /*
         * Copy the MSRs out.
         */
        memcpy(pMsrs, &Msrs, sizeof(*pMsrs));
    }

    RTThreadPreemptRestore(&PreemptState);

    return rc;

#else  /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
    RT_NOREF(pMsrs, fCaps, fForce);
    return VERR_UNSUPPORTED_CPU;
#endif /* !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86) */
}
SUPR0_EXPORT_SYMBOL(SUPR0GetHwvirtMsrs);
4925
4926
4927/**
4928 * Register a component factory with the support driver.
4929 *
4930 * This is currently restricted to kernel sessions only.
4931 *
4932 * @returns VBox status code.
4933 * @retval VINF_SUCCESS on success.
4934 * @retval VERR_NO_MEMORY if we're out of memory.
4935 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
4936 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4937 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4938 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4939 *
4940 * @param pSession The SUPDRV session (must be a ring-0 session).
4941 * @param pFactory Pointer to the component factory registration structure.
4942 *
4943 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
4944 */
4945SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4946{
4947 PSUPDRVFACTORYREG pNewReg;
4948 const char *psz;
4949 int rc;
4950
4951 /*
4952 * Validate parameters.
4953 */
4954 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4955 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4956 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4957 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
4958 psz = RTStrEnd(pFactory->szName, sizeof(pFactory->szName));
4959 AssertReturn(psz, VERR_INVALID_PARAMETER);
4960
4961 /*
4962 * Allocate and initialize a new registration structure.
4963 */
4964 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
4965 if (pNewReg)
4966 {
4967 pNewReg->pNext = NULL;
4968 pNewReg->pFactory = pFactory;
4969 pNewReg->pSession = pSession;
4970 pNewReg->cchName = psz - &pFactory->szName[0];
4971
4972 /*
4973 * Add it to the tail of the list after checking for prior registration.
4974 */
4975 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4976 if (RT_SUCCESS(rc))
4977 {
4978 PSUPDRVFACTORYREG pPrev = NULL;
4979 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4980 while (pCur && pCur->pFactory != pFactory)
4981 {
4982 pPrev = pCur;
4983 pCur = pCur->pNext;
4984 }
4985 if (!pCur)
4986 {
4987 if (pPrev)
4988 pPrev->pNext = pNewReg;
4989 else
4990 pSession->pDevExt->pComponentFactoryHead = pNewReg;
4991 rc = VINF_SUCCESS;
4992 }
4993 else
4994 rc = VERR_ALREADY_EXISTS;
4995
4996 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4997 }
4998
4999 if (RT_FAILURE(rc))
5000 RTMemFree(pNewReg);
5001 }
5002 else
5003 rc = VERR_NO_MEMORY;
5004 return rc;
5005}
5006SUPR0_EXPORT_SYMBOL(SUPR0ComponentRegisterFactory);
5007
5008
5009/**
5010 * Deregister a component factory.
5011 *
5012 * @returns VBox status code.
5013 * @retval VINF_SUCCESS on success.
5014 * @retval VERR_NOT_FOUND if the factory wasn't registered.
5015 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
5016 * @retval VERR_INVALID_PARAMETER on invalid parameter.
5017 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
5018 *
5019 * @param pSession The SUPDRV session (must be a ring-0 session).
5020 * @param pFactory Pointer to the component factory registration structure
5021 * previously passed SUPR0ComponentRegisterFactory().
5022 *
5023 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
5024 */
5025SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
5026{
5027 int rc;
5028
5029 /*
5030 * Validate parameters.
5031 */
5032 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
5033 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
5034 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
5035
5036 /*
5037 * Take the lock and look for the registration record.
5038 */
5039 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
5040 if (RT_SUCCESS(rc))
5041 {
5042 PSUPDRVFACTORYREG pPrev = NULL;
5043 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
5044 while (pCur && pCur->pFactory != pFactory)
5045 {
5046 pPrev = pCur;
5047 pCur = pCur->pNext;
5048 }
5049 if (pCur)
5050 {
5051 if (!pPrev)
5052 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
5053 else
5054 pPrev->pNext = pCur->pNext;
5055
5056 pCur->pNext = NULL;
5057 pCur->pFactory = NULL;
5058 pCur->pSession = NULL;
5059 rc = VINF_SUCCESS;
5060 }
5061 else
5062 rc = VERR_NOT_FOUND;
5063
5064 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
5065
5066 RTMemFree(pCur);
5067 }
5068 return rc;
5069}
5070SUPR0_EXPORT_SYMBOL(SUPR0ComponentDeregisterFactory);
5071
5072
5073/**
5074 * Queries a component factory.
5075 *
5076 * @returns VBox status code.
5077 * @retval VERR_INVALID_PARAMETER on invalid parameter.
5078 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
5079 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
5080 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
5081 *
5082 * @param pSession The SUPDRV session.
5083 * @param pszName The name of the component factory.
5084 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
5085 * @param ppvFactoryIf Where to store the factory interface.
5086 */
5087SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
5088{
5089 const char *pszEnd;
5090 size_t cchName;
5091 int rc;
5092
5093 /*
5094 * Validate parameters.
5095 */
5096 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
5097
5098 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
5099 pszEnd = RTStrEnd(pszName, RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
5100 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5101 cchName = pszEnd - pszName;
5102
5103 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
5104 pszEnd = RTStrEnd(pszInterfaceUuid, RTUUID_STR_LENGTH);
5105 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5106
5107 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
5108 *ppvFactoryIf = NULL;
5109
5110 /*
5111 * Take the lock and try all factories by this name.
5112 */
5113 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
5114 if (RT_SUCCESS(rc))
5115 {
5116 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
5117 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
5118 while (pCur)
5119 {
5120 if ( pCur->cchName == cchName
5121 && !memcmp(pCur->pFactory->szName, pszName, cchName))
5122 {
5123 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
5124 if (pvFactory)
5125 {
5126 *ppvFactoryIf = pvFactory;
5127 rc = VINF_SUCCESS;
5128 break;
5129 }
5130 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
5131 }
5132
5133 /* next */
5134 pCur = pCur->pNext;
5135 }
5136
5137 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
5138 }
5139 return rc;
5140}
5141SUPR0_EXPORT_SYMBOL(SUPR0ComponentQueryFactory);
5142
5143
/**
 * Adds a memory object to the session.
 *
 * First tries to find a free slot in one of the session's existing bundles
 * (under the spinlock); if they are all full, a new bundle is allocated and
 * pushed onto the session's bundle list.
 *
 * @returns IPRT status code.
 * @retval  VERR_NO_MEMORY if a new bundle was needed but couldn't be allocated.
 *
 * @param   pMem        Memory tracking structure containing the
 *                      information to track.
 * @param   pSession    The session.
 */
static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
{
    PSUPDRVBUNDLE pBundle;

    /*
     * Find free entry and record the allocation.
     */
    RTSpinlockAcquire(pSession->Spinlock);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                /* A NIL memory object marks an unused slot. */
                if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
                {
                    pBundle->cUsed++;
                    pBundle->aMem[i] = *pMem;
                    RTSpinlockRelease(pSession->Spinlock);
                    return VINF_SUCCESS;
                }
            }
            AssertFailed(); /* !!this can't be happening!!! */
        }
    }
    RTSpinlockRelease(pSession->Spinlock);

    /*
     * Need to allocate a new bundle.
     * Insert into the last entry in the bundle.
     */
    /* Note: the allocation happens outside the spinlock; racing threads may each
       add a bundle, which is harmless - the list just grows by one more. */
    pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
    if (!pBundle)
        return VERR_NO_MEMORY;

    /* take last entry. */
    pBundle->cUsed++;
    pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;

    /* insert into list. */
    RTSpinlockAcquire(pSession->Spinlock);
    pBundle->pNext = pSession->Bundle.pNext;
    pSession->Bundle.pNext = pBundle;
    RTSpinlockRelease(pSession->Spinlock);

    return VINF_SUCCESS;
}
5200
5201
/**
 * Releases a memory object referenced by pointer and type.
 *
 * The matching entry is cleared inside the spinlock, but the actual freeing of
 * the memory objects is done after the lock has been dropped.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_PARAMETER if uPtr is NULL or no matching allocation
 *          was found.
 *
 * @param   pSession    Session data.
 * @param   uPtr        Pointer to memory. This is matched against both the R0 and R3 addresses.
 * @param   eType       Memory type.
 */
static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
{
    PSUPDRVBUNDLE pBundle;

    /*
     * Validate input.
     */
    if (!uPtr)
    {
        Log(("Illegal address %p\n", (void *)uPtr));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Search for the address.
     */
    /* NOTE(review): pBundle->cUsed is not decremented when an entry is freed
       here - looks intentional given the NIL-slot scan in supdrvMemAdd, but
       verify that bundles are reclaimed elsewhere (session cleanup). */
    RTSpinlockAcquire(pSession->Spinlock);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                /* Match on type and either the ring-0 address or, when mapped, the ring-3 address. */
                if (    pBundle->aMem[i].eType == eType
                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    &&  (   (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
                         || (   pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                             && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
                    )
                {
                    /* Make a copy of it and release it outside the spinlock. */
                    SUPDRVMEMREF Mem = pBundle->aMem[i];
                    pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
                    pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
                    pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
                    RTSpinlockRelease(pSession->Spinlock);

                    /* Free the ring-3 mapping first, then the backing object (with its remaining mappings). */
                    if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MapObjR3, false);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    if (Mem.MemObj != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock);
    Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
    return VERR_INVALID_PARAMETER;
}
5267
5268
/**
 * Opens an image. If it's the first time it's opened the call must upload
 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
 *
 * This is the 1st step of the loading.
 *
 * @returns IPRT status code.
 * @retval  VERR_TOO_MANY_REFERENCES if the image usage count is about to overflow.
 * @retval  VERR_PERMISSION_DENIED if the loader interface is locked down.
 * @retval  VERR_MODULE_NOT_FOUND if no instance exists and the caller isn't
 *          prepared to load one (cbImageBits is zero).
 * @retval  VERR_NO_MEMORY on allocation failure.
 *
 * @param   pDevExt     Device globals.
 * @param   pSession    Session data.
 * @param   pReq        The open request.
 */
static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
{
    int rc;
    PSUPDRVLDRIMAGE pImage;
    void *pv;
    size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
    SUPDRV_CHECK_SMAP_SETUP();
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithEverything=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithEverything));

    /*
     * Check if we got an instance of the image already.
     */
    supdrvLdrLock(pDevExt);
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
    {
        /* Compare length (terminator position) first, then the name bytes. */
        if (    pImage->szName[cchName] == '\0'
            &&  !memcmp(pImage->szName, pReq->u.In.szName, cchName))
        {
            /** @todo Add an _1M (or something) per session reference. */
            if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
            {
                /** @todo check cbImageBits and cbImageWithEverything here, if they differs
                 *        that indicates that the images are different. */
                pReq->u.Out.pvImageBase   = pImage->pvImage;
                pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
                pReq->u.Out.fNativeLoader = pImage->fNative;
                supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
                supdrvLdrUnlock(pDevExt);
                SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
                return VINF_SUCCESS;
            }
            supdrvLdrUnlock(pDevExt);
            Log(("supdrvIOCtl_LdrOpen: Too many existing references to '%s'!\n", pReq->u.In.szName));
            return VERR_TOO_MANY_REFERENCES;
        }
    }
    /* (not found - add it!) */

    /* If the loader interface is locked down, make userland fail early */
    if (pDevExt->fLdrLockedDown)
    {
        supdrvLdrUnlock(pDevExt);
        Log(("supdrvIOCtl_LdrOpen: Not adding '%s' to image list, loader interface is locked down!\n", pReq->u.In.szName));
        return VERR_PERMISSION_DENIED;
    }

    /* Stop if caller doesn't wish to prepare loading things. */
    if (!pReq->u.In.cbImageBits)
    {
        supdrvLdrUnlock(pDevExt);
        Log(("supdrvIOCtl_LdrOpen: Returning VERR_MODULE_NOT_FOUND for '%s'!\n", pReq->u.In.szName));
        return VERR_MODULE_NOT_FOUND;
    }

    /*
     * Allocate memory.
     */
    Assert(cchName < sizeof(pImage->szName));
    pv = RTMemAllocZ(sizeof(SUPDRVLDRIMAGE));
    if (!pv)
    {
        supdrvLdrUnlock(pDevExt);
        Log(("supdrvIOCtl_LdrOpen: RTMemAllocZ() failed\n"));
        return VERR_NO_MEMORY;
    }
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);

    /*
     * Setup and link in the LDR stuff.
     */
    pImage = (PSUPDRVLDRIMAGE)pv;
    pImage->pvImage              = NULL;
    pImage->hMemObjImage         = NIL_RTR0MEMOBJ;
    pImage->cbImageWithEverything = pReq->u.In.cbImageWithEverything;
    pImage->cbImageBits          = pReq->u.In.cbImageBits;
    pImage->cSymbols             = 0;
    pImage->paSymbols            = NULL;
    pImage->pachStrTab           = NULL;
    pImage->cbStrTab             = 0;
    pImage->cSegments            = 0;
    pImage->paSegments           = NULL;
    pImage->pfnModuleInit        = NULL;
    pImage->pfnModuleTerm        = NULL;
    pImage->pfnServiceReqHandler = NULL;
    pImage->uState               = SUP_IOCTL_LDR_OPEN;
    pImage->cImgUsage            = 0; /* Increased by supdrvLdrAddUsage later */
    pImage->pDevExt              = pDevExt;
    pImage->pImageImport         = NULL;
    pImage->uMagic               = SUPDRVLDRIMAGE_MAGIC;
    pImage->pWrappedModInfo      = NULL;
    memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);

    /*
     * Try load it using the native loader, if that isn't supported, fall back
     * on the older method.
     */
    pImage->fNative = true;
    rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
    if (rc == VERR_NOT_SUPPORTED)
    {
        /* Fallback: allocate executable pages; ring-3 uploads the bits later. */
        rc = RTR0MemObjAllocPage(&pImage->hMemObjImage, pImage->cbImageBits, true /*fExecutable*/);
        if (RT_SUCCESS(rc))
        {
            pImage->pvImage = RTR0MemObjAddress(pImage->hMemObjImage);
            pImage->fNative = false;
        }
        SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    }
    if (RT_SUCCESS(rc))
        rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
    if (RT_FAILURE(rc))
    {
        /* Invalidate the magic before freeing so stale pointers are caught. */
        supdrvLdrUnlock(pDevExt);
        pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
        RTMemFree(pImage);
        Log(("supdrvIOCtl_LdrOpen(%s): failed - %Rrc\n", pReq->u.In.szName, rc));
        return rc;
    }
    Assert(RT_VALID_PTR(pImage->pvImage) || RT_FAILURE(rc));

    /*
     * Link it.
     */
    pImage->pNext = pDevExt->pLdrImages;
    pDevExt->pLdrImages = pImage;

    pReq->u.Out.pvImageBase   = pImage->pvImage;
    pReq->u.Out.fNeedsLoading = true;
    pReq->u.Out.fNativeLoader = pImage->fNative;
    supdrvOSLdrNotifyOpened(pDevExt, pImage, pReq->u.In.szFilename);

    supdrvLdrUnlock(pDevExt);
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    return VINF_SUCCESS;
}
5417
5418
5419/**
5420 * Formats a load error message.
5421 *
5422 * @returns @a rc
5423 * @param rc Return code.
5424 * @param pReq The request.
5425 * @param pszFormat The error message format string.
5426 * @param ... Argument to the format string.
5427 */
5428int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
5429{
5430 va_list va;
5431 va_start(va, pszFormat);
5432 pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
5433 RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
5434 va_end(va);
5435 Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
5436 return rc;
5437}
5438
5439
5440/**
5441 * Worker that validates a pointer to an image entrypoint.
5442 *
5443 * Calls supdrvLdrLoadError on error.
5444 *
5445 * @returns IPRT status code.
5446 * @param pDevExt The device globals.
5447 * @param pImage The loader image.
5448 * @param pv The pointer into the image.
5449 * @param fMayBeNull Whether it may be NULL.
5450 * @param pszSymbol The entrypoint name or log name. If the symbol is
5451 * capitalized it signifies a specific symbol, otherwise it
5452 * for logging.
5453 * @param pbImageBits The image bits prepared by ring-3.
5454 * @param pReq The request for passing to supdrvLdrLoadError.
5455 *
5456 * @note Will leave the loader lock on failure!
5457 */
5458static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv, bool fMayBeNull,
5459 const uint8_t *pbImageBits, const char *pszSymbol, PSUPLDRLOAD pReq)
5460{
5461 if (!fMayBeNull || pv)
5462 {
5463 uint32_t iSeg;
5464
5465 /* Must be within the image bits: */
5466 uintptr_t const uRva = (uintptr_t)pv - (uintptr_t)pImage->pvImage;
5467 if (uRva >= pImage->cbImageBits)
5468 {
5469 supdrvLdrUnlock(pDevExt);
5470 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5471 "Invalid entry point address %p given for %s: RVA %#zx, image size %#zx",
5472 pv, pszSymbol, uRva, pImage->cbImageBits);
5473 }
5474
5475 /* Must be in an executable segment: */
5476 for (iSeg = 0; iSeg < pImage->cSegments; iSeg++)
5477 if (uRva - pImage->paSegments[iSeg].off < (uintptr_t)pImage->paSegments[iSeg].cb)
5478 {
5479 if (pImage->paSegments[iSeg].fProt & SUPLDR_PROT_EXEC)
5480 break;
5481 supdrvLdrUnlock(pDevExt);
5482 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5483 "Bad entry point %p given for %s: not executable (seg #%u: %#RX32 LB %#RX32 prot %#x)",
5484 pv, pszSymbol, iSeg, pImage->paSegments[iSeg].off, pImage->paSegments[iSeg].cb,
5485 pImage->paSegments[iSeg].fProt);
5486 }
5487 if (iSeg >= pImage->cSegments)
5488 {
5489 supdrvLdrUnlock(pDevExt);
5490 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5491 "Bad entry point %p given for %s: no matching segment found (RVA %#zx)!",
5492 pv, pszSymbol, uRva);
5493 }
5494
5495 if (pImage->fNative)
5496 {
5497 /** @todo pass pReq along to the native code. */
5498 int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits, pszSymbol);
5499 if (RT_FAILURE(rc))
5500 {
5501 supdrvLdrUnlock(pDevExt);
5502 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5503 "Bad entry point address %p for %s: rc=%Rrc\n", pv, pszSymbol, rc);
5504 }
5505 }
5506 }
5507 return VINF_SUCCESS;
5508}
5509
5510
5511/**
5512 * Loads the image bits.
5513 *
5514 * This is the 2nd step of the loading.
5515 *
5516 * @returns IPRT status code.
5517 * @param pDevExt Device globals.
5518 * @param pSession Session data.
5519 * @param pReq The request.
5520 */
5521static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
5522{
5523 PSUPDRVLDRUSAGE pUsage;
5524 PSUPDRVLDRIMAGE pImage;
5525 PSUPDRVLDRIMAGE pImageImport;
5526 int rc;
5527 SUPDRV_CHECK_SMAP_SETUP();
5528 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithEverything=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithEverything));
5529 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5530
5531 /*
5532 * Find the ldr image.
5533 */
5534 supdrvLdrLock(pDevExt);
5535 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5536
5537 pUsage = pSession->pLdrUsage;
5538 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5539 pUsage = pUsage->pNext;
5540 if (!pUsage)
5541 {
5542 supdrvLdrUnlock(pDevExt);
5543 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image not found");
5544 }
5545 pImage = pUsage->pImage;
5546
5547 /*
5548 * Validate input.
5549 */
5550 if ( pImage->cbImageWithEverything != pReq->u.In.cbImageWithEverything
5551 || pImage->cbImageBits != pReq->u.In.cbImageBits)
5552 {
5553 supdrvLdrUnlock(pDevExt);
5554 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %u(prep) != %u(load) or %u != %u",
5555 pImage->cbImageWithEverything, pReq->u.In.cbImageWithEverything, pImage->cbImageBits, pReq->u.In.cbImageBits);
5556 }
5557
5558 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
5559 {
5560 unsigned uState = pImage->uState;
5561 supdrvLdrUnlock(pDevExt);
5562 if (uState != SUP_IOCTL_LDR_LOAD)
5563 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
5564 pReq->u.Out.uErrorMagic = 0;
5565 return VERR_ALREADY_LOADED;
5566 }
5567
5568 /* If the loader interface is locked down, don't load new images */
5569 if (pDevExt->fLdrLockedDown)
5570 {
5571 supdrvLdrUnlock(pDevExt);
5572 return supdrvLdrLoadError(VERR_PERMISSION_DENIED, pReq, "Loader is locked down");
5573 }
5574
5575 /*
5576 * If the new image is a dependant of VMMR0.r0, resolve it via the
5577 * caller's usage list and make sure it's in ready state.
5578 */
5579 pImageImport = NULL;
5580 if (pReq->u.In.fFlags & SUPLDRLOAD_F_DEP_VMMR0)
5581 {
5582 PSUPDRVLDRUSAGE pUsageDependency = pSession->pLdrUsage;
5583 while (pUsageDependency && pUsageDependency->pImage->pvImage != pDevExt->pvVMMR0)
5584 pUsageDependency = pUsageDependency->pNext;
5585 if (!pUsageDependency || !pDevExt->pvVMMR0)
5586 {
5587 supdrvLdrUnlock(pDevExt);
5588 return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 not loaded by session");
5589 }
5590 pImageImport = pUsageDependency->pImage;
5591 if (pImageImport->uState != SUP_IOCTL_LDR_LOAD)
5592 {
5593 supdrvLdrUnlock(pDevExt);
5594 return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 is not ready (state %#x)", pImageImport->uState);
5595 }
5596 }
5597
5598 /*
5599 * Copy the segments before we start using supdrvLdrValidatePointer for entrypoint validation.
5600 */
5601 pImage->cSegments = pReq->u.In.cSegments;
5602 {
5603 size_t cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
5604 uint8_t const * const pbSrcImage = pReq->u.In.abImage;
5605 pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pbSrcImage[pReq->u.In.offSegments], cbSegments);
5606 if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
5607 pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
5608 else
5609 {
5610 supdrvLdrUnlock(pDevExt);
5611 return supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
5612 }
5613 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5614 }
5615
5616 /*
5617 * Validate entrypoints.
5618 */
5619 switch (pReq->u.In.eEPType)
5620 {
5621 case SUPLDRLOADEP_NOTHING:
5622 break;
5623
5624 case SUPLDRLOADEP_VMMR0:
5625 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, pReq->u.In.abImage, "VMMR0EntryFast", pReq);
5626 if (RT_FAILURE(rc))
5627 return rc;
5628 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, pReq->u.In.abImage, "VMMR0EntryEx", pReq);
5629 if (RT_FAILURE(rc))
5630 return rc;
5631
5632 /* Fail here if there is already a VMMR0 module. */
5633 if (pDevExt->pvVMMR0 != NULL)
5634 {
5635 supdrvLdrUnlock(pDevExt);
5636 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "There is already a VMMR0 module loaded (%p)", pDevExt->pvVMMR0);
5637 }
5638 break;
5639
5640 case SUPLDRLOADEP_SERVICE:
5641 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, pReq->u.In.abImage, "pfnServiceReq", pReq);
5642 if (RT_FAILURE(rc))
5643 return rc;
5644 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
5645 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
5646 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
5647 {
5648 supdrvLdrUnlock(pDevExt);
5649 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "apvReserved={%p,%p,%p} MBZ!",
5650 pReq->u.In.EP.Service.apvReserved[0], pReq->u.In.EP.Service.apvReserved[1],
5651 pReq->u.In.EP.Service.apvReserved[2]);
5652 }
5653 break;
5654
5655 default:
5656 supdrvLdrUnlock(pDevExt);
5657 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid eEPType=%d", pReq->u.In.eEPType);
5658 }
5659
5660 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, pReq->u.In.abImage, "ModuleInit", pReq);
5661 if (RT_FAILURE(rc))
5662 return rc;
5663 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, pReq->u.In.abImage, "ModuleTerm", pReq);
5664 if (RT_FAILURE(rc))
5665 return rc;
5666 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5667
5668 /*
5669 * Allocate and copy the tables if non-native.
5670 * (No need to do try/except as this is a buffered request.)
5671 */
5672 if (!pImage->fNative)
5673 {
5674 uint8_t const * const pbSrcImage = pReq->u.In.abImage;
5675 pImage->cbStrTab = pReq->u.In.cbStrTab;
5676 if (pImage->cbStrTab)
5677 {
5678 pImage->pachStrTab = (char *)RTMemDup(&pbSrcImage[pReq->u.In.offStrTab], pImage->cbStrTab);
5679 if (!pImage->pachStrTab)
5680 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for string table: %#x", pImage->cbStrTab);
5681 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5682 }
5683
5684 pImage->cSymbols = pReq->u.In.cSymbols;
5685 if (RT_SUCCESS(rc) && pImage->cSymbols)
5686 {
5687 size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
5688 pImage->paSymbols = (PSUPLDRSYM)RTMemDup(&pbSrcImage[pReq->u.In.offSymbols], cbSymbols);
5689 if (!pImage->paSymbols)
5690 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
5691 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5692 }
5693 }
5694
5695 /*
5696 * Copy the bits and apply permissions / complete native loading.
5697 */
5698 if (RT_SUCCESS(rc))
5699 {
5700 pImage->uState = SUP_IOCTL_LDR_LOAD;
5701 pImage->pfnModuleInit = (PFNR0MODULEINIT)(uintptr_t)pReq->u.In.pfnModuleInit;
5702 pImage->pfnModuleTerm = (PFNR0MODULETERM)(uintptr_t)pReq->u.In.pfnModuleTerm;
5703
5704 if (pImage->fNative)
5705 rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.abImage, pReq);
5706 else
5707 {
5708 uint32_t i;
5709 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5710
5711 for (i = 0; i < pImage->cSegments; i++)
5712 {
5713 rc = RTR0MemObjProtect(pImage->hMemObjImage, pImage->paSegments[i].off, pImage->paSegments[i].cb,
5714 pImage->paSegments[i].fProt);
5715 if (RT_SUCCESS(rc))
5716 continue;
5717 if (rc == VERR_NOT_SUPPORTED)
5718 rc = VINF_SUCCESS;
5719 else
5720 rc = supdrvLdrLoadError(rc, pReq, "RTR0MemObjProtect failed on seg#%u %#RX32 LB %#RX32 fProt=%#x",
5721 i, pImage->paSegments[i].off, pImage->paSegments[i].cb, pImage->paSegments[i].fProt);
5722 break;
5723 }
5724 Log(("vboxdrv: Loaded '%s' at %p\n", pImage->szName, pImage->pvImage));
5725 }
5726 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5727 }
5728
5729 /*
5730 * On success call the module initialization.
5731 */
5732 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
5733 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
5734 {
5735 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
5736 pDevExt->pLdrInitImage = pImage;
5737 pDevExt->hLdrInitThread = RTThreadNativeSelf();
5738 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5739 rc = pImage->pfnModuleInit(pImage);
5740 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5741 pDevExt->pLdrInitImage = NULL;
5742 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
5743 if (RT_FAILURE(rc))
5744 supdrvLdrLoadError(rc, pReq, "ModuleInit failed: %Rrc", rc);
5745 }
5746 if (RT_SUCCESS(rc))
5747 {
5748 /*
5749 * Publish any standard entry points.
5750 */
5751 switch (pReq->u.In.eEPType)
5752 {
5753 case SUPLDRLOADEP_VMMR0:
5754 Assert(!pDevExt->pvVMMR0);
5755 Assert(!pDevExt->pfnVMMR0EntryFast);
5756 Assert(!pDevExt->pfnVMMR0EntryEx);
5757 ASMAtomicWritePtrVoid(&pDevExt->pvVMMR0, pImage->pvImage);
5758 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryFast,
5759 (void *)(uintptr_t) pReq->u.In.EP.VMMR0.pvVMMR0EntryFast);
5760 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryEx,
5761 (void *)(uintptr_t) pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
5762 break;
5763 case SUPLDRLOADEP_SERVICE:
5764 pImage->pfnServiceReqHandler = (PFNSUPR0SERVICEREQHANDLER)(uintptr_t)pReq->u.In.EP.Service.pfnServiceReq;
5765 break;
5766 default:
5767 break;
5768 }
5769
5770 /*
5771 * Increase the usage counter of any imported image.
5772 */
5773 if (pImageImport)
5774 {
5775 pImageImport->cImgUsage++;
5776 if (pImageImport->cImgUsage == 2 && pImageImport->pWrappedModInfo)
5777 supdrvOSLdrRetainWrapperModule(pDevExt, pImageImport);
5778 pImage->pImageImport = pImageImport;
5779 }
5780
5781 /*
5782 * Done!
5783 */
5784 SUPR0Printf("vboxdrv: %RKv %s\n", pImage->pvImage, pImage->szName);
5785 pReq->u.Out.uErrorMagic = 0;
5786 pReq->u.Out.szError[0] = '\0';
5787 }
5788 else
5789 {
5790 /* Inform the tracing component in case ModuleInit registered TPs. */
5791 supdrvTracerModuleUnloading(pDevExt, pImage);
5792
5793 pImage->uState = SUP_IOCTL_LDR_OPEN;
5794 pImage->pfnModuleInit = NULL;
5795 pImage->pfnModuleTerm = NULL;
5796 pImage->pfnServiceReqHandler= NULL;
5797 pImage->cbStrTab = 0;
5798 RTMemFree(pImage->pachStrTab);
5799 pImage->pachStrTab = NULL;
5800 RTMemFree(pImage->paSymbols);
5801 pImage->paSymbols = NULL;
5802 pImage->cSymbols = 0;
5803 }
5804
5805 supdrvLdrUnlock(pDevExt);
5806 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5807 return rc;
5808}
5809
5810
5811/**
5812 * Registers a .r0 module wrapped in a native one and manually loaded.
5813 *
5814 * @returns VINF_SUCCESS or error code (no info statuses).
5815 * @param pDevExt Device globals.
5816 * @param pWrappedModInfo The wrapped module info.
5817 * @param pvNative OS specific information.
5818 * @param phMod Where to store the module handle.
5819 */
5820int VBOXCALL supdrvLdrRegisterWrappedModule(PSUPDRVDEVEXT pDevExt, PCSUPLDRWRAPPEDMODULE pWrappedModInfo,
5821 void *pvNative, void **phMod)
5822{
5823 size_t cchName;
5824 PSUPDRVLDRIMAGE pImage;
5825 PCSUPLDRWRAPMODSYMBOL paSymbols;
5826 uint16_t idx;
5827 const char *pszPrevSymbol;
5828 int rc;
5829 SUPDRV_CHECK_SMAP_SETUP();
5830 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5831
5832 /*
5833 * Validate input.
5834 */
5835 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
5836 *phMod = NULL;
5837 AssertPtrReturn(pDevExt, VERR_INTERNAL_ERROR_2);
5838
5839 AssertPtrReturn(pWrappedModInfo, VERR_INVALID_POINTER);
5840 AssertMsgReturn(pWrappedModInfo->uMagic == SUPLDRWRAPPEDMODULE_MAGIC,
5841 ("uMagic=%#x, expected %#x\n", pWrappedModInfo->uMagic, SUPLDRWRAPPEDMODULE_MAGIC),
5842 VERR_INVALID_MAGIC);
5843 AssertMsgReturn(pWrappedModInfo->uVersion == SUPLDRWRAPPEDMODULE_VERSION,
5844 ("Unsupported uVersion=%#x, current version %#x\n", pWrappedModInfo->uVersion, SUPLDRWRAPPEDMODULE_VERSION),
5845 VERR_VERSION_MISMATCH);
5846 AssertMsgReturn(pWrappedModInfo->uEndMagic == SUPLDRWRAPPEDMODULE_MAGIC,
5847 ("uEndMagic=%#x, expected %#x\n", pWrappedModInfo->uEndMagic, SUPLDRWRAPPEDMODULE_MAGIC),
5848 VERR_INVALID_MAGIC);
5849 AssertMsgReturn(pWrappedModInfo->fFlags <= SUPLDRWRAPPEDMODULE_F_VMMR0, ("Unknown flags in: %#x\n", pWrappedModInfo->fFlags),
5850 VERR_INVALID_FLAGS);
5851
5852 /* szName: */
5853 AssertReturn(RTStrEnd(pWrappedModInfo->szName, sizeof(pWrappedModInfo->szName)) != NULL, VERR_INVALID_NAME);
5854 AssertReturn(supdrvIsLdrModuleNameValid(pWrappedModInfo->szName), VERR_INVALID_NAME);
5855 AssertCompile(sizeof(pImage->szName) == sizeof(pWrappedModInfo->szName));
5856 cchName = strlen(pWrappedModInfo->szName);
5857
5858 /* Image range: */
5859 AssertPtrReturn(pWrappedModInfo->pvImageStart, VERR_INVALID_POINTER);
5860 AssertPtrReturn(pWrappedModInfo->pvImageEnd, VERR_INVALID_POINTER);
5861 AssertReturn((uintptr_t)pWrappedModInfo->pvImageEnd > (uintptr_t)pWrappedModInfo->pvImageStart, VERR_INVALID_PARAMETER);
5862
5863 /* Symbol table: */
5864 AssertMsgReturn(pWrappedModInfo->cSymbols <= _8K, ("Too many symbols: %u, max 8192\n", pWrappedModInfo->cSymbols),
5865 VERR_TOO_MANY_SYMLINKS);
5866 pszPrevSymbol = "\x7f";
5867 paSymbols = pWrappedModInfo->paSymbols;
5868 idx = pWrappedModInfo->cSymbols;
5869 while (idx-- > 0)
5870 {
5871 const char *pszSymbol = paSymbols[idx].pszSymbol;
5872 AssertMsgReturn(RT_VALID_PTR(pszSymbol) && RT_VALID_PTR(paSymbols[idx].pfnValue),
5873 ("paSymbols[%u]: %p/%p\n", idx, pszSymbol, paSymbols[idx].pfnValue),
5874 VERR_INVALID_POINTER);
5875 AssertReturn(*pszSymbol != '\0', VERR_EMPTY_STRING);
5876 AssertMsgReturn(strcmp(pszSymbol, pszPrevSymbol) < 0,
5877 ("symbol table out of order at index %u: '%s' vs '%s'\n", idx, pszSymbol, pszPrevSymbol),
5878 VERR_WRONG_ORDER);
5879 pszPrevSymbol = pszSymbol;
5880 }
5881
5882 /* Standard entry points: */
5883 AssertPtrNullReturn(pWrappedModInfo->pfnModuleInit, VERR_INVALID_POINTER);
5884 AssertPtrNullReturn(pWrappedModInfo->pfnModuleTerm, VERR_INVALID_POINTER);
5885 AssertReturn((uintptr_t)pWrappedModInfo->pfnModuleInit != (uintptr_t)pWrappedModInfo->pfnModuleTerm || pWrappedModInfo->pfnModuleInit == NULL,
5886 VERR_INVALID_PARAMETER);
5887 if (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0)
5888 {
5889 AssertReturn(pWrappedModInfo->pfnServiceReqHandler == NULL, VERR_INVALID_PARAMETER);
5890 AssertPtrReturn(pWrappedModInfo->pfnVMMR0EntryFast, VERR_INVALID_POINTER);
5891 AssertPtrReturn(pWrappedModInfo->pfnVMMR0EntryEx, VERR_INVALID_POINTER);
5892 AssertReturn(pWrappedModInfo->pfnVMMR0EntryFast != pWrappedModInfo->pfnVMMR0EntryEx, VERR_INVALID_PARAMETER);
5893 }
5894 else
5895 {
5896 AssertPtrNullReturn(pWrappedModInfo->pfnServiceReqHandler, VERR_INVALID_POINTER);
5897 AssertReturn(pWrappedModInfo->pfnVMMR0EntryFast == NULL, VERR_INVALID_PARAMETER);
5898 AssertReturn(pWrappedModInfo->pfnVMMR0EntryEx == NULL, VERR_INVALID_PARAMETER);
5899 }
5900
5901 /*
5902 * Check if we got an instance of the image already.
5903 */
5904 supdrvLdrLock(pDevExt);
5905 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5906 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5907 {
5908 if ( pImage->szName[cchName] == '\0'
5909 && !memcmp(pImage->szName, pWrappedModInfo->szName, cchName))
5910 {
5911 supdrvLdrUnlock(pDevExt);
5912 Log(("supdrvLdrRegisterWrappedModule: '%s' already loaded!\n", pWrappedModInfo->szName));
5913 return VERR_ALREADY_LOADED;
5914 }
5915 }
5916 /* (not found - add it!) */
5917
5918 /* If the loader interface is locked down, make userland fail early */
5919 if (pDevExt->fLdrLockedDown)
5920 {
5921 supdrvLdrUnlock(pDevExt);
5922 Log(("supdrvLdrRegisterWrappedModule: Not adding '%s' to image list, loader interface is locked down!\n", pWrappedModInfo->szName));
5923 return VERR_PERMISSION_DENIED;
5924 }
5925
5926 /* Only one VMMR0: */
5927 if ( pDevExt->pvVMMR0 != NULL
5928 && (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0))
5929 {
5930 supdrvLdrUnlock(pDevExt);
5931 Log(("supdrvLdrRegisterWrappedModule: Rejecting '%s' as we already got a VMMR0 module!\n", pWrappedModInfo->szName));
5932 return VERR_ALREADY_EXISTS;
5933 }
5934
5935 /*
5936 * Allocate memory.
5937 */
5938 Assert(cchName < sizeof(pImage->szName));
5939 pImage = (PSUPDRVLDRIMAGE)RTMemAllocZ(sizeof(SUPDRVLDRIMAGE));
5940 if (!pImage)
5941 {
5942 supdrvLdrUnlock(pDevExt);
5943 Log(("supdrvLdrRegisterWrappedModule: RTMemAllocZ() failed\n"));
5944 return VERR_NO_MEMORY;
5945 }
5946 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5947
5948 /*
5949 * Setup and link in the LDR stuff.
5950 */
5951 pImage->pvImage = (void *)pWrappedModInfo->pvImageStart;
5952 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
5953 pImage->cbImageWithEverything
5954 = pImage->cbImageBits = (uintptr_t)pWrappedModInfo->pvImageEnd - (uintptr_t)pWrappedModInfo->pvImageStart;
5955 pImage->cSymbols = 0;
5956 pImage->paSymbols = NULL;
5957 pImage->pachStrTab = NULL;
5958 pImage->cbStrTab = 0;
5959 pImage->cSegments = 0;
5960 pImage->paSegments = NULL;
5961 pImage->pfnModuleInit = pWrappedModInfo->pfnModuleInit;
5962 pImage->pfnModuleTerm = pWrappedModInfo->pfnModuleTerm;
5963 pImage->pfnServiceReqHandler = NULL; /* Only setting this after module init */
5964 pImage->uState = SUP_IOCTL_LDR_LOAD;
5965 pImage->cImgUsage = 1; /* Held by the wrapper module till unload. */
5966 pImage->pDevExt = pDevExt;
5967 pImage->pImageImport = NULL;
5968 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC;
5969 pImage->pWrappedModInfo = pWrappedModInfo;
5970 pImage->pvWrappedNative = pvNative;
5971 pImage->fNative = true;
5972 memcpy(pImage->szName, pWrappedModInfo->szName, cchName + 1);
5973
5974 /*
5975 * Link it.
5976 */
5977 pImage->pNext = pDevExt->pLdrImages;
5978 pDevExt->pLdrImages = pImage;
5979
5980 /*
5981 * Call module init function if found.
5982 */
5983 rc = VINF_SUCCESS;
5984 if (pImage->pfnModuleInit)
5985 {
5986 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
5987 pDevExt->pLdrInitImage = pImage;
5988 pDevExt->hLdrInitThread = RTThreadNativeSelf();
5989 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5990 rc = pImage->pfnModuleInit(pImage);
5991 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5992 pDevExt->pLdrInitImage = NULL;
5993 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
5994 }
5995 if (RT_SUCCESS(rc))
5996 {
5997 /*
5998 * Update entry points.
5999 */
6000 if (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0)
6001 {
6002 Assert(!pDevExt->pvVMMR0);
6003 Assert(!pDevExt->pfnVMMR0EntryFast);
6004 Assert(!pDevExt->pfnVMMR0EntryEx);
6005 ASMAtomicWritePtrVoid(&pDevExt->pvVMMR0, pImage->pvImage);
6006 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryFast,
6007 (void *)(uintptr_t) pWrappedModInfo->pfnVMMR0EntryFast);
6008 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryEx,
6009 (void *)(uintptr_t) pWrappedModInfo->pfnVMMR0EntryEx);
6010 }
6011 else
6012 pImage->pfnServiceReqHandler = pWrappedModInfo->pfnServiceReqHandler;
6013#ifdef IN_RING3
6014# error "WTF?"
6015#endif
6016 *phMod = pImage;
6017 }
6018 else
6019 {
6020 /*
6021 * Module init failed - bail, no module term callout.
6022 */
6023 SUPR0Printf("ModuleInit failed for '%s': %Rrc\n", pImage->szName, rc);
6024
6025 pImage->pfnModuleTerm = NULL;
6026 pImage->uState = SUP_IOCTL_LDR_OPEN;
6027 supdrvLdrFree(pDevExt, pImage);
6028 }
6029
6030 supdrvLdrUnlock(pDevExt);
6031 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
6032 return VINF_SUCCESS;
6033}
6034
6035
6036/**
6037 * Decrements SUPDRVLDRIMAGE::cImgUsage when two or greater.
6038 *
6039 * @param pDevExt Device globals.
6040 * @param pImage The image.
6041 * @param cReference Number of references being removed.
6042 */
6043DECLINLINE(void) supdrvLdrSubtractUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, uint32_t cReference)
6044{
6045 Assert(cReference > 0);
6046 Assert(pImage->cImgUsage > cReference);
6047 pImage->cImgUsage -= cReference;
6048 if (pImage->cImgUsage == 1 && pImage->pWrappedModInfo)
6049 supdrvOSLdrReleaseWrapperModule(pDevExt, pImage);
6050}
6051
6052
6053/**
6054 * Frees a previously loaded (prep'ed) image.
6055 *
6056 * @returns IPRT status code.
6057 * @param pDevExt Device globals.
6058 * @param pSession Session data.
6059 * @param pReq The request.
6060 */
6061static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
6062{
6063 int rc;
6064 PSUPDRVLDRUSAGE pUsagePrev;
6065 PSUPDRVLDRUSAGE pUsage;
6066 PSUPDRVLDRIMAGE pImage;
6067 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
6068
6069 /*
6070 * Find the ldr image.
6071 */
6072 supdrvLdrLock(pDevExt);
6073 pUsagePrev = NULL;
6074 pUsage = pSession->pLdrUsage;
6075 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
6076 {
6077 pUsagePrev = pUsage;
6078 pUsage = pUsage->pNext;
6079 }
6080 if (!pUsage)
6081 {
6082 supdrvLdrUnlock(pDevExt);
6083 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
6084 return VERR_INVALID_HANDLE;
6085 }
6086 if (pUsage->cRing3Usage == 0)
6087 {
6088 supdrvLdrUnlock(pDevExt);
6089 Log(("SUP_IOCTL_LDR_FREE: No ring-3 reference to the image!\n"));
6090 return VERR_CALLER_NO_REFERENCE;
6091 }
6092
6093 /*
6094 * Check if we can remove anything.
6095 */
6096 rc = VINF_SUCCESS;
6097 pImage = pUsage->pImage;
6098 Log(("SUP_IOCTL_LDR_FREE: pImage=%p %s cImgUsage=%d r3=%d r0=%u\n",
6099 pImage, pImage->szName, pImage->cImgUsage, pUsage->cRing3Usage, pUsage->cRing0Usage));
6100 if (pImage->cImgUsage <= 1 || pUsage->cRing3Usage + pUsage->cRing0Usage <= 1)
6101 {
6102 /*
6103 * Check if there are any objects with destructors in the image, if
6104 * so leave it for the session cleanup routine so we get a chance to
6105 * clean things up in the right order and not leave them all dangling.
6106 */
6107 RTSpinlockAcquire(pDevExt->Spinlock);
6108 if (pImage->cImgUsage <= 1)
6109 {
6110 PSUPDRVOBJ pObj;
6111 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
6112 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
6113 {
6114 rc = VERR_DANGLING_OBJECTS;
6115 break;
6116 }
6117 }
6118 else
6119 {
6120 PSUPDRVUSAGE pGenUsage;
6121 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
6122 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
6123 {
6124 rc = VERR_DANGLING_OBJECTS;
6125 break;
6126 }
6127 }
6128 RTSpinlockRelease(pDevExt->Spinlock);
6129 if (rc == VINF_SUCCESS)
6130 {
6131 /* unlink it */
6132 if (pUsagePrev)
6133 pUsagePrev->pNext = pUsage->pNext;
6134 else
6135 pSession->pLdrUsage = pUsage->pNext;
6136
6137 /* free it */
6138 pUsage->pImage = NULL;
6139 pUsage->pNext = NULL;
6140 RTMemFree(pUsage);
6141
6142 /*
6143 * Dereference the image.
6144 */
6145 if (pImage->cImgUsage <= 1)
6146 supdrvLdrFree(pDevExt, pImage);
6147 else
6148 supdrvLdrSubtractUsage(pDevExt, pImage, 1);
6149 }
6150 else
6151 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
6152 }
6153 else
6154 {
6155 /*
6156 * Dereference both image and usage.
6157 */
6158 pUsage->cRing3Usage--;
6159 supdrvLdrSubtractUsage(pDevExt, pImage, 1);
6160 }
6161
6162 supdrvLdrUnlock(pDevExt);
6163 return rc;
6164}
6165
6166
6167/**
6168 * Deregisters a wrapped .r0 module.
6169 *
6170 * @param pDevExt Device globals.
6171 * @param pWrappedModInfo The wrapped module info.
6172 * @param phMod Where to store the module is stored (NIL'ed on
6173 * success).
6174 */
6175int VBOXCALL supdrvLdrDeregisterWrappedModule(PSUPDRVDEVEXT pDevExt, PCSUPLDRWRAPPEDMODULE pWrappedModInfo, void **phMod)
6176{
6177 PSUPDRVLDRIMAGE pImage;
6178 uint32_t cSleeps;
6179
6180 /*
6181 * Validate input.
6182 */
6183 AssertPtrReturn(pWrappedModInfo, VERR_INVALID_POINTER);
6184 AssertMsgReturn(pWrappedModInfo->uMagic == SUPLDRWRAPPEDMODULE_MAGIC,
6185 ("uMagic=%#x, expected %#x\n", pWrappedModInfo->uMagic, SUPLDRWRAPPEDMODULE_MAGIC),
6186 VERR_INVALID_MAGIC);
6187 AssertMsgReturn(pWrappedModInfo->uEndMagic == SUPLDRWRAPPEDMODULE_MAGIC,
6188 ("uEndMagic=%#x, expected %#x\n", pWrappedModInfo->uEndMagic, SUPLDRWRAPPEDMODULE_MAGIC),
6189 VERR_INVALID_MAGIC);
6190
6191 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
6192 pImage = *(PSUPDRVLDRIMAGE *)phMod;
6193 if (!pImage)
6194 return VINF_SUCCESS;
6195 AssertPtrReturn(pImage, VERR_INVALID_POINTER);
6196 AssertMsgReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, ("pImage=%p uMagic=%#x\n", pImage, pImage->uMagic),
6197 VERR_INVALID_MAGIC);
6198 AssertMsgReturn(pImage->pvImage == pWrappedModInfo->pvImageStart,
6199 ("pWrappedModInfo(%p)->pvImageStart=%p vs. pImage(=%p)->pvImage=%p\n",
6200 pWrappedModInfo, pWrappedModInfo->pvImageStart, pImage, pImage->pvImage),
6201 VERR_MISMATCH);
6202
6203 AssertPtrReturn(pDevExt, VERR_INVALID_POINTER);
6204
6205 /*
6206 * Try free it, but first we have to wait for its usage count to reach 1 (our).
6207 */
6208 supdrvLdrLock(pDevExt);
6209 for (cSleeps = 0; ; cSleeps++)
6210 {
6211 PSUPDRVLDRIMAGE pCur;
6212
6213 /* Check that the image is in the list. */
6214 for (pCur = pDevExt->pLdrImages; pCur; pCur = pCur->pNext)
6215 if (pCur == pImage)
6216 break;
6217 AssertBreak(pCur == pImage);
6218
6219 /* Anyone still using it? */
6220 if (pImage->cImgUsage <= 1)
6221 break;
6222
6223 /* Someone is using it, wait and check again. */
6224 if (!(cSleeps % 60))
6225 SUPR0Printf("supdrvLdrUnregisterWrappedModule: Still %u users of wrapped image '%s' ...\n",
6226 pImage->cImgUsage, pImage->szName);
6227 supdrvLdrUnlock(pDevExt);
6228 RTThreadSleep(1000);
6229 supdrvLdrLock(pDevExt);
6230 }
6231
6232 /* We're the last 'user', free it. */
6233 supdrvLdrFree(pDevExt, pImage);
6234
6235 supdrvLdrUnlock(pDevExt);
6236
6237 *phMod = NULL;
6238 return VINF_SUCCESS;
6239}
6240
6241
6242/**
6243 * Lock down the image loader interface.
6244 *
6245 * @returns IPRT status code.
6246 * @param pDevExt Device globals.
6247 */
6248static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt)
6249{
6250 LogFlow(("supdrvIOCtl_LdrLockDown:\n"));
6251
6252 supdrvLdrLock(pDevExt);
6253 if (!pDevExt->fLdrLockedDown)
6254 {
6255 pDevExt->fLdrLockedDown = true;
6256 Log(("supdrvIOCtl_LdrLockDown: Image loader interface locked down\n"));
6257 }
6258 supdrvLdrUnlock(pDevExt);
6259
6260 return VINF_SUCCESS;
6261}
6262
6263
6264/**
6265 * Worker for getting the address of a symbol in an image.
6266 *
6267 * @returns IPRT status code.
6268 * @param pDevExt Device globals.
6269 * @param pImage The image to search.
6270 * @param pszSymbol The symbol name.
6271 * @param cchSymbol The length of the symbol name.
6272 * @param ppvValue Where to return the symbol
6273 * @note Caller owns the loader lock.
6274 */
6275static int supdrvLdrQuerySymbolWorker(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage,
6276 const char *pszSymbol, size_t cchSymbol, void **ppvValue)
6277{
6278 int rc = VERR_SYMBOL_NOT_FOUND;
6279 if (pImage->fNative && !pImage->pWrappedModInfo)
6280 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pszSymbol, cchSymbol, ppvValue);
6281 else if (pImage->fNative && pImage->pWrappedModInfo)
6282 {
6283 PCSUPLDRWRAPMODSYMBOL paSymbols = pImage->pWrappedModInfo->paSymbols;
6284 uint32_t iEnd = pImage->pWrappedModInfo->cSymbols;
6285 uint32_t iStart = 0;
6286 while (iStart < iEnd)
6287 {
6288 uint32_t const i = iStart + (iEnd - iStart) / 2;
6289 int const iDiff = strcmp(paSymbols[i].pszSymbol, pszSymbol);
6290 if (iDiff < 0)
6291 iStart = i + 1;
6292 else if (iDiff > 0)
6293 iEnd = i;
6294 else
6295 {
6296 *ppvValue = (void *)(uintptr_t)paSymbols[i].pfnValue;
6297 rc = VINF_SUCCESS;
6298 break;
6299 }
6300 }
6301#ifdef VBOX_STRICT
6302 if (rc != VINF_SUCCESS)
6303 for (iStart = 0, iEnd = pImage->pWrappedModInfo->cSymbols; iStart < iEnd; iStart++)
6304 Assert(strcmp(paSymbols[iStart].pszSymbol, pszSymbol));
6305#endif
6306 }
6307 else
6308 {
6309 const char *pchStrings = pImage->pachStrTab;
6310 PSUPLDRSYM paSyms = pImage->paSymbols;
6311 uint32_t i;
6312 Assert(!pImage->pWrappedModInfo);
6313 for (i = 0; i < pImage->cSymbols; i++)
6314 {
6315 if ( paSyms[i].offName + cchSymbol + 1 <= pImage->cbStrTab
6316 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cchSymbol + 1))
6317 {
6318 /*
6319 * Note! The int32_t is for native loading on solaris where the data
6320 * and text segments are in very different places.
6321 */
6322 *ppvValue = (uint8_t *)pImage->pvImage + (int32_t)paSyms[i].offSymbol;
6323 rc = VINF_SUCCESS;
6324 break;
6325 }
6326 }
6327 }
6328 return rc;
6329}
6330
6331
6332/**
6333 * Queries the address of a symbol in an open image.
6334 *
6335 * @returns IPRT status code.
6336 * @param pDevExt Device globals.
6337 * @param pSession Session data.
6338 * @param pReq The request buffer.
6339 */
6340static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
6341{
6342 PSUPDRVLDRIMAGE pImage;
6343 PSUPDRVLDRUSAGE pUsage;
6344 const size_t cchSymbol = strlen(pReq->u.In.szSymbol);
6345 void *pvSymbol = NULL;
6346 int rc;
6347 Log3(("supdrvIOCtl_LdrQuerySymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
6348
6349 /*
6350 * Find the ldr image.
6351 */
6352 supdrvLdrLock(pDevExt);
6353
6354 pUsage = pSession->pLdrUsage;
6355 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
6356 pUsage = pUsage->pNext;
6357 if (pUsage)
6358 {
6359 pImage = pUsage->pImage;
6360 if (pImage->uState == SUP_IOCTL_LDR_LOAD)
6361 {
6362 /*
6363 * Search the image exports / symbol strings.
6364 */
6365 rc = supdrvLdrQuerySymbolWorker(pDevExt, pImage, pReq->u.In.szSymbol, cchSymbol, &pvSymbol);
6366 }
6367 else
6368 {
6369 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", pImage->uState, pImage->uState));
6370 rc = VERR_WRONG_ORDER;
6371 }
6372 }
6373 else
6374 {
6375 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
6376 rc = VERR_INVALID_HANDLE;
6377 }
6378
6379 supdrvLdrUnlock(pDevExt);
6380
6381 pReq->u.Out.pvSymbol = pvSymbol;
6382 return rc;
6383}
6384
6385
6386/**
6387 * Gets the address of a symbol in an open image or the support driver.
6388 *
6389 * @returns VBox status code.
6390 * @param pDevExt Device globals.
6391 * @param pSession Session data.
6392 * @param pReq The request buffer.
6393 */
6394static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
6395{
6396 const char *pszSymbol = pReq->u.In.pszSymbol;
6397 const char *pszModule = pReq->u.In.pszModule;
6398 size_t cchSymbol;
6399 char const *pszEnd;
6400 uint32_t i;
6401 int rc;
6402
6403 /*
6404 * Input validation.
6405 */
6406 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
6407 pszEnd = RTStrEnd(pszSymbol, 512);
6408 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
6409 cchSymbol = pszEnd - pszSymbol;
6410
6411 if (pszModule)
6412 {
6413 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
6414 pszEnd = RTStrEnd(pszModule, 64);
6415 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
6416 }
6417 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
6418
6419 if ( !pszModule
6420 || !strcmp(pszModule, "SupDrv"))
6421 {
6422 /*
6423 * Search the support driver export table.
6424 */
6425 rc = VERR_SYMBOL_NOT_FOUND;
6426 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
6427 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
6428 {
6429 pReq->u.Out.pfnSymbol = (PFNRT)(uintptr_t)g_aFunctions[i].pfn;
6430 rc = VINF_SUCCESS;
6431 break;
6432 }
6433 }
6434 else
6435 {
6436 /*
6437 * Find the loader image.
6438 */
6439 PSUPDRVLDRIMAGE pImage;
6440
6441 supdrvLdrLock(pDevExt);
6442
6443 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
6444 if (!strcmp(pImage->szName, pszModule))
6445 break;
6446 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
6447 {
6448 /*
6449 * Search the image exports / symbol strings. Do usage counting on the session.
6450 */
6451 rc = supdrvLdrQuerySymbolWorker(pDevExt, pImage, pszSymbol, cchSymbol, (void **)&pReq->u.Out.pfnSymbol);
6452 if (RT_SUCCESS(rc))
6453 rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
6454 }
6455 else
6456 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
6457
6458 supdrvLdrUnlock(pDevExt);
6459 }
6460 return rc;
6461}
6462
6463
6464/**
6465 * Looks up a symbol in g_aFunctions
6466 *
6467 * @returns VINF_SUCCESS on success, VERR_SYMBOL_NOT_FOUND on failure.
6468 * @param pszSymbol The symbol to look up.
6469 * @param puValue Where to return the value.
6470 */
6471int VBOXCALL supdrvLdrGetExportedSymbol(const char *pszSymbol, uintptr_t *puValue)
6472{
6473 uint32_t i;
6474 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
6475 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
6476 {
6477 *puValue = (uintptr_t)g_aFunctions[i].pfn;
6478 return VINF_SUCCESS;
6479 }
6480
6481 if (!strcmp(pszSymbol, "g_SUPGlobalInfoPage"))
6482 {
6483 *puValue = (uintptr_t)g_pSUPGlobalInfoPage;
6484 return VINF_SUCCESS;
6485 }
6486
6487 return VERR_SYMBOL_NOT_FOUND;
6488}
6489
6490
6491/**
6492 * Adds a usage reference in the specified session of an image.
6493 *
6494 * Called while owning the loader semaphore.
6495 *
6496 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
6497 * @param pDevExt Pointer to device extension.
6498 * @param pSession Session in question.
6499 * @param pImage Image which the session is using.
6500 * @param fRing3Usage Set if it's ring-3 usage, clear if ring-0.
6501 */
6502static int supdrvLdrAddUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage)
6503{
6504 PSUPDRVLDRUSAGE pUsage;
6505 LogFlow(("supdrvLdrAddUsage: pImage=%p %d\n", pImage, fRing3Usage));
6506
6507 /*
6508 * Referenced it already?
6509 */
6510 pUsage = pSession->pLdrUsage;
6511 while (pUsage)
6512 {
6513 if (pUsage->pImage == pImage)
6514 {
6515 if (fRing3Usage)
6516 pUsage->cRing3Usage++;
6517 else
6518 pUsage->cRing0Usage++;
6519 Assert(pImage->cImgUsage > 1 || !pImage->pWrappedModInfo);
6520 pImage->cImgUsage++;
6521 return VINF_SUCCESS;
6522 }
6523 pUsage = pUsage->pNext;
6524 }
6525
6526 /*
6527 * Allocate new usage record.
6528 */
6529 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
6530 AssertReturn(pUsage, VERR_NO_MEMORY);
6531 pUsage->cRing3Usage = fRing3Usage ? 1 : 0;
6532 pUsage->cRing0Usage = fRing3Usage ? 0 : 1;
6533 pUsage->pImage = pImage;
6534 pUsage->pNext = pSession->pLdrUsage;
6535 pSession->pLdrUsage = pUsage;
6536
6537 /*
6538 * Wrapped modules needs to retain a native module reference.
6539 */
6540 pImage->cImgUsage++;
6541 if (pImage->cImgUsage == 2 && pImage->pWrappedModInfo)
6542 supdrvOSLdrRetainWrapperModule(pDevExt, pImage);
6543
6544 return VINF_SUCCESS;
6545}
6546
6547
6548/**
6549 * Frees a load image.
6550 *
6551 * @param pDevExt Pointer to device extension.
6552 * @param pImage Pointer to the image we're gonna free.
6553 * This image must exit!
6554 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
6555 */
6556static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
6557{
6558 unsigned cLoops;
6559 for (cLoops = 0; ; cLoops++)
6560 {
6561 PSUPDRVLDRIMAGE pImagePrev;
6562 PSUPDRVLDRIMAGE pImageImport;
6563 LogFlow(("supdrvLdrFree: pImage=%p %s [loop %u]\n", pImage, pImage->szName, cLoops));
6564 AssertBreak(cLoops < 2);
6565
6566 /*
6567 * Warn if we're releasing images while the image loader interface is
6568 * locked down -- we won't be able to reload them!
6569 */
6570 if (pDevExt->fLdrLockedDown)
6571 Log(("supdrvLdrFree: Warning: unloading '%s' image, while loader interface is locked down!\n", pImage->szName));
6572
6573 /* find it - arg. should've used doubly linked list. */
6574 Assert(pDevExt->pLdrImages);
6575 pImagePrev = NULL;
6576 if (pDevExt->pLdrImages != pImage)
6577 {
6578 pImagePrev = pDevExt->pLdrImages;
6579 while (pImagePrev->pNext != pImage)
6580 pImagePrev = pImagePrev->pNext;
6581 Assert(pImagePrev->pNext == pImage);
6582 }
6583
6584 /* unlink */
6585 if (pImagePrev)
6586 pImagePrev->pNext = pImage->pNext;
6587 else
6588 pDevExt->pLdrImages = pImage->pNext;
6589
6590 /* check if this is VMMR0.r0 unset its entry point pointers. */
6591 if (pDevExt->pvVMMR0 == pImage->pvImage)
6592 {
6593 pDevExt->pvVMMR0 = NULL;
6594 pDevExt->pfnVMMR0EntryFast = NULL;
6595 pDevExt->pfnVMMR0EntryEx = NULL;
6596 }
6597
6598 /* check for objects with destructors in this image. (Shouldn't happen.) */
6599 if (pDevExt->pObjs)
6600 {
6601 unsigned cObjs = 0;
6602 PSUPDRVOBJ pObj;
6603 RTSpinlockAcquire(pDevExt->Spinlock);
6604 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
6605 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
6606 {
6607 pObj->pfnDestructor = NULL;
6608 cObjs++;
6609 }
6610 RTSpinlockRelease(pDevExt->Spinlock);
6611 if (cObjs)
6612 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
6613 }
6614
6615 /* call termination function if fully loaded. */
6616 if ( pImage->pfnModuleTerm
6617 && pImage->uState == SUP_IOCTL_LDR_LOAD)
6618 {
6619 LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
6620 pDevExt->hLdrTermThread = RTThreadNativeSelf();
6621 pImage->pfnModuleTerm(pImage);
6622 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
6623 }
6624
6625 /* Inform the tracing component. */
6626 supdrvTracerModuleUnloading(pDevExt, pImage);
6627
6628 /* Do native unload if appropriate, then inform the native code about the
6629 unloading (mainly for non-native loading case). */
6630 if (pImage->fNative)
6631 supdrvOSLdrUnload(pDevExt, pImage);
6632 supdrvOSLdrNotifyUnloaded(pDevExt, pImage);
6633
6634 /* free the image */
6635 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
6636 pImage->cImgUsage = 0;
6637 pImage->pDevExt = NULL;
6638 pImage->pNext = NULL;
6639 pImage->uState = SUP_IOCTL_LDR_FREE;
6640 RTR0MemObjFree(pImage->hMemObjImage, true /*fMappings*/);
6641 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
6642 pImage->pvImage = NULL;
6643 RTMemFree(pImage->pachStrTab);
6644 pImage->pachStrTab = NULL;
6645 RTMemFree(pImage->paSymbols);
6646 pImage->paSymbols = NULL;
6647 RTMemFree(pImage->paSegments);
6648 pImage->paSegments = NULL;
6649
6650 pImageImport = pImage->pImageImport;
6651 pImage->pImageImport = NULL;
6652
6653 RTMemFree(pImage);
6654
6655 /*
6656 * Deal with any import image.
6657 */
6658 if (!pImageImport)
6659 break;
6660 if (pImageImport->cImgUsage > 1)
6661 {
6662 supdrvLdrSubtractUsage(pDevExt, pImageImport, 1);
6663 break;
6664 }
6665 pImage = pImageImport;
6666 }
6667}
6668
6669
6670/**
6671 * Acquires the loader lock.
6672 *
6673 * @returns IPRT status code.
6674 * @param pDevExt The device extension.
6675 * @note Not recursive on all platforms yet.
6676 */
6677DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt)
6678{
6679#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6680 int rc = RTSemMutexRequest(pDevExt->mtxLdr, RT_INDEFINITE_WAIT);
6681#else
6682 int rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
6683#endif
6684 AssertRC(rc);
6685 return rc;
6686}
6687
6688
6689/**
6690 * Releases the loader lock.
6691 *
6692 * @returns IPRT status code.
6693 * @param pDevExt The device extension.
6694 */
6695DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt)
6696{
6697#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6698 return RTSemMutexRelease(pDevExt->mtxLdr);
6699#else
6700 return RTSemFastMutexRelease(pDevExt->mtxLdr);
6701#endif
6702}
6703
6704
6705/**
6706 * Acquires the global loader lock.
6707 *
6708 * This can be useful when accessing structures being modified by the ModuleInit
6709 * and ModuleTerm. Use SUPR0LdrUnlock() to unlock.
6710 *
6711 * @returns VBox status code.
6712 * @param pSession The session doing the locking.
6713 *
6714 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6715 */
6716SUPR0DECL(int) SUPR0LdrLock(PSUPDRVSESSION pSession)
6717{
6718 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6719 return supdrvLdrLock(pSession->pDevExt);
6720}
6721SUPR0_EXPORT_SYMBOL(SUPR0LdrLock);
6722
6723
6724/**
6725 * Releases the global loader lock.
6726 *
6727 * Must correspond to a SUPR0LdrLock call!
6728 *
6729 * @returns VBox status code.
6730 * @param pSession The session doing the locking.
6731 *
6732 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6733 */
6734SUPR0DECL(int) SUPR0LdrUnlock(PSUPDRVSESSION pSession)
6735{
6736 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6737 return supdrvLdrUnlock(pSession->pDevExt);
6738}
6739SUPR0_EXPORT_SYMBOL(SUPR0LdrUnlock);
6740
6741
6742/**
6743 * For checking lock ownership in Assert() statements during ModuleInit and
6744 * ModuleTerm.
6745 *
6746 * @returns Whether we own the loader lock or not.
6747 * @param hMod The module in question.
6748 * @param fWantToHear For hosts where it is difficult to know who owns the
6749 * lock, this will be returned instead.
6750 */
6751SUPR0DECL(bool) SUPR0LdrIsLockOwnerByMod(void *hMod, bool fWantToHear)
6752{
6753 PSUPDRVDEVEXT pDevExt;
6754 RTNATIVETHREAD hOwner;
6755
6756 PSUPDRVLDRIMAGE pImage = (PSUPDRVLDRIMAGE)hMod;
6757 AssertPtrReturn(pImage, fWantToHear);
6758 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, fWantToHear);
6759
6760 pDevExt = pImage->pDevExt;
6761 AssertPtrReturn(pDevExt, fWantToHear);
6762
6763 /*
6764 * Expecting this to be called at init/term time only, so this will be sufficient.
6765 */
6766 hOwner = pDevExt->hLdrInitThread;
6767 if (hOwner == NIL_RTNATIVETHREAD)
6768 hOwner = pDevExt->hLdrTermThread;
6769 if (hOwner != NIL_RTNATIVETHREAD)
6770 return hOwner == RTThreadNativeSelf();
6771
6772 /*
6773 * Neither of the two semaphore variants currently offers very good
6774 * introspection, so we wing it for now. This API is VBOX_STRICT only.
6775 */
6776#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6777 return RTSemMutexIsOwned(pDevExt->mtxLdr) && fWantToHear;
6778#else
6779 return fWantToHear;
6780#endif
6781}
6782SUPR0_EXPORT_SYMBOL(SUPR0LdrIsLockOwnerByMod);
6783
6784
6785/**
6786 * Locates and retains the given module for ring-0 usage.
6787 *
6788 * @returns VBox status code.
6789 * @param pSession The session to associate the module reference with.
6790 * @param pszName The module name (no path).
6791 * @param phMod Where to return the module handle. The module is
6792 * referenced and a call to SUPR0LdrModRelease() is
6793 * necessary when done with it.
6794 */
6795SUPR0DECL(int) SUPR0LdrModByName(PSUPDRVSESSION pSession, const char *pszName, void **phMod)
6796{
6797 int rc;
6798 size_t cchName;
6799 PSUPDRVDEVEXT pDevExt;
6800
6801 /*
6802 * Validate input.
6803 */
6804 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
6805 *phMod = NULL;
6806 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6807 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
6808 cchName = strlen(pszName);
6809 AssertReturn(cchName > 0, VERR_EMPTY_STRING);
6810 AssertReturn(cchName < RT_SIZEOFMEMB(SUPDRVLDRIMAGE, szName), VERR_MODULE_NOT_FOUND);
6811
6812 /*
6813 * Do the lookup.
6814 */
6815 pDevExt = pSession->pDevExt;
6816 rc = supdrvLdrLock(pDevExt);
6817 if (RT_SUCCESS(rc))
6818 {
6819 PSUPDRVLDRIMAGE pImage;
6820 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
6821 {
6822 if ( pImage->szName[cchName] == '\0'
6823 && !memcmp(pImage->szName, pszName, cchName))
6824 {
6825 /*
6826 * Check the state and make sure we don't overflow the reference counter before return it.
6827 */
6828 uint32_t uState = pImage->uState;
6829 if (uState == SUP_IOCTL_LDR_LOAD)
6830 {
6831 if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
6832 {
6833 supdrvLdrAddUsage(pDevExt, pSession, pImage, false /*fRing3Usage*/);
6834 *phMod = pImage;
6835 supdrvLdrUnlock(pDevExt);
6836 return VINF_SUCCESS;
6837 }
6838 supdrvLdrUnlock(pDevExt);
6839 Log(("SUPR0LdrModByName: Too many existing references to '%s'!\n", pszName));
6840 return VERR_TOO_MANY_REFERENCES;
6841 }
6842 supdrvLdrUnlock(pDevExt);
6843 Log(("SUPR0LdrModByName: Module '%s' is not in the loaded state (%d)!\n", pszName, uState));
6844 return VERR_INVALID_STATE;
6845 }
6846 }
6847 supdrvLdrUnlock(pDevExt);
6848 Log(("SUPR0LdrModByName: Module '%s' not found!\n", pszName));
6849 rc = VERR_MODULE_NOT_FOUND;
6850 }
6851 return rc;
6852}
6853SUPR0_EXPORT_SYMBOL(SUPR0LdrModByName);
6854
6855
6856/**
6857 * Retains a ring-0 module reference.
6858 *
6859 * Release reference when done by calling SUPR0LdrModRelease().
6860 *
6861 * @returns VBox status code.
6862 * @param pSession The session to reference the module in. A usage
6863 * record is added if needed.
6864 * @param hMod The handle to the module to retain.
6865 */
6866SUPR0DECL(int) SUPR0LdrModRetain(PSUPDRVSESSION pSession, void *hMod)
6867{
6868 PSUPDRVDEVEXT pDevExt;
6869 PSUPDRVLDRIMAGE pImage;
6870 int rc;
6871
6872 /* Validate input a little. */
6873 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6874 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6875 pImage = (PSUPDRVLDRIMAGE)hMod;
6876 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6877
6878 /* Reference the module: */
6879 pDevExt = pSession->pDevExt;
6880 rc = supdrvLdrLock(pDevExt);
6881 if (RT_SUCCESS(rc))
6882 {
6883 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6884 {
6885 if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
6886 rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, false /*fRing3Usage*/);
6887 else
6888 AssertFailedStmt(rc = VERR_TOO_MANY_REFERENCES);
6889 }
6890 else
6891 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6892 supdrvLdrUnlock(pDevExt);
6893 }
6894 return rc;
6895}
6896SUPR0_EXPORT_SYMBOL(SUPR0LdrModRetain);
6897
6898
6899/**
6900 * Releases a ring-0 module reference retained by SUPR0LdrModByName() or
6901 * SUPR0LdrModRetain().
6902 *
6903 * @returns VBox status code.
6904 * @param pSession The session that the module was retained in.
6905 * @param hMod The module handle. NULL is silently ignored.
6906 */
6907SUPR0DECL(int) SUPR0LdrModRelease(PSUPDRVSESSION pSession, void *hMod)
6908{
6909 PSUPDRVDEVEXT pDevExt;
6910 PSUPDRVLDRIMAGE pImage;
6911 int rc;
6912
6913 /*
6914 * Validate input.
6915 */
6916 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6917 if (!hMod)
6918 return VINF_SUCCESS;
6919 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6920 pImage = (PSUPDRVLDRIMAGE)hMod;
6921 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6922
6923 /*
6924 * Take the loader lock and revalidate the module:
6925 */
6926 pDevExt = pSession->pDevExt;
6927 rc = supdrvLdrLock(pDevExt);
6928 if (RT_SUCCESS(rc))
6929 {
6930 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6931 {
6932 /*
6933 * Find the usage record for the module:
6934 */
6935 PSUPDRVLDRUSAGE pPrevUsage = NULL;
6936 PSUPDRVLDRUSAGE pUsage;
6937
6938 rc = VERR_MODULE_NOT_FOUND;
6939 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6940 {
6941 if (pUsage->pImage == pImage)
6942 {
6943 /*
6944 * Drop a ring-0 reference:
6945 */
6946 Assert(pImage->cImgUsage >= pUsage->cRing0Usage + pUsage->cRing3Usage);
6947 if (pUsage->cRing0Usage > 0)
6948 {
6949 if (pImage->cImgUsage > 1)
6950 {
6951 pUsage->cRing0Usage -= 1;
6952 supdrvLdrSubtractUsage(pDevExt, pImage, 1);
6953 rc = VINF_SUCCESS;
6954 }
6955 else
6956 {
6957 Assert(!pImage->pWrappedModInfo /* (The wrapper kmod has the last reference.) */);
6958 supdrvLdrFree(pDevExt, pImage);
6959
6960 if (pPrevUsage)
6961 pPrevUsage->pNext = pUsage->pNext;
6962 else
6963 pSession->pLdrUsage = pUsage->pNext;
6964 pUsage->pNext = NULL;
6965 pUsage->pImage = NULL;
6966 pUsage->cRing0Usage = 0;
6967 pUsage->cRing3Usage = 0;
6968 RTMemFree(pUsage);
6969
6970 rc = VINF_OBJECT_DESTROYED;
6971 }
6972 }
6973 else
6974 AssertFailedStmt(rc = VERR_CALLER_NO_REFERENCE);
6975 break;
6976 }
6977 pPrevUsage = pUsage;
6978 }
6979 }
6980 else
6981 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6982 supdrvLdrUnlock(pDevExt);
6983 }
6984 return rc;
6985
6986}
6987SUPR0_EXPORT_SYMBOL(SUPR0LdrModRelease);
6988
6989
6990/**
6991 * Implements the service call request.
6992 *
6993 * @returns VBox status code.
6994 * @param pDevExt The device extension.
6995 * @param pSession The calling session.
6996 * @param pReq The request packet, valid.
6997 */
6998static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
6999{
7000#if !defined(RT_OS_WINDOWS) || defined(RT_ARCH_AMD64) || defined(DEBUG)
7001 int rc;
7002
7003 /*
7004 * Find the module first in the module referenced by the calling session.
7005 */
7006 rc = supdrvLdrLock(pDevExt);
7007 if (RT_SUCCESS(rc))
7008 {
7009 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
7010 PSUPDRVLDRUSAGE pUsage;
7011
7012 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
7013 if ( pUsage->pImage->pfnServiceReqHandler
7014 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
7015 {
7016 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
7017 break;
7018 }
7019 supdrvLdrUnlock(pDevExt);
7020
7021 if (pfnServiceReqHandler)
7022 {
7023 /*
7024 * Call it.
7025 */
7026 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
7027 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
7028 else
7029 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
7030 }
7031 else
7032 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
7033 }
7034
7035 /* log it */
7036 if ( RT_FAILURE(rc)
7037 && rc != VERR_INTERRUPTED
7038 && rc != VERR_TIMEOUT)
7039 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
7040 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
7041 else
7042 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
7043 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
7044 return rc;
7045#else /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
7046 RT_NOREF3(pDevExt, pSession, pReq);
7047 return VERR_NOT_IMPLEMENTED;
7048#endif /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
7049}
7050
7051
7052/**
7053 * Implements the logger settings request.
7054 *
7055 * @returns VBox status code.
7056 * @param pReq The request.
7057 */
7058static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq)
7059{
7060 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
7061 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
7062 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
7063 PRTLOGGER pLogger = NULL;
7064 int rc;
7065
7066 /*
7067 * Some further validation.
7068 */
7069 switch (pReq->u.In.fWhat)
7070 {
7071 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
7072 case SUPLOGGERSETTINGS_WHAT_CREATE:
7073 break;
7074
7075 case SUPLOGGERSETTINGS_WHAT_DESTROY:
7076 if (*pszGroup || *pszFlags || *pszDest)
7077 return VERR_INVALID_PARAMETER;
7078 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
7079 return VERR_ACCESS_DENIED;
7080 break;
7081
7082 default:
7083 return VERR_INTERNAL_ERROR;
7084 }
7085
7086 /*
7087 * Get the logger.
7088 */
7089 switch (pReq->u.In.fWhich)
7090 {
7091 case SUPLOGGERSETTINGS_WHICH_DEBUG:
7092 pLogger = RTLogGetDefaultInstance();
7093 break;
7094
7095 case SUPLOGGERSETTINGS_WHICH_RELEASE:
7096 pLogger = RTLogRelGetDefaultInstance();
7097 break;
7098
7099 default:
7100 return VERR_INTERNAL_ERROR;
7101 }
7102
7103 /*
7104 * Do the job.
7105 */
7106 switch (pReq->u.In.fWhat)
7107 {
7108 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
7109 if (pLogger)
7110 {
7111 rc = RTLogFlags(pLogger, pszFlags);
7112 if (RT_SUCCESS(rc))
7113 rc = RTLogGroupSettings(pLogger, pszGroup);
7114 NOREF(pszDest);
7115 }
7116 else
7117 rc = VERR_NOT_FOUND;
7118 break;
7119
7120 case SUPLOGGERSETTINGS_WHAT_CREATE:
7121 {
7122 if (pLogger)
7123 rc = VERR_ALREADY_EXISTS;
7124 else
7125 {
7126 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
7127
7128 rc = RTLogCreate(&pLogger,
7129 0 /* fFlags */,
7130 pszGroup,
7131 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
7132 ? "VBOX_LOG"
7133 : "VBOX_RELEASE_LOG",
7134 RT_ELEMENTS(s_apszGroups),
7135 s_apszGroups,
7136 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
7137 NULL);
7138 if (RT_SUCCESS(rc))
7139 {
7140 rc = RTLogFlags(pLogger, pszFlags);
7141 NOREF(pszDest);
7142 if (RT_SUCCESS(rc))
7143 {
7144 switch (pReq->u.In.fWhich)
7145 {
7146 case SUPLOGGERSETTINGS_WHICH_DEBUG:
7147 pLogger = RTLogSetDefaultInstance(pLogger);
7148 break;
7149 case SUPLOGGERSETTINGS_WHICH_RELEASE:
7150 pLogger = RTLogRelSetDefaultInstance(pLogger);
7151 break;
7152 }
7153 }
7154 RTLogDestroy(pLogger);
7155 }
7156 }
7157 break;
7158 }
7159
7160 case SUPLOGGERSETTINGS_WHAT_DESTROY:
7161 switch (pReq->u.In.fWhich)
7162 {
7163 case SUPLOGGERSETTINGS_WHICH_DEBUG:
7164 pLogger = RTLogSetDefaultInstance(NULL);
7165 break;
7166 case SUPLOGGERSETTINGS_WHICH_RELEASE:
7167 pLogger = RTLogRelSetDefaultInstance(NULL);
7168 break;
7169 }
7170 rc = RTLogDestroy(pLogger);
7171 break;
7172
7173 default:
7174 {
7175 rc = VERR_INTERNAL_ERROR;
7176 break;
7177 }
7178 }
7179
7180 return rc;
7181}
7182
7183
7184/**
7185 * Implements the MSR prober operations.
7186 *
7187 * @returns VBox status code.
7188 * @param pDevExt The device extension.
7189 * @param pReq The request.
7190 */
7191static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq)
7192{
7193#ifdef SUPDRV_WITH_MSR_PROBER
7194 RTCPUID const idCpu = pReq->u.In.idCpu == UINT32_MAX ? NIL_RTCPUID : pReq->u.In.idCpu;
7195 int rc;
7196
7197 switch (pReq->u.In.enmOp)
7198 {
7199 case SUPMSRPROBEROP_READ:
7200 {
7201 uint64_t uValue;
7202 rc = supdrvOSMsrProberRead(pReq->u.In.uMsr, idCpu, &uValue);
7203 if (RT_SUCCESS(rc))
7204 {
7205 pReq->u.Out.uResults.Read.uValue = uValue;
7206 pReq->u.Out.uResults.Read.fGp = false;
7207 }
7208 else if (rc == VERR_ACCESS_DENIED)
7209 {
7210 pReq->u.Out.uResults.Read.uValue = 0;
7211 pReq->u.Out.uResults.Read.fGp = true;
7212 rc = VINF_SUCCESS;
7213 }
7214 break;
7215 }
7216
7217 case SUPMSRPROBEROP_WRITE:
7218 rc = supdrvOSMsrProberWrite(pReq->u.In.uMsr, idCpu, pReq->u.In.uArgs.Write.uToWrite);
7219 if (RT_SUCCESS(rc))
7220 pReq->u.Out.uResults.Write.fGp = false;
7221 else if (rc == VERR_ACCESS_DENIED)
7222 {
7223 pReq->u.Out.uResults.Write.fGp = true;
7224 rc = VINF_SUCCESS;
7225 }
7226 break;
7227
7228 case SUPMSRPROBEROP_MODIFY:
7229 case SUPMSRPROBEROP_MODIFY_FASTER:
7230 rc = supdrvOSMsrProberModify(idCpu, pReq);
7231 break;
7232
7233 default:
7234 return VERR_INVALID_FUNCTION;
7235 }
7236 RT_NOREF1(pDevExt);
7237 return rc;
7238#else
7239 RT_NOREF2(pDevExt, pReq);
7240 return VERR_NOT_IMPLEMENTED;
7241#endif
7242}
7243
7244
7245/**
7246 * Resume built-in keyboard on MacBook Air and Pro hosts.
7247 * If there is no built-in keyboard device, return success anyway.
7248 *
7249 * @returns 0 on Mac OS X platform, VERR_NOT_IMPLEMENTED on the other ones.
7250 */
7251static int supdrvIOCtl_ResumeSuspendedKbds(void)
7252{
7253#if defined(RT_OS_DARWIN)
7254 return supdrvDarwinResumeSuspendedKbds();
7255#else
7256 return VERR_NOT_IMPLEMENTED;
7257#endif
7258}
7259
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette