VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp@ 91800

Last change on this file: r91800, checked in by vboxsync, 3 years ago

SUPDrv: Reworked the export table a little. SUPFUNC got a cArgs field taken from the name. bugref:10124

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 262.2 KB
1/* $Id: SUPDrv.cpp 91800 2021-10-18 00:49:23Z vboxsync $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_SUP_DRV
32#define SUPDRV_AGNOSTIC
33#include "SUPDrvInternal.h"
34#ifndef PAGE_SHIFT
35# include <iprt/param.h>
36#endif
37#include <iprt/asm.h>
38#include <iprt/asm-amd64-x86.h>
39#include <iprt/asm-math.h>
40#include <iprt/cpuset.h>
41#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
42# include <iprt/dbg.h>
43#endif
44#include <iprt/handletable.h>
45#include <iprt/mem.h>
46#include <iprt/mp.h>
47#include <iprt/power.h>
48#include <iprt/process.h>
49#include <iprt/semaphore.h>
50#include <iprt/spinlock.h>
51#include <iprt/thread.h>
52#include <iprt/uuid.h>
53#include <iprt/net.h>
54#include <iprt/crc.h>
55#include <iprt/string.h>
56#include <iprt/timer.h>
57#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
58# include <iprt/rand.h>
59# include <iprt/path.h>
60#endif
61#include <iprt/uint128.h>
62#include <iprt/x86.h>
63
64#include <VBox/param.h>
65#include <VBox/log.h>
66#include <VBox/err.h>
67#include <VBox/vmm/hm_vmx.h>
68
69#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
70# include "dtrace/SUPDrv.h"
71#else
72# define VBOXDRV_SESSION_CREATE(pvSession, fUser) do { } while (0)
73# define VBOXDRV_SESSION_CLOSE(pvSession) do { } while (0)
74# define VBOXDRV_IOCTL_ENTRY(pvSession, uIOCtl, pvReqHdr) do { } while (0)
75# define VBOXDRV_IOCTL_RETURN(pvSession, uIOCtl, pvReqHdr, rcRet, rcReq) do { } while (0)
76#endif
77
78/*
79 * Logging assignments:
80 * Log - useful stuff, like failures.
81 * LogFlow - program flow, except the really noisy bits.
82 * Log2 - Cleanup.
83 * Log3 - Loader flow noise.
84 * Log4 - Call VMMR0 flow noise.
85 * Log5 - Native yet-to-be-defined noise.
86 * Log6 - Native ioctl flow noise.
87 *
88 * Logging requires KBUILD_TYPE=debug and possibly changes to the logger
89 * instantiation in log-vbox.c(pp).
90 */
91
92
93/*********************************************************************************************************************************
94* Defined Constants And Macros *
95*********************************************************************************************************************************/
96/** @def VBOX_SVN_REV
97 * The makefile should define this if it can. */
98#ifndef VBOX_SVN_REV
99# define VBOX_SVN_REV 0
100#endif
101
102/** @def SUPDRV_CHECK_SMAP_SETUP
103 * SMAP check setup. */
104/** @def SUPDRV_CHECK_SMAP_CHECK
105 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
106 * will be logged and @a a_BadExpr is executed. */
107#if (defined(RT_OS_DARWIN) || defined(RT_OS_LINUX)) && !defined(VBOX_WITHOUT_EFLAGS_AC_SET_IN_VBOXDRV)
108# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
109# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) \
110 do { \
111 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
112 { \
113 RTCCUINTREG fEfl = ASMGetFlags(); \
114 if (RT_LIKELY(fEfl & X86_EFL_AC)) \
115 { /* likely */ } \
116 else \
117 { \
118 supdrvBadContext(a_pDevExt, "SUPDrv.cpp", __LINE__, "EFLAGS.AC is 0!"); \
119 a_BadExpr; \
120 } \
121 } \
122 } while (0)
123#else
124# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
125# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) NOREF(fKernelFeatures)
126#endif
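
/* Illustrative only: a minimal sketch of how the two macros above are meant to be
 * paired inside an ioctl-style worker; the function name below is hypothetical, but
 * this is the intended setup/check pattern.
 * @code
 *     int exampleWorker(PSUPDRVDEVEXT pDevExt)
 *     {
 *         SUPDRV_CHECK_SMAP_SETUP();                                        // capture fKernelFeatures once
 *         SUPDRV_CHECK_SMAP_CHECK(pDevExt, return VERR_INVALID_PARAMETER);  // bail if EFLAGS.AC is clear
 *         // ... do the actual work, possibly calling other code ...
 *         SUPDRV_CHECK_SMAP_CHECK(pDevExt, return VERR_INVALID_PARAMETER);  // re-check afterwards
 *         return VINF_SUCCESS;
 *     }
 * @endcode
 */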
127
128
129/*********************************************************************************************************************************
130* Internal Functions *
131*********************************************************************************************************************************/
132static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
133static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
134static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
135static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
136static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
137static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
138static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
139static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt);
140static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
141static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
142static int supdrvLdrAddUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage);
143DECLINLINE(void) supdrvLdrSubtractUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, uint32_t cReference);
144static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
145DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt);
146DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt);
147static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
148static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq);
149static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq);
150static int supdrvIOCtl_ResumeSuspendedKbds(void);
151
152
153/*********************************************************************************************************************************
154* Global Variables *
155*********************************************************************************************************************************/
156/** @def STKBACK
157 * Indicates that the symbol needs to switch back to the kernel stack on darwin.
158 * See @bugref{10124} for details. */
159#ifdef RT_OS_DARWIN
160# define STKBACK(a) "StkBack_" a
161#else
162# define STKBACK(a) a
163#endif
164/** @def STKOKAY
165 * The opposite of STKBACK, just to make the table nicely aligned. */
166#define STKOKAY(a) a
167
168/** @name Function table entry macros.
169 * @{ */
170#define SUPEXP_CUSTOM(a_cArgs, a_Name, a_Value) { #a_Name, a_cArgs, (void *)(uintptr_t)(a_Value) }
171#define SUPEXP_STK_OKAY(a_cArgs, a_Name) { #a_Name, 0, (void *)(uintptr_t)a_Name }
172#ifdef RT_OS_DARWIN
173# define SUPEXP_STK_BACK(a_cArgs, a_Name) { "StkBack_" #a_Name, 0, (void *)(uintptr_t)a_Name }
174#else
175# define SUPEXP_STK_BACK(a_cArgs, a_Name) { #a_Name, 0, (void *)(uintptr_t)a_Name }
176#endif
177/** @} */
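
/* Illustrative only: with the definitions above, an entry such as
 *      SUPEXP_STK_BACK(22, SUPR0ObjRelease)
 * expands on darwin to
 *      { "StkBack_SUPR0ObjRelease", 0, (void *)(uintptr_t)SUPR0ObjRelease }
 * and elsewhere to the same initializer with the plain "SUPR0ObjRelease" name, so the
 * darwin-specific code can recognize exports that need the switch back to the kernel
 * stack (see the STKBACK notes and @bugref{10124} above). */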
178
179/**
180 * Array of the R0 SUP API.
181 *
182 * While making changes to these exports, make sure to update the IOC
183 * minor version (SUPDRV_IOC_VERSION).
184 *
185 * @remarks This array is processed by SUPR0-def-pe.sed and SUPR0-def-lx.sed to
186 * produce definition files from which import libraries are generated.
187 * Take care when commenting things and especially with \#ifdef'ing.
188 */
189static SUPFUNC g_aFunctions[] =
190{
191/* SED: START */
192 /* name function */
193 /* Entries with absolute addresses determined at runtime; fixup
194 code makes ugly ASSUMPTIONS about the order here: */
195 SUPEXP_CUSTOM( 0, SUPR0AbsIs64bit, 0),
196 SUPEXP_CUSTOM( 0, SUPR0Abs64bitKernelCS, 0),
197 SUPEXP_CUSTOM( 0, SUPR0Abs64bitKernelSS, 0),
198 SUPEXP_CUSTOM( 0, SUPR0Abs64bitKernelDS, 0),
199 SUPEXP_CUSTOM( 0, SUPR0AbsKernelCS, 0),
200 SUPEXP_CUSTOM( 0, SUPR0AbsKernelSS, 0),
201 SUPEXP_CUSTOM( 0, SUPR0AbsKernelDS, 0),
202 SUPEXP_CUSTOM( 0, SUPR0AbsKernelES, 0),
203 SUPEXP_CUSTOM( 0, SUPR0AbsKernelFS, 0),
204 SUPEXP_CUSTOM( 0, SUPR0AbsKernelGS, 0),
205 /* Normal function & data pointers: */
206 SUPEXP_CUSTOM( 0, g_pSUPGlobalInfoPage, &g_pSUPGlobalInfoPage), /* SED: DATA */
207 SUPEXP_STK_OKAY(0, SUPGetGIP),
208 SUPEXP_STK_BACK(22, SUPReadTscWithDelta),
209 SUPEXP_STK_BACK(22, SUPGetTscDeltaSlow),
210 SUPEXP_STK_BACK(22, SUPGetCpuHzFromGipForAsyncMode),
211 SUPEXP_STK_OKAY(0, SUPIsTscFreqCompatible),
212 SUPEXP_STK_OKAY(0, SUPIsTscFreqCompatibleEx),
213 SUPEXP_STK_BACK(22, SUPR0BadContext),
214 SUPEXP_STK_BACK(22, SUPR0ComponentDeregisterFactory),
215 SUPEXP_STK_BACK(22, SUPR0ComponentQueryFactory),
216 SUPEXP_STK_BACK(22, SUPR0ComponentRegisterFactory),
217 SUPEXP_STK_BACK(22, SUPR0ContAlloc),
218 SUPEXP_STK_BACK(22, SUPR0ContFree),
219 SUPEXP_STK_BACK(22, SUPR0ChangeCR4),
220 SUPEXP_STK_BACK(22, SUPR0EnableVTx),
221 SUPEXP_STK_BACK(22, SUPR0SuspendVTxOnCpu),
222 SUPEXP_STK_BACK(22, SUPR0ResumeVTxOnCpu),
223 SUPEXP_STK_OKAY(0, SUPR0GetCurrentGdtRw),
224 SUPEXP_STK_OKAY(0, SUPR0GetKernelFeatures),
225 SUPEXP_STK_BACK(22, SUPR0GetHwvirtMsrs),
226 SUPEXP_STK_BACK(22, SUPR0GetPagingMode),
227 SUPEXP_STK_BACK(22, SUPR0GetSvmUsability),
228 SUPEXP_STK_BACK(22, SUPR0GetVTSupport),
229 SUPEXP_STK_BACK(22, SUPR0GetVmxUsability),
230 SUPEXP_STK_BACK(22, SUPR0LdrIsLockOwnerByMod),
231 SUPEXP_STK_BACK(22, SUPR0LdrLock),
232 SUPEXP_STK_BACK(22, SUPR0LdrUnlock),
233 SUPEXP_STK_BACK(22, SUPR0LdrModByName),
234 SUPEXP_STK_BACK(22, SUPR0LdrModRelease),
235 SUPEXP_STK_BACK(22, SUPR0LdrModRetain),
236 SUPEXP_STK_BACK(22, SUPR0LockMem),
237 SUPEXP_STK_BACK(22, SUPR0LowAlloc),
238 SUPEXP_STK_BACK(22, SUPR0LowFree),
239 SUPEXP_STK_BACK(22, SUPR0MemAlloc),
240 SUPEXP_STK_BACK(22, SUPR0MemFree),
241 SUPEXP_STK_BACK(22, SUPR0MemGetPhys),
242 SUPEXP_STK_BACK(22, SUPR0ObjAddRef),
243 SUPEXP_STK_BACK(22, SUPR0ObjAddRefEx),
244 SUPEXP_STK_BACK(22, SUPR0ObjRegister),
245 SUPEXP_STK_BACK(22, SUPR0ObjRelease),
246 SUPEXP_STK_BACK(22, SUPR0ObjVerifyAccess),
247 SUPEXP_STK_BACK(22, SUPR0PageAllocEx),
248 SUPEXP_STK_BACK(22, SUPR0PageFree),
249 SUPEXP_STK_BACK(22, SUPR0PageMapKernel),
250 SUPEXP_STK_BACK(22, SUPR0PageProtect),
251#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
252 SUPEXP_STK_OKAY(0, SUPR0HCPhysToVirt), /* only-linux, only-solaris, only-freebsd */
253#endif
254 SUPEXP_STK_BACK(22, SUPR0PrintfV),
255 SUPEXP_STK_BACK(22, SUPR0GetSessionGVM),
256 SUPEXP_STK_BACK(22, SUPR0GetSessionVM),
257 SUPEXP_STK_BACK(22, SUPR0SetSessionVM),
258 SUPEXP_STK_BACK(22, SUPR0TscDeltaMeasureBySetIndex),
259 SUPEXP_STK_BACK(22, SUPR0TracerDeregisterDrv),
260 SUPEXP_STK_BACK(22, SUPR0TracerDeregisterImpl),
261 SUPEXP_STK_BACK(22, SUPR0TracerFireProbe),
262 SUPEXP_STK_BACK(22, SUPR0TracerRegisterDrv),
263 SUPEXP_STK_BACK(22, SUPR0TracerRegisterImpl),
264 SUPEXP_STK_BACK(22, SUPR0TracerRegisterModule),
265 SUPEXP_STK_BACK(22, SUPR0TracerUmodProbeFire),
266 SUPEXP_STK_BACK(22, SUPR0UnlockMem),
267#ifdef RT_OS_WINDOWS
268 SUPEXP_STK_BACK(22, SUPR0IoCtlSetupForHandle), /* only-windows */
269 SUPEXP_STK_BACK(22, SUPR0IoCtlPerform), /* only-windows */
270 SUPEXP_STK_BACK(22, SUPR0IoCtlCleanup), /* only-windows */
271#endif
272 SUPEXP_STK_BACK(22, SUPSemEventClose),
273 SUPEXP_STK_BACK(22, SUPSemEventCreate),
274 SUPEXP_STK_BACK(22, SUPSemEventGetResolution),
275 SUPEXP_STK_BACK(22, SUPSemEventMultiClose),
276 SUPEXP_STK_BACK(22, SUPSemEventMultiCreate),
277 SUPEXP_STK_BACK(22, SUPSemEventMultiGetResolution),
278 SUPEXP_STK_BACK(22, SUPSemEventMultiReset),
279 SUPEXP_STK_BACK(22, SUPSemEventMultiSignal),
280 SUPEXP_STK_BACK(22, SUPSemEventMultiWait),
281 SUPEXP_STK_BACK(22, SUPSemEventMultiWaitNoResume),
282 SUPEXP_STK_BACK(22, SUPSemEventMultiWaitNsAbsIntr),
283 SUPEXP_STK_BACK(22, SUPSemEventMultiWaitNsRelIntr),
284 SUPEXP_STK_BACK(22, SUPSemEventSignal),
285 SUPEXP_STK_BACK(22, SUPSemEventWait),
286 SUPEXP_STK_BACK(22, SUPSemEventWaitNoResume),
287 SUPEXP_STK_BACK(22, SUPSemEventWaitNsAbsIntr),
288 SUPEXP_STK_BACK(22, SUPSemEventWaitNsRelIntr),
289
290 SUPEXP_STK_BACK(22, RTAssertAreQuiet),
291 SUPEXP_STK_BACK(22, RTAssertMayPanic),
292 SUPEXP_STK_BACK(22, RTAssertMsg1),
293 SUPEXP_STK_BACK(22, RTAssertMsg2AddV),
294 SUPEXP_STK_BACK(22, RTAssertMsg2V),
295 SUPEXP_STK_BACK(22, RTAssertSetMayPanic),
296 SUPEXP_STK_BACK(22, RTAssertSetQuiet),
297 SUPEXP_STK_OKAY(0, RTCrc32),
298 SUPEXP_STK_OKAY(0, RTCrc32Finish),
299 SUPEXP_STK_OKAY(0, RTCrc32Process),
300 SUPEXP_STK_OKAY(0, RTCrc32Start),
301 SUPEXP_STK_OKAY(0, RTErrConvertFromErrno),
302 SUPEXP_STK_OKAY(0, RTErrConvertToErrno),
303 SUPEXP_STK_BACK(22, RTHandleTableAllocWithCtx),
304 SUPEXP_STK_BACK(22, RTHandleTableCreate),
305 SUPEXP_STK_BACK(22, RTHandleTableCreateEx),
306 SUPEXP_STK_BACK(22, RTHandleTableDestroy),
307 SUPEXP_STK_BACK(22, RTHandleTableFreeWithCtx),
308 SUPEXP_STK_BACK(22, RTHandleTableLookupWithCtx),
309 SUPEXP_STK_BACK(22, RTLogBulkUpdate),
310 SUPEXP_STK_BACK(22, RTLogCheckGroupFlags),
311 SUPEXP_STK_BACK(22, RTLogCreateExV),
312 SUPEXP_STK_BACK(22, RTLogDestroy),
313 SUPEXP_STK_BACK(22, RTLogDefaultInstance),
314 SUPEXP_STK_BACK(22, RTLogDefaultInstanceEx),
315 SUPEXP_STK_BACK(22, SUPR0DefaultLogInstanceEx),
316 SUPEXP_STK_BACK(22, RTLogGetDefaultInstance),
317 SUPEXP_STK_BACK(22, RTLogGetDefaultInstanceEx),
318 SUPEXP_STK_BACK(22, SUPR0GetDefaultLogInstanceEx),
319 SUPEXP_STK_BACK(22, RTLogLoggerExV),
320 SUPEXP_STK_BACK(22, RTLogPrintfV),
321 SUPEXP_STK_BACK(22, RTLogRelGetDefaultInstance),
322 SUPEXP_STK_BACK(22, RTLogRelGetDefaultInstanceEx),
323 SUPEXP_STK_BACK(22, SUPR0GetDefaultLogRelInstanceEx),
324 SUPEXP_STK_BACK(22, RTLogSetDefaultInstanceThread),
325 SUPEXP_STK_BACK(22, RTLogSetFlushCallback),
326 SUPEXP_STK_BACK(22, RTLogSetR0ProgramStart),
327 SUPEXP_STK_BACK(22, RTLogSetR0ThreadNameV),
328 SUPEXP_STK_BACK(22, RTMemAllocExTag),
329 SUPEXP_STK_BACK(22, RTMemAllocTag),
330 SUPEXP_STK_BACK(22, RTMemAllocVarTag),
331 SUPEXP_STK_BACK(22, RTMemAllocZTag),
332 SUPEXP_STK_BACK(22, RTMemAllocZVarTag),
333 SUPEXP_STK_BACK(22, RTMemDupExTag),
334 SUPEXP_STK_BACK(22, RTMemDupTag),
335 SUPEXP_STK_BACK(22, RTMemFree),
336 SUPEXP_STK_BACK(22, RTMemFreeEx),
337 SUPEXP_STK_BACK(22, RTMemReallocTag),
338 SUPEXP_STK_BACK(22, RTMpCpuId),
339 SUPEXP_STK_BACK(22, RTMpCpuIdFromSetIndex),
340 SUPEXP_STK_BACK(22, RTMpCpuIdToSetIndex),
341 SUPEXP_STK_BACK(22, RTMpCurSetIndex),
342 SUPEXP_STK_BACK(22, RTMpCurSetIndexAndId),
343 SUPEXP_STK_BACK(22, RTMpGetArraySize),
344 SUPEXP_STK_BACK(22, RTMpGetCount),
345 SUPEXP_STK_BACK(22, RTMpGetMaxCpuId),
346 SUPEXP_STK_BACK(22, RTMpGetOnlineCount),
347 SUPEXP_STK_BACK(22, RTMpGetOnlineSet),
348 SUPEXP_STK_BACK(22, RTMpGetSet),
349 SUPEXP_STK_BACK(22, RTMpIsCpuOnline),
350 SUPEXP_STK_BACK(22, RTMpIsCpuPossible),
351 SUPEXP_STK_BACK(22, RTMpIsCpuWorkPending),
352 SUPEXP_STK_BACK(22, RTMpNotificationDeregister),
353 SUPEXP_STK_BACK(22, RTMpNotificationRegister),
354 SUPEXP_STK_BACK(22, RTMpOnAll),
355 SUPEXP_STK_BACK(22, RTMpOnOthers),
356 SUPEXP_STK_BACK(22, RTMpOnSpecific),
357 SUPEXP_STK_BACK(22, RTMpPokeCpu),
358 SUPEXP_STK_OKAY(0, RTNetIPv4AddDataChecksum),
359 SUPEXP_STK_OKAY(0, RTNetIPv4AddTCPChecksum),
360 SUPEXP_STK_OKAY(0, RTNetIPv4AddUDPChecksum),
361 SUPEXP_STK_OKAY(0, RTNetIPv4FinalizeChecksum),
362 SUPEXP_STK_OKAY(0, RTNetIPv4HdrChecksum),
363 SUPEXP_STK_OKAY(0, RTNetIPv4IsDHCPValid),
364 SUPEXP_STK_OKAY(0, RTNetIPv4IsHdrValid),
365 SUPEXP_STK_OKAY(0, RTNetIPv4IsTCPSizeValid),
366 SUPEXP_STK_OKAY(0, RTNetIPv4IsTCPValid),
367 SUPEXP_STK_OKAY(0, RTNetIPv4IsUDPSizeValid),
368 SUPEXP_STK_OKAY(0, RTNetIPv4IsUDPValid),
369 SUPEXP_STK_OKAY(0, RTNetIPv4PseudoChecksum),
370 SUPEXP_STK_OKAY(0, RTNetIPv4PseudoChecksumBits),
371 SUPEXP_STK_OKAY(0, RTNetIPv4TCPChecksum),
372 SUPEXP_STK_OKAY(0, RTNetIPv4UDPChecksum),
373 SUPEXP_STK_OKAY(0, RTNetIPv6PseudoChecksum),
374 SUPEXP_STK_OKAY(0, RTNetIPv6PseudoChecksumBits),
375 SUPEXP_STK_OKAY(0, RTNetIPv6PseudoChecksumEx),
376 SUPEXP_STK_OKAY(0, RTNetTCPChecksum),
377 SUPEXP_STK_OKAY(0, RTNetUDPChecksum),
378 SUPEXP_STK_BACK(22, RTPowerNotificationDeregister),
379 SUPEXP_STK_BACK(22, RTPowerNotificationRegister),
380 SUPEXP_STK_BACK(22, RTProcSelf),
381 SUPEXP_STK_BACK(22, RTR0AssertPanicSystem),
382#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
383 SUPEXP_STK_BACK(22, RTR0DbgKrnlInfoOpen), /* only-darwin, only-solaris, only-windows */
384 SUPEXP_STK_BACK(22, RTR0DbgKrnlInfoQueryMember), /* only-darwin, only-solaris, only-windows */
385# if defined(RT_OS_SOLARIS)
386 SUPEXP_STK_BACK(22, RTR0DbgKrnlInfoQuerySize), /* only-solaris */
387# endif
388 SUPEXP_STK_BACK(22, RTR0DbgKrnlInfoQuerySymbol), /* only-darwin, only-solaris, only-windows */
389 SUPEXP_STK_BACK(22, RTR0DbgKrnlInfoRelease), /* only-darwin, only-solaris, only-windows */
390 SUPEXP_STK_BACK(22, RTR0DbgKrnlInfoRetain), /* only-darwin, only-solaris, only-windows */
391#endif
392 SUPEXP_STK_BACK(22, RTR0MemAreKrnlAndUsrDifferent),
393 SUPEXP_STK_BACK(22, RTR0MemKernelIsValidAddr),
394 SUPEXP_STK_BACK(22, RTR0MemKernelCopyFrom),
395 SUPEXP_STK_BACK(22, RTR0MemKernelCopyTo),
396 SUPEXP_STK_OKAY(0, RTR0MemObjAddress),
397 SUPEXP_STK_OKAY(0, RTR0MemObjAddressR3),
398 SUPEXP_STK_BACK(22, RTR0MemObjAllocContTag),
399 SUPEXP_STK_BACK(22, RTR0MemObjAllocLargeTag),
400 SUPEXP_STK_BACK(22, RTR0MemObjAllocLowTag),
401 SUPEXP_STK_BACK(22, RTR0MemObjAllocPageTag),
402 SUPEXP_STK_BACK(22, RTR0MemObjAllocPhysExTag),
403 SUPEXP_STK_BACK(22, RTR0MemObjAllocPhysNCTag),
404 SUPEXP_STK_BACK(22, RTR0MemObjAllocPhysTag),
405 SUPEXP_STK_BACK(22, RTR0MemObjEnterPhysTag),
406 SUPEXP_STK_BACK(22, RTR0MemObjFree),
407 SUPEXP_STK_BACK(22, RTR0MemObjGetPagePhysAddr),
408 SUPEXP_STK_OKAY(0, RTR0MemObjIsMapping),
409 SUPEXP_STK_BACK(22, RTR0MemObjLockUserTag),
410 SUPEXP_STK_BACK(22, RTR0MemObjLockKernelTag),
411 SUPEXP_STK_BACK(22, RTR0MemObjMapKernelExTag),
412 SUPEXP_STK_BACK(22, RTR0MemObjMapKernelTag),
413 SUPEXP_STK_BACK(22, RTR0MemObjMapUserTag),
414 SUPEXP_STK_BACK(22, RTR0MemObjMapUserExTag),
415 SUPEXP_STK_BACK(22, RTR0MemObjProtect),
416 SUPEXP_STK_OKAY(0, RTR0MemObjSize),
417 SUPEXP_STK_BACK(22, RTR0MemUserCopyFrom),
418 SUPEXP_STK_BACK(22, RTR0MemUserCopyTo),
419 SUPEXP_STK_BACK(22, RTR0MemUserIsValidAddr),
420 SUPEXP_STK_BACK(22, RTR0ProcHandleSelf),
421 SUPEXP_STK_BACK(22, RTSemEventCreate),
422 SUPEXP_STK_BACK(22, RTSemEventDestroy),
423 SUPEXP_STK_BACK(22, RTSemEventGetResolution),
424 SUPEXP_STK_BACK(22, RTSemEventIsSignalSafe),
425 SUPEXP_STK_BACK(22, RTSemEventMultiCreate),
426 SUPEXP_STK_BACK(22, RTSemEventMultiDestroy),
427 SUPEXP_STK_BACK(22, RTSemEventMultiGetResolution),
428 SUPEXP_STK_BACK(22, RTSemEventMultiIsSignalSafe),
429 SUPEXP_STK_BACK(22, RTSemEventMultiReset),
430 SUPEXP_STK_BACK(22, RTSemEventMultiSignal),
431 SUPEXP_STK_BACK(22, RTSemEventMultiWait),
432 SUPEXP_STK_BACK(22, RTSemEventMultiWaitEx),
433 SUPEXP_STK_BACK(22, RTSemEventMultiWaitExDebug),
434 SUPEXP_STK_BACK(22, RTSemEventMultiWaitNoResume),
435 SUPEXP_STK_BACK(22, RTSemEventSignal),
436 SUPEXP_STK_BACK(22, RTSemEventWait),
437 SUPEXP_STK_BACK(22, RTSemEventWaitEx),
438 SUPEXP_STK_BACK(22, RTSemEventWaitExDebug),
439 SUPEXP_STK_BACK(22, RTSemEventWaitNoResume),
440 SUPEXP_STK_BACK(22, RTSemFastMutexCreate),
441 SUPEXP_STK_BACK(22, RTSemFastMutexDestroy),
442 SUPEXP_STK_BACK(22, RTSemFastMutexRelease),
443 SUPEXP_STK_BACK(22, RTSemFastMutexRequest),
444 SUPEXP_STK_BACK(22, RTSemMutexCreate),
445 SUPEXP_STK_BACK(22, RTSemMutexDestroy),
446 SUPEXP_STK_BACK(22, RTSemMutexRelease),
447 SUPEXP_STK_BACK(22, RTSemMutexRequest),
448 SUPEXP_STK_BACK(22, RTSemMutexRequestDebug),
449 SUPEXP_STK_BACK(22, RTSemMutexRequestNoResume),
450 SUPEXP_STK_BACK(22, RTSemMutexRequestNoResumeDebug),
451 SUPEXP_STK_BACK(22, RTSpinlockAcquire),
452 SUPEXP_STK_BACK(22, RTSpinlockCreate),
453 SUPEXP_STK_BACK(22, RTSpinlockDestroy),
454 SUPEXP_STK_BACK(22, RTSpinlockRelease),
455 SUPEXP_STK_OKAY(0, RTStrCopy),
456 SUPEXP_STK_BACK(22, RTStrDupTag),
457 SUPEXP_STK_BACK(22, RTStrFormatNumber),
458 SUPEXP_STK_BACK(22, RTStrFormatTypeDeregister),
459 SUPEXP_STK_BACK(22, RTStrFormatTypeRegister),
460 SUPEXP_STK_BACK(22, RTStrFormatTypeSetUser),
461 SUPEXP_STK_BACK(22, RTStrFormatV),
462 SUPEXP_STK_BACK(22, RTStrFree),
463 SUPEXP_STK_OKAY(0, RTStrNCmp),
464 SUPEXP_STK_BACK(22, RTStrPrintfExV),
465 SUPEXP_STK_BACK(22, RTStrPrintfV),
466 SUPEXP_STK_BACK(22, RTStrPrintf2ExV),
467 SUPEXP_STK_BACK(22, RTStrPrintf2V),
468 SUPEXP_STK_BACK(22, RTThreadCreate),
469 SUPEXP_STK_BACK(22, RTThreadCtxHookIsEnabled),
470 SUPEXP_STK_BACK(22, RTThreadCtxHookCreate),
471 SUPEXP_STK_BACK(22, RTThreadCtxHookDestroy),
472 SUPEXP_STK_BACK(22, RTThreadCtxHookDisable),
473 SUPEXP_STK_BACK(22, RTThreadCtxHookEnable),
474 SUPEXP_STK_BACK(22, RTThreadGetName),
475 SUPEXP_STK_BACK(22, RTThreadGetNative),
476 SUPEXP_STK_BACK(22, RTThreadGetType),
477 SUPEXP_STK_BACK(22, RTThreadIsInInterrupt),
478 SUPEXP_STK_BACK(22, RTThreadNativeSelf),
479 SUPEXP_STK_BACK(22, RTThreadPreemptDisable),
480 SUPEXP_STK_BACK(22, RTThreadPreemptIsEnabled),
481 SUPEXP_STK_BACK(22, RTThreadPreemptIsPending),
482 SUPEXP_STK_BACK(22, RTThreadPreemptIsPendingTrusty),
483 SUPEXP_STK_BACK(22, RTThreadPreemptIsPossible),
484 SUPEXP_STK_BACK(22, RTThreadPreemptRestore),
485 SUPEXP_STK_BACK(22, RTThreadQueryTerminationStatus),
486 SUPEXP_STK_BACK(22, RTThreadSelf),
487 SUPEXP_STK_BACK(22, RTThreadSelfName),
488 SUPEXP_STK_BACK(22, RTThreadSleep),
489 SUPEXP_STK_BACK(22, RTThreadUserReset),
490 SUPEXP_STK_BACK(22, RTThreadUserSignal),
491 SUPEXP_STK_BACK(22, RTThreadUserWait),
492 SUPEXP_STK_BACK(22, RTThreadUserWaitNoResume),
493 SUPEXP_STK_BACK(22, RTThreadWait),
494 SUPEXP_STK_BACK(22, RTThreadWaitNoResume),
495 SUPEXP_STK_BACK(22, RTThreadYield),
496 SUPEXP_STK_BACK(22, RTTimeNow),
497 SUPEXP_STK_BACK(22, RTTimerCanDoHighResolution),
498 SUPEXP_STK_BACK(22, RTTimerChangeInterval),
499 SUPEXP_STK_BACK(22, RTTimerCreate),
500 SUPEXP_STK_BACK(22, RTTimerCreateEx),
501 SUPEXP_STK_BACK(22, RTTimerDestroy),
502 SUPEXP_STK_BACK(22, RTTimerGetSystemGranularity),
503 SUPEXP_STK_BACK(22, RTTimerReleaseSystemGranularity),
504 SUPEXP_STK_BACK(22, RTTimerRequestSystemGranularity),
505 SUPEXP_STK_BACK(22, RTTimerStart),
506 SUPEXP_STK_BACK(22, RTTimerStop),
507 SUPEXP_STK_BACK(22, RTTimeSystemMilliTS),
508 SUPEXP_STK_BACK(22, RTTimeSystemNanoTS),
509 SUPEXP_STK_OKAY(0, RTUuidCompare),
510 SUPEXP_STK_OKAY(0, RTUuidCompareStr),
511 SUPEXP_STK_OKAY(0, RTUuidFromStr),
512/* SED: END */
513};
514
515#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
516/**
517 * Drag in the rest of IPRT since we share it with the
518 * rest of the kernel modules on darwin.
519 */
520struct CLANG11WEIRDNESS { PFNRT pfn; } g_apfnVBoxDrvIPRTDeps[] =
521{
522 /* VBoxNetAdp */
523 { (PFNRT)RTRandBytes },
524 /* VBoxUSB */
525 { (PFNRT)RTPathStripFilename },
526#if !defined(RT_OS_FREEBSD)
527 { (PFNRT)RTHandleTableAlloc },
528 { (PFNRT)RTStrPurgeEncoding },
529#endif
530 { NULL }
531};
532#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
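
/* The array above exists only to reference these functions: taking their addresses keeps
 * the corresponding IPRT code linked into this module even though SUPDrv itself never
 * calls it, so that (per the comment above) the other VBox kernel modules such as
 * VBoxNetAdp and VBoxUSB can share it on the platforms covered by the #if. */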
533
534
535
536/**
537 * Initializes the device extension structure.
538 *
539 * @returns IPRT status code.
540 * @param pDevExt The device extension to initialize.
541 * @param cbSession The size of the session structure. The size of
542 * SUPDRVSESSION may be smaller when SUPDRV_AGNOSTIC is
543 * defined because we're skipping the OS specific members
544 * then.
545 */
546int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt, size_t cbSession)
547{
548 int rc;
549
550#ifdef SUPDRV_WITH_RELEASE_LOGGER
551 /*
552 * Create the release log.
553 */
554 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
555 PRTLOGGER pRelLogger;
556 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
557 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
558 if (RT_SUCCESS(rc))
559 RTLogRelSetDefaultInstance(pRelLogger);
560 /** @todo Add native hook for getting logger config parameters and setting
561 * them. On linux we should use the module parameter stuff... */
562#endif
563
564#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
565 /*
566 * Require SSE2 to be present.
567 */
568 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
569 {
570 SUPR0Printf("vboxdrv: Requires SSE2 (cpuid(1).EDX=%#x)\n", ASMCpuId_EDX(1));
571 return VERR_UNSUPPORTED_CPU;
572 }
573#endif
574
575 /*
576 * Initialize it.
577 */
578 memset(pDevExt, 0, sizeof(*pDevExt)); /* Does not wipe OS specific tail section of the structure. */
579 pDevExt->Spinlock = NIL_RTSPINLOCK;
580 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
581 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
582#ifdef SUPDRV_USE_MUTEX_FOR_LDR
583 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
584#else
585 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
586#endif
587#ifdef SUPDRV_USE_MUTEX_FOR_GIP
588 pDevExt->mtxGip = NIL_RTSEMMUTEX;
589 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
590#else
591 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
592 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
593#endif
594
595 rc = RTSpinlockCreate(&pDevExt->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvDevExt");
596 if (RT_SUCCESS(rc))
597 rc = RTSpinlockCreate(&pDevExt->hGipSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvGip");
598 if (RT_SUCCESS(rc))
599 rc = RTSpinlockCreate(&pDevExt->hSessionHashTabSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvSession");
600
601 if (RT_SUCCESS(rc))
602#ifdef SUPDRV_USE_MUTEX_FOR_LDR
603 rc = RTSemMutexCreate(&pDevExt->mtxLdr);
604#else
605 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
606#endif
607 if (RT_SUCCESS(rc))
608#ifdef SUPDRV_USE_MUTEX_FOR_GIP
609 rc = RTSemMutexCreate(&pDevExt->mtxTscDelta);
610#else
611 rc = RTSemFastMutexCreate(&pDevExt->mtxTscDelta);
612#endif
613 if (RT_SUCCESS(rc))
614 {
615 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
616 if (RT_SUCCESS(rc))
617 {
618#ifdef SUPDRV_USE_MUTEX_FOR_GIP
619 rc = RTSemMutexCreate(&pDevExt->mtxGip);
620#else
621 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
622#endif
623 if (RT_SUCCESS(rc))
624 {
625 rc = supdrvGipCreate(pDevExt);
626 if (RT_SUCCESS(rc))
627 {
628 rc = supdrvTracerInit(pDevExt);
629 if (RT_SUCCESS(rc))
630 {
631 pDevExt->pLdrInitImage = NULL;
632 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
633 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
634 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
635 pDevExt->cbSession = (uint32_t)cbSession;
636
637 /*
638 * Fixup the absolute symbols.
639 *
640 * Because of the table indexing assumptions we'll have a little #ifdef orgy
641 * here rather than distributing this to OS specific files. At least for now.
642 */
643#ifdef RT_OS_DARWIN
644# if ARCH_BITS == 32
645 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
646 {
647 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
648 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
649 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
650 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
651 }
652 else
653 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
654 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
655 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
656 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
657 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
658 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
659 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
660# else /* 64-bit darwin: */
661 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
662 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
663 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
664 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
665 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
666 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
667 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
668 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
669 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
670 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
671
672# endif
673#else /* !RT_OS_DARWIN */
674# if ARCH_BITS == 64
675 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
676 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
677 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
678 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
679# else
680 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
681# endif
682 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
683 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
684 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
685 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
686 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
687 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
688#endif /* !RT_OS_DARWIN */
689 return VINF_SUCCESS;
690 }
691
692 supdrvGipDestroy(pDevExt);
693 }
694
695#ifdef SUPDRV_USE_MUTEX_FOR_GIP
696 RTSemMutexDestroy(pDevExt->mtxGip);
697 pDevExt->mtxGip = NIL_RTSEMMUTEX;
698#else
699 RTSemFastMutexDestroy(pDevExt->mtxGip);
700 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
701#endif
702 }
703 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
704 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
705 }
706 }
707
708#ifdef SUPDRV_USE_MUTEX_FOR_GIP
709 RTSemMutexDestroy(pDevExt->mtxTscDelta);
710 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
711#else
712 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
713 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
714#endif
715#ifdef SUPDRV_USE_MUTEX_FOR_LDR
716 RTSemMutexDestroy(pDevExt->mtxLdr);
717 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
718#else
719 RTSemFastMutexDestroy(pDevExt->mtxLdr);
720 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
721#endif
722 RTSpinlockDestroy(pDevExt->Spinlock);
723 pDevExt->Spinlock = NIL_RTSPINLOCK;
724 RTSpinlockDestroy(pDevExt->hGipSpinlock);
725 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
726 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
727 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
728
729#ifdef SUPDRV_WITH_RELEASE_LOGGER
730 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
731 RTLogDestroy(RTLogSetDefaultInstance(NULL));
732#endif
733
734 return rc;
735}
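
/* Illustrative only: a minimal sketch of how an OS-specific entry point would pair
 * supdrvInitDevExt() with supdrvDeleteDevExt(); the function names and the static
 * device-extension instance below are hypothetical.
 * @code
 *     static SUPDRVDEVEXT g_ExampleDevExt;
 *
 *     int exampleOsModuleInit(void)
 *     {
 *         int rc = supdrvInitDevExt(&g_ExampleDevExt, sizeof(SUPDRVSESSION));
 *         return RT_SUCCESS(rc) ? 0 : -1;   // map rc to the native error convention here
 *     }
 *
 *     void exampleOsModuleTerm(void)
 *     {
 *         supdrvDeleteDevExt(&g_ExampleDevExt);
 *     }
 * @endcode
 */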
736
737
738/**
739 * Delete the device extension (e.g. cleanup members).
740 *
741 * @param pDevExt The device extension to delete.
742 */
743void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
744{
745 PSUPDRVOBJ pObj;
746 PSUPDRVUSAGE pUsage;
747
748 /*
749 * Kill mutexes and spinlocks.
750 */
751#ifdef SUPDRV_USE_MUTEX_FOR_GIP
752 RTSemMutexDestroy(pDevExt->mtxGip);
753 pDevExt->mtxGip = NIL_RTSEMMUTEX;
754 RTSemMutexDestroy(pDevExt->mtxTscDelta);
755 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
756#else
757 RTSemFastMutexDestroy(pDevExt->mtxGip);
758 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
759 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
760 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
761#endif
762#ifdef SUPDRV_USE_MUTEX_FOR_LDR
763 RTSemMutexDestroy(pDevExt->mtxLdr);
764 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
765#else
766 RTSemFastMutexDestroy(pDevExt->mtxLdr);
767 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
768#endif
769 RTSpinlockDestroy(pDevExt->Spinlock);
770 pDevExt->Spinlock = NIL_RTSPINLOCK;
771 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
772 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
773 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
774 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
775
776 /*
777 * Free lists.
778 */
779 /* objects. */
780 pObj = pDevExt->pObjs;
781 Assert(!pObj); /* (can trigger on forced unloads) */
782 pDevExt->pObjs = NULL;
783 while (pObj)
784 {
785 void *pvFree = pObj;
786 pObj = pObj->pNext;
787 RTMemFree(pvFree);
788 }
789
790 /* usage records. */
791 pUsage = pDevExt->pUsageFree;
792 pDevExt->pUsageFree = NULL;
793 while (pUsage)
794 {
795 void *pvFree = pUsage;
796 pUsage = pUsage->pNext;
797 RTMemFree(pvFree);
798 }
799
800 /* kill the GIP. */
801 supdrvGipDestroy(pDevExt);
802 RTSpinlockDestroy(pDevExt->hGipSpinlock);
803 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
804
805 supdrvTracerTerm(pDevExt);
806
807#ifdef SUPDRV_WITH_RELEASE_LOGGER
808 /* destroy the loggers. */
809 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
810 RTLogDestroy(RTLogSetDefaultInstance(NULL));
811#endif
812}
813
814
815/**
816 * Create session.
817 *
818 * @returns IPRT status code.
819 * @param pDevExt Device extension.
820 * @param fUser Flag indicating whether this is a user or kernel
821 * session.
822 * @param fUnrestricted Unrestricted access (system) or restricted access
823 * (user)?
824 * @param ppSession Where to store the pointer to the session data.
825 */
826int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, bool fUnrestricted, PSUPDRVSESSION *ppSession)
827{
828 int rc;
829 PSUPDRVSESSION pSession;
830
831 if (!SUP_IS_DEVEXT_VALID(pDevExt))
832 return VERR_INVALID_PARAMETER;
833
834 /*
835 * Allocate memory for the session data.
836 */
837 pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(pDevExt->cbSession);
838 if (pSession)
839 {
840 /* Initialize session data. */
841 rc = RTSpinlockCreate(&pSession->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "SUPDrvSession");
842 if (!rc)
843 {
844 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
845 RTHANDLETABLE_FLAGS_LOCKED_IRQ_SAFE | RTHANDLETABLE_FLAGS_CONTEXT,
846 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
847 if (RT_SUCCESS(rc))
848 {
849 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
850 pSession->pDevExt = pDevExt;
851 pSession->u32Cookie = BIRD_INV;
852 pSession->fUnrestricted = fUnrestricted;
853 /*pSession->fInHashTable = false; */
854 pSession->cRefs = 1;
855 /*pSession->pCommonNextHash = NULL;
856 pSession->ppOsSessionPtr = NULL; */
857 if (fUser)
858 {
859 pSession->Process = RTProcSelf();
860 pSession->R0Process = RTR0ProcHandleSelf();
861 }
862 else
863 {
864 pSession->Process = NIL_RTPROCESS;
865 pSession->R0Process = NIL_RTR0PROCESS;
866 }
867 /*pSession->pLdrUsage = NULL;
868 pSession->pVM = NULL;
869 pSession->pUsage = NULL;
870 pSession->pGip = NULL;
871 pSession->fGipReferenced = false;
872 pSession->Bundle.cUsed = 0; */
873 pSession->Uid = NIL_RTUID;
874 pSession->Gid = NIL_RTGID;
875 /*pSession->uTracerData = 0;*/
876 pSession->hTracerCaller = NIL_RTNATIVETHREAD;
877 RTListInit(&pSession->TpProviders);
878 /*pSession->cTpProviders = 0;*/
879 /*pSession->cTpProbesFiring = 0;*/
880 RTListInit(&pSession->TpUmods);
881 /*RT_ZERO(pSession->apTpLookupTable);*/
882
883 VBOXDRV_SESSION_CREATE(pSession, fUser);
884 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
885 return VINF_SUCCESS;
886 }
887
888 RTSpinlockDestroy(pSession->Spinlock);
889 }
890 RTMemFree(pSession);
891 *ppSession = NULL;
892 Log(("Failed to create spinlock, rc=%d!\n", rc));
893 }
894 else
895 rc = VERR_NO_MEMORY;
896
897 return rc;
898}
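
/* Illustrative only: a hypothetical caller creating a restricted user session and
 * dropping it again through the reference counting API further down in this file.
 * @code
 *     PSUPDRVSESSION pSession;
 *     int rc = supdrvCreateSession(pDevExt, true, false, &pSession); // fUser=true, fUnrestricted=false
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... hand the session to the client, stash it in OS private data, etc. ...
 *         supdrvSessionRelease(pSession); // drops the initial reference; the session is destroyed at zero
 *     }
 * @endcode
 */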
899
900
901/**
902 * Cleans up the session in the context of the process to which it belongs; the
903 * caller will free the session and the session spinlock.
904 *
905 * This should normally occur when the session is closed or as the process
906 * exits. Careful reference counting in the OS specific code makes sure that
907 * there cannot be any races between process/handle cleanup callbacks and
908 * threads doing I/O control calls.
909 *
910 * @param pDevExt The device extension.
911 * @param pSession Session data.
912 */
913static void supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
914{
915 int rc;
916 PSUPDRVBUNDLE pBundle;
917 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
918
919 Assert(!pSession->fInHashTable);
920 Assert(!pSession->ppOsSessionPtr);
921 AssertLogRelMsg(pSession->R0Process == RTR0ProcHandleSelf() || pSession->R0Process == NIL_RTR0PROCESS,
922 ("R0Process=%p cur=%p; curpid=%u\n",
923 pSession->R0Process, RTR0ProcHandleSelf(), RTProcSelf()));
924
925 /*
926 * Remove logger instances related to this session.
927 */
928 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
929
930 /*
931 * Destroy the handle table.
932 */
933 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
934 AssertRC(rc);
935 pSession->hHandleTable = NIL_RTHANDLETABLE;
936
937 /*
938 * Release object references made in this session.
939 * In theory there should be no one racing us in this session.
940 */
941 Log2(("release objects - start\n"));
942 if (pSession->pUsage)
943 {
944 PSUPDRVUSAGE pUsage;
945 RTSpinlockAcquire(pDevExt->Spinlock);
946
947 while ((pUsage = pSession->pUsage) != NULL)
948 {
949 PSUPDRVOBJ pObj = pUsage->pObj;
950 pSession->pUsage = pUsage->pNext;
951
952 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
953 if (pUsage->cUsage < pObj->cUsage)
954 {
955 pObj->cUsage -= pUsage->cUsage;
956 RTSpinlockRelease(pDevExt->Spinlock);
957 }
958 else
959 {
960 /* Destroy the object and free the record. */
961 if (pDevExt->pObjs == pObj)
962 pDevExt->pObjs = pObj->pNext;
963 else
964 {
965 PSUPDRVOBJ pObjPrev;
966 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
967 if (pObjPrev->pNext == pObj)
968 {
969 pObjPrev->pNext = pObj->pNext;
970 break;
971 }
972 Assert(pObjPrev);
973 }
974 RTSpinlockRelease(pDevExt->Spinlock);
975
976 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
977 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
978 if (pObj->pfnDestructor)
979 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
980 RTMemFree(pObj);
981 }
982
983 /* free it and continue. */
984 RTMemFree(pUsage);
985
986 RTSpinlockAcquire(pDevExt->Spinlock);
987 }
988
989 RTSpinlockRelease(pDevExt->Spinlock);
990 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
991 }
992 Log2(("release objects - done\n"));
993
994 /*
995 * Make sure the associated VM pointers are NULL.
996 */
997 if (pSession->pSessionGVM || pSession->pSessionVM || pSession->pFastIoCtrlVM)
998 {
999 SUPR0Printf("supdrvCleanupSession: VM not disassociated! pSessionGVM=%p pSessionVM=%p pFastIoCtrlVM=%p\n",
1000 pSession->pSessionGVM, pSession->pSessionVM, pSession->pFastIoCtrlVM);
1001 pSession->pSessionGVM = NULL;
1002 pSession->pSessionVM = NULL;
1003 pSession->pFastIoCtrlVM = NULL;
1004 }
1005
1006 /*
1007 * Do tracer cleanups related to this session.
1008 */
1009 Log2(("release tracer stuff - start\n"));
1010 supdrvTracerCleanupSession(pDevExt, pSession);
1011 Log2(("release tracer stuff - end\n"));
1012
1013 /*
1014 * Release memory allocated in the session.
1015 *
1016 * We do not serialize this as we assume that the application will
1017 * not allocate memory while closing the file handle object.
1018 */
1019 Log2(("freeing memory:\n"));
1020 pBundle = &pSession->Bundle;
1021 while (pBundle)
1022 {
1023 PSUPDRVBUNDLE pToFree;
1024 unsigned i;
1025
1026 /*
1027 * Check and unlock all entries in the bundle.
1028 */
1029 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
1030 {
1031 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
1032 {
1033 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
1034 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
1035 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
1036 {
1037 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
1038 AssertRC(rc); /** @todo figure out how to handle this. */
1039 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
1040 }
1041 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
1042 AssertRC(rc); /** @todo figure out how to handle this. */
1043 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
1044 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
1045 }
1046 }
1047
1048 /*
1049 * Advance and free previous bundle.
1050 */
1051 pToFree = pBundle;
1052 pBundle = pBundle->pNext;
1053
1054 pToFree->pNext = NULL;
1055 pToFree->cUsed = 0;
1056 if (pToFree != &pSession->Bundle)
1057 RTMemFree(pToFree);
1058 }
1059 Log2(("freeing memory - done\n"));
1060
1061 /*
1062 * Deregister component factories.
1063 */
1064 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
1065 Log2(("deregistering component factories:\n"));
1066 if (pDevExt->pComponentFactoryHead)
1067 {
1068 PSUPDRVFACTORYREG pPrev = NULL;
1069 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
1070 while (pCur)
1071 {
1072 if (pCur->pSession == pSession)
1073 {
1074 /* unlink it */
1075 PSUPDRVFACTORYREG pNext = pCur->pNext;
1076 if (pPrev)
1077 pPrev->pNext = pNext;
1078 else
1079 pDevExt->pComponentFactoryHead = pNext;
1080
1081 /* free it */
1082 pCur->pNext = NULL;
1083 pCur->pSession = NULL;
1084 pCur->pFactory = NULL;
1085 RTMemFree(pCur);
1086
1087 /* next */
1088 pCur = pNext;
1089 }
1090 else
1091 {
1092 /* next */
1093 pPrev = pCur;
1094 pCur = pCur->pNext;
1095 }
1096 }
1097 }
1098 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
1099 Log2(("deregistering component factories - done\n"));
1100
1101 /*
1102 * Loaded images need to be dereferenced and possibly freed up.
1103 */
1104 supdrvLdrLock(pDevExt);
1105 Log2(("freeing images:\n"));
1106 if (pSession->pLdrUsage)
1107 {
1108 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
1109 pSession->pLdrUsage = NULL;
1110 while (pUsage)
1111 {
1112 void *pvFree = pUsage;
1113 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
1114 uint32_t cUsage = pUsage->cRing0Usage + pUsage->cRing3Usage;
1115 if (pImage->cImgUsage > cUsage)
1116 supdrvLdrSubtractUsage(pDevExt, pImage, cUsage);
1117 else
1118 supdrvLdrFree(pDevExt, pImage);
1119 pUsage->pImage = NULL;
1120 pUsage = pUsage->pNext;
1121 RTMemFree(pvFree);
1122 }
1123 }
1124 supdrvLdrUnlock(pDevExt);
1125 Log2(("freeing images - done\n"));
1126
1127 /*
1128 * Unmap the GIP.
1129 */
1130 Log2(("umapping GIP:\n"));
1131 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
1132 {
1133 SUPR0GipUnmap(pSession);
1134 pSession->fGipReferenced = 0;
1135 }
1136 Log2(("umapping GIP - done\n"));
1137}
1138
1139
1140/**
1141 * Common code for freeing a session when the reference count reaches zero.
1142 *
1143 * @param pDevExt Device extension.
1144 * @param pSession Session data.
1145 * This data will be freed by this routine.
1146 */
1147static void supdrvDestroySession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1148{
1149 VBOXDRV_SESSION_CLOSE(pSession);
1150
1151 /*
1152 * Cleanup the session first.
1153 */
1154 supdrvCleanupSession(pDevExt, pSession);
1155 supdrvOSCleanupSession(pDevExt, pSession);
1156
1157 /*
1158 * Free the rest of the session stuff.
1159 */
1160 RTSpinlockDestroy(pSession->Spinlock);
1161 pSession->Spinlock = NIL_RTSPINLOCK;
1162 pSession->pDevExt = NULL;
1163 RTMemFree(pSession);
1164 LogFlow(("supdrvDestroySession: returns\n"));
1165}
1166
1167
1168/**
1169 * Inserts the session into the global hash table.
1170 *
1171 * @retval VINF_SUCCESS on success.
1172 * @retval VERR_WRONG_ORDER if the session was already inserted (asserted).
1173 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1174 * session (asserted).
1175 * @retval VERR_DUPLICATE if there is already a session for that pid.
1176 *
1177 * @param pDevExt The device extension.
1178 * @param pSession The session.
1179 * @param ppOsSessionPtr Pointer to the OS session pointer, if any is
1180 * available and used. This will be set to point to the
1181 * session while under the protection of the session
1182 * hash table spinlock. It will also be kept in
1183 * PSUPDRVSESSION::ppOsSessionPtr for lookup and
1184 * cleanup use.
1185 * @param pvUser Argument for supdrvOSSessionHashTabInserted.
1186 */
1187int VBOXCALL supdrvSessionHashTabInsert(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVSESSION *ppOsSessionPtr,
1188 void *pvUser)
1189{
1190 PSUPDRVSESSION pCur;
1191 unsigned iHash;
1192
1193 /*
1194 * Validate input.
1195 */
1196 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1197 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1198
1199 /*
1200 * Calculate the hash table index and acquire the spinlock.
1201 */
1202 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1203
1204 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1205
1206 /*
1207 * If there is a collision, we need to carefully check whether we got a
1208 * duplicate. There can only be one open session per process.
1209 */
1210 pCur = pDevExt->apSessionHashTab[iHash];
1211 if (pCur)
1212 {
1213 while (pCur && pCur->Process != pSession->Process)
1214 pCur = pCur->pCommonNextHash;
1215
1216 if (pCur)
1217 {
1218 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1219 if (pCur == pSession)
1220 {
1221 Assert(pSession->fInHashTable);
1222 AssertFailed();
1223 return VERR_WRONG_ORDER;
1224 }
1225 Assert(!pSession->fInHashTable);
1226 if (pCur->R0Process == pSession->R0Process)
1227 return VERR_RESOURCE_IN_USE;
1228 return VERR_DUPLICATE;
1229 }
1230 }
1231 Assert(!pSession->fInHashTable);
1232 Assert(!pSession->ppOsSessionPtr);
1233
1234 /*
1235 * Insert it, doing a callout to the OS specific code in case it has
1236 * anything it wishes to do while we're holding the spinlock.
1237 */
1238 pSession->pCommonNextHash = pDevExt->apSessionHashTab[iHash];
1239 pDevExt->apSessionHashTab[iHash] = pSession;
1240 pSession->fInHashTable = true;
1241 ASMAtomicIncS32(&pDevExt->cSessions);
1242
1243 pSession->ppOsSessionPtr = ppOsSessionPtr;
1244 if (ppOsSessionPtr)
1245 ASMAtomicWritePtr(ppOsSessionPtr, pSession);
1246
1247 supdrvOSSessionHashTabInserted(pDevExt, pSession, pvUser);
1248
1249 /*
1250 * Retain a reference for the pointer in the session table.
1251 */
1252 ASMAtomicIncU32(&pSession->cRefs);
1253
1254 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1255 return VINF_SUCCESS;
1256}
1257
1258
1259/**
1260 * Removes the session from the global hash table.
1261 *
1262 * @retval VINF_SUCCESS on success.
1263 * @retval VERR_NOT_FOUND if the session was already removed (asserted).
1264 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1265 * session (asserted).
1266 *
1267 * @param pDevExt The device extension.
1268 * @param pSession The session. The caller is expected to have a reference
1269 * to this so it won't croak on us when we release the hash
1270 * table reference.
1271 * @param pvUser OS specific context value for the
1272 * supdrvOSSessionHashTabRemoved callback.
1273 */
1274int VBOXCALL supdrvSessionHashTabRemove(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, void *pvUser)
1275{
1276 PSUPDRVSESSION pCur;
1277 unsigned iHash;
1278 int32_t cRefs;
1279
1280 /*
1281 * Validate input.
1282 */
1283 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1284 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1285
1286 /*
1287 * Calculate the hash table index and acquire the spinlock.
1288 */
1289 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1290
1291 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1292
1293 /*
1294 * Unlink it.
1295 */
1296 pCur = pDevExt->apSessionHashTab[iHash];
1297 if (pCur == pSession)
1298 pDevExt->apSessionHashTab[iHash] = pSession->pCommonNextHash;
1299 else
1300 {
1301 PSUPDRVSESSION pPrev = pCur;
1302 while (pCur && pCur != pSession)
1303 {
1304 pPrev = pCur;
1305 pCur = pCur->pCommonNextHash;
1306 }
1307 if (pCur)
1308 pPrev->pCommonNextHash = pCur->pCommonNextHash;
1309 else
1310 {
1311 Assert(!pSession->fInHashTable);
1312 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1313 return VERR_NOT_FOUND;
1314 }
1315 }
1316
1317 pSession->pCommonNextHash = NULL;
1318 pSession->fInHashTable = false;
1319
1320 ASMAtomicDecS32(&pDevExt->cSessions);
1321
1322 /*
1323 * Clear OS specific session pointer if available and do the OS callback.
1324 */
1325 if (pSession->ppOsSessionPtr)
1326 {
1327 ASMAtomicCmpXchgPtr(pSession->ppOsSessionPtr, NULL, pSession);
1328 pSession->ppOsSessionPtr = NULL;
1329 }
1330
1331 supdrvOSSessionHashTabRemoved(pDevExt, pSession, pvUser);
1332
1333 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1334
1335 /*
1336 * Drop the reference the hash table had to the session. This shouldn't
1337 * be the last reference!
1338 */
1339 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1340 Assert(cRefs > 0 && cRefs < _1M);
1341 if (cRefs == 0)
1342 supdrvDestroySession(pDevExt, pSession);
1343
1344 return VINF_SUCCESS;
1345}
1346
1347
1348/**
1349 * Looks up the session for the current process in the global hash table or in
1350 * OS specific pointer.
1351 *
1352 * @returns Pointer to the session with a reference that the caller must
1353 * release. If no valid session was found, NULL is returned.
1354 *
1355 * @param pDevExt The device extension.
1356 * @param Process The process ID.
1357 * @param R0Process The ring-0 process handle.
1358 * @param ppOsSessionPtr The OS session pointer if available. If not NULL,
1359 * this is used instead of the hash table. For
1360 * additional safety it must then be equal to the
1361 * SUPDRVSESSION::ppOsSessionPtr member.
1362 * This can be NULL even if the OS has a session
1363 * pointer.
1364 */
1365PSUPDRVSESSION VBOXCALL supdrvSessionHashTabLookup(PSUPDRVDEVEXT pDevExt, RTPROCESS Process, RTR0PROCESS R0Process,
1366 PSUPDRVSESSION *ppOsSessionPtr)
1367{
1368 PSUPDRVSESSION pCur;
1369 unsigned iHash;
1370
1371 /*
1372 * Validate input.
1373 */
1374 AssertReturn(R0Process != NIL_RTR0PROCESS, NULL);
1375
1376 /*
1377 * Calculate the hash table index and acquire the spinlock.
1378 */
1379 iHash = SUPDRV_SESSION_HASH(Process);
1380
1381 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1382
1383 /*
1384 * If an OS session pointer is provided, always use it.
1385 */
1386 if (ppOsSessionPtr)
1387 {
1388 pCur = *ppOsSessionPtr;
1389 if ( pCur
1390 && ( pCur->ppOsSessionPtr != ppOsSessionPtr
1391 || pCur->Process != Process
1392 || pCur->R0Process != R0Process) )
1393 pCur = NULL;
1394 }
1395 else
1396 {
1397 /*
1398 * Otherwise, do the hash table lookup.
1399 */
1400 pCur = pDevExt->apSessionHashTab[iHash];
1401 while ( pCur
1402 && ( pCur->Process != Process
1403 || pCur->R0Process != R0Process) )
1404 pCur = pCur->pCommonNextHash;
1405 }
1406
1407 /*
1408 * Retain the session.
1409 */
1410 if (pCur)
1411 {
1412 uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
1413 NOREF(cRefs);
1414 Assert(cRefs > 1 && cRefs < _1M);
1415 }
1416
1417 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1418
1419 return pCur;
1420}
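
/* Illustrative only: the typical lookup-and-release pattern for the function above;
 * the caller owns the reference returned and must drop it when done.
 * @code
 *     PSUPDRVSESSION pSession = supdrvSessionHashTabLookup(pDevExt, RTProcSelf(),
 *                                                          RTR0ProcHandleSelf(), NULL);
 *     if (pSession)
 *     {
 *         // ... use the session ...
 *         supdrvSessionRelease(pSession);
 *     }
 * @endcode
 */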
1421
1422
1423/**
1424 * Retain a session to make sure it doesn't go away while it is in use.
1425 *
1426 * @returns New reference count on success, UINT32_MAX on failure.
1427 * @param pSession Session data.
1428 */
1429uint32_t VBOXCALL supdrvSessionRetain(PSUPDRVSESSION pSession)
1430{
1431 uint32_t cRefs;
1432 AssertPtrReturn(pSession, UINT32_MAX);
1433 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1434
1435 cRefs = ASMAtomicIncU32(&pSession->cRefs);
1436 AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1437 return cRefs;
1438}
1439
1440
1441/**
1442 * Releases a given session.
1443 *
1444 * @returns New reference count on success (0 if closed), UINT32_MAX on failure.
1445 * @param pSession Session data.
1446 */
1447uint32_t VBOXCALL supdrvSessionRelease(PSUPDRVSESSION pSession)
1448{
1449 uint32_t cRefs;
1450 AssertPtrReturn(pSession, UINT32_MAX);
1451 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1452
1453 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1454 AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1455 if (cRefs == 0)
1456 supdrvDestroySession(pSession->pDevExt, pSession);
1457 return cRefs;
1458}
1459
1460
1461/**
1462 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1463 *
1464 * @returns IPRT status code, see SUPR0ObjAddRef.
1465 * @param hHandleTable The handle table handle. Ignored.
1466 * @param pvObj The object pointer.
1467 * @param pvCtx Context, the handle type. Ignored.
1468 * @param pvUser Session pointer.
1469 */
1470static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
1471{
1472 NOREF(pvCtx);
1473 NOREF(hHandleTable);
1474 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
1475}
1476
1477
1478/**
1479 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1480 *
1481 * @param hHandleTable The handle table handle. Ignored.
1482 * @param h The handle value. Ignored.
1483 * @param pvObj The object pointer.
1484 * @param pvCtx Context, the handle type. Ignored.
1485 * @param pvUser Session pointer.
1486 */
1487static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
1488{
1489 NOREF(pvCtx);
1490 NOREF(h);
1491 NOREF(hHandleTable);
1492 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
1493}
1494
1495
1496/**
1497 * Fast path I/O Control worker.
1498 *
1499 * @returns VBox status code that should be passed down to ring-3 unchanged.
1500 * @param uOperation SUP_VMMR0_DO_XXX (not the I/O control number!).
1501 * @param idCpu VMCPU id.
1502 * @param pDevExt Device extension.
1503 * @param pSession Session data.
1504 */
1505int VBOXCALL supdrvIOCtlFast(uintptr_t uOperation, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1506{
1507 /*
1508 * Validate input and check that the VM has a session.
1509 */
1510 if (RT_LIKELY(RT_VALID_PTR(pSession)))
1511 {
1512 PVM pVM = pSession->pSessionVM;
1513 PGVM pGVM = pSession->pSessionGVM;
1514 if (RT_LIKELY( pGVM != NULL
1515 && pVM != NULL
1516 && pVM == pSession->pFastIoCtrlVM))
1517 {
1518 if (RT_LIKELY(pDevExt->pfnVMMR0EntryFast))
1519 {
1520 /*
1521 * Make the call.
1522 */
1523 pDevExt->pfnVMMR0EntryFast(pGVM, pVM, idCpu, uOperation);
1524 return VINF_SUCCESS;
1525 }
1526
1527 SUPR0Printf("supdrvIOCtlFast: pfnVMMR0EntryFast is NULL\n");
1528 }
1529 else
1530 SUPR0Printf("supdrvIOCtlFast: Misconfig session: pGVM=%p pVM=%p pFastIoCtrlVM=%p\n",
1531 pGVM, pVM, pSession->pFastIoCtrlVM);
1532 }
1533 else
1534 SUPR0Printf("supdrvIOCtlFast: Bad session pointer %p\n", pSession);
1535 return VERR_INTERNAL_ERROR;
1536}
1537
1538
1539/**
1540 * Helper for supdrvIOCtl used to validate module names passed to SUP_IOCTL_LDR_OPEN.
1541 *
1542 * Checks whether pszName contains any of the invalid characters listed below. We
1543 * would use strpbrk here if it were on the RedHat kABI white list, see
1544 * http://www.kerneldrivers.org/RHEL5.
1545 *
1546 * @returns true if fine, false if not.
1547 * @param pszName The module name to check.
1548 */
1549static bool supdrvIsLdrModuleNameValid(const char *pszName)
1550{
1551 int chCur;
1552 while ((chCur = *pszName++) != '\0')
1553 {
1554 static const char s_szInvalidChars[] = ";:()[]{}/\\|&*%#@!~`\"'";
1555 unsigned offInv = RT_ELEMENTS(s_szInvalidChars);
1556 while (offInv-- > 0)
1557 if (s_szInvalidChars[offInv] == chCur)
1558 return false;
1559 }
1560 return true;
1561}
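
/* As a quick illustration (not from the original sources): typical module names
 * such as "VMMR0" or "VBoxDDR0" pass this check, while names like "foo;bar" or
 * "evil/../mod" are rejected because ';', '/' and '\' are on the invalid
 * character list above. */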
1562
1563
1564
1565/**
1566 * I/O Control inner worker (tracing reasons).
1567 *
1568 * @returns IPRT status code.
1569 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1570 *
1571 * @param uIOCtl Function number.
1572 * @param pDevExt Device extension.
1573 * @param pSession Session data.
1574 * @param pReqHdr The request header.
1575 */
1576static int supdrvIOCtlInnerUnrestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1577{
1578 /*
1579 * Validation macros
1580 */
1581#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1582 do { \
1583 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1584 { \
1585 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1586 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1587 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1588 } \
1589 } while (0)
1590
1591#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1592
1593#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1594 do { \
1595 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1596 { \
1597 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1598 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
1599 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1600 } \
1601 } while (0)
1602
1603#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1604 do { \
1605 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1606 { \
1607 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1608 (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1609 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1610 } \
1611 } while (0)
1612
1613#define REQ_CHECK_EXPR(Name, expr) \
1614 do { \
1615 if (RT_UNLIKELY(!(expr))) \
1616 { \
1617 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1618 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1619 } \
1620 } while (0)
1621
1622#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1623 do { \
1624 if (RT_UNLIKELY(!(expr))) \
1625 { \
1626 OSDBGPRINT( fmt ); \
1627 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1628 } \
1629 } while (0)
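
    /*
     * For reference, a use like REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK) below
     * boils down to (sketch, abbreviated):
     *
     *   if (RT_UNLIKELY( pReqHdr->cbIn != SUP_IOCTL_PAGE_UNLOCK_SIZE_IN
     *                 || pReqHdr->cbOut != SUP_IOCTL_PAGE_UNLOCK_SIZE_OUT))
     *   {
     *       OSDBGPRINT(("SUP_IOCTL_PAGE_UNLOCK: Invalid input/output sizes. ..."));
     *       return pReqHdr->rc = VERR_INVALID_PARAMETER;
     *   }
     *
     * i.e. every failed check logs the problem, stores the status in the
     * request header and fails the whole ioctl with VERR_INVALID_PARAMETER.
     */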
1630
1631 /*
1632 * The switch.
1633 */
1634 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1635 {
1636 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1637 {
1638 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1639 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1640 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1641 {
1642 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1643 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1644 return 0;
1645 }
1646
1647#if 0
1648 /*
1649 * Call out to the OS specific code and let it do permission checks on the
1650 * client process.
1651 */
1652 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1653 {
1654 pReq->u.Out.u32Cookie = 0xffffffff;
1655 pReq->u.Out.u32SessionCookie = 0xffffffff;
1656 pReq->u.Out.u32SessionVersion = 0xffffffff;
1657 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1658 pReq->u.Out.pSession = NULL;
1659 pReq->u.Out.cFunctions = 0;
1660 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1661 return 0;
1662 }
1663#endif
1664
1665 /*
1666 * Match the version.
1667 * The current logic is very simple, match the major interface version.
1668 */
1669 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1670 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1671 {
1672 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1673 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1674 pReq->u.Out.u32Cookie = 0xffffffff;
1675 pReq->u.Out.u32SessionCookie = 0xffffffff;
1676 pReq->u.Out.u32SessionVersion = 0xffffffff;
1677 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1678 pReq->u.Out.pSession = NULL;
1679 pReq->u.Out.cFunctions = 0;
1680 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1681 return 0;
1682 }
1683
1684 /*
1685 * Fill in return data and be gone.
1686 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1687 * u32SessionVersion <= u32ReqVersion!
1688 */
1689 /** @todo Somehow validate the client and negotiate a secure cookie... */
1690 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1691 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1692 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1693 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1694 pReq->u.Out.pSession = pSession;
1695 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1696 pReq->Hdr.rc = VINF_SUCCESS;
1697 return 0;
1698 }
1699
1700 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1701 {
1702 /* validate */
1703 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1704 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1705
1706 /* execute */
1707 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1708 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1709 pReq->Hdr.rc = VINF_SUCCESS;
1710 return 0;
1711 }
1712
1713 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1714 {
1715 /* validate */
1716 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1717 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1718 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1719 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1720 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1721
1722 /* execute */
1723 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1724 if (RT_FAILURE(pReq->Hdr.rc))
1725 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1726 return 0;
1727 }
1728
1729 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1730 {
1731 /* validate */
1732 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1733 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1734
1735 /* execute */
1736 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1737 return 0;
1738 }
1739
1740 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1741 {
1742 /* validate */
1743 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1744 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1745
1746 /* execute */
1747 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1748 if (RT_FAILURE(pReq->Hdr.rc))
1749 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1750 return 0;
1751 }
1752
1753 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1754 {
1755 /* validate */
1756 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1757 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1758
1759 /* execute */
1760 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1761 return 0;
1762 }
1763
1764 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1765 {
1766 /* validate */
1767 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1768 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1769 if ( pReq->u.In.cbImageWithEverything != 0
1770 || pReq->u.In.cbImageBits != 0)
1771 {
1772 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything > 0);
1773 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything < 16*_1M);
1774 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1775 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithEverything);
1776 }
1777 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1778 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
1779 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, supdrvIsLdrModuleNameValid(pReq->u.In.szName));
1780 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szFilename, sizeof(pReq->u.In.szFilename)));
1781
1782 /* execute */
1783 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1784 return 0;
1785 }
1786
1787 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1788 {
1789 /* validate */
1790 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1791 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= SUP_IOCTL_LDR_LOAD_SIZE_IN(32));
1792 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithEverything), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1793 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1794 || ( pReq->u.In.cSymbols <= 16384
1795 && pReq->u.In.offSymbols >= pReq->u.In.cbImageBits
1796 && pReq->u.In.offSymbols < pReq->u.In.cbImageWithEverything
1797 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithEverything),
1798 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSymbols,
1799 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithEverything));
1800 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1801 || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithEverything
1802 && pReq->u.In.offStrTab >= pReq->u.In.cbImageBits
1803 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything
1804 && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything),
1805 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offStrTab,
1806 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithEverything));
1807 REQ_CHECK_EXPR_FMT( pReq->u.In.cSegments >= 1
1808 && pReq->u.In.cSegments <= 128
1809 && pReq->u.In.cSegments <= (pReq->u.In.cbImageBits + PAGE_SIZE - 1) / PAGE_SIZE
1810 && pReq->u.In.offSegments >= pReq->u.In.cbImageBits
1811 && pReq->u.In.offSegments < pReq->u.In.cbImageWithEverything
1812 && pReq->u.In.offSegments + pReq->u.In.cSegments * sizeof(SUPLDRSEG) <= pReq->u.In.cbImageWithEverything,
1813 ("SUP_IOCTL_LDR_LOAD: offSegments=%#lx cSegments=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSegments,
1814 (long)pReq->u.In.cSegments, (long)pReq->u.In.cbImageWithEverything));
1815
1816 if (pReq->u.In.cSymbols)
1817 {
1818 uint32_t i;
1819 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.abImage[pReq->u.In.offSymbols];
1820 for (i = 0; i < pReq->u.In.cSymbols; i++)
1821 {
1822 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithEverything,
1823 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithEverything));
1824 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1825 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1826 REQ_CHECK_EXPR_FMT(RTStrEnd((char const *)&pReq->u.In.abImage[pReq->u.In.offStrTab + paSyms[i].offName],
1827 pReq->u.In.cbStrTab - paSyms[i].offName),
1828 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1829 }
1830 }
1831 {
1832 uint32_t i;
1833 uint32_t offPrevEnd = 0;
1834 PSUPLDRSEG paSegs = (PSUPLDRSEG)&pReq->u.In.abImage[pReq->u.In.offSegments];
1835 for (i = 0; i < pReq->u.In.cSegments; i++)
1836 {
1837 REQ_CHECK_EXPR_FMT(paSegs[i].off < pReq->u.In.cbImageBits && !(paSegs[i].off & PAGE_OFFSET_MASK),
1838 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)pReq->u.In.cbImageBits));
1839 REQ_CHECK_EXPR_FMT(paSegs[i].cb <= pReq->u.In.cbImageBits,
1840 ("SUP_IOCTL_LDR_LOAD: seg #%ld: cb %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].cb, (long)pReq->u.In.cbImageBits));
1841 REQ_CHECK_EXPR_FMT(paSegs[i].off + paSegs[i].cb <= pReq->u.In.cbImageBits,
1842 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx = %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb, (long)(paSegs[i].off + paSegs[i].cb), (long)pReq->u.In.cbImageBits));
1843 REQ_CHECK_EXPR_FMT(paSegs[i].fProt != 0,
1844 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb));
1845 REQ_CHECK_EXPR_FMT(paSegs[i].fUnused == 0, ("SUP_IOCTL_LDR_LOAD: seg #%ld: fUnused=1\n", (long)i));
1846 REQ_CHECK_EXPR_FMT(offPrevEnd == paSegs[i].off,
1847 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx offPrevEnd %#lx\n", (long)i, (long)paSegs[i].off, (long)offPrevEnd));
1848 offPrevEnd = paSegs[i].off + paSegs[i].cb;
1849 }
1850 REQ_CHECK_EXPR_FMT(offPrevEnd == pReq->u.In.cbImageBits,
1851 ("SUP_IOCTL_LDR_LOAD: offPrevEnd %#lx cbImageBits %#lx\n", (long)offPrevEnd, (long)pReq->u.In.cbImageBits));
1852 }
1853 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fFlags & ~SUPLDRLOAD_F_VALID_MASK),
1854 ("SUP_IOCTL_LDR_LOAD: fFlags=%#x\n", (unsigned)pReq->u.In.fFlags));
1855
1856 /* execute */
1857 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1858 return 0;
1859 }
1860
1861 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1862 {
1863 /* validate */
1864 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1865 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1866
1867 /* execute */
1868 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1869 return 0;
1870 }
1871
1872 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOCK_DOWN):
1873 {
1874 /* validate */
1875 REQ_CHECK_SIZES(SUP_IOCTL_LDR_LOCK_DOWN);
1876
1877 /* execute */
1878 pReqHdr->rc = supdrvIOCtl_LdrLockDown(pDevExt);
1879 return 0;
1880 }
1881
1882 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1883 {
1884 /* validate */
1885 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1886 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1887 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, RTStrEnd(pReq->u.In.szSymbol, sizeof(pReq->u.In.szSymbol)));
1888
1889 /* execute */
1890 pReq->Hdr.rc = supdrvIOCtl_LdrQuerySymbol(pDevExt, pSession, pReq);
1891 return 0;
1892 }
1893
1894 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_NO_SIZE()):
1895 {
1896 /* validate */
1897 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1898 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1899 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1900
1901 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1902 {
1903 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1904
1905 /* execute */
1906 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1907 {
1908 if (pReq->u.In.pVMR0 == NULL)
1909 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1910 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1911 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1912 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1913 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1914 else
1915 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1916 }
1917 else
1918 pReq->Hdr.rc = VERR_WRONG_ORDER;
1919 }
1920 else
1921 {
1922 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1923 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1924 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1925 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1926 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1927
1928 /* execute */
1929 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1930 {
1931 if (pReq->u.In.pVMR0 == NULL)
1932 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1933 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1934 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1935 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1936 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1937 else
1938 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1939 }
1940 else
1941 pReq->Hdr.rc = VERR_WRONG_ORDER;
1942 }
1943
1944 if ( RT_FAILURE(pReq->Hdr.rc)
1945 && pReq->Hdr.rc != VERR_INTERRUPTED
1946 && pReq->Hdr.rc != VERR_TIMEOUT)
1947 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1948 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1949 else
1950 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1951 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1952 return 0;
1953 }
1954
1955 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_BIG):
1956 {
1957 /* validate */
1958 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1959 PSUPVMMR0REQHDR pVMMReq;
1960 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1961 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1962
1963 pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1964 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR)),
1965 ("SUP_IOCTL_CALL_VMMR0_BIG: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR))));
1966 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0_BIG, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1967 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0_BIG, SUP_IOCTL_CALL_VMMR0_BIG_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_BIG_SIZE_OUT(pVMMReq->cbReq));
1968
1969 /* execute */
1970 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1971 {
1972 if (pReq->u.In.pVMR0 == NULL)
1973 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1974 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1975 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1976 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1977 else
1978 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1979 }
1980 else
1981 pReq->Hdr.rc = VERR_WRONG_ORDER;
1982
1983 if ( RT_FAILURE(pReq->Hdr.rc)
1984 && pReq->Hdr.rc != VERR_INTERRUPTED
1985 && pReq->Hdr.rc != VERR_TIMEOUT)
1986 Log(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1987 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1988 else
1989 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1990 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1991 return 0;
1992 }
1993
1994 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1995 {
1996 /* validate */
1997 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1998 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1999
2000 /* execute */
2001 pReq->Hdr.rc = VINF_SUCCESS;
2002 pReq->u.Out.enmMode = SUPR0GetPagingMode();
2003 return 0;
2004 }
2005
2006 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
2007 {
2008 /* validate */
2009 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
2010 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
2011 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
2012
2013 /* execute */
2014 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
2015 if (RT_FAILURE(pReq->Hdr.rc))
2016 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2017 return 0;
2018 }
2019
2020 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
2021 {
2022 /* validate */
2023 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
2024 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
2025
2026 /* execute */
2027 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
2028 return 0;
2029 }
2030
2031 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
2032 {
2033 /* validate */
2034 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
2035 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
2036
2037 /* execute */
2038 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
2039 if (RT_SUCCESS(pReq->Hdr.rc))
2040 pReq->u.Out.pGipR0 = pDevExt->pGip;
2041 return 0;
2042 }
2043
2044 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
2045 {
2046 /* validate */
2047 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
2048 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
2049
2050 /* execute */
2051 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
2052 return 0;
2053 }
2054
2055 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
2056 {
2057 /* validate */
2058 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
2059 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
2060 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
2061 || ( RT_VALID_PTR(pReq->u.In.pVMR0)
2062 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
2063 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
2064
2065 /* execute */
2066 RTSpinlockAcquire(pDevExt->Spinlock);
2067 if (pSession->pSessionVM == pReq->u.In.pVMR0)
2068 {
2069 if (pSession->pFastIoCtrlVM == NULL)
2070 {
2071 pSession->pFastIoCtrlVM = pSession->pSessionVM;
2072 RTSpinlockRelease(pDevExt->Spinlock);
2073 pReq->Hdr.rc = VINF_SUCCESS;
2074 }
2075 else
2076 {
2077 RTSpinlockRelease(pDevExt->Spinlock);
2078 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pFastIoCtrlVM=%p! (pVMR0=%p)\n",
2079 pSession->pFastIoCtrlVM, pReq->u.In.pVMR0));
2080 pReq->Hdr.rc = VERR_ALREADY_EXISTS;
2081 }
2082 }
2083 else
2084 {
2085 RTSpinlockRelease(pDevExt->Spinlock);
2086 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pSessionVM=%p vs pVMR0=%p)\n",
2087 pSession->pSessionVM, pReq->u.In.pVMR0));
2088 pReq->Hdr.rc = pSession->pSessionVM ? VERR_ACCESS_DENIED : VERR_WRONG_ORDER;
2089 }
2090 return 0;
2091 }
2092
2093 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
2094 {
2095 /* validate */
2096 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
2097 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
2098 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
2099 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
2100 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
2101 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
2102 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
2103 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
2104 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
2105
2106 /* execute */
2107 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
2108 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
2109 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
2110 &pReq->u.Out.aPages[0]);
2111 if (RT_FAILURE(pReq->Hdr.rc))
2112 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2113 return 0;
2114 }
2115
2116 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
2117 {
2118 /* validate */
2119 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
2120 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
2121 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
2122 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
2123 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2124 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
2125
2126 /* execute */
2127 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
2128 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
2129 if (RT_FAILURE(pReq->Hdr.rc))
2130 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2131 return 0;
2132 }
2133
2134 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
2135 {
2136 /* validate */
2137 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
2138 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
2139 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
2140 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
2141 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
2142 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2143 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
2144
2145 /* execute */
2146 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
2147 return 0;
2148 }
2149
2150 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
2151 {
2152 /* validate */
2153 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
2154 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
2155
2156 /* execute */
2157 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
2158 return 0;
2159 }
2160
2161 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE_NO_SIZE()):
2162 {
2163 /* validate */
2164 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
2165 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2166 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2167
2168 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
2169 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
2170 else
2171 {
2172 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
2173 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
2174 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
2175 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
2176 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
2177 }
2178 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
2179
2180 /* execute */
2181 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
2182 return 0;
2183 }
2184
2185 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS_NO_SIZE()):
2186 {
2187 /* validate */
2188 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
2189 size_t cbStrTab;
2190 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
2191 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
2192 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
2193 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
2194 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
2195 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
2196 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
2197 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
2198 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
2199 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
2200 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
2201
2202 /* execute */
2203 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pReq);
2204 return 0;
2205 }
2206
2207 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP2):
2208 {
2209 /* validate */
2210 PSUPSEMOP2 pReq = (PSUPSEMOP2)pReqHdr;
2211 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP2, SUP_IOCTL_SEM_OP2_SIZE_IN, SUP_IOCTL_SEM_OP2_SIZE_OUT);
2212 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP2, pReq->u.In.uReserved == 0);
2213
2214 /* execute */
2215 switch (pReq->u.In.uType)
2216 {
2217 case SUP_SEM_TYPE_EVENT:
2218 {
2219 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2220 switch (pReq->u.In.uOp)
2221 {
2222 case SUPSEMOP2_WAIT_MS_REL:
2223 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.uArg.cRelMsTimeout);
2224 break;
2225 case SUPSEMOP2_WAIT_NS_ABS:
2226 pReq->Hdr.rc = SUPSemEventWaitNsAbsIntr(pSession, hEvent, pReq->u.In.uArg.uAbsNsTimeout);
2227 break;
2228 case SUPSEMOP2_WAIT_NS_REL:
2229 pReq->Hdr.rc = SUPSemEventWaitNsRelIntr(pSession, hEvent, pReq->u.In.uArg.cRelNsTimeout);
2230 break;
2231 case SUPSEMOP2_SIGNAL:
2232 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
2233 break;
2234 case SUPSEMOP2_CLOSE:
2235 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
2236 break;
2237 case SUPSEMOP2_RESET:
2238 default:
2239 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2240 break;
2241 }
2242 break;
2243 }
2244
2245 case SUP_SEM_TYPE_EVENT_MULTI:
2246 {
2247 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2248 switch (pReq->u.In.uOp)
2249 {
2250 case SUPSEMOP2_WAIT_MS_REL:
2251 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.uArg.cRelMsTimeout);
2252 break;
2253 case SUPSEMOP2_WAIT_NS_ABS:
2254 pReq->Hdr.rc = SUPSemEventMultiWaitNsAbsIntr(pSession, hEventMulti, pReq->u.In.uArg.uAbsNsTimeout);
2255 break;
2256 case SUPSEMOP2_WAIT_NS_REL:
2257 pReq->Hdr.rc = SUPSemEventMultiWaitNsRelIntr(pSession, hEventMulti, pReq->u.In.uArg.cRelNsTimeout);
2258 break;
2259 case SUPSEMOP2_SIGNAL:
2260 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
2261 break;
2262 case SUPSEMOP2_CLOSE:
2263 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
2264 break;
2265 case SUPSEMOP2_RESET:
2266 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
2267 break;
2268 default:
2269 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2270 break;
2271 }
2272 break;
2273 }
2274
2275 default:
2276 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2277 break;
2278 }
2279 return 0;
2280 }
2281
2282 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP3):
2283 {
2284 /* validate */
2285 PSUPSEMOP3 pReq = (PSUPSEMOP3)pReqHdr;
2286 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP3, SUP_IOCTL_SEM_OP3_SIZE_IN, SUP_IOCTL_SEM_OP3_SIZE_OUT);
2287 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, pReq->u.In.u32Reserved == 0 && pReq->u.In.u64Reserved == 0);
2288
2289 /* execute */
2290 switch (pReq->u.In.uType)
2291 {
2292 case SUP_SEM_TYPE_EVENT:
2293 {
2294 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2295 switch (pReq->u.In.uOp)
2296 {
2297 case SUPSEMOP3_CREATE:
2298 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2299 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
2300 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
2301 break;
2302 case SUPSEMOP3_GET_RESOLUTION:
2303 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2304 pReq->Hdr.rc = VINF_SUCCESS;
2305 pReq->Hdr.cbOut = sizeof(*pReq);
2306 pReq->u.Out.cNsResolution = SUPSemEventGetResolution(pSession);
2307 break;
2308 default:
2309 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2310 break;
2311 }
2312 break;
2313 }
2314
2315 case SUP_SEM_TYPE_EVENT_MULTI:
2316 {
2317 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2318 switch (pReq->u.In.uOp)
2319 {
2320 case SUPSEMOP3_CREATE:
2321 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2322 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
2323 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
2324 break;
2325 case SUPSEMOP3_GET_RESOLUTION:
2326 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2327 pReq->Hdr.rc = VINF_SUCCESS;
2328 pReq->u.Out.cNsResolution = SUPSemEventMultiGetResolution(pSession);
2329 break;
2330 default:
2331 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2332 break;
2333 }
2334 break;
2335 }
2336
2337 default:
2338 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2339 break;
2340 }
2341 return 0;
2342 }
2343
2344 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2345 {
2346 /* validate */
2347 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2348 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2349
2350 /* execute */
2351 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2352 if (RT_FAILURE(pReq->Hdr.rc))
2353 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2354 return 0;
2355 }
2356
2357 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_OPEN):
2358 {
2359 /* validate */
2360 PSUPTRACEROPEN pReq = (PSUPTRACEROPEN)pReqHdr;
2361 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_OPEN);
2362
2363 /* execute */
2364 pReq->Hdr.rc = supdrvIOCtl_TracerOpen(pDevExt, pSession, pReq->u.In.uCookie, pReq->u.In.uArg);
2365 return 0;
2366 }
2367
2368 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_CLOSE):
2369 {
2370 /* validate */
2371 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_CLOSE);
2372
2373 /* execute */
2374 pReqHdr->rc = supdrvIOCtl_TracerClose(pDevExt, pSession);
2375 return 0;
2376 }
2377
2378 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_IOCTL):
2379 {
2380 /* validate */
2381 PSUPTRACERIOCTL pReq = (PSUPTRACERIOCTL)pReqHdr;
2382 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_IOCTL);
2383
2384 /* execute */
2385 pReqHdr->rc = supdrvIOCtl_TracerIOCtl(pDevExt, pSession, pReq->u.In.uCmd, pReq->u.In.uArg, &pReq->u.Out.iRetVal);
2386 return 0;
2387 }
2388
2389 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_REG):
2390 {
2391 /* validate */
2392 PSUPTRACERUMODREG pReq = (PSUPTRACERUMODREG)pReqHdr;
2393 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_REG);
2394 if (!RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)))
2395 return VERR_INVALID_PARAMETER;
2396
2397 /* execute */
2398 pReqHdr->rc = supdrvIOCtl_TracerUmodRegister(pDevExt, pSession,
2399 pReq->u.In.R3PtrVtgHdr, pReq->u.In.uVtgHdrAddr,
2400 pReq->u.In.R3PtrStrTab, pReq->u.In.cbStrTab,
2401 pReq->u.In.szName, pReq->u.In.fFlags);
2402 return 0;
2403 }
2404
2405 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_DEREG):
2406 {
2407 /* validate */
2408 PSUPTRACERUMODDEREG pReq = (PSUPTRACERUMODDEREG)pReqHdr;
2409 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_DEREG);
2410
2411 /* execute */
2412 pReqHdr->rc = supdrvIOCtl_TracerUmodDeregister(pDevExt, pSession, pReq->u.In.pVtgHdr);
2413 return 0;
2414 }
2415
2416 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE):
2417 {
2418 /* validate */
2419 PSUPTRACERUMODFIREPROBE pReq = (PSUPTRACERUMODFIREPROBE)pReqHdr;
2420 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE);
2421
2422 supdrvIOCtl_TracerUmodProbeFire(pDevExt, pSession, &pReq->u.In);
2423 pReqHdr->rc = VINF_SUCCESS;
2424 return 0;
2425 }
2426
2427 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_MSR_PROBER):
2428 {
2429 /* validate */
2430 PSUPMSRPROBER pReq = (PSUPMSRPROBER)pReqHdr;
2431 REQ_CHECK_SIZES(SUP_IOCTL_MSR_PROBER);
2432 REQ_CHECK_EXPR(SUP_IOCTL_MSR_PROBER,
2433 pReq->u.In.enmOp > SUPMSRPROBEROP_INVALID && pReq->u.In.enmOp < SUPMSRPROBEROP_END);
2434
2435 pReqHdr->rc = supdrvIOCtl_MsrProber(pDevExt, pReq);
2436 return 0;
2437 }
2438
2439 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_RESUME_SUSPENDED_KBDS):
2440 {
2441 /* validate */
2442 REQ_CHECK_SIZES(SUP_IOCTL_RESUME_SUSPENDED_KBDS);
2443
2444 pReqHdr->rc = supdrvIOCtl_ResumeSuspendedKbds();
2445 return 0;
2446 }
2447
2448 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_DELTA_MEASURE):
2449 {
2450 /* validate */
2451 PSUPTSCDELTAMEASURE pReq = (PSUPTSCDELTAMEASURE)pReqHdr;
2452 REQ_CHECK_SIZES(SUP_IOCTL_TSC_DELTA_MEASURE);
2453
2454 pReqHdr->rc = supdrvIOCtl_TscDeltaMeasure(pDevExt, pSession, pReq);
2455 return 0;
2456 }
2457
2458 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_READ):
2459 {
2460 /* validate */
2461 PSUPTSCREAD pReq = (PSUPTSCREAD)pReqHdr;
2462 REQ_CHECK_SIZES(SUP_IOCTL_TSC_READ);
2463
2464 pReqHdr->rc = supdrvIOCtl_TscRead(pDevExt, pSession, pReq);
2465 return 0;
2466 }
2467
2468 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_SET_FLAGS):
2469 {
2470 /* validate */
2471 PSUPGIPSETFLAGS pReq = (PSUPGIPSETFLAGS)pReqHdr;
2472 REQ_CHECK_SIZES(SUP_IOCTL_GIP_SET_FLAGS);
2473
2474 pReqHdr->rc = supdrvIOCtl_GipSetFlags(pDevExt, pSession, pReq->u.In.fOrMask, pReq->u.In.fAndMask);
2475 return 0;
2476 }
2477
2478 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_UCODE_REV):
2479 {
2480 /* validate */
2481 PSUPUCODEREV pReq = (PSUPUCODEREV)pReqHdr;
2482 REQ_CHECK_SIZES(SUP_IOCTL_UCODE_REV);
2483
2484 /* execute */
2485 pReq->Hdr.rc = SUPR0QueryUcodeRev(pSession, &pReq->u.Out.MicrocodeRev);
2486 if (RT_FAILURE(pReq->Hdr.rc))
2487 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2488 return 0;
2489 }
2490
2491 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_HWVIRT_MSRS):
2492 {
2493 /* validate */
2494 PSUPGETHWVIRTMSRS pReq = (PSUPGETHWVIRTMSRS)pReqHdr;
2495 REQ_CHECK_SIZES(SUP_IOCTL_GET_HWVIRT_MSRS);
2496 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1 && !pReq->u.In.fReserved2,
2497 ("SUP_IOCTL_GET_HWVIRT_MSRS: fReserved0=%d fReserved1=%d fReserved2=%d\n", pReq->u.In.fReserved0,
2498 pReq->u.In.fReserved1, pReq->u.In.fReserved2));
2499
2500 /* execute */
2501 pReq->Hdr.rc = SUPR0GetHwvirtMsrs(&pReq->u.Out.HwvirtMsrs, 0 /* fCaps */, pReq->u.In.fForce);
2502 if (RT_FAILURE(pReq->Hdr.rc))
2503 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2504 return 0;
2505 }
2506
2507 default:
2508 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2509 break;
2510 }
2511 return VERR_GENERAL_FAILURE;
2512}
2513
2514
2515/**
2516 * I/O Control inner worker for the restricted operations.
2517 *
2518 * @returns IPRT status code.
2519 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2520 *
2521 * @param uIOCtl Function number.
2522 * @param pDevExt Device extension.
2523 * @param pSession Session data.
2524 * @param pReqHdr The request header.
2525 */
2526static int supdrvIOCtlInnerRestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
2527{
2528 /*
2529 * The switch.
2530 */
2531 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
2532 {
2533 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
2534 {
2535 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
2536 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
2537 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
2538 {
2539 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
2540 pReq->Hdr.rc = VERR_INVALID_MAGIC;
2541 return 0;
2542 }
2543
2544 /*
2545 * Match the version.
2546 * The current logic is very simple, match the major interface version.
2547 */
2548 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
2549 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
2550 {
2551 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2552 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
2553 pReq->u.Out.u32Cookie = 0xffffffff;
2554 pReq->u.Out.u32SessionCookie = 0xffffffff;
2555 pReq->u.Out.u32SessionVersion = 0xffffffff;
2556 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2557 pReq->u.Out.pSession = NULL;
2558 pReq->u.Out.cFunctions = 0;
2559 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2560 return 0;
2561 }
2562
2563 /*
2564 * Fill in return data and be gone.
2565 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
2566 * u32SessionVersion <= u32ReqVersion!
2567 */
2568 /** @todo Somehow validate the client and negotiate a secure cookie... */
2569 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
2570 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
2571 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
2572 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2573 pReq->u.Out.pSession = pSession;
2574 pReq->u.Out.cFunctions = 0;
2575 pReq->Hdr.rc = VINF_SUCCESS;
2576 return 0;
2577 }
2578
2579 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2580 {
2581 /* validate */
2582 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2583 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2584
2585 /* execute */
2586 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2587 if (RT_FAILURE(pReq->Hdr.rc))
2588 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2589 return 0;
2590 }
2591
2592 default:
2593 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2594 break;
2595 }
2596 return VERR_GENERAL_FAILURE;
2597}
2598
2599
2600/**
2601 * I/O Control worker.
2602 *
2603 * @returns IPRT status code.
2604 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2605 *
2606 * @param uIOCtl Function number.
2607 * @param pDevExt Device extension.
2608 * @param pSession Session data.
2609 * @param pReqHdr The request header.
2610 * @param cbReq The size of the request buffer.
2611 */
2612int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr, size_t cbReq)
2613{
2614 int rc;
2615 VBOXDRV_IOCTL_ENTRY(pSession, uIOCtl, pReqHdr);
2616
2617 /*
2618 * Validate the request.
2619 */
2620 if (RT_UNLIKELY(cbReq < sizeof(*pReqHdr)))
2621 {
2622 OSDBGPRINT(("vboxdrv: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
2623 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2624 return VERR_INVALID_PARAMETER;
2625 }
2626 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
2627 || pReqHdr->cbIn < sizeof(*pReqHdr)
2628 || pReqHdr->cbIn > cbReq
2629 || pReqHdr->cbOut < sizeof(*pReqHdr)
2630 || pReqHdr->cbOut > cbReq))
2631 {
2632 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
2633 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
2634 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2635 return VERR_INVALID_PARAMETER;
2636 }
2637 if (RT_UNLIKELY(!RT_VALID_PTR(pSession)))
2638 {
2639 OSDBGPRINT(("vboxdrv: Invalid pSession value %p (ioctl=%p)\n", pSession, (void *)uIOCtl));
2640 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2641 return VERR_INVALID_PARAMETER;
2642 }
2643 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
2644 {
2645 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
2646 {
2647 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
2648 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2649 return VERR_INVALID_PARAMETER;
2650 }
2651 }
2652 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
2653 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
2654 {
2655 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
2656 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2657 return VERR_INVALID_PARAMETER;
2658 }
2659
2660 /*
2661 * Hand it to an inner function to avoid lots of unnecessary return tracepoints.
2662 */
2663 if (pSession->fUnrestricted)
2664 rc = supdrvIOCtlInnerUnrestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2665 else
2666 rc = supdrvIOCtlInnerRestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2667
2668 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, pReqHdr->rc, rc);
2669 return rc;
2670}
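
/*
 * Minimal sketch (disabled, for illustration only) of a request that passes
 * the validation above: the SUP_IOCTL_COOKIE negotiation every client starts
 * with. How the buffer travels from ring-3 through the OS specific ioctl
 * entry point is not shown, and SUPREQHDR_FLAGS_DEFAULT is an assumption --
 * the authoritative constants live in SUPDrvIOC.h.
 */
#if 0
static int supdrvExampleCookieNegotiation(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    SUPCOOKIE Req;
    RT_ZERO(Req);

    /* Common header: sizes, magic flags and the initial cookie value. */
    Req.Hdr.u32Cookie        = SUPCOOKIE_INITIAL_COOKIE;
    Req.Hdr.u32SessionCookie = 0;                        /* not checked for this request */
    Req.Hdr.cbIn             = SUP_IOCTL_COOKIE_SIZE_IN;
    Req.Hdr.cbOut            = SUP_IOCTL_COOKIE_SIZE_OUT;
    Req.Hdr.fFlags           = SUPREQHDR_FLAGS_DEFAULT;  /* assumed; must satisfy the magic mask check */
    Req.Hdr.rc               = VERR_INTERNAL_ERROR;

    /* Request specific input: magic string and version negotiation. */
    strcpy(Req.u.In.szMagic, SUPCOOKIE_MAGIC);
    Req.u.In.u32ReqVersion = SUPDRV_IOC_VERSION;
    Req.u.In.u32MinVersion = SUPDRV_IOC_VERSION & UINT32_C(0xffff0000);

    int rc = supdrvIOCtl(SUP_IOCTL_COOKIE, pDevExt, pSession, &Req.Hdr, sizeof(Req));
    if (RT_FAILURE(rc))
        return rc;
    /* On success, the cookies in Req.u.Out must accompany every later request. */
    return Req.Hdr.rc;
}
#endif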
2671
2672
2673/**
2674 * Inter-Driver Communication (IDC) worker.
2675 *
2676 * @returns VBox status code.
2677 * @retval VINF_SUCCESS on success.
2678 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2679 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
2680 *
2681 * @param uReq The request (function) code.
2682 * @param pDevExt Device extension.
2683 * @param pSession Session data.
2684 * @param pReqHdr The request header.
2685 */
2686int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
2687{
2688 /*
2689 * The OS specific code has already validated the pSession
2690 * pointer, and that the request size is greater than or equal to the
2691 * size of the header.
2692 *
2693 * So, just check that pSession is a kernel context session.
2694 */
2695 if (RT_UNLIKELY( pSession
2696 && pSession->R0Process != NIL_RTR0PROCESS))
2697 return VERR_INVALID_PARAMETER;
2698
2699/*
2700 * Validation macro.
2701 */
2702#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
2703 do { \
2704 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
2705 { \
2706 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
2707 (long)pReqHdr->cb, (long)(cbExpect))); \
2708 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
2709 } \
2710 } while (0)
2711
2712 switch (uReq)
2713 {
2714 case SUPDRV_IDC_REQ_CONNECT:
2715 {
2716 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
2717 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
2718
2719 /*
2720 * Validate the cookie and other input.
2721 */
2722 if (pReq->Hdr.pSession != NULL)
2723 {
2724 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Hdr.pSession=%p expected NULL!\n", pReq->Hdr.pSession));
2725 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2726 }
2727 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
2728 {
2729 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
2730 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
2731 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2732 }
2733 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
2734 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
2735 {
2736 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uReqVersion=%#x doesn't match!\n",
2737 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
2738 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2739 }
2740 if (pSession != NULL)
2741 {
2742 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pSession));
2743 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2744 }
2745
2746 /*
2747 * Match the version.
2748 * The current logic is very simple, match the major interface version.
2749 */
2750 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
2751 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
2752 {
2753 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2754 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
2755 pReq->u.Out.pSession = NULL;
2756 pReq->u.Out.uSessionVersion = 0xffffffff;
2757 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2758 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2759 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2760 return VINF_SUCCESS;
2761 }
2762
2763 pReq->u.Out.pSession = NULL;
2764 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
2765 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2766 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2767
2768 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, true /*fUnrestricted*/, &pSession);
2769 if (RT_FAILURE(pReq->Hdr.rc))
2770 {
2771 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
2772 return VINF_SUCCESS;
2773 }
2774
2775 pReq->u.Out.pSession = pSession;
2776 pReq->Hdr.pSession = pSession;
2777
2778 return VINF_SUCCESS;
2779 }
2780
2781 case SUPDRV_IDC_REQ_DISCONNECT:
2782 {
2783 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
2784
2785 supdrvSessionRelease(pSession);
2786 return pReqHdr->rc = VINF_SUCCESS;
2787 }
2788
2789 case SUPDRV_IDC_REQ_GET_SYMBOL:
2790 {
2791 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
2792 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
2793
2794 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
2795 return VINF_SUCCESS;
2796 }
2797
2798 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
2799 {
2800 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
2801 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
2802
2803 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
2804 return VINF_SUCCESS;
2805 }
2806
2807 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
2808 {
2809 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
2810 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
2811
2812 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
2813 return VINF_SUCCESS;
2814 }
2815
2816 default:
2817 Log(("Unknown IDC %#lx\n", (long)uReq));
2818 break;
2819 }
2820
2821#undef REQ_CHECK_IDC_SIZE
2822 return VERR_NOT_SUPPORTED;
2823}
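
/*
 * Minimal sketch (disabled, for illustration only) of the IDC connect and
 * disconnect handshake the worker above expects. Real kernel clients go
 * through the OS specific IDC entry point and the IDC client library rather
 * than calling supdrvIDC directly, so treat the calls below purely as a
 * description of the request layout checked above.
 */
#if 0
static int supdrvExampleIdcConnectDisconnect(PSUPDRVDEVEXT pDevExt)
{
    SUPDRVIDCREQCONNECT Req;
    RT_ZERO(Req);
    Req.Hdr.cb              = sizeof(Req);
    Req.Hdr.rc              = VERR_INTERNAL_ERROR;
    Req.Hdr.pSession        = NULL;                      /* must be NULL on connect */
    Req.u.In.u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE;
    Req.u.In.uReqVersion    = SUPDRV_IDC_VERSION;
    Req.u.In.uMinVersion    = SUPDRV_IDC_VERSION & UINT32_C(0xffff0000);

    int rc = supdrvIDC(SUPDRV_IDC_REQ_CONNECT, pDevExt, NULL /*pSession*/, &Req.Hdr);
    if (RT_FAILURE(rc))
        return rc;
    if (RT_FAILURE(Req.Hdr.rc))
        return Req.Hdr.rc;

    PSUPDRVSESSION pSession = Req.u.Out.pSession;        /* kernel session, R0Process is NIL */

    /* ... SUPDRV_IDC_REQ_GET_SYMBOL / component factory requests would go here ... */

    /* Disconnecting releases the session created by the connect request. */
    SUPDRVIDCREQHDR Hdr;
    RT_ZERO(Hdr);
    Hdr.cb       = sizeof(Hdr);
    Hdr.rc       = VERR_INTERNAL_ERROR;
    Hdr.pSession = pSession;
    return supdrvIDC(SUPDRV_IDC_REQ_DISCONNECT, pDevExt, pSession, &Hdr);
}
#endif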
2824
2825
2826/**
2827 * Register an object for reference counting.
2828 * The object is registered with one reference in the specified session.
2829 *
2830 * @returns Unique identifier on success (pointer).
2831 * All future references must use this identifier.
2832 * @returns NULL on failure.
2833 * @param pSession The caller's session.
2834 * @param enmType The object type.
2835 * @param pfnDestructor The destructor function which will be called when the reference count reaches 0.
2836 * @param pvUser1 The first user argument.
2837 * @param pvUser2 The second user argument.
2838 */
2839SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
2840{
2841 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2842 PSUPDRVOBJ pObj;
2843 PSUPDRVUSAGE pUsage;
2844
2845 /*
2846 * Validate the input.
2847 */
2848 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
2849 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
2850 AssertPtrReturn(pfnDestructor, NULL);
2851
2852 /*
2853 * Allocate and initialize the object.
2854 */
2855 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
2856 if (!pObj)
2857 return NULL;
2858 pObj->u32Magic = SUPDRVOBJ_MAGIC;
2859 pObj->enmType = enmType;
2860 pObj->pNext = NULL;
2861 pObj->cUsage = 1;
2862 pObj->pfnDestructor = pfnDestructor;
2863 pObj->pvUser1 = pvUser1;
2864 pObj->pvUser2 = pvUser2;
2865 pObj->CreatorUid = pSession->Uid;
2866 pObj->CreatorGid = pSession->Gid;
2867 pObj->CreatorProcess= pSession->Process;
2868 supdrvOSObjInitCreator(pObj, pSession);
2869
2870 /*
2871 * Allocate the usage record.
2872 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
2873 */
2874 RTSpinlockAcquire(pDevExt->Spinlock);
2875
2876 pUsage = pDevExt->pUsageFree;
2877 if (pUsage)
2878 pDevExt->pUsageFree = pUsage->pNext;
2879 else
2880 {
2881 RTSpinlockRelease(pDevExt->Spinlock);
2882 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
2883 if (!pUsage)
2884 {
2885 RTMemFree(pObj);
2886 return NULL;
2887 }
2888 RTSpinlockAcquire(pDevExt->Spinlock);
2889 }
2890
2891 /*
2892 * Insert the object and create the session usage record.
2893 */
2894 /* The object. */
2895 pObj->pNext = pDevExt->pObjs;
2896 pDevExt->pObjs = pObj;
2897
2898 /* The session record. */
2899 pUsage->cUsage = 1;
2900 pUsage->pObj = pObj;
2901 pUsage->pNext = pSession->pUsage;
2902 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
2903 pSession->pUsage = pUsage;
2904
2905 RTSpinlockRelease(pDevExt->Spinlock);
2906
2907 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser2=%p)\n", pObj, pvUser1, pvUser2));
2908 return pObj;
2909}
2910SUPR0_EXPORT_SYMBOL(SUPR0ObjRegister);
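
/*
 * Minimal sketch (disabled, for illustration only) of the reference counting
 * life cycle provided by SUPR0ObjRegister, SUPR0ObjAddRef and SUPR0ObjRelease.
 * The destructor prototype and the SUPDRVOBJTYPE_VM value are assumptions
 * based on the public ring-0 API in VBox/sup.h; only the calls defined in
 * this file are certain.
 */
#if 0
/** Example destructor, invoked exactly once when the last reference is dropped. */
static DECLCALLBACK(void) supdrvExampleObjDtor(void *pvObj, void *pvUser1, void *pvUser2)
{
    NOREF(pvObj); NOREF(pvUser2);
    RTMemFree(pvUser1);                                  /* free the payload attached at registration */
}

static int supdrvExampleObjLifeCycle(PSUPDRVSESSION pSession, PSUPDRVSESSION pOtherSession)
{
    void *pvPayload = RTMemAllocZ(64);
    if (!pvPayload)
        return VERR_NO_MEMORY;

    /* Register: the new object starts out with one reference owned by pSession. */
    void *pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM /* any SUPDRVOBJTYPE_XXX */,
                                   supdrvExampleObjDtor, pvPayload, NULL);
    if (!pvObj)
    {
        RTMemFree(pvPayload);
        return VERR_NO_MEMORY;
    }

    /* A second session can reference the same object and drop it again. */
    int rc = SUPR0ObjAddRef(pvObj, pOtherSession);
    if (RT_SUCCESS(rc))
        SUPR0ObjRelease(pvObj, pOtherSession);

    /* Releasing the last reference runs the destructor and frees the object. */
    rc = SUPR0ObjRelease(pvObj, pSession);
    Assert(rc == VINF_OBJECT_DESTROYED);
    return VINF_SUCCESS;
}
#endif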
2911
2912
2913/**
2914 * Increment the reference counter for the object associating the reference
2915 * with the specified session.
2916 *
2917 * @returns IPRT status code.
2918 * @param pvObj The identifier returned by SUPR0ObjRegister().
2919 * @param pSession The session which is referencing the object.
2920 *
2921 * @remarks The caller should not own any spinlocks and must carefully protect
2922 * itself against potential race with the destructor so freed memory
2923 * isn't accessed here.
2924 */
2925SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
2926{
2927 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
2928}
2929SUPR0_EXPORT_SYMBOL(SUPR0ObjAddRef);
2930
2931
2932/**
2933 * Increment the reference counter for the object associating the reference
2934 * with the specified session.
2935 *
2936 * @returns IPRT status code.
2937 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
2938 * couldn't be allocated. (If you see this you're not doing the right
2939 * thing and it won't ever work reliably.)
2940 *
2941 * @param pvObj The identifier returned by SUPR0ObjRegister().
2942 * @param pSession The session which is referencing the object.
2943 * @param fNoBlocking Set if it's not OK to block. Never try to make the
2944 * first reference to an object in a session with this
2945 * argument set.
2946 *
2947 * @remarks The caller should not own any spinlocks and must carefully protect
2948 * itself against potential race with the destructor so freed memory
2949 * isn't accessed here.
2950 */
2951SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
2952{
2953 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2954 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2955 int rc = VINF_SUCCESS;
2956 PSUPDRVUSAGE pUsagePre;
2957 PSUPDRVUSAGE pUsage;
2958
2959 /*
2960 * Validate the input.
2961 * Be ready for the destruction race (someone might be stuck in the
2962 * destructor waiting for a lock we own).
2963 */
2964 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2965 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
2966 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
2967 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
2968 VERR_INVALID_PARAMETER);
2969
2970 RTSpinlockAcquire(pDevExt->Spinlock);
2971
2972 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2973 {
2974 RTSpinlockRelease(pDevExt->Spinlock);
2975
2976 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2977 return VERR_WRONG_ORDER;
2978 }
2979
2980 /*
2981 * Preallocate the usage record if we can.
2982 */
2983 pUsagePre = pDevExt->pUsageFree;
2984 if (pUsagePre)
2985 pDevExt->pUsageFree = pUsagePre->pNext;
2986 else if (!fNoBlocking)
2987 {
2988 RTSpinlockRelease(pDevExt->Spinlock);
2989 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2990 if (!pUsagePre)
2991 return VERR_NO_MEMORY;
2992
2993 RTSpinlockAcquire(pDevExt->Spinlock);
2994 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2995 {
2996 RTSpinlockRelease(pDevExt->Spinlock);
2997
2998 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2999 return VERR_WRONG_ORDER;
3000 }
3001 }
3002
3003 /*
3004 * Reference the object.
3005 */
3006 pObj->cUsage++;
3007
3008 /*
3009 * Look for the session record.
3010 */
3011 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
3012 {
3013 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3014 if (pUsage->pObj == pObj)
3015 break;
3016 }
3017 if (pUsage)
3018 pUsage->cUsage++;
3019 else if (pUsagePre)
3020 {
3021 /* create a new session record. */
3022 pUsagePre->cUsage = 1;
3023 pUsagePre->pObj = pObj;
3024 pUsagePre->pNext = pSession->pUsage;
3025 pSession->pUsage = pUsagePre;
3026 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
3027
3028 pUsagePre = NULL;
3029 }
3030 else
3031 {
3032 pObj->cUsage--;
3033 rc = VERR_TRY_AGAIN;
3034 }
3035
3036 /*
3037 * Put any unused usage record into the free list.
3038 */
3039 if (pUsagePre)
3040 {
3041 pUsagePre->pNext = pDevExt->pUsageFree;
3042 pDevExt->pUsageFree = pUsagePre;
3043 }
3044
3045 RTSpinlockRelease(pDevExt->Spinlock);
3046
3047 return rc;
3048}
3049SUPR0_EXPORT_SYMBOL(SUPR0ObjAddRefEx);
3050
3051
3052/**
3053 * Decrement / destroy a reference counter record for an object.
3054 *
3055 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
3056 *
3057 * @returns IPRT status code.
3058 * @retval VINF_SUCCESS if not destroyed.
3059 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
3060 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
3061 * strict builds.
3062 *
3063 * @param pvObj The identifier returned by SUPR0ObjRegister().
3064 * @param pSession The session which is referencing the object.
3065 */
3066SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
3067{
3068 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3069 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3070 int rc = VERR_INVALID_PARAMETER;
3071 PSUPDRVUSAGE pUsage;
3072 PSUPDRVUSAGE pUsagePrev;
3073
3074 /*
3075 * Validate the input.
3076 */
3077 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3078 AssertMsgReturn(RT_VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3079 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3080 VERR_INVALID_PARAMETER);
3081
3082 /*
3083 * Acquire the spinlock and look for the usage record.
3084 */
3085 RTSpinlockAcquire(pDevExt->Spinlock);
3086
3087 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
3088 pUsage;
3089 pUsagePrev = pUsage, pUsage = pUsage->pNext)
3090 {
3091 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3092 if (pUsage->pObj == pObj)
3093 {
3094 rc = VINF_SUCCESS;
3095 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
3096 if (pUsage->cUsage > 1)
3097 {
3098 pObj->cUsage--;
3099 pUsage->cUsage--;
3100 }
3101 else
3102 {
3103 /*
3104 * Free the session record.
3105 */
3106 if (pUsagePrev)
3107 pUsagePrev->pNext = pUsage->pNext;
3108 else
3109 pSession->pUsage = pUsage->pNext;
3110 pUsage->pNext = pDevExt->pUsageFree;
3111 pDevExt->pUsageFree = pUsage;
3112
3113 /* What about the object? */
3114 if (pObj->cUsage > 1)
3115 pObj->cUsage--;
3116 else
3117 {
3118 /*
3119 * Object is to be destroyed, unlink it.
3120 */
3121 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
3122 rc = VINF_OBJECT_DESTROYED;
3123 if (pDevExt->pObjs == pObj)
3124 pDevExt->pObjs = pObj->pNext;
3125 else
3126 {
3127 PSUPDRVOBJ pObjPrev;
3128 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
3129 if (pObjPrev->pNext == pObj)
3130 {
3131 pObjPrev->pNext = pObj->pNext;
3132 break;
3133 }
3134 Assert(pObjPrev);
3135 }
3136 }
3137 }
3138 break;
3139 }
3140 }
3141
3142 RTSpinlockRelease(pDevExt->Spinlock);
3143
3144 /*
3145 * Call the destructor and free the object if required.
3146 */
3147 if (rc == VINF_OBJECT_DESTROYED)
3148 {
3149 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
3150 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
3151 if (pObj->pfnDestructor)
3152 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
3153 RTMemFree(pObj);
3154 }
3155
3156 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
3157 return rc;
3158}
3159SUPR0_EXPORT_SYMBOL(SUPR0ObjRelease);
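
/*
 * Usage sketch (illustrative only): the reference pattern a ring-0 client is
 * expected to follow with these APIs - take a reference before touching a
 * shared object and drop it again with SUPR0ObjRelease().  The helper name
 * and the way pvObj reaches the caller are assumptions made for the example.
 */
#if 0
static int sampleUseSharedObject(void *pvObj, PSUPDRVSESSION pSession)
{
    /* Grab a reference first (see the AddRef remark in SUPR0ObjVerifyAccess). */
    int rc = SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
    if (RT_SUCCESS(rc))
    {
        /* ... safely use the object here ... */

        /* Drop the reference; VINF_OBJECT_DESTROYED means we held the last one. */
        rc = SUPR0ObjRelease(pvObj, pSession);
    }
    return rc;
}
#endif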
3160
3161
3162/**
3163 * Verifies that the current process can access the specified object.
3164 *
3165 * @returns The following IPRT status code:
3166 * @retval VINF_SUCCESS if access was granted.
3167 * @retval VERR_PERMISSION_DENIED if denied access.
3168 * @retval VERR_INVALID_PARAMETER if invalid parameter.
3169 *
3170 * @param pvObj The identifier returned by SUPR0ObjRegister().
3171 * @param pSession The session which wishes to access the object.
3172 * @param pszObjName Object string name. This is optional and depends on the object type.
3173 *
3174 * @remark The caller is responsible for making sure the object isn't removed while
3175 * we're inside this function. If uncertain about this, just call AddRef before calling us.
3176 */
3177SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
3178{
3179 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3180 int rc;
3181
3182 /*
3183 * Validate the input.
3184 */
3185 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3186 AssertMsgReturn(RT_VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3187                    ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3188 VERR_INVALID_PARAMETER);
3189
3190 /*
3191 * Check access. (returns true if a decision has been made.)
3192 */
3193 rc = VERR_INTERNAL_ERROR;
3194 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
3195 return rc;
3196
3197 /*
3198 * Default policy is to allow the user to access his own
3199 * stuff but nothing else.
3200 */
3201 if (pObj->CreatorUid == pSession->Uid)
3202 return VINF_SUCCESS;
3203 return VERR_PERMISSION_DENIED;
3204}
3205SUPR0_EXPORT_SYMBOL(SUPR0ObjVerifyAccess);
3206
3207
3208/**
3209 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionVM member.
3210 *
3211 * @returns The associated VM pointer.
3212 * @param pSession The session of the current thread.
3213 */
3214SUPR0DECL(PVM) SUPR0GetSessionVM(PSUPDRVSESSION pSession)
3215{
3216 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3217 return pSession->pSessionVM;
3218}
3219SUPR0_EXPORT_SYMBOL(SUPR0GetSessionVM);
3220
3221
3222/**
3223 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionGVM member.
3224 *
3225 * @returns The associated GVM pointer.
3226 * @param pSession The session of the current thread.
3227 */
3228SUPR0DECL(PGVM) SUPR0GetSessionGVM(PSUPDRVSESSION pSession)
3229{
3230 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3231 return pSession->pSessionGVM;
3232}
3233SUPR0_EXPORT_SYMBOL(SUPR0GetSessionGVM);
3234
3235
3236/**
3237 * API for the VMMR0 module to work the SUPDRVSESSION::pSessionVM member.
3238 *
3239 * This will fail if there is already a VM associated with the session and pVM
3240 * isn't NULL.
3241 *
3242 * @retval VINF_SUCCESS
3243 * @retval VERR_ALREADY_EXISTS if there already is a VM associated with the
3244 * session.
3245 * @retval VERR_INVALID_PARAMETER if only one of the parameters are NULL or if
3246 * the session is invalid.
3247 *
3248 * @param pSession The session of the current thread.
3249 * @param pGVM The GVM to associate with the session. Pass NULL to
3250 *                      disassociate.
3251 * @param pVM The VM to associate with the session. Pass NULL to
3252 *                      disassociate.
3253 */
3254SUPR0DECL(int) SUPR0SetSessionVM(PSUPDRVSESSION pSession, PGVM pGVM, PVM pVM)
3255{
3256 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3257 AssertReturn((pGVM != NULL) == (pVM != NULL), VERR_INVALID_PARAMETER);
3258
3259 RTSpinlockAcquire(pSession->pDevExt->Spinlock);
3260 if (pGVM)
3261 {
3262 if (!pSession->pSessionGVM)
3263 {
3264 pSession->pSessionGVM = pGVM;
3265 pSession->pSessionVM = pVM;
3266 pSession->pFastIoCtrlVM = NULL;
3267 }
3268 else
3269 {
3270 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3271            SUPR0Printf("SUPR0SetSessionVM: Unable to associate GVM/VM %p/%p with session %p as it has %p/%p already!\n",
3272 pGVM, pVM, pSession, pSession->pSessionGVM, pSession->pSessionVM);
3273 return VERR_ALREADY_EXISTS;
3274 }
3275 }
3276 else
3277 {
3278 pSession->pSessionGVM = NULL;
3279 pSession->pSessionVM = NULL;
3280 pSession->pFastIoCtrlVM = NULL;
3281 }
3282 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3283 return VINF_SUCCESS;
3284}
3285SUPR0_EXPORT_SYMBOL(SUPR0SetSessionVM);
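
/*
 * Usage sketch (illustrative only): how VMMR0 style code could associate a VM
 * with the session and later clear the association again.  The helper name is
 * hypothetical and the pGVM/pVM pointers are assumed to come from the caller.
 */
#if 0
static int sampleAssociateVmWithSession(PSUPDRVSESSION pSession, PGVM pGVM, PVM pVM)
{
    /* Both pointers must be given (or both NULL); fails if a VM is already set. */
    int rc = SUPR0SetSessionVM(pSession, pGVM, pVM);
    if (RT_SUCCESS(rc))
    {
        Assert(SUPR0GetSessionVM(pSession) == pVM);

        /* ... run the VM ... */

        /* Dissociate again by passing NULL for both. */
        rc = SUPR0SetSessionVM(pSession, NULL, NULL);
    }
    return rc;
}
#endif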
3286
3287
3288/** @copydoc RTLogDefaultInstanceEx
3289 * @remarks To allow overriding RTLogDefaultInstanceEx locally. */
3290SUPR0DECL(struct RTLOGGER *) SUPR0DefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3291{
3292 return RTLogDefaultInstanceEx(fFlagsAndGroup);
3293}
3294SUPR0_EXPORT_SYMBOL(SUPR0DefaultLogInstanceEx);
3295
3296
3297/** @copydoc RTLogGetDefaultInstanceEx
3298 * @remarks To allow overriding RTLogGetDefaultInstanceEx locally. */
3299SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3300{
3301 return RTLogGetDefaultInstanceEx(fFlagsAndGroup);
3302}
3303SUPR0_EXPORT_SYMBOL(SUPR0GetDefaultLogInstanceEx);
3304
3305
3306/** @copydoc RTLogRelGetDefaultInstanceEx
3307 * @remarks To allow overriding RTLogRelGetDefaultInstanceEx locally. */
3308SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogRelInstanceEx(uint32_t fFlagsAndGroup)
3309{
3310 return RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
3311}
3312SUPR0_EXPORT_SYMBOL(SUPR0GetDefaultLogRelInstanceEx);
3313
3314
3315/**
3316 * Lock pages.
3317 *
3318 * @returns IPRT status code.
3319 * @param pSession Session to which the locked memory should be associated.
3320 * @param pvR3 Start of the memory range to lock.
3321 * This must be page aligned.
3322 * @param cPages Number of pages to lock.
3323 * @param paPages Where to put the physical addresses of locked memory.
3324 */
3325SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
3326{
3327 int rc;
3328 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3329 const size_t cb = (size_t)cPages << PAGE_SHIFT;
3330 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
3331
3332 /*
3333 * Verify input.
3334 */
3335 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3336 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
3337 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
3338 || !pvR3)
3339 {
3340 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
3341 return VERR_INVALID_PARAMETER;
3342 }
3343
3344 /*
3345 * Let IPRT do the job.
3346 */
3347 Mem.eType = MEMREF_TYPE_LOCKED;
3348 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3349 if (RT_SUCCESS(rc))
3350 {
3351 uint32_t iPage = cPages;
3352 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
3353 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
3354
3355 while (iPage-- > 0)
3356 {
3357 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3358 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
3359 {
3360 AssertMsgFailed(("iPage=%d\n", iPage));
3361 rc = VERR_INTERNAL_ERROR;
3362 break;
3363 }
3364 }
3365 if (RT_SUCCESS(rc))
3366 rc = supdrvMemAdd(&Mem, pSession);
3367 if (RT_FAILURE(rc))
3368 {
3369 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
3370 AssertRC(rc2);
3371 }
3372 }
3373
3374 return rc;
3375}
3376SUPR0_EXPORT_SYMBOL(SUPR0LockMem);
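
/*
 * Usage sketch (illustrative only): locking a page aligned ring-3 buffer so
 * its physical addresses can be handed to something that needs them, and then
 * unlocking it again.  The helper name is hypothetical; the buffer and page
 * count are assumed to come from the caller.
 */
#if 0
static int sampleLockUserBuffer(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages)
{
    PRTHCPHYS paPages = (PRTHCPHYS)RTMemAlloc(cPages * sizeof(RTHCPHYS));
    if (!paPages)
        return VERR_NO_MEMORY;

    /* Pin the pages and get their physical addresses. */
    int rc = SUPR0LockMem(pSession, pvR3, cPages, paPages);
    if (RT_SUCCESS(rc))
    {
        /* ... use paPages[0..cPages-1] here ... */

        /* Unpin when done. */
        rc = SUPR0UnlockMem(pSession, pvR3);
    }
    RTMemFree(paPages);
    return rc;
}
#endif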
3377
3378
3379/**
3380 * Unlocks the memory pointed to by pv.
3381 *
3382 * @returns IPRT status code.
3383 * @param pSession Session to which the memory was locked.
3384 * @param pvR3 Memory to unlock.
3385 */
3386SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3387{
3388 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3389 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3390 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
3391}
3392SUPR0_EXPORT_SYMBOL(SUPR0UnlockMem);
3393
3394
3395/**
3396 * Allocates a chunk of page aligned memory with contiguous and fixed physical
3397 * backing.
3398 *
3399 * @returns IPRT status code.
3400 * @param pSession Session data.
3401 * @param cPages Number of pages to allocate.
3402 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
3403 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
3404 * @param pHCPhys Where to put the physical address of allocated memory.
3405 */
3406SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
3407{
3408 int rc;
3409 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3410 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
3411
3412 /*
3413 * Validate input.
3414 */
3415 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3416 if (!ppvR3 || !ppvR0 || !pHCPhys)
3417 {
3418 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
3419 pSession, ppvR0, ppvR3, pHCPhys));
3420 return VERR_INVALID_PARAMETER;
3421
3422 }
3423 if (cPages < 1 || cPages >= 256)
3424 {
3425 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3426 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3427 }
3428
3429 /*
3430 * Let IPRT do the job.
3431 */
3432 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
3433 if (RT_SUCCESS(rc))
3434 {
3435 int rc2;
3436 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3437 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3438 if (RT_SUCCESS(rc))
3439 {
3440 Mem.eType = MEMREF_TYPE_CONT;
3441 rc = supdrvMemAdd(&Mem, pSession);
3442 if (!rc)
3443 {
3444 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3445 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3446 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
3447 return 0;
3448 }
3449
3450 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3451 AssertRC(rc2);
3452 }
3453 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3454 AssertRC(rc2);
3455 }
3456
3457 return rc;
3458}
3459SUPR0_EXPORT_SYMBOL(SUPR0ContAlloc);
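
/*
 * Usage sketch (illustrative only): allocating a small physically contiguous
 * buffer that is visible in both ring-0 and ring-3 and freeing it again.  The
 * helper name and the two-page size are assumptions for the example.
 */
#if 0
static int sampleContiguousBuffer(PSUPDRVSESSION pSession)
{
    RTR0PTR  pvR0   = NIL_RTR0PTR;
    RTR3PTR  pvR3   = NIL_RTR3PTR;
    RTHCPHYS HCPhys = NIL_RTHCPHYS;

    /* Two pages, contiguous, mapped into both contexts. */
    int rc = SUPR0ContAlloc(pSession, 2 /* cPages */, &pvR0, &pvR3, &HCPhys);
    if (RT_SUCCESS(rc))
    {
        /* ... use pvR0 in the driver, pvR3 in the process, HCPhys for hardware ... */

        /* Free by either address; the ring-3 one is used here. */
        rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pvR3);
    }
    return rc;
}
#endif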
3460
3461
3462/**
3463 * Frees memory allocated using SUPR0ContAlloc().
3464 *
3465 * @returns IPRT status code.
3466 * @param pSession The session to which the memory was allocated.
3467 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3468 */
3469SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3470{
3471 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3472 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3473 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
3474}
3475SUPR0_EXPORT_SYMBOL(SUPR0ContFree);
3476
3477
3478/**
3479 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
3480 *
3481 * The memory isn't zeroed.
3482 *
3483 * @returns IPRT status code.
3484 * @param pSession Session data.
3485 * @param cPages Number of pages to allocate.
3486 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
3487 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
3488 * @param paPages Where to put the physical addresses of allocated memory.
3489 */
3490SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
3491{
3492 unsigned iPage;
3493 int rc;
3494 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3495 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
3496
3497 /*
3498 * Validate input.
3499 */
3500 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3501 if (!ppvR3 || !ppvR0 || !paPages)
3502 {
3503 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
3504 pSession, ppvR3, ppvR0, paPages));
3505 return VERR_INVALID_PARAMETER;
3506
3507 }
3508 if (cPages < 1 || cPages >= 256)
3509 {
3510 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3511 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3512 }
3513
3514 /*
3515 * Let IPRT do the work.
3516 */
3517 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
3518 if (RT_SUCCESS(rc))
3519 {
3520 int rc2;
3521 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3522 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3523 if (RT_SUCCESS(rc))
3524 {
3525 Mem.eType = MEMREF_TYPE_LOW;
3526 rc = supdrvMemAdd(&Mem, pSession);
3527 if (!rc)
3528 {
3529 for (iPage = 0; iPage < cPages; iPage++)
3530 {
3531 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3532                    AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
3533 }
3534 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3535 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3536 return 0;
3537 }
3538
3539 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3540 AssertRC(rc2);
3541 }
3542
3543 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3544 AssertRC(rc2);
3545 }
3546
3547 return rc;
3548}
3549SUPR0_EXPORT_SYMBOL(SUPR0LowAlloc);
3550
3551
3552/**
3553 * Frees memory allocated using SUPR0LowAlloc().
3554 *
3555 * @returns IPRT status code.
3556 * @param pSession The session to which the memory was allocated.
3557 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3558 */
3559SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3560{
3561 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3562 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3563 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
3564}
3565SUPR0_EXPORT_SYMBOL(SUPR0LowFree);
3566
3567
3568
3569/**
3570 * Allocates a chunk of memory with both R0 and R3 mappings.
3571 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
3572 *
3573 * @returns IPRT status code.
3574 * @param   pSession    The session to associate the allocation with.
3575 * @param cb Number of bytes to allocate.
3576 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3577 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3578 */
3579SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
3580{
3581 int rc;
3582 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3583 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
3584
3585 /*
3586 * Validate input.
3587 */
3588 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3589 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
3590 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3591 if (cb < 1 || cb >= _4M)
3592 {
3593 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
3594 return VERR_INVALID_PARAMETER;
3595 }
3596
3597 /*
3598 * Let IPRT do the work.
3599 */
3600 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
3601 if (RT_SUCCESS(rc))
3602 {
3603 int rc2;
3604 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3605 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3606 if (RT_SUCCESS(rc))
3607 {
3608 Mem.eType = MEMREF_TYPE_MEM;
3609 rc = supdrvMemAdd(&Mem, pSession);
3610 if (!rc)
3611 {
3612 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3613 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3614 return VINF_SUCCESS;
3615 }
3616
3617 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3618 AssertRC(rc2);
3619 }
3620
3621 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3622 AssertRC(rc2);
3623 }
3624
3625 return rc;
3626}
3627SUPR0_EXPORT_SYMBOL(SUPR0MemAlloc);
3628
3629
3630/**
3631 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
3632 *
3633 * @returns IPRT status code.
3634 * @param pSession The session to which the memory was allocated.
3635 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3636 * @param paPages Where to store the physical addresses.
3637 */
3638SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
3639{
3640 PSUPDRVBUNDLE pBundle;
3641 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
3642
3643 /*
3644 * Validate input.
3645 */
3646 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3647 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
3648 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
3649
3650 /*
3651 * Search for the address.
3652 */
3653 RTSpinlockAcquire(pSession->Spinlock);
3654 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3655 {
3656 if (pBundle->cUsed > 0)
3657 {
3658 unsigned i;
3659 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3660 {
3661 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
3662 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3663 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3664 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3665 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
3666 )
3667 )
3668 {
3669 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
3670 size_t iPage;
3671 for (iPage = 0; iPage < cPages; iPage++)
3672 {
3673 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
3674 paPages[iPage].uReserved = 0;
3675 }
3676 RTSpinlockRelease(pSession->Spinlock);
3677 return VINF_SUCCESS;
3678 }
3679 }
3680 }
3681 }
3682 RTSpinlockRelease(pSession->Spinlock);
3683 Log(("Failed to find %p!!!\n", (void *)uPtr));
3684 return VERR_INVALID_PARAMETER;
3685}
3686SUPR0_EXPORT_SYMBOL(SUPR0MemGetPhys);
3687
3688
3689/**
3690 * Free memory allocated by SUPR0MemAlloc().
3691 *
3692 * @returns IPRT status code.
3693 * @param pSession The session owning the allocation.
3694 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3695 */
3696SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3697{
3698 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3699 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3700 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
3701}
3702SUPR0_EXPORT_SYMBOL(SUPR0MemFree);
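
/*
 * Usage sketch (illustrative only): a two page SUPR0MemAlloc allocation, a
 * physical address query for its pages and the matching free.  The helper
 * name and the 8 KB size are assumptions for the example.
 */
#if 0
static int sampleMemAllocAndQueryPhys(PSUPDRVSESSION pSession)
{
    RTR0PTR pvR0 = NIL_RTR0PTR;
    RTR3PTR pvR3 = NIL_RTR3PTR;
    int rc = SUPR0MemAlloc(pSession, 2 * PAGE_SIZE, &pvR0, &pvR3);
    if (RT_SUCCESS(rc))
    {
        SUPPAGE aPages[2];
        rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
        if (RT_SUCCESS(rc))
        {
            /* aPages[i].Phys now holds the physical address of page i. */
        }

        /* Always release the allocation, whether or not the query succeeded. */
        rc = SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
    }
    return rc;
}
#endif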
3703
3704
3705/**
3706 * Allocates a chunk of memory with a kernel and/or a user mode mapping.
3707 *
3708 * The memory is fixed and it's possible to query the physical addresses using
3709 * SUPR0MemGetPhys().
3710 *
3711 * @returns IPRT status code.
3712 * @param   pSession    The session to associate the allocation with.
3713 * @param cPages The number of pages to allocate.
3714 * @param fFlags Flags, reserved for the future. Must be zero.
3715 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3716 * NULL if no ring-3 mapping.
3717 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3718 * NULL if no ring-0 mapping.
3719 * @param paPages Where to store the addresses of the pages. Optional.
3720 */
3721SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
3722{
3723 int rc;
3724 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3725    LogFlow(("SUPR0PageAllocEx: pSession=%p cPages=%d ppvR3=%p\n", pSession, cPages, ppvR3));
3726
3727 /*
3728 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3729 */
3730 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3731 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
3732 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3733 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
3734 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3735 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
3736 {
3737        Log(("SUPR0PageAllocEx: Illegal request cPages=%u; must be greater than 0 and smaller than %uMB (VBOX_MAX_ALLOC_PAGE_COUNT pages).\n", cPages, VBOX_MAX_ALLOC_PAGE_COUNT / (_1M / _4K)));
3738 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3739 }
3740
3741 /*
3742 * Let IPRT do the work.
3743 */
3744 if (ppvR0)
3745 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
3746 else
3747 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
3748 if (RT_SUCCESS(rc))
3749 {
3750 int rc2;
3751 if (ppvR3)
3752 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3753 else
3754 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
3755 if (RT_SUCCESS(rc))
3756 {
3757 Mem.eType = MEMREF_TYPE_PAGE;
3758 rc = supdrvMemAdd(&Mem, pSession);
3759 if (!rc)
3760 {
3761 if (ppvR3)
3762 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3763 if (ppvR0)
3764 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3765 if (paPages)
3766 {
3767 uint32_t iPage = cPages;
3768 while (iPage-- > 0)
3769 {
3770 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
3771 Assert(paPages[iPage] != NIL_RTHCPHYS);
3772 }
3773 }
3774 return VINF_SUCCESS;
3775 }
3776
3777 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3778 AssertRC(rc2);
3779 }
3780
3781 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3782 AssertRC(rc2);
3783 }
3784 return rc;
3785}
3786SUPR0_EXPORT_SYMBOL(SUPR0PageAllocEx);
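
/*
 * Usage sketch (illustrative only): allocating pages with both a ring-0 and a
 * ring-3 mapping plus their physical addresses, then freeing them again via
 * the ring-3 address.  The helper name is hypothetical; cPages is assumed to
 * come from the caller.
 */
#if 0
static int samplePageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages)
{
    PRTHCPHYS paPages = (PRTHCPHYS)RTMemAlloc(cPages * sizeof(RTHCPHYS));
    if (!paPages)
        return VERR_NO_MEMORY;

    RTR3PTR pvR3 = NIL_RTR3PTR;
    RTR0PTR pvR0 = NIL_RTR0PTR;
    int rc = SUPR0PageAllocEx(pSession, cPages, 0 /* fFlags */, &pvR3, &pvR0, paPages);
    if (RT_SUCCESS(rc))
    {
        /* ... use the two mappings and the physical addresses ... */

        rc = SUPR0PageFree(pSession, pvR3);
    }
    RTMemFree(paPages);
    return rc;
}
#endif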
3787
3788
3789/**
3790 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
3791 * space.
3792 *
3793 * @returns IPRT status code.
3794 * @param   pSession    The session to associate the allocation with.
3795 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3796 * @param offSub Where to start mapping. Must be page aligned.
3797 * @param cbSub How much to map. Must be page aligned.
3798 * @param fFlags Flags, MBZ.
3799 * @param ppvR0 Where to return the address of the ring-0 mapping on
3800 * success.
3801 */
3802SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
3803 uint32_t fFlags, PRTR0PTR ppvR0)
3804{
3805 int rc;
3806 PSUPDRVBUNDLE pBundle;
3807 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
3808 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
3809
3810 /*
3811     * Validate input.
3812 */
3813 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3814 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3815 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3816 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3817 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3818 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3819
3820 /*
3821 * Find the memory object.
3822 */
3823 RTSpinlockAcquire(pSession->Spinlock);
3824 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3825 {
3826 if (pBundle->cUsed > 0)
3827 {
3828 unsigned i;
3829 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3830 {
3831 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3832 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3833 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3834 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
3835 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
3836 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3837 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
3838 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
3839 {
3840 hMemObj = pBundle->aMem[i].MemObj;
3841 break;
3842 }
3843 }
3844 }
3845 }
3846 RTSpinlockRelease(pSession->Spinlock);
3847
3848 rc = VERR_INVALID_PARAMETER;
3849 if (hMemObj != NIL_RTR0MEMOBJ)
3850 {
3851 /*
3852 * Do some further input validations before calling IPRT.
3853 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
3854 */
3855 size_t cbMemObj = RTR0MemObjSize(hMemObj);
3856 if ( offSub < cbMemObj
3857 && cbSub <= cbMemObj
3858 && offSub + cbSub <= cbMemObj)
3859 {
3860 RTR0MEMOBJ hMapObj;
3861 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
3862 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
3863 if (RT_SUCCESS(rc))
3864 *ppvR0 = RTR0MemObjAddress(hMapObj);
3865 }
3866 else
3867 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3868
3869 }
3870 return rc;
3871}
3872SUPR0_EXPORT_SYMBOL(SUPR0PageMapKernel);
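
/*
 * Usage sketch (illustrative only): mapping the first page of a ring-3
 * SUPR0PageAllocEx allocation into kernel space.  The helper name is
 * hypothetical; pvR3 is assumed to be the ring-3 address returned by
 * SUPR0PageAllocEx for this session.
 */
#if 0
static int sampleMapFirstPageIntoKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3)
{
    RTR0PTR pvR0 = NIL_RTR0PTR;
    int rc = SUPR0PageMapKernel(pSession, pvR3, 0 /* offSub */, PAGE_SIZE /* cbSub */, 0 /* fFlags */, &pvR0);
    if (RT_SUCCESS(rc))
    {
        /* pvR0 now addresses the same page as pvR3; the mapping is cleaned up
           together with the underlying allocation. */
    }
    return rc;
}
#endif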
3873
3874
3875/**
3876 * Changes the page level protection of one or more pages previously allocated
3877 * by SUPR0PageAllocEx.
3878 *
3879 * @returns IPRT status code.
3880 * @param   pSession    The session to associate the allocation with.
3881 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3882 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
3883 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
3884 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
3885 * @param offSub Where to start changing. Must be page aligned.
3886 * @param cbSub How much to change. Must be page aligned.
3887 * @param fProt The new page level protection, see RTMEM_PROT_*.
3888 */
3889SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
3890{
3891 int rc;
3892 PSUPDRVBUNDLE pBundle;
3893 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
3894 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
3895 LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt-%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
3896
3897 /*
3898     * Validate input.
3899 */
3900 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3901 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
3902 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3903 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3904 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3905
3906 /*
3907 * Find the memory object.
3908 */
3909 RTSpinlockAcquire(pSession->Spinlock);
3910 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3911 {
3912 if (pBundle->cUsed > 0)
3913 {
3914 unsigned i;
3915 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3916 {
3917 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3918 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3919 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3920 || pvR3 == NIL_RTR3PTR)
3921 && ( pvR0 == NIL_RTR0PTR
3922 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
3923 && ( pvR3 == NIL_RTR3PTR
3924 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
3925 {
3926 if (pvR0 != NIL_RTR0PTR)
3927 hMemObjR0 = pBundle->aMem[i].MemObj;
3928 if (pvR3 != NIL_RTR3PTR)
3929 hMemObjR3 = pBundle->aMem[i].MapObjR3;
3930 break;
3931 }
3932 }
3933 }
3934 }
3935 RTSpinlockRelease(pSession->Spinlock);
3936
3937 rc = VERR_INVALID_PARAMETER;
3938 if ( hMemObjR0 != NIL_RTR0MEMOBJ
3939 || hMemObjR3 != NIL_RTR0MEMOBJ)
3940 {
3941 /*
3942 * Do some further input validations before calling IPRT.
3943 */
3944 size_t cbMemObj = hMemObjR0 != NIL_RTR0PTR ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
3945 if ( offSub < cbMemObj
3946 && cbSub <= cbMemObj
3947 && offSub + cbSub <= cbMemObj)
3948 {
3949 rc = VINF_SUCCESS;
3950 if (hMemObjR3 != NIL_RTR0PTR)
3951 rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
3952 if (hMemObjR0 != NIL_RTR0PTR && RT_SUCCESS(rc))
3953 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
3954 }
3955 else
3956            SUPR0Printf("SUPR0PageProtect: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3957
3958 }
3959 return rc;
3960
3961}
3962SUPR0_EXPORT_SYMBOL(SUPR0PageProtect);
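
/*
 * Usage sketch (illustrative only): making the ring-0 view of the first page
 * of a SUPR0PageAllocEx allocation read-only while leaving the ring-3 mapping
 * untouched.  The helper name is hypothetical; pvR0 is assumed to be the
 * ring-0 address of that allocation.
 */
#if 0
static int sampleProtectFirstPageReadOnly(PSUPDRVSESSION pSession, RTR0PTR pvR0)
{
    /* NIL_RTR3PTR means the ring-3 mapping is left as-is. */
    return SUPR0PageProtect(pSession, NIL_RTR3PTR, pvR0, 0 /* offSub */, PAGE_SIZE /* cbSub */, RTMEM_PROT_READ);
}
#endif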
3963
3964
3965/**
3966 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
3967 *
3968 * @returns IPRT status code.
3969 * @param pSession The session owning the allocation.
3970 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
3971 * SUPR0PageAllocEx().
3972 */
3973SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3974{
3975 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3976 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3977 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
3978}
3979SUPR0_EXPORT_SYMBOL(SUPR0PageFree);
3980
3981
3982/**
3983 * Reports a bad context, currently that means EFLAGS.AC is 0 instead of 1.
3984 *
3985 * @param pDevExt The device extension.
3986 * @param pszFile The source file where the caller detected the bad
3987 * context.
3988 * @param uLine The line number in @a pszFile.
3989 * @param pszExtra Optional additional message to give further hints.
3990 */
3991void VBOXCALL supdrvBadContext(PSUPDRVDEVEXT pDevExt, const char *pszFile, uint32_t uLine, const char *pszExtra)
3992{
3993 uint32_t cCalls;
3994
3995 /*
3996 * Shorten the filename before displaying the message.
3997 */
3998 for (;;)
3999 {
4000 const char *pszTmp = strchr(pszFile, '/');
4001 if (!pszTmp)
4002 pszTmp = strchr(pszFile, '\\');
4003 if (!pszTmp)
4004 break;
4005 pszFile = pszTmp + 1;
4006 }
4007 if (RT_VALID_PTR(pszExtra) && *pszExtra)
4008 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s: %s\n", uLine, pszFile, pszExtra);
4009 else
4010 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s!\n", uLine, pszFile);
4011
4012 /*
4013 * Record the incident so that we stand a chance of blocking I/O controls
4014     * before panicking the system.
4015 */
4016 cCalls = ASMAtomicIncU32(&pDevExt->cBadContextCalls);
4017 if (cCalls > UINT32_MAX - _1K)
4018 ASMAtomicWriteU32(&pDevExt->cBadContextCalls, UINT32_MAX - _1K);
4019}
4020
4021
4022/**
4023 * Reports a bad context, currently that means EFLAGS.AC is 0 instead of 1.
4024 *
4025 * @param pSession The session of the caller.
4026 * @param pszFile The source file where the caller detected the bad
4027 * context.
4028 * @param uLine The line number in @a pszFile.
4029 * @param pszExtra Optional additional message to give further hints.
4030 */
4031SUPR0DECL(void) SUPR0BadContext(PSUPDRVSESSION pSession, const char *pszFile, uint32_t uLine, const char *pszExtra)
4032{
4033 PSUPDRVDEVEXT pDevExt;
4034
4035 AssertReturnVoid(SUP_IS_SESSION_VALID(pSession));
4036 pDevExt = pSession->pDevExt;
4037
4038 supdrvBadContext(pDevExt, pszFile, uLine, pszExtra);
4039}
4040SUPR0_EXPORT_SYMBOL(SUPR0BadContext);
4041
4042
4043/**
4044 * Gets the paging mode of the current CPU.
4045 *
4046 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
4047 */
4048SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
4049{
4050 SUPPAGINGMODE enmMode;
4051
4052 RTR0UINTREG cr0 = ASMGetCR0();
4053 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
4054 enmMode = SUPPAGINGMODE_INVALID;
4055 else
4056 {
4057 RTR0UINTREG cr4 = ASMGetCR4();
4058 uint32_t fNXEPlusLMA = 0;
4059 if (cr4 & X86_CR4_PAE)
4060 {
4061 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
4062 if (fExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
4063 {
4064 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
4065 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
4066 fNXEPlusLMA |= RT_BIT(0);
4067 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
4068 fNXEPlusLMA |= RT_BIT(1);
4069 }
4070 }
4071
4072 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
4073 {
4074 case 0:
4075 enmMode = SUPPAGINGMODE_32_BIT;
4076 break;
4077
4078 case X86_CR4_PGE:
4079 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
4080 break;
4081
4082 case X86_CR4_PAE:
4083 enmMode = SUPPAGINGMODE_PAE;
4084 break;
4085
4086 case X86_CR4_PAE | RT_BIT(0):
4087 enmMode = SUPPAGINGMODE_PAE_NX;
4088 break;
4089
4090 case X86_CR4_PAE | X86_CR4_PGE:
4091 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4092 break;
4093
4094 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4095 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4096 break;
4097
4098 case RT_BIT(1) | X86_CR4_PAE:
4099 enmMode = SUPPAGINGMODE_AMD64;
4100 break;
4101
4102 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
4103 enmMode = SUPPAGINGMODE_AMD64_NX;
4104 break;
4105
4106 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
4107 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
4108 break;
4109
4110 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4111 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4112 break;
4113
4114 default:
4115 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4116 enmMode = SUPPAGINGMODE_INVALID;
4117 break;
4118 }
4119 }
4120 return enmMode;
4121}
4122SUPR0_EXPORT_SYMBOL(SUPR0GetPagingMode);
4123
4124
4125/**
4126 * Change CR4 and take care of the kernel CR4 shadow if applicable.
4127 *
4128 * CR4 shadow handling is required for Linux >= 4.0. Calling this function
4129 * instead of ASMSetCR4() is only necessary for semi-permanent CR4 changes
4130 * for code with interrupts enabled.
4131 *
4132 * @returns the old CR4 value.
4133 *
4134 * @param fOrMask bits to be set in CR4.
4135 * @param   fAndMask        bits to be cleared in CR4.
4136 *
4137 * @remarks Must be called with preemption/interrupts disabled.
4138 */
4139SUPR0DECL(RTCCUINTREG) SUPR0ChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
4140{
4141#ifdef RT_OS_LINUX
4142 return supdrvOSChangeCR4(fOrMask, fAndMask);
4143#else
4144 RTCCUINTREG uOld = ASMGetCR4();
4145 RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
4146 if (uNew != uOld)
4147 ASMSetCR4(uNew);
4148 return uOld;
4149#endif
4150}
4151SUPR0_EXPORT_SYMBOL(SUPR0ChangeCR4);
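
/*
 * Usage sketch (illustrative only): setting CR4.VMXE around some VMX related
 * work with preemption disabled, as required by the remark above, and then
 * putting CR4 back.  The helper name and the "VMX work" placeholder are
 * hypothetical.
 */
#if 0
static void sampleSetVmxeTemporarily(void)
{
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    /* Set VMXE, clearing nothing (fAndMask = all ones). */
    RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, ~(RTCCUINTREG)0);

    /* ... VMXON and other VMX work would go here ... */

    /* Restore the previous state if VMXE wasn't set before. */
    if (!(uOldCr4 & X86_CR4_VMXE))
        SUPR0ChangeCR4(0, ~(RTCCUINTREG)X86_CR4_VMXE);

    RTThreadPreemptRestore(&PreemptState);
}
#endif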
4152
4153
4154/**
4155 * Enables or disables hardware virtualization extensions using native OS APIs.
4156 *
4157 * @returns VBox status code.
4158 * @retval VINF_SUCCESS on success.
4159 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4160 *
4161 * @param fEnable Whether to enable or disable.
4162 */
4163SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4164{
4165#ifdef RT_OS_DARWIN
4166 return supdrvOSEnableVTx(fEnable);
4167#else
4168 RT_NOREF1(fEnable);
4169 return VERR_NOT_SUPPORTED;
4170#endif
4171}
4172SUPR0_EXPORT_SYMBOL(SUPR0EnableVTx);
4173
4174
4175/**
4176 * Suspends hardware virtualization extensions using the native OS API.
4177 *
4178 * This is called prior to entering raw-mode context.
4179 *
4180 * @returns @c true if suspended, @c false if not.
4181 */
4182SUPR0DECL(bool) SUPR0SuspendVTxOnCpu(void)
4183{
4184#ifdef RT_OS_DARWIN
4185 return supdrvOSSuspendVTxOnCpu();
4186#else
4187 return false;
4188#endif
4189}
4190SUPR0_EXPORT_SYMBOL(SUPR0SuspendVTxOnCpu);
4191
4192
4193/**
4194 * Resumes hardware virtualization extensions using the native OS API.
4195 *
4196 * This is called after leaving raw-mode context.
4197 *
4198 * @param fSuspended The return value of SUPR0SuspendVTxOnCpu.
4199 */
4200SUPR0DECL(void) SUPR0ResumeVTxOnCpu(bool fSuspended)
4201{
4202#ifdef RT_OS_DARWIN
4203 supdrvOSResumeVTxOnCpu(fSuspended);
4204#else
4205 RT_NOREF1(fSuspended);
4206 Assert(!fSuspended);
4207#endif
4208}
4209SUPR0_EXPORT_SYMBOL(SUPR0ResumeVTxOnCpu);
4210
4211
4212SUPR0DECL(int) SUPR0GetCurrentGdtRw(RTHCUINTPTR *pGdtRw)
4213{
4214#ifdef RT_OS_LINUX
4215 return supdrvOSGetCurrentGdtRw(pGdtRw);
4216#else
4217 NOREF(pGdtRw);
4218 return VERR_NOT_IMPLEMENTED;
4219#endif
4220}
4221SUPR0_EXPORT_SYMBOL(SUPR0GetCurrentGdtRw);
4222
4223
4224/**
4225 * Gets AMD-V and VT-x support for the calling CPU.
4226 *
4227 * @returns VBox status code.
4228 * @param pfCaps Where to store whether VT-x (SUPVTCAPS_VT_X) or AMD-V
4229 * (SUPVTCAPS_AMD_V) is supported.
4230 */
4231SUPR0DECL(int) SUPR0GetVTSupport(uint32_t *pfCaps)
4232{
4233 Assert(pfCaps);
4234 *pfCaps = 0;
4235
4236 /* Check if the CPU even supports CPUID (extremely ancient CPUs). */
4237 if (ASMHasCpuId())
4238 {
4239 /* Check the range of standard CPUID leafs. */
4240 uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
4241 ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
4242 if (ASMIsValidStdRange(uMaxLeaf))
4243 {
4244 /* Query the standard CPUID leaf. */
4245 uint32_t fFeatEcx, fFeatEdx, uDummy;
4246 ASMCpuId(1, &uDummy, &uDummy, &fFeatEcx, &fFeatEdx);
4247
4248 /* Check if the vendor is Intel (or compatible). */
4249 if ( ASMIsIntelCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4250 || ASMIsViaCentaurCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4251 || ASMIsShanghaiCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4252 {
4253 /* Check VT-x support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4254 if ( (fFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
4255 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4256 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4257 {
4258 *pfCaps = SUPVTCAPS_VT_X;
4259 return VINF_SUCCESS;
4260 }
4261 return VERR_VMX_NO_VMX;
4262 }
4263
4264 /* Check if the vendor is AMD (or compatible). */
4265 if ( ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4266 || ASMIsHygonCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4267 {
4268 uint32_t fExtFeatEcx, uExtMaxId;
4269 ASMCpuId(0x80000000, &uExtMaxId, &uDummy, &uDummy, &uDummy);
4270 ASMCpuId(0x80000001, &uDummy, &uDummy, &fExtFeatEcx, &uDummy);
4271
4272 /* Check AMD-V support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4273 if ( ASMIsValidExtRange(uExtMaxId)
4274 && uExtMaxId >= 0x8000000a
4275 && (fExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
4276 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4277 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4278 {
4279 *pfCaps = SUPVTCAPS_AMD_V;
4280 return VINF_SUCCESS;
4281 }
4282 return VERR_SVM_NO_SVM;
4283 }
4284 }
4285 }
4286 return VERR_UNSUPPORTED_CPU;
4287}
4288SUPR0_EXPORT_SYMBOL(SUPR0GetVTSupport);
4289
4290
4291/**
4292 * Checks if Intel VT-x feature is usable on this CPU.
4293 *
4294 * @returns VBox status code.
4295 * @param pfIsSmxModeAmbiguous Where to return whether the SMX mode causes
4296 * ambiguity that makes us unsure whether we
4297 * really can use VT-x or not.
4298 *
4299 * @remarks Must be called with preemption disabled.
4300 * The caller is also expected to check that the CPU is an Intel (or
4301 * VIA/Shanghai) CPU -and- that it supports VT-x. Otherwise, this
4302 * function might throw a \#GP fault as it tries to read/write MSRs
4303 * that may not be present!
4304 */
4305SUPR0DECL(int) SUPR0GetVmxUsability(bool *pfIsSmxModeAmbiguous)
4306{
4307 uint64_t fFeatMsr;
4308 bool fMaybeSmxMode;
4309 bool fMsrLocked;
4310 bool fSmxVmxAllowed;
4311 bool fVmxAllowed;
4312 bool fIsSmxModeAmbiguous;
4313 int rc;
4314
4315 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4316
4317 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4318 fMaybeSmxMode = RT_BOOL(ASMGetCR4() & X86_CR4_SMXE);
4319 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4320 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4321 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4322 fIsSmxModeAmbiguous = false;
4323 rc = VERR_INTERNAL_ERROR_5;
4324
4325 /* Check if the LOCK bit is set but excludes the required VMXON bit. */
4326 if (fMsrLocked)
4327 {
4328 if (fVmxAllowed && fSmxVmxAllowed)
4329 rc = VINF_SUCCESS;
4330 else if (!fVmxAllowed && !fSmxVmxAllowed)
4331 rc = VERR_VMX_MSR_ALL_VMX_DISABLED;
4332 else if (!fMaybeSmxMode)
4333 {
4334 if (fVmxAllowed)
4335 rc = VINF_SUCCESS;
4336 else
4337 rc = VERR_VMX_MSR_VMX_DISABLED;
4338 }
4339 else
4340 {
4341 /*
4342 * CR4.SMXE is set but this doesn't mean the CPU is necessarily in SMX mode. We shall assume
4343 * that it is -not- and that it is a stupid BIOS/OS setting CR4.SMXE for no good reason.
4344 * See @bugref{6873}.
4345 */
4346 Assert(fMaybeSmxMode == true);
4347 fIsSmxModeAmbiguous = true;
4348 rc = VINF_SUCCESS;
4349 }
4350 }
4351 else
4352 {
4353 /*
4354 * MSR is not yet locked; we can change it ourselves here. Once the lock bit is set,
4355 * this MSR can no longer be modified.
4356 *
4357 * Set both the VMX and SMX_VMX bits (if supported) as we can't determine SMX mode
4358 * accurately. See @bugref{6873}.
4359 *
4360 * We need to check for SMX hardware support here, before writing the MSR as
4361 * otherwise we will #GP fault on CPUs that do not support it. Callers do not check
4362 * for it.
4363 */
4364 uint32_t fFeaturesECX, uDummy;
4365#ifdef VBOX_STRICT
4366 /* Callers should have verified these at some point. */
4367 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4368 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4369 Assert(ASMIsValidStdRange(uMaxId));
4370 Assert( ASMIsIntelCpuEx( uVendorEBX, uVendorECX, uVendorEDX)
4371 || ASMIsViaCentaurCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4372 || ASMIsShanghaiCpuEx( uVendorEBX, uVendorECX, uVendorEDX));
4373#endif
4374 ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
4375 bool fSmxVmxHwSupport = false;
4376 if ( (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
4377 && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
4378 fSmxVmxHwSupport = true;
4379
4380 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
4381 | MSR_IA32_FEATURE_CONTROL_VMXON;
4382 if (fSmxVmxHwSupport)
4383 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;
4384
4385 /*
4386 * Commit.
4387 */
4388 ASMWrMsr(MSR_IA32_FEATURE_CONTROL, fFeatMsr);
4389
4390 /*
4391 * Verify.
4392 */
4393 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4394 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4395 if (fMsrLocked)
4396 {
4397 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4398 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4399 if ( fVmxAllowed
4400 && ( !fSmxVmxHwSupport
4401 || fSmxVmxAllowed))
4402 rc = VINF_SUCCESS;
4403 else
4404 rc = !fSmxVmxHwSupport ? VERR_VMX_MSR_VMX_ENABLE_FAILED : VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED;
4405 }
4406 else
4407 rc = VERR_VMX_MSR_LOCKING_FAILED;
4408 }
4409
4410 if (pfIsSmxModeAmbiguous)
4411 *pfIsSmxModeAmbiguous = fIsSmxModeAmbiguous;
4412
4413 return rc;
4414}
4415SUPR0_EXPORT_SYMBOL(SUPR0GetVmxUsability);
4416
4417
4418/**
4419 * Checks if AMD-V SVM feature is usable on this CPU.
4420 *
4421 * @returns VBox status code.
4422 * @param fInitSvm If usable, try to initialize SVM on this CPU.
4423 *
4424 * @remarks Must be called with preemption disabled.
4425 */
4426SUPR0DECL(int) SUPR0GetSvmUsability(bool fInitSvm)
4427{
4428 int rc;
4429 uint64_t fVmCr;
4430 uint64_t fEfer;
4431
4432 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4433 fVmCr = ASMRdMsr(MSR_K8_VM_CR);
4434 if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
4435 {
4436 rc = VINF_SUCCESS;
4437 if (fInitSvm)
4438 {
4439 /* Turn on SVM in the EFER MSR. */
4440 fEfer = ASMRdMsr(MSR_K6_EFER);
4441 if (fEfer & MSR_K6_EFER_SVME)
4442 rc = VERR_SVM_IN_USE;
4443 else
4444 {
4445 ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
4446
4447 /* Paranoia. */
4448 fEfer = ASMRdMsr(MSR_K6_EFER);
4449 if (fEfer & MSR_K6_EFER_SVME)
4450 {
4451 /* Restore previous value. */
4452 ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
4453 }
4454 else
4455 rc = VERR_SVM_ILLEGAL_EFER_MSR;
4456 }
4457 }
4458 }
4459 else
4460 rc = VERR_SVM_DISABLED;
4461 return rc;
4462}
4463SUPR0_EXPORT_SYMBOL(SUPR0GetSvmUsability);
4464
4465
4466/**
4467 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4468 *
4469 * @returns VBox status code.
4470 * @retval VERR_VMX_NO_VMX
4471 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4472 * @retval VERR_VMX_MSR_VMX_DISABLED
4473 * @retval VERR_VMX_MSR_LOCKING_FAILED
4474 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4475 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4476 * @retval VERR_SVM_NO_SVM
4477 * @retval VERR_SVM_DISABLED
4478 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4479 * (centaur)/Shanghai CPU.
4480 *
4481 * @param pfCaps Where to store the capabilities.
4482 */
4483int VBOXCALL supdrvQueryVTCapsInternal(uint32_t *pfCaps)
4484{
4485 int rc = VERR_UNSUPPORTED_CPU;
4486 bool fIsSmxModeAmbiguous = false;
4487 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4488
4489 /*
4490 * Input validation.
4491 */
4492 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4493 *pfCaps = 0;
4494
4495 /* We may modify MSRs and re-read them, disable preemption so we make sure we don't migrate CPUs. */
4496 RTThreadPreemptDisable(&PreemptState);
4497
4498 /* Check if VT-x/AMD-V is supported. */
4499 rc = SUPR0GetVTSupport(pfCaps);
4500 if (RT_SUCCESS(rc))
4501 {
4502 /* Check if VT-x is supported. */
4503 if (*pfCaps & SUPVTCAPS_VT_X)
4504 {
4505 /* Check if VT-x is usable. */
4506 rc = SUPR0GetVmxUsability(&fIsSmxModeAmbiguous);
4507 if (RT_SUCCESS(rc))
4508 {
4509 /* Query some basic VT-x capabilities (mainly required by our GUI). */
4510 VMXCTLSMSR vtCaps;
4511 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4512 if (vtCaps.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4513 {
4514 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4515 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_EPT)
4516 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4517 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
4518 *pfCaps |= SUPVTCAPS_VTX_UNRESTRICTED_GUEST;
4519 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4520 *pfCaps |= SUPVTCAPS_VTX_VMCS_SHADOWING;
4521 }
4522 }
4523 }
4524 /* Check if AMD-V is supported. */
4525 else if (*pfCaps & SUPVTCAPS_AMD_V)
4526 {
4527            /* Check if SVM is usable. */
4528 rc = SUPR0GetSvmUsability(false /* fInitSvm */);
4529 if (RT_SUCCESS(rc))
4530 {
4531 /* Query some basic AMD-V capabilities (mainly required by our GUI). */
4532 uint32_t uDummy, fSvmFeatures;
4533 ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
4534 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
4535 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4536 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD)
4537 *pfCaps |= SUPVTCAPS_AMDV_VIRT_VMSAVE_VMLOAD;
4538 }
4539 }
4540 }
4541
4542 /* Restore preemption. */
4543 RTThreadPreemptRestore(&PreemptState);
4544
4545 /* After restoring preemption, if we may be in SMX mode, print a warning as it's difficult to debug such problems. */
4546 if (fIsSmxModeAmbiguous)
4547 SUPR0Printf(("WARNING! CR4 hints SMX mode but your CPU is too secretive. Proceeding anyway... We wish you good luck!\n"));
4548
4549 return rc;
4550}
4551
4552
4553/**
4554 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4555 *
4556 * @returns VBox status code.
4557 * @retval VERR_VMX_NO_VMX
4558 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4559 * @retval VERR_VMX_MSR_VMX_DISABLED
4560 * @retval VERR_VMX_MSR_LOCKING_FAILED
4561 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4562 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4563 * @retval VERR_SVM_NO_SVM
4564 * @retval VERR_SVM_DISABLED
4565 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4566 * (centaur)/Shanghai CPU.
4567 *
4568 * @param pSession The session handle.
4569 * @param pfCaps Where to store the capabilities.
4570 */
4571SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
4572{
4573 /*
4574 * Input validation.
4575 */
4576 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4577 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4578
4579 /*
4580 * Call common worker.
4581 */
4582 return supdrvQueryVTCapsInternal(pfCaps);
4583}
4584SUPR0_EXPORT_SYMBOL(SUPR0QueryVTCaps);
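
/*
 * Usage sketch (illustrative only): querying the VT capabilities from ring-0
 * code that holds a valid session and acting on the result.  The helper name
 * is hypothetical.
 */
#if 0
static void sampleLogVTCaps(PSUPDRVSESSION pSession)
{
    uint32_t fCaps = 0;
    int rc = SUPR0QueryVTCaps(pSession, &fCaps);
    if (RT_SUCCESS(rc))
    {
        if (fCaps & SUPVTCAPS_VT_X)
            SUPR0Printf("VT-x is usable%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " with EPT" : "");
        else if (fCaps & SUPVTCAPS_AMD_V)
            SUPR0Printf("AMD-V is usable%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " with nested paging" : "");
    }
    else
        SUPR0Printf("No usable hardware virtualization: rc=%d\n", rc);
}
#endif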
4585
4586
4587/**
4588 * Queries the CPU microcode revision.
4589 *
4590 * @returns VBox status code.
4591 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4592 * readable microcode rev.
4593 *
4594 * @param puRevision Where to store the microcode revision.
4595 */
4596static int VBOXCALL supdrvQueryUcodeRev(uint32_t *puRevision)
4597{
4598 int rc = VERR_UNSUPPORTED_CPU;
4599 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4600
4601 /*
4602 * Input validation.
4603 */
4604 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4605
4606 *puRevision = 0;
4607
4608 /* Disable preemption so we make sure we don't migrate CPUs, just in case. */
4609 /* NB: We assume that there aren't mismatched microcode revs in the system. */
4610 RTThreadPreemptDisable(&PreemptState);
4611
4612 if (ASMHasCpuId())
4613 {
4614 uint32_t uDummy, uTFMSEAX;
4615 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4616
4617 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4618 ASMCpuId(1, &uTFMSEAX, &uDummy, &uDummy, &uDummy);
4619
4620 if (ASMIsValidStdRange(uMaxId))
4621 {
4622 uint64_t uRevMsr;
4623 if (ASMIsIntelCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4624 {
4625 /* Architectural MSR available on Pentium Pro and later. */
4626 if (ASMGetCpuFamily(uTFMSEAX) >= 6)
4627 {
4628 /* Revision is in the high dword. */
4629 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
4630 *puRevision = RT_HIDWORD(uRevMsr);
4631 rc = VINF_SUCCESS;
4632 }
4633 }
4634 else if ( ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4635 || ASMIsHygonCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4636 {
4637 /* Not well documented, but at least all AMD64 CPUs support this. */
4638 if (ASMGetCpuFamily(uTFMSEAX) >= 15)
4639 {
4640 /* Revision is in the low dword. */
4641 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID); /* Same MSR as Intel. */
4642 *puRevision = RT_LODWORD(uRevMsr);
4643 rc = VINF_SUCCESS;
4644 }
4645 }
4646 }
4647 }
4648
4649 RTThreadPreemptRestore(&PreemptState);
4650
4651 return rc;
4652}
4653
4654
4655/**
4656 * Queries the CPU microcode revision.
4657 *
4658 * @returns VBox status code.
4659 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4660 * readable microcode rev.
4661 *
4662 * @param pSession The session handle.
4663 * @param puRevision Where to store the microcode revision.
4664 */
4665SUPR0DECL(int) SUPR0QueryUcodeRev(PSUPDRVSESSION pSession, uint32_t *puRevision)
4666{
4667 /*
4668 * Input validation.
4669 */
4670 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4671 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4672
4673 /*
4674 * Call common worker.
4675 */
4676 return supdrvQueryUcodeRev(puRevision);
4677}
4678SUPR0_EXPORT_SYMBOL(SUPR0QueryUcodeRev);
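
/*
 * Usage sketch (illustrative only): reading and logging the microcode revision
 * of the CPU the caller happens to run on.  The helper name is hypothetical.
 */
#if 0
static void sampleLogUcodeRev(PSUPDRVSESSION pSession)
{
    uint32_t uRevision = 0;
    int rc = SUPR0QueryUcodeRev(pSession, &uRevision);
    if (RT_SUCCESS(rc))
        SUPR0Printf("Microcode revision: %#x\n", uRevision);
    else
        SUPR0Printf("Microcode revision not available: rc=%d\n", rc);
}
#endif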
4679
4680
4681/**
4682 * Gets hardware-virtualization MSRs of the calling CPU.
4683 *
4684 * @returns VBox status code.
4685 * @param pMsrs Where to store the hardware-virtualization MSRs.
4686 * @param fCaps Hardware virtualization capabilities (SUPVTCAPS_XXX). Pass 0
4687 * to explicitly check for the presence of VT-x/AMD-V before
4688 * querying MSRs.
4689 * @param fForce Force querying of MSRs from the hardware.
4690 */
4691SUPR0DECL(int) SUPR0GetHwvirtMsrs(PSUPHWVIRTMSRS pMsrs, uint32_t fCaps, bool fForce)
4692{
4693 NOREF(fForce);
4694
4695 int rc;
4696 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4697
4698 /*
4699 * Input validation.
4700 */
4701 AssertPtrReturn(pMsrs, VERR_INVALID_POINTER);
4702
4703 /*
4704 * Disable preemption so we make sure we don't migrate CPUs and because
4705 * we access global data.
4706 */
4707 RTThreadPreemptDisable(&PreemptState);
4708
4709 /*
4710 * Query the MSRs from the hardware.
4711 */
4712 SUPHWVIRTMSRS Msrs;
4713 RT_ZERO(Msrs);
4714
4715 /* If the caller claims VT-x/AMD-V is supported, don't need to recheck it. */
4716 if (!(fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V)))
4717 rc = SUPR0GetVTSupport(&fCaps);
4718 else
4719 rc = VINF_SUCCESS;
4720 if (RT_SUCCESS(rc))
4721 {
4722 if (fCaps & SUPVTCAPS_VT_X)
4723 {
4724 Msrs.u.vmx.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4725 Msrs.u.vmx.u64Basic = ASMRdMsr(MSR_IA32_VMX_BASIC);
4726 Msrs.u.vmx.PinCtls.u = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
4727 Msrs.u.vmx.ProcCtls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4728 Msrs.u.vmx.ExitCtls.u = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
4729 Msrs.u.vmx.EntryCtls.u = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
4730 Msrs.u.vmx.u64Misc = ASMRdMsr(MSR_IA32_VMX_MISC);
4731 Msrs.u.vmx.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
4732 Msrs.u.vmx.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
4733 Msrs.u.vmx.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
4734 Msrs.u.vmx.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
4735 Msrs.u.vmx.u64VmcsEnum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
4736
4737 if (RT_BF_GET(Msrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
4738 {
4739 Msrs.u.vmx.TruePinCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
4740 Msrs.u.vmx.TrueProcCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
4741 Msrs.u.vmx.TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
4742 Msrs.u.vmx.TrueExitCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
4743 }
4744
4745 if (Msrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4746 {
4747 Msrs.u.vmx.ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4748
4749 if (Msrs.u.vmx.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
4750 Msrs.u.vmx.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
4751
4752 if (Msrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMFUNC)
4753 Msrs.u.vmx.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
4754 }
4755
4756 if (Msrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TERTIARY_CTLS)
4757 Msrs.u.vmx.u64ProcCtls3 = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS3);
4758 }
4759 else if (fCaps & SUPVTCAPS_AMD_V)
4760 {
4761 Msrs.u.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
4762 Msrs.u.svm.u64MsrSmmAddr = ASMRdMsr(MSR_K7_SMM_ADDR);
4763 Msrs.u.svm.u64MsrSmmMask = ASMRdMsr(MSR_K7_SMM_MASK);
4764 }
4765 else
4766 {
4767 RTThreadPreemptRestore(&PreemptState);
4768 AssertMsgFailedReturn(("SUPR0GetVTSupport returns success but neither VT-x nor AMD-V reported!\n"),
4769 VERR_INTERNAL_ERROR_2);
4770 }
4771
4772 /*
4773 * Copy the MSRs out.
4774 */
4775 memcpy(pMsrs, &Msrs, sizeof(*pMsrs));
4776 }
4777
4778 RTThreadPreemptRestore(&PreemptState);
4779
4780 return rc;
4781}
4782SUPR0_EXPORT_SYMBOL(SUPR0GetHwvirtMsrs);
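
/*
 * Usage sketch (illustrative only): detecting VT-x/AMD-V first, then fetching
 * the hardware virtualization MSRs and inspecting one of the VMX fields.  The
 * helper name is hypothetical.
 */
#if 0
static int sampleReadHwvirtMsrs(void)
{
    uint32_t fCaps = 0;
    int rc = SUPR0GetVTSupport(&fCaps);
    if (RT_SUCCESS(rc))
    {
        SUPHWVIRTMSRS Msrs;
        rc = SUPR0GetHwvirtMsrs(&Msrs, fCaps, false /* fForce */);
        if (RT_SUCCESS(rc) && (fCaps & SUPVTCAPS_VT_X))
            SUPR0Printf("VMX basic MSR: %#RX64\n", Msrs.u.vmx.u64Basic);
    }
    return rc;
}
#endif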
4783
4784
4785/**
4786 * Register a component factory with the support driver.
4787 *
4788 * This is currently restricted to kernel sessions only.
4789 *
4790 * @returns VBox status code.
4791 * @retval VINF_SUCCESS on success.
4792 * @retval VERR_NO_MEMORY if we're out of memory.
4793 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
4794 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4795 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4796 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4797 *
4798 * @param pSession The SUPDRV session (must be a ring-0 session).
4799 * @param pFactory Pointer to the component factory registration structure.
4800 *
4801 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
4802 */
4803SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4804{
4805 PSUPDRVFACTORYREG pNewReg;
4806 const char *psz;
4807 int rc;
4808
4809 /*
4810 * Validate parameters.
4811 */
4812 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4813 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4814 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4815 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
4816 psz = RTStrEnd(pFactory->szName, sizeof(pFactory->szName));
4817 AssertReturn(psz, VERR_INVALID_PARAMETER);
4818
4819 /*
4820 * Allocate and initialize a new registration structure.
4821 */
4822 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
4823 if (pNewReg)
4824 {
4825 pNewReg->pNext = NULL;
4826 pNewReg->pFactory = pFactory;
4827 pNewReg->pSession = pSession;
4828 pNewReg->cchName = psz - &pFactory->szName[0];
4829
4830 /*
4831 * Add it to the tail of the list after checking for prior registration.
4832 */
4833 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4834 if (RT_SUCCESS(rc))
4835 {
4836 PSUPDRVFACTORYREG pPrev = NULL;
4837 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4838 while (pCur && pCur->pFactory != pFactory)
4839 {
4840 pPrev = pCur;
4841 pCur = pCur->pNext;
4842 }
4843 if (!pCur)
4844 {
4845 if (pPrev)
4846 pPrev->pNext = pNewReg;
4847 else
4848 pSession->pDevExt->pComponentFactoryHead = pNewReg;
4849 rc = VINF_SUCCESS;
4850 }
4851 else
4852 rc = VERR_ALREADY_EXISTS;
4853
4854 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4855 }
4856
4857 if (RT_FAILURE(rc))
4858 RTMemFree(pNewReg);
4859 }
4860 else
4861 rc = VERR_NO_MEMORY;
4862 return rc;
4863}
4864SUPR0_EXPORT_SYMBOL(SUPR0ComponentRegisterFactory);
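
/*
 * Illustrative sketch (not part of the driver build): how a ring-0 component
 * could register a factory from its module init code and drop it again on
 * termination.  The myQueryFactoryInterface/g_MyFactory/myModuleInit names and
 * the "MyComponent" string are hypothetical; only the SUPDRVFACTORY fields and
 * the SUPR0Component* calls used here appear in this file.
 */
#if 0
static DECLCALLBACK(void *) myQueryFactoryInterface(PCSUPDRVFACTORY pFactory, PSUPDRVSESSION pSession,
                                                    const char *pszInterfaceUuid)
{
    /* A real factory compares pszInterfaceUuid against the UUIDs it knows and
       returns the matching interface structure, or NULL if unsupported. */
    RT_NOREF(pFactory, pSession, pszInterfaceUuid);
    return NULL;
}

static SUPDRVFACTORY g_MyFactory;

static int myModuleInit(PSUPDRVSESSION pSession) /* must be a kernel (ring-0) session */
{
    RT_ZERO(g_MyFactory);
    RTStrCopy(g_MyFactory.szName, sizeof(g_MyFactory.szName), "MyComponent");
    g_MyFactory.pfnQueryFactoryInterface = myQueryFactoryInterface;
    return SUPR0ComponentRegisterFactory(pSession, &g_MyFactory);
}

static void myModuleTerm(PSUPDRVSESSION pSession)
{
    SUPR0ComponentDeregisterFactory(pSession, &g_MyFactory);
}
#endif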
4865
4866
4867/**
4868 * Deregister a component factory.
4869 *
4870 * @returns VBox status code.
4871 * @retval VINF_SUCCESS on success.
4872 * @retval VERR_NOT_FOUND if the factory wasn't registered.
4873 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4874 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4875 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4876 *
4877 * @param pSession The SUPDRV session (must be a ring-0 session).
4878 * @param pFactory Pointer to the component factory registration structure
4879 * previously passed to SUPR0ComponentRegisterFactory().
4880 *
4881 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
4882 */
4883SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4884{
4885 int rc;
4886
4887 /*
4888 * Validate parameters.
4889 */
4890 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4891 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4892 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4893
4894 /*
4895 * Take the lock and look for the registration record.
4896 */
4897 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4898 if (RT_SUCCESS(rc))
4899 {
4900 PSUPDRVFACTORYREG pPrev = NULL;
4901 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4902 while (pCur && pCur->pFactory != pFactory)
4903 {
4904 pPrev = pCur;
4905 pCur = pCur->pNext;
4906 }
4907 if (pCur)
4908 {
4909 if (!pPrev)
4910 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
4911 else
4912 pPrev->pNext = pCur->pNext;
4913
4914 pCur->pNext = NULL;
4915 pCur->pFactory = NULL;
4916 pCur->pSession = NULL;
4917 rc = VINF_SUCCESS;
4918 }
4919 else
4920 rc = VERR_NOT_FOUND;
4921
4922 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4923
4924 RTMemFree(pCur);
4925 }
4926 return rc;
4927}
4928SUPR0_EXPORT_SYMBOL(SUPR0ComponentDeregisterFactory);
4929
4930
4931/**
4932 * Queries a component factory.
4933 *
4934 * @returns VBox status code.
4935 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4936 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4937 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
4938 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
4939 *
4940 * @param pSession The SUPDRV session.
4941 * @param pszName The name of the component factory.
4942 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
4943 * @param ppvFactoryIf Where to store the factory interface.
4944 */
4945SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
4946{
4947 const char *pszEnd;
4948 size_t cchName;
4949 int rc;
4950
4951 /*
4952 * Validate parameters.
4953 */
4954 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4955
4956 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
4957 pszEnd = RTStrEnd(pszName, RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
4958 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4959 cchName = pszEnd - pszName;
4960
4961 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
4962 pszEnd = RTStrEnd(pszInterfaceUuid, RTUUID_STR_LENGTH);
4963 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4964
4965 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
4966 *ppvFactoryIf = NULL;
4967
4968 /*
4969 * Take the lock and try all factories by this name.
4970 */
4971 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4972 if (RT_SUCCESS(rc))
4973 {
4974 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4975 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
4976 while (pCur)
4977 {
4978 if ( pCur->cchName == cchName
4979 && !memcmp(pCur->pFactory->szName, pszName, cchName))
4980 {
4981 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
4982 if (pvFactory)
4983 {
4984 *ppvFactoryIf = pvFactory;
4985 rc = VINF_SUCCESS;
4986 break;
4987 }
4988 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
4989 }
4990
4991 /* next */
4992 pCur = pCur->pNext;
4993 }
4994
4995 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4996 }
4997 return rc;
4998}
4999SUPR0_EXPORT_SYMBOL(SUPR0ComponentQueryFactory);
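
/*
 * Illustrative sketch (not part of the driver build): how another ring-0
 * module could look up a previously registered factory.  The component name
 * and the interface UUID string are made up for the example.
 */
#if 0
void *pvFactoryIf = NULL;
int rc = SUPR0ComponentQueryFactory(pSession, "MyComponent",
                                    "12345678-1234-1234-1234-123456789abc" /* hypothetical interface UUID */,
                                    &pvFactoryIf);
if (RT_SUCCESS(rc))
{
    /* pvFactoryIf points to whatever interface structure the factory's
       pfnQueryFactoryInterface callback returned for that UUID. */
}
#endif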
5000
5001
5002/**
5003 * Adds a memory object to the session.
5004 *
5005 * @returns IPRT status code.
5006 * @param pMem Memory tracking structure containing the
5007 * information to track.
5008 * @param pSession The session.
5009 */
5010static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
5011{
5012 PSUPDRVBUNDLE pBundle;
5013
5014 /*
5015 * Find free entry and record the allocation.
5016 */
5017 RTSpinlockAcquire(pSession->Spinlock);
5018 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
5019 {
5020 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
5021 {
5022 unsigned i;
5023 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
5024 {
5025 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
5026 {
5027 pBundle->cUsed++;
5028 pBundle->aMem[i] = *pMem;
5029 RTSpinlockRelease(pSession->Spinlock);
5030 return VINF_SUCCESS;
5031 }
5032 }
5033 AssertFailed(); /* !!this can't be happening!!! */
5034 }
5035 }
5036 RTSpinlockRelease(pSession->Spinlock);
5037
5038 /*
5039 * Need to allocate a new bundle.
5040 * Insert into the last entry in the bundle.
5041 */
5042 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
5043 if (!pBundle)
5044 return VERR_NO_MEMORY;
5045
5046 /* take last entry. */
5047 pBundle->cUsed++;
5048 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
5049
5050 /* insert into list. */
5051 RTSpinlockAcquire(pSession->Spinlock);
5052 pBundle->pNext = pSession->Bundle.pNext;
5053 pSession->Bundle.pNext = pBundle;
5054 RTSpinlockRelease(pSession->Spinlock);
5055
5056 return VINF_SUCCESS;
5057}
5058
5059
5060/**
5061 * Releases a memory object referenced by pointer and type.
5062 *
5063 * @returns IPRT status code.
5064 * @param pSession Session data.
5065 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
5066 * @param eType Memory type.
5067 */
5068static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
5069{
5070 PSUPDRVBUNDLE pBundle;
5071
5072 /*
5073 * Validate input.
5074 */
5075 if (!uPtr)
5076 {
5077 Log(("Illegal address %p\n", (void *)uPtr));
5078 return VERR_INVALID_PARAMETER;
5079 }
5080
5081 /*
5082 * Search for the address.
5083 */
5084 RTSpinlockAcquire(pSession->Spinlock);
5085 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
5086 {
5087 if (pBundle->cUsed > 0)
5088 {
5089 unsigned i;
5090 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
5091 {
5092 if ( pBundle->aMem[i].eType == eType
5093 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
5094 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
5095 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
5096 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
5097 )
5098 {
5099 /* Make a copy of it and release it outside the spinlock. */
5100 SUPDRVMEMREF Mem = pBundle->aMem[i];
5101 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
5102 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
5103 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
5104 RTSpinlockRelease(pSession->Spinlock);
5105
5106 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
5107 {
5108 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
5109 AssertRC(rc); /** @todo figure out how to handle this. */
5110 }
5111 if (Mem.MemObj != NIL_RTR0MEMOBJ)
5112 {
5113 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
5114 AssertRC(rc); /** @todo figure out how to handle this. */
5115 }
5116 return VINF_SUCCESS;
5117 }
5118 }
5119 }
5120 }
5121 RTSpinlockRelease(pSession->Spinlock);
5122 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
5123 return VERR_INVALID_PARAMETER;
5124}
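
/*
 * Illustrative sketch (not part of the driver build) of how the allocation
 * paths in this file pair supdrvMemAdd/supdrvMemRelease around an RTR0MemObj
 * allocation.  The MEMREF_TYPE_MEM constant and the cb size are assumptions
 * made for the example; only the SUPDRVMEMREF fields used above are taken
 * from this file.
 */
#if 0
SUPDRVMEMREF Mem;
size_t       cb = PAGE_SIZE;                    /* example size */
Mem.MemObj   = NIL_RTR0MEMOBJ;
Mem.MapObjR3 = NIL_RTR0MEMOBJ;
Mem.eType    = MEMREF_TYPE_MEM;                 /* assumed memory-reference type */
int rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, false /*fExecutable*/);
if (RT_SUCCESS(rc))
{
    rc = supdrvMemAdd(&Mem, pSession);          /* track the allocation on the session */
    if (RT_FAILURE(rc))
        RTR0MemObjFree(Mem.MemObj, false /*fFreeMappings*/);
}
/* ... later, released by ring-0 address and type: */
supdrvMemRelease(pSession, (RTHCUINTPTR)RTR0MemObjAddress(Mem.MemObj), MEMREF_TYPE_MEM);
#endif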
5125
5126
5127/**
5128 * Opens an image. If it's the first time it's opened, the caller must upload
5129 * the bits using the supdrvIOCtl_LdrLoad() / SUP_IOCTL_LDR_LOAD function.
5130 *
5131 * This is the 1st step of the loading.
5132 *
5133 * @returns IPRT status code.
5134 * @param pDevExt Device globals.
5135 * @param pSession Session data.
5136 * @param pReq The open request.
5137 */
5138static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
5139{
5140 int rc;
5141 PSUPDRVLDRIMAGE pImage;
5142 void *pv;
5143 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
5144 SUPDRV_CHECK_SMAP_SETUP();
5145 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5146 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithEverything=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithEverything));
5147
5148 /*
5149 * Check if we got an instance of the image already.
5150 */
5151 supdrvLdrLock(pDevExt);
5152 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5153 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5154 {
5155 if ( pImage->szName[cchName] == '\0'
5156 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
5157 {
5158 /** @todo Add an _1M (or something) per session reference. */
5159 if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
5160 {
5161 /** @todo check cbImageBits and cbImageWithEverything here, if they differ
5162 * that indicates that the images are different. */
5163 pReq->u.Out.pvImageBase = pImage->pvImage;
5164 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
5165 pReq->u.Out.fNativeLoader = pImage->fNative;
5166 supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
5167 supdrvLdrUnlock(pDevExt);
5168 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5169 return VINF_SUCCESS;
5170 }
5171 supdrvLdrUnlock(pDevExt);
5172 Log(("supdrvIOCtl_LdrOpen: Too many existing references to '%s'!\n", pReq->u.In.szName));
5173 return VERR_TOO_MANY_REFERENCES;
5174 }
5175 }
5176 /* (not found - add it!) */
5177
5178 /* If the loader interface is locked down, make userland fail early */
5179 if (pDevExt->fLdrLockedDown)
5180 {
5181 supdrvLdrUnlock(pDevExt);
5182 Log(("supdrvIOCtl_LdrOpen: Not adding '%s' to image list, loader interface is locked down!\n", pReq->u.In.szName));
5183 return VERR_PERMISSION_DENIED;
5184 }
5185
5186 /* Stop if caller doesn't wish to prepare loading things. */
5187 if (!pReq->u.In.cbImageBits)
5188 {
5189 supdrvLdrUnlock(pDevExt);
5190 Log(("supdrvIOCtl_LdrOpen: Returning VERR_MODULE_NOT_FOUND for '%s'!\n", pReq->u.In.szName));
5191 return VERR_MODULE_NOT_FOUND;
5192 }
5193
5194 /*
5195 * Allocate memory.
5196 */
5197 Assert(cchName < sizeof(pImage->szName));
5198 pv = RTMemAllocZ(sizeof(SUPDRVLDRIMAGE));
5199 if (!pv)
5200 {
5201 supdrvLdrUnlock(pDevExt);
5202 Log(("supdrvIOCtl_LdrOpen: RTMemAllocZ() failed\n"));
5203 return VERR_NO_MEMORY;
5204 }
5205 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5206
5207 /*
5208 * Setup and link in the LDR stuff.
5209 */
5210 pImage = (PSUPDRVLDRIMAGE)pv;
5211 pImage->pvImage = NULL;
5212#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5213 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
5214#else
5215 pImage->pvImageAlloc = NULL;
5216#endif
5217 pImage->cbImageWithEverything = pReq->u.In.cbImageWithEverything;
5218 pImage->cbImageBits = pReq->u.In.cbImageBits;
5219 pImage->cSymbols = 0;
5220 pImage->paSymbols = NULL;
5221 pImage->pachStrTab = NULL;
5222 pImage->cbStrTab = 0;
5223 pImage->cSegments = 0;
5224 pImage->paSegments = NULL;
5225 pImage->pfnModuleInit = NULL;
5226 pImage->pfnModuleTerm = NULL;
5227 pImage->pfnServiceReqHandler = NULL;
5228 pImage->uState = SUP_IOCTL_LDR_OPEN;
5229 pImage->cImgUsage = 0; /* Increased by supdrvLdrAddUsage later */
5230 pImage->pDevExt = pDevExt;
5231 pImage->pImageImport = NULL;
5232 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC;
5233 pImage->pWrappedModInfo = NULL;
5234 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
5235
5236 /*
5237 * Try to load it using the native loader; if that isn't supported, fall back
5238 * on the older method.
5239 */
5240 pImage->fNative = true;
5241 rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
5242 if (rc == VERR_NOT_SUPPORTED)
5243 {
5244#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5245 rc = RTR0MemObjAllocPage(&pImage->hMemObjImage, pImage->cbImageBits, true /*fExecutable*/);
5246 if (RT_SUCCESS(rc))
5247 {
5248 pImage->pvImage = RTR0MemObjAddress(pImage->hMemObjImage);
5249 pImage->fNative = false;
5250 }
5251#else
5252 pImage->pvImageAlloc = RTMemExecAlloc(pImage->cbImageBits + 31);
5253 pImage->pvImage = RT_ALIGN_P(pImage->pvImageAlloc, 32);
5254 pImage->fNative = false;
5255 rc = pImage->pvImageAlloc ? VINF_SUCCESS : VERR_NO_EXEC_MEMORY;
5256#endif
5257 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5258 }
5259 if (RT_SUCCESS(rc))
5260 rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
5261 if (RT_FAILURE(rc))
5262 {
5263 supdrvLdrUnlock(pDevExt);
5264 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
5265 RTMemFree(pImage);
5266 Log(("supdrvIOCtl_LdrOpen(%s): failed - %Rrc\n", pReq->u.In.szName, rc));
5267 return rc;
5268 }
5269 Assert(RT_VALID_PTR(pImage->pvImage) || RT_FAILURE(rc));
5270
5271 /*
5272 * Link it.
5273 */
5274 pImage->pNext = pDevExt->pLdrImages;
5275 pDevExt->pLdrImages = pImage;
5276
5277 pReq->u.Out.pvImageBase = pImage->pvImage;
5278 pReq->u.Out.fNeedsLoading = true;
5279 pReq->u.Out.fNativeLoader = pImage->fNative;
5280 supdrvOSLdrNotifyOpened(pDevExt, pImage, pReq->u.In.szFilename);
5281
5282 supdrvLdrUnlock(pDevExt);
5283 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5284 return VINF_SUCCESS;
5285}
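
/*
 * Illustrative sketch (not part of the driver build) of the ring-3 side of the
 * two-step loading protocol this function starts.  The suplibLdrOpen and
 * suplibLdrLoad wrapper names, the path and the zero sizes are hypothetical
 * placeholders; in reality SUPLib in ring-3 packages the SUP_IOCTL_LDR_OPEN
 * and SUP_IOCTL_LDR_LOAD requests and fills in the real sizes.
 */
#if 0
uint32_t   cbImageBits           = 0;   /* placeholder: computed by the ring-3 loader from the parsed image */
uint32_t   cbImageWithEverything = 0;   /* placeholder: image bits + string/symbol/segment tables */
SUPLDROPEN OpenReq;
RT_ZERO(OpenReq);
RTStrCopy(OpenReq.u.In.szName, sizeof(OpenReq.u.In.szName), "VMMR0.r0");
RTStrCopy(OpenReq.u.In.szFilename, sizeof(OpenReq.u.In.szFilename), "/path/to/VMMR0.r0");
OpenReq.u.In.cbImageBits           = cbImageBits;
OpenReq.u.In.cbImageWithEverything = cbImageWithEverything;
int rc = suplibLdrOpen(&OpenReq);                          /* hypothetical; issues SUP_IOCTL_LDR_OPEN -> supdrvIOCtl_LdrOpen */
if (RT_SUCCESS(rc) && OpenReq.u.Out.fNeedsLoading)
    rc = suplibLdrLoad(OpenReq.u.Out.pvImageBase);         /* hypothetical; issues SUP_IOCTL_LDR_LOAD -> supdrvIOCtl_LdrLoad */
#endif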
5286
5287
5288/**
5289 * Formats a load error message.
5290 *
5291 * @returns @a rc
5292 * @param rc Return code.
5293 * @param pReq The request.
5294 * @param pszFormat The error message format string.
5295 * @param ... Arguments for the format string.
5296 */
5297int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
5298{
5299 va_list va;
5300 va_start(va, pszFormat);
5301 pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
5302 RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
5303 va_end(va);
5304 Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
5305 return rc;
5306}
5307
5308
5309/**
5310 * Worker that validates a pointer to an image entrypoint.
5311 *
5312 * Calls supdrvLdrLoadError on error.
5313 *
5314 * @returns IPRT status code.
5315 * @param pDevExt The device globals.
5316 * @param pImage The loader image.
5317 * @param pv The pointer into the image.
5318 * @param fMayBeNull Whether it may be NULL.
5319 * @param pszSymbol The entrypoint name or log name. If the symbol is
5320 * capitalized it signifies a specific symbol, otherwise it
5321 * is only used for logging.
5322 * @param pbImageBits The image bits prepared by ring-3.
5323 * @param pReq The request for passing to supdrvLdrLoadError.
5324 *
5325 * @note Will release the loader lock on failure!
5326 */
5327static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv, bool fMayBeNull,
5328 const uint8_t *pbImageBits, const char *pszSymbol, PSUPLDRLOAD pReq)
5329{
5330 if (!fMayBeNull || pv)
5331 {
5332 uint32_t iSeg;
5333
5334 /* Must be within the image bits: */
5335 uintptr_t const uRva = (uintptr_t)pv - (uintptr_t)pImage->pvImage;
5336 if (uRva >= pImage->cbImageBits)
5337 {
5338 supdrvLdrUnlock(pDevExt);
5339 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5340 "Invalid entry point address %p given for %s: RVA %#zx, image size %#zx",
5341 pv, pszSymbol, uRva, pImage->cbImageBits);
5342 }
5343
5344 /* Must be in an executable segment: */
5345 for (iSeg = 0; iSeg < pImage->cSegments; iSeg++)
5346 if (uRva - pImage->paSegments[iSeg].off < (uintptr_t)pImage->paSegments[iSeg].cb)
5347 {
5348 if (pImage->paSegments[iSeg].fProt & SUPLDR_PROT_EXEC)
5349 break;
5350 supdrvLdrUnlock(pDevExt);
5351 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5352 "Bad entry point %p given for %s: not executable (seg #%u: %#RX32 LB %#RX32 prot %#x)",
5353 pv, pszSymbol, iSeg, pImage->paSegments[iSeg].off, pImage->paSegments[iSeg].cb,
5354 pImage->paSegments[iSeg].fProt);
5355 }
5356 if (iSeg >= pImage->cSegments)
5357 {
5358 supdrvLdrUnlock(pDevExt);
5359 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5360 "Bad entry point %p given for %s: no matching segment found (RVA %#zx)!",
5361 pv, pszSymbol, uRva);
5362 }
5363
5364 if (pImage->fNative)
5365 {
5366 /** @todo pass pReq along to the native code. */
5367 int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits, pszSymbol);
5368 if (RT_FAILURE(rc))
5369 {
5370 supdrvLdrUnlock(pDevExt);
5371 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5372 "Bad entry point address %p for %s: rc=%Rrc\n", pv, pszSymbol, rc);
5373 }
5374 }
5375 }
5376 return VINF_SUCCESS;
5377}
5378
5379
5380/**
5381 * Loads the image bits.
5382 *
5383 * This is the 2nd step of the loading.
5384 *
5385 * @returns IPRT status code.
5386 * @param pDevExt Device globals.
5387 * @param pSession Session data.
5388 * @param pReq The request.
5389 */
5390static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
5391{
5392 PSUPDRVLDRUSAGE pUsage;
5393 PSUPDRVLDRIMAGE pImage;
5394 PSUPDRVLDRIMAGE pImageImport;
5395 int rc;
5396 SUPDRV_CHECK_SMAP_SETUP();
5397 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithEverything=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithEverything));
5398 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5399
5400 /*
5401 * Find the ldr image.
5402 */
5403 supdrvLdrLock(pDevExt);
5404 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5405
5406 pUsage = pSession->pLdrUsage;
5407 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5408 pUsage = pUsage->pNext;
5409 if (!pUsage)
5410 {
5411 supdrvLdrUnlock(pDevExt);
5412 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image not found");
5413 }
5414 pImage = pUsage->pImage;
5415
5416 /*
5417 * Validate input.
5418 */
5419 if ( pImage->cbImageWithEverything != pReq->u.In.cbImageWithEverything
5420 || pImage->cbImageBits != pReq->u.In.cbImageBits)
5421 {
5422 supdrvLdrUnlock(pDevExt);
5423 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %u(prep) != %u(load) or %u != %u",
5424 pImage->cbImageWithEverything, pReq->u.In.cbImageWithEverything, pImage->cbImageBits, pReq->u.In.cbImageBits);
5425 }
5426
5427 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
5428 {
5429 unsigned uState = pImage->uState;
5430 supdrvLdrUnlock(pDevExt);
5431 if (uState != SUP_IOCTL_LDR_LOAD)
5432 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
5433 pReq->u.Out.uErrorMagic = 0;
5434 return VERR_ALREADY_LOADED;
5435 }
5436
5437 /* If the loader interface is locked down, don't load new images */
5438 if (pDevExt->fLdrLockedDown)
5439 {
5440 supdrvLdrUnlock(pDevExt);
5441 return supdrvLdrLoadError(VERR_PERMISSION_DENIED, pReq, "Loader is locked down");
5442 }
5443
5444 /*
5445 * If the new image is a dependent of VMMR0.r0, resolve it via the
5446 * caller's usage list and make sure it's in the ready state.
5447 */
5448 pImageImport = NULL;
5449 if (pReq->u.In.fFlags & SUPLDRLOAD_F_DEP_VMMR0)
5450 {
5451 PSUPDRVLDRUSAGE pUsageDependency = pSession->pLdrUsage;
5452 while (pUsageDependency && pUsageDependency->pImage->pvImage != pDevExt->pvVMMR0)
5453 pUsageDependency = pUsageDependency->pNext;
5454 if (!pUsageDependency || !pDevExt->pvVMMR0)
5455 {
5456 supdrvLdrUnlock(pDevExt);
5457 return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 not loaded by session");
5458 }
5459 pImageImport = pUsageDependency->pImage;
5460 if (pImageImport->uState != SUP_IOCTL_LDR_LOAD)
5461 {
5462 supdrvLdrUnlock(pDevExt);
5463 return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 is not ready (state %#x)", pImageImport->uState);
5464 }
5465 }
5466
5467 /*
5468 * Copy the segments before we start using supdrvLdrValidatePointer for entrypoint validation.
5469 */
5470 pImage->cSegments = pReq->u.In.cSegments;
5471 {
5472 size_t cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
5473 pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSegments], cbSegments);
5474 if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
5475 pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
5476 else
5477 {
5478 supdrvLdrUnlock(pDevExt);
5479 return supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
5480 }
5481 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5482 }
5483
5484 /*
5485 * Validate entrypoints.
5486 */
5487 switch (pReq->u.In.eEPType)
5488 {
5489 case SUPLDRLOADEP_NOTHING:
5490 break;
5491
5492 case SUPLDRLOADEP_VMMR0:
5493 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, pReq->u.In.abImage, "VMMR0EntryFast", pReq);
5494 if (RT_FAILURE(rc))
5495 return rc;
5496 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, pReq->u.In.abImage, "VMMR0EntryEx", pReq);
5497 if (RT_FAILURE(rc))
5498 return rc;
5499
5500 /* Fail here if there is already a VMMR0 module. */
5501 if (pDevExt->pvVMMR0 != NULL)
5502 {
5503 supdrvLdrUnlock(pDevExt);
5504 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "There is already a VMMR0 module loaded (%p)", pDevExt->pvVMMR0);
5505 }
5506 break;
5507
5508 case SUPLDRLOADEP_SERVICE:
5509 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, pReq->u.In.abImage, "pfnServiceReq", pReq);
5510 if (RT_FAILURE(rc))
5511 return rc;
5512 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
5513 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
5514 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
5515 {
5516 supdrvLdrUnlock(pDevExt);
5517 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "apvReserved={%p,%p,%p} MBZ!",
5518 pReq->u.In.EP.Service.apvReserved[0], pReq->u.In.EP.Service.apvReserved[1],
5519 pReq->u.In.EP.Service.apvReserved[2]);
5520 }
5521 break;
5522
5523 default:
5524 supdrvLdrUnlock(pDevExt);
5525 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid eEPType=%d", pReq->u.In.eEPType);
5526 }
5527
5528 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, pReq->u.In.abImage, "ModuleInit", pReq);
5529 if (RT_FAILURE(rc))
5530 return rc;
5531 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, pReq->u.In.abImage, "ModuleTerm", pReq);
5532 if (RT_FAILURE(rc))
5533 return rc;
5534 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5535
5536 /*
5537 * Allocate and copy the tables if non-native.
5538 * (No need to do try/except as this is a buffered request.)
5539 */
5540 if (!pImage->fNative)
5541 {
5542 pImage->cbStrTab = pReq->u.In.cbStrTab;
5543 if (pImage->cbStrTab)
5544 {
5545 pImage->pachStrTab = (char *)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offStrTab], pImage->cbStrTab);
5546 if (!pImage->pachStrTab)
5547 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for string table: %#x", pImage->cbStrTab);
5548 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5549 }
5550
5551 pImage->cSymbols = pReq->u.In.cSymbols;
5552 if (RT_SUCCESS(rc) && pImage->cSymbols)
5553 {
5554 size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
5555 pImage->paSymbols = (PSUPLDRSYM)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSymbols], cbSymbols);
5556 if (!pImage->paSymbols)
5557 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
5558 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5559 }
5560 }
5561
5562 /*
5563 * Copy the bits and apply permissions / complete native loading.
5564 */
5565 if (RT_SUCCESS(rc))
5566 {
5567 pImage->uState = SUP_IOCTL_LDR_LOAD;
5568 pImage->pfnModuleInit = (PFNR0MODULEINIT)(uintptr_t)pReq->u.In.pfnModuleInit;
5569 pImage->pfnModuleTerm = (PFNR0MODULETERM)(uintptr_t)pReq->u.In.pfnModuleTerm;
5570
5571 if (pImage->fNative)
5572 rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.abImage, pReq);
5573 else
5574 {
5575#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5576 uint32_t i;
5577 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5578
5579 for (i = 0; i < pImage->cSegments; i++)
5580 {
5581 rc = RTR0MemObjProtect(pImage->hMemObjImage, pImage->paSegments[i].off, pImage->paSegments[i].cb,
5582 pImage->paSegments[i].fProt);
5583 if (RT_SUCCESS(rc))
5584 continue;
5585 if (rc == VERR_NOT_SUPPORTED)
5586 rc = VINF_SUCCESS;
5587 else
5588 rc = supdrvLdrLoadError(rc, pReq, "RTR0MemObjProtect failed on seg#%u %#RX32 LB %#RX32 fProt=%#x",
5589 i, pImage->paSegments[i].off, pImage->paSegments[i].cb, pImage->paSegments[i].fProt);
5590 break;
5591 }
5592#else
5593 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5594#endif
5595 Log(("vboxdrv: Loaded '%s' at %p\n", pImage->szName, pImage->pvImage));
5596 }
5597 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5598 }
5599
5600 /*
5601 * On success, call the module initialization function.
5602 */
5603 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
5604 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
5605 {
5606 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
5607 pDevExt->pLdrInitImage = pImage;
5608 pDevExt->hLdrInitThread = RTThreadNativeSelf();
5609 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5610 rc = pImage->pfnModuleInit(pImage);
5611 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5612 pDevExt->pLdrInitImage = NULL;
5613 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
5614 if (RT_FAILURE(rc))
5615 supdrvLdrLoadError(rc, pReq, "ModuleInit failed: %Rrc", rc);
5616 }
5617 if (RT_SUCCESS(rc))
5618 {
5619 /*
5620 * Publish any standard entry points.
5621 */
5622 switch (pReq->u.In.eEPType)
5623 {
5624 case SUPLDRLOADEP_VMMR0:
5625 Assert(!pDevExt->pvVMMR0);
5626 Assert(!pDevExt->pfnVMMR0EntryFast);
5627 Assert(!pDevExt->pfnVMMR0EntryEx);
5628 ASMAtomicWritePtrVoid(&pDevExt->pvVMMR0, pImage->pvImage);
5629 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryFast,
5630 (void *)(uintptr_t) pReq->u.In.EP.VMMR0.pvVMMR0EntryFast);
5631 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryEx,
5632 (void *)(uintptr_t) pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
5633 break;
5634 case SUPLDRLOADEP_SERVICE:
5635 pImage->pfnServiceReqHandler = (PFNSUPR0SERVICEREQHANDLER)(uintptr_t)pReq->u.In.EP.Service.pfnServiceReq;
5636 break;
5637 default:
5638 break;
5639 }
5640
5641 /*
5642 * Increase the usage counter of any imported image.
5643 */
5644 if (pImageImport)
5645 {
5646 pImageImport->cImgUsage++;
5647 if (pImageImport->cImgUsage == 2 && pImageImport->pWrappedModInfo)
5648 supdrvOSLdrRetainWrapperModule(pDevExt, pImageImport);
5649 pImage->pImageImport = pImageImport;
5650 }
5651
5652 /*
5653 * Done!
5654 */
5655 SUPR0Printf("vboxdrv: %RKv %s\n", pImage->pvImage, pImage->szName);
5656 pReq->u.Out.uErrorMagic = 0;
5657 pReq->u.Out.szError[0] = '\0';
5658 }
5659 else
5660 {
5661 /* Inform the tracing component in case ModuleInit registered TPs. */
5662 supdrvTracerModuleUnloading(pDevExt, pImage);
5663
5664 pImage->uState = SUP_IOCTL_LDR_OPEN;
5665 pImage->pfnModuleInit = NULL;
5666 pImage->pfnModuleTerm = NULL;
5667 pImage->pfnServiceReqHandler= NULL;
5668 pImage->cbStrTab = 0;
5669 RTMemFree(pImage->pachStrTab);
5670 pImage->pachStrTab = NULL;
5671 RTMemFree(pImage->paSymbols);
5672 pImage->paSymbols = NULL;
5673 pImage->cSymbols = 0;
5674 }
5675
5676 supdrvLdrUnlock(pDevExt);
5677 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5678 return rc;
5679}
5680
5681
5682/**
5683 * Registers a .r0 module wrapped in a native one and manually loaded.
5684 *
5685 * @returns VINF_SUCCESS or error code (no info statuses).
5686 * @param pDevExt Device globals.
5687 * @param pWrappedModInfo The wrapped module info.
5688 * @param pvNative OS specific information.
5689 * @param phMod Where to store the module handle.
5690 */
5691int VBOXCALL supdrvLdrRegisterWrappedModule(PSUPDRVDEVEXT pDevExt, PCSUPLDRWRAPPEDMODULE pWrappedModInfo,
5692 void *pvNative, void **phMod)
5693{
5694 size_t cchName;
5695 PSUPDRVLDRIMAGE pImage;
5696 PCSUPLDRWRAPMODSYMBOL paSymbols;
5697 uint16_t idx;
5698 const char *pszPrevSymbol;
5699 int rc;
5700 SUPDRV_CHECK_SMAP_SETUP();
5701 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5702
5703 /*
5704 * Validate input.
5705 */
5706 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
5707 *phMod = NULL;
5708 AssertPtrReturn(pDevExt, VERR_INTERNAL_ERROR_2);
5709
5710 AssertPtrReturn(pWrappedModInfo, VERR_INVALID_POINTER);
5711 AssertMsgReturn(pWrappedModInfo->uMagic == SUPLDRWRAPPEDMODULE_MAGIC,
5712 ("uMagic=%#x, expected %#x\n", pWrappedModInfo->uMagic, SUPLDRWRAPPEDMODULE_MAGIC),
5713 VERR_INVALID_MAGIC);
5714 AssertMsgReturn(pWrappedModInfo->uVersion == SUPLDRWRAPPEDMODULE_VERSION,
5715 ("Unsupported uVersion=%#x, current version %#x\n", pWrappedModInfo->uVersion, SUPLDRWRAPPEDMODULE_VERSION),
5716 VERR_VERSION_MISMATCH);
5717 AssertMsgReturn(pWrappedModInfo->uEndMagic == SUPLDRWRAPPEDMODULE_MAGIC,
5718 ("uEndMagic=%#x, expected %#x\n", pWrappedModInfo->uEndMagic, SUPLDRWRAPPEDMODULE_MAGIC),
5719 VERR_INVALID_MAGIC);
5720 AssertMsgReturn(pWrappedModInfo->fFlags <= SUPLDRWRAPPEDMODULE_F_VMMR0, ("Unknown flags in: %#x\n", pWrappedModInfo->fFlags),
5721 VERR_INVALID_FLAGS);
5722
5723 /* szName: */
5724 AssertReturn(RTStrEnd(pWrappedModInfo->szName, sizeof(pWrappedModInfo->szName)) != NULL, VERR_INVALID_NAME);
5725 AssertReturn(supdrvIsLdrModuleNameValid(pWrappedModInfo->szName), VERR_INVALID_NAME);
5726 AssertCompile(sizeof(pImage->szName) == sizeof(pWrappedModInfo->szName));
5727 cchName = strlen(pWrappedModInfo->szName);
5728
5729 /* Image range: */
5730 AssertPtrReturn(pWrappedModInfo->pvImageStart, VERR_INVALID_POINTER);
5731 AssertPtrReturn(pWrappedModInfo->pvImageEnd, VERR_INVALID_POINTER);
5732 AssertReturn((uintptr_t)pWrappedModInfo->pvImageEnd > (uintptr_t)pWrappedModInfo->pvImageStart, VERR_INVALID_PARAMETER);
5733
5734 /* Symbol table: */
5735 AssertMsgReturn(pWrappedModInfo->cSymbols <= _8K, ("Too many symbols: %u, max 8192\n", pWrappedModInfo->cSymbols),
5736 VERR_TOO_MANY_SYMLINKS);
5737 pszPrevSymbol = "\x7f";
5738 paSymbols = pWrappedModInfo->paSymbols;
5739 idx = pWrappedModInfo->cSymbols;
5740 while (idx-- > 0)
5741 {
5742 const char *pszSymbol = paSymbols[idx].pszSymbol;
5743 AssertMsgReturn(RT_VALID_PTR(pszSymbol) && RT_VALID_PTR(paSymbols[idx].pfnValue),
5744 ("paSymbols[%u]: %p/%p\n", idx, pszSymbol, paSymbols[idx].pfnValue),
5745 VERR_INVALID_POINTER);
5746 AssertReturn(*pszSymbol != '\0', VERR_EMPTY_STRING);
5747 AssertMsgReturn(strcmp(pszSymbol, pszPrevSymbol) < 0,
5748 ("symbol table out of order at index %u: '%s' vs '%s'\n", idx, pszSymbol, pszPrevSymbol),
5749 VERR_WRONG_ORDER);
5750 pszPrevSymbol = pszSymbol;
5751 }
5752
5753 /* Standard entry points: */
5754 AssertPtrNullReturn(pWrappedModInfo->pfnModuleInit, VERR_INVALID_POINTER);
5755 AssertPtrNullReturn(pWrappedModInfo->pfnModuleTerm, VERR_INVALID_POINTER);
5756 AssertReturn((uintptr_t)pWrappedModInfo->pfnModuleInit != (uintptr_t)pWrappedModInfo->pfnModuleTerm || pWrappedModInfo->pfnModuleInit == NULL,
5757 VERR_INVALID_PARAMETER);
5758 if (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0)
5759 {
5760 AssertReturn(pWrappedModInfo->pfnServiceReqHandler == NULL, VERR_INVALID_PARAMETER);
5761 AssertPtrReturn(pWrappedModInfo->pfnVMMR0EntryFast, VERR_INVALID_POINTER);
5762 AssertPtrReturn(pWrappedModInfo->pfnVMMR0EntryEx, VERR_INVALID_POINTER);
5763 AssertReturn(pWrappedModInfo->pfnVMMR0EntryFast != pWrappedModInfo->pfnVMMR0EntryEx, VERR_INVALID_PARAMETER);
5764 }
5765 else
5766 {
5767 AssertPtrNullReturn(pWrappedModInfo->pfnServiceReqHandler, VERR_INVALID_POINTER);
5768 AssertReturn(pWrappedModInfo->pfnVMMR0EntryFast == NULL, VERR_INVALID_PARAMETER);
5769 AssertReturn(pWrappedModInfo->pfnVMMR0EntryEx == NULL, VERR_INVALID_PARAMETER);
5770 }
5771
5772 /*
5773 * Check if we got an instance of the image already.
5774 */
5775 supdrvLdrLock(pDevExt);
5776 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5777 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5778 {
5779 if ( pImage->szName[cchName] == '\0'
5780 && !memcmp(pImage->szName, pWrappedModInfo->szName, cchName))
5781 {
5782 supdrvLdrUnlock(pDevExt);
5783 Log(("supdrvLdrRegisterWrappedModule: '%s' already loaded!\n", pWrappedModInfo->szName));
5784 return VERR_ALREADY_LOADED;
5785 }
5786 }
5787 /* (not found - add it!) */
5788
5789 /* If the loader interface is locked down, make userland fail early */
5790 if (pDevExt->fLdrLockedDown)
5791 {
5792 supdrvLdrUnlock(pDevExt);
5793 Log(("supdrvLdrRegisterWrappedModule: Not adding '%s' to image list, loader interface is locked down!\n", pWrappedModInfo->szName));
5794 return VERR_PERMISSION_DENIED;
5795 }
5796
5797 /* Only one VMMR0: */
5798 if ( pDevExt->pvVMMR0 != NULL
5799 && (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0))
5800 {
5801 supdrvLdrUnlock(pDevExt);
5802 Log(("supdrvLdrRegisterWrappedModule: Rejecting '%s' as we already got a VMMR0 module!\n", pWrappedModInfo->szName));
5803 return VERR_ALREADY_EXISTS;
5804 }
5805
5806 /*
5807 * Allocate memory.
5808 */
5809 Assert(cchName < sizeof(pImage->szName));
5810 pImage = (PSUPDRVLDRIMAGE)RTMemAllocZ(sizeof(SUPDRVLDRIMAGE));
5811 if (!pImage)
5812 {
5813 supdrvLdrUnlock(pDevExt);
5814 Log(("supdrvLdrRegisterWrappedModule: RTMemAllocZ() failed\n"));
5815 return VERR_NO_MEMORY;
5816 }
5817 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5818
5819 /*
5820 * Setup and link in the LDR stuff.
5821 */
5822 pImage->pvImage = (void *)pWrappedModInfo->pvImageStart;
5823#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5824 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
5825#else
5826 pImage->pvImageAlloc = NULL;
5827#endif
5828 pImage->cbImageWithEverything
5829 = pImage->cbImageBits = (uintptr_t)pWrappedModInfo->pvImageEnd - (uintptr_t)pWrappedModInfo->pvImageStart;
5830 pImage->cSymbols = 0;
5831 pImage->paSymbols = NULL;
5832 pImage->pachStrTab = NULL;
5833 pImage->cbStrTab = 0;
5834 pImage->cSegments = 0;
5835 pImage->paSegments = NULL;
5836 pImage->pfnModuleInit = pWrappedModInfo->pfnModuleInit;
5837 pImage->pfnModuleTerm = pWrappedModInfo->pfnModuleTerm;
5838 pImage->pfnServiceReqHandler = NULL; /* Only setting this after module init */
5839 pImage->uState = SUP_IOCTL_LDR_LOAD;
5840 pImage->cImgUsage = 1; /* Held by the wrapper module till unload. */
5841 pImage->pDevExt = pDevExt;
5842 pImage->pImageImport = NULL;
5843 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC;
5844 pImage->pWrappedModInfo = pWrappedModInfo;
5845 pImage->pvWrappedNative = pvNative;
5846 pImage->fNative = true;
5847 memcpy(pImage->szName, pWrappedModInfo->szName, cchName + 1);
5848
5849 /*
5850 * Link it.
5851 */
5852 pImage->pNext = pDevExt->pLdrImages;
5853 pDevExt->pLdrImages = pImage;
5854
5855 /*
5856 * Call module init function if found.
5857 */
5858 rc = VINF_SUCCESS;
5859 if (pImage->pfnModuleInit)
5860 {
5861 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
5862 pDevExt->pLdrInitImage = pImage;
5863 pDevExt->hLdrInitThread = RTThreadNativeSelf();
5864 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5865 rc = pImage->pfnModuleInit(pImage);
5866 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5867 pDevExt->pLdrInitImage = NULL;
5868 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
5869 }
5870 if (RT_SUCCESS(rc))
5871 {
5872 /*
5873 * Update entry points.
5874 */
5875 if (pWrappedModInfo->fFlags & SUPLDRWRAPPEDMODULE_F_VMMR0)
5876 {
5877 Assert(!pDevExt->pvVMMR0);
5878 Assert(!pDevExt->pfnVMMR0EntryFast);
5879 Assert(!pDevExt->pfnVMMR0EntryEx);
5880 ASMAtomicWritePtrVoid(&pDevExt->pvVMMR0, pImage->pvImage);
5881 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryFast,
5882 (void *)(uintptr_t) pWrappedModInfo->pfnVMMR0EntryFast);
5883 ASMAtomicWritePtrVoid((void * volatile *)(uintptr_t)&pDevExt->pfnVMMR0EntryEx,
5884 (void *)(uintptr_t) pWrappedModInfo->pfnVMMR0EntryEx);
5885 }
5886 else
5887 pImage->pfnServiceReqHandler = pWrappedModInfo->pfnServiceReqHandler;
5888#ifdef IN_RING3
5889# error "WTF?"
5890#endif
5891 *phMod = pImage;
5892 }
5893 else
5894 {
5895 /*
5896 * Module init failed - bail, no module term callout.
5897 */
5898 SUPR0Printf("ModuleInit failed for '%s': %Rrc\n", pImage->szName, rc);
5899
5900 pImage->pfnModuleTerm = NULL;
5901 pImage->uState = SUP_IOCTL_LDR_OPEN;
5902 supdrvLdrFree(pDevExt, pImage);
5903 }
5904
5905 supdrvLdrUnlock(pDevExt);
5906 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5907 return rc;
5908}
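
/*
 * Illustrative sketch (not part of the driver build): the kind of descriptor
 * the OS-specific glue for a wrapped module might hand to
 * supdrvLdrRegisterWrappedModule.  The linker symbols, the "MyWrapped" name
 * and the empty symbol table are made up; only the SUPLDRWRAPPEDMODULE fields
 * validated above are taken from this file.
 */
#if 0
extern char __start_mywrapped[], __stop_mywrapped[];    /* hypothetical image range symbols */

static SUPLDRWRAPPEDMODULE g_MyWrappedModInfo;
static void               *g_hMyWrappedMod = NULL;

static int myRegisterWrappedModule(PSUPDRVDEVEXT pDevExt)
{
    RT_ZERO(g_MyWrappedModInfo);
    g_MyWrappedModInfo.uMagic       = SUPLDRWRAPPEDMODULE_MAGIC;
    g_MyWrappedModInfo.uVersion     = SUPLDRWRAPPEDMODULE_VERSION;
    g_MyWrappedModInfo.uEndMagic    = SUPLDRWRAPPEDMODULE_MAGIC;
    g_MyWrappedModInfo.fFlags       = 0;                 /* not a VMMR0 module in this sketch */
    RTStrCopy(g_MyWrappedModInfo.szName, sizeof(g_MyWrappedModInfo.szName), "MyWrapped");
    g_MyWrappedModInfo.pvImageStart = (void *)&__start_mywrapped[0];
    g_MyWrappedModInfo.pvImageEnd   = (void *)&__stop_mywrapped[0];
    g_MyWrappedModInfo.cSymbols     = 0;                 /* exporting nothing in this sketch */
    g_MyWrappedModInfo.paSymbols    = NULL;
    return supdrvLdrRegisterWrappedModule(pDevExt, &g_MyWrappedModInfo, NULL /*pvNative*/, &g_hMyWrappedMod);
}
#endif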
5909
5910
5911/**
5912 * Decrements SUPDRVLDRIMAGE::cImgUsage when two or greater.
5913 *
5914 * @param pDevExt Device globals.
5915 * @param pImage The image.
5916 * @param cReference Number of references being removed.
5917 */
5918DECLINLINE(void) supdrvLdrSubtractUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, uint32_t cReference)
5919{
5920 Assert(cReference > 0);
5921 Assert(pImage->cImgUsage > cReference);
5922 pImage->cImgUsage -= cReference;
5923 if (pImage->cImgUsage == 1 && pImage->pWrappedModInfo)
5924 supdrvOSLdrReleaseWrapperModule(pDevExt, pImage);
5925}
5926
5927
5928/**
5929 * Frees a previously loaded (prep'ed) image.
5930 *
5931 * @returns IPRT status code.
5932 * @param pDevExt Device globals.
5933 * @param pSession Session data.
5934 * @param pReq The request.
5935 */
5936static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
5937{
5938 int rc;
5939 PSUPDRVLDRUSAGE pUsagePrev;
5940 PSUPDRVLDRUSAGE pUsage;
5941 PSUPDRVLDRIMAGE pImage;
5942 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
5943
5944 /*
5945 * Find the ldr image.
5946 */
5947 supdrvLdrLock(pDevExt);
5948 pUsagePrev = NULL;
5949 pUsage = pSession->pLdrUsage;
5950 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5951 {
5952 pUsagePrev = pUsage;
5953 pUsage = pUsage->pNext;
5954 }
5955 if (!pUsage)
5956 {
5957 supdrvLdrUnlock(pDevExt);
5958 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
5959 return VERR_INVALID_HANDLE;
5960 }
5961 if (pUsage->cRing3Usage == 0)
5962 {
5963 supdrvLdrUnlock(pDevExt);
5964 Log(("SUP_IOCTL_LDR_FREE: No ring-3 reference to the image!\n"));
5965 return VERR_CALLER_NO_REFERENCE;
5966 }
5967
5968 /*
5969 * Check if we can remove anything.
5970 */
5971 rc = VINF_SUCCESS;
5972 pImage = pUsage->pImage;
5973 Log(("SUP_IOCTL_LDR_FREE: pImage=%p %s cImgUsage=%d r3=%d r0=%u\n",
5974 pImage, pImage->szName, pImage->cImgUsage, pUsage->cRing3Usage, pUsage->cRing0Usage));
5975 if (pImage->cImgUsage <= 1 || pUsage->cRing3Usage + pUsage->cRing0Usage <= 1)
5976 {
5977 /*
5978 * Check if there are any objects with destructors in the image, if
5979 * so leave it for the session cleanup routine so we get a chance to
5980 * clean things up in the right order and not leave them all dangling.
5981 */
5982 RTSpinlockAcquire(pDevExt->Spinlock);
5983 if (pImage->cImgUsage <= 1)
5984 {
5985 PSUPDRVOBJ pObj;
5986 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
5987 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5988 {
5989 rc = VERR_DANGLING_OBJECTS;
5990 break;
5991 }
5992 }
5993 else
5994 {
5995 PSUPDRVUSAGE pGenUsage;
5996 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
5997 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5998 {
5999 rc = VERR_DANGLING_OBJECTS;
6000 break;
6001 }
6002 }
6003 RTSpinlockRelease(pDevExt->Spinlock);
6004 if (rc == VINF_SUCCESS)
6005 {
6006 /* unlink it */
6007 if (pUsagePrev)
6008 pUsagePrev->pNext = pUsage->pNext;
6009 else
6010 pSession->pLdrUsage = pUsage->pNext;
6011
6012 /* free it */
6013 pUsage->pImage = NULL;
6014 pUsage->pNext = NULL;
6015 RTMemFree(pUsage);
6016
6017 /*
6018 * Dereference the image.
6019 */
6020 if (pImage->cImgUsage <= 1)
6021 supdrvLdrFree(pDevExt, pImage);
6022 else
6023 supdrvLdrSubtractUsage(pDevExt, pImage, 1);
6024 }
6025 else
6026 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
6027 }
6028 else
6029 {
6030 /*
6031 * Dereference both image and usage.
6032 */
6033 pUsage->cRing3Usage--;
6034 supdrvLdrSubtractUsage(pDevExt, pImage, 1);
6035 }
6036
6037 supdrvLdrUnlock(pDevExt);
6038 return rc;
6039}
6040
6041
6042/**
6043 * Deregisters a wrapped .r0 module.
6044 *
6045 * @param pDevExt Device globals.
6046 * @param pWrappedModInfo The wrapped module info.
6047 * @param phMod Where the module handle is stored (NIL'ed on
6048 * success).
6049 */
6050int VBOXCALL supdrvLdrDeregisterWrappedModule(PSUPDRVDEVEXT pDevExt, PCSUPLDRWRAPPEDMODULE pWrappedModInfo, void **phMod)
6051{
6052 PSUPDRVLDRIMAGE pImage;
6053 uint32_t cSleeps;
6054
6055 /*
6056 * Validate input.
6057 */
6058 AssertPtrReturn(pWrappedModInfo, VERR_INVALID_POINTER);
6059 AssertMsgReturn(pWrappedModInfo->uMagic == SUPLDRWRAPPEDMODULE_MAGIC,
6060 ("uMagic=%#x, expected %#x\n", pWrappedModInfo->uMagic, SUPLDRWRAPPEDMODULE_MAGIC),
6061 VERR_INVALID_MAGIC);
6062 AssertMsgReturn(pWrappedModInfo->uEndMagic == SUPLDRWRAPPEDMODULE_MAGIC,
6063 ("uEndMagic=%#x, expected %#x\n", pWrappedModInfo->uEndMagic, SUPLDRWRAPPEDMODULE_MAGIC),
6064 VERR_INVALID_MAGIC);
6065
6066 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
6067 pImage = *(PSUPDRVLDRIMAGE *)phMod;
6068 if (!pImage)
6069 return VINF_SUCCESS;
6070 AssertPtrReturn(pImage, VERR_INVALID_POINTER);
6071 AssertMsgReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, ("pImage=%p uMagic=%#x\n", pImage, pImage->uMagic),
6072 VERR_INVALID_MAGIC);
6073 AssertMsgReturn(pImage->pvImage == pWrappedModInfo->pvImageStart,
6074 ("pWrappedModInfo(%p)->pvImageStart=%p vs. pImage(=%p)->pvImage=%p\n",
6075 pWrappedModInfo, pWrappedModInfo->pvImageStart, pImage, pImage->pvImage),
6076 VERR_MISMATCH);
6077
6078 AssertPtrReturn(pDevExt, VERR_INVALID_POINTER);
6079
6080 /*
6081 * Try to free it, but first we have to wait for its usage count to reach 1 (ours).
6082 */
6083 supdrvLdrLock(pDevExt);
6084 for (cSleeps = 0; ; cSleeps++)
6085 {
6086 PSUPDRVLDRIMAGE pCur;
6087
6088 /* Check that the image is in the list. */
6089 for (pCur = pDevExt->pLdrImages; pCur; pCur = pCur->pNext)
6090 if (pCur == pImage)
6091 break;
6092 AssertBreak(pCur == pImage);
6093
6094 /* Anyone still using it? */
6095 if (pImage->cImgUsage <= 1)
6096 break;
6097
6098 /* Someone is using it, wait and check again. */
6099 if (!(cSleeps % 60))
6100 SUPR0Printf("supdrvLdrUnregisterWrappedModule: Still %u users of wrapped image '%s' ...\n",
6101 pImage->cImgUsage, pImage->szName);
6102 supdrvLdrUnlock(pDevExt);
6103 RTThreadSleep(1000);
6104 supdrvLdrLock(pDevExt);
6105 }
6106
6107 /* We're the last 'user', free it. */
6108 supdrvLdrFree(pDevExt, pImage);
6109
6110 supdrvLdrUnlock(pDevExt);
6111
6112 *phMod = NULL;
6113 return VINF_SUCCESS;
6114}
6115
6116
6117/**
6118 * Lock down the image loader interface.
6119 *
6120 * @returns IPRT status code.
6121 * @param pDevExt Device globals.
6122 */
6123static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt)
6124{
6125 LogFlow(("supdrvIOCtl_LdrLockDown:\n"));
6126
6127 supdrvLdrLock(pDevExt);
6128 if (!pDevExt->fLdrLockedDown)
6129 {
6130 pDevExt->fLdrLockedDown = true;
6131 Log(("supdrvIOCtl_LdrLockDown: Image loader interface locked down\n"));
6132 }
6133 supdrvLdrUnlock(pDevExt);
6134
6135 return VINF_SUCCESS;
6136}
6137
6138
6139/**
6140 * Worker for getting the address of a symbol in an image.
6141 *
6142 * @returns IPRT status code.
6143 * @param pDevExt Device globals.
6144 * @param pImage The image to search.
6145 * @param pszSymbol The symbol name.
6146 * @param cchSymbol The length of the symbol name.
6147 * @param ppvValue Where to return the symbol value.
6148 * @note Caller owns the loader lock.
6149 */
6150static int supdrvLdrQuerySymbolWorker(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage,
6151 const char *pszSymbol, size_t cchSymbol, void **ppvValue)
6152{
6153 int rc = VERR_SYMBOL_NOT_FOUND;
6154 if (pImage->fNative && !pImage->pWrappedModInfo)
6155 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pszSymbol, cchSymbol, ppvValue);
6156 else if (pImage->fNative && pImage->pWrappedModInfo)
6157 {
6158 PCSUPLDRWRAPMODSYMBOL paSymbols = pImage->pWrappedModInfo->paSymbols;
6159 uint32_t iEnd = pImage->pWrappedModInfo->cSymbols;
6160 uint32_t iStart = 0;
6161 while (iStart < iEnd)
6162 {
6163 uint32_t const i = iStart + (iEnd - iStart) / 2;
6164 int const iDiff = strcmp(paSymbols[i].pszSymbol, pszSymbol);
6165 if (iDiff < 0)
6166 iStart = i + 1;
6167 else if (iDiff > 0)
6168 iEnd = i;
6169 else
6170 {
6171 *ppvValue = (void *)(uintptr_t)paSymbols[i].pfnValue;
6172 rc = VINF_SUCCESS;
6173 break;
6174 }
6175 }
6176#ifdef VBOX_STRICT
6177 if (rc != VINF_SUCCESS)
6178 for (iStart = 0, iEnd = pImage->pWrappedModInfo->cSymbols; iStart < iEnd; iStart++)
6179 Assert(strcmp(paSymbols[iStart].pszSymbol, pszSymbol));
6180#endif
6181 }
6182 else
6183 {
6184 const char *pchStrings = pImage->pachStrTab;
6185 PSUPLDRSYM paSyms = pImage->paSymbols;
6186 uint32_t i;
6187 Assert(!pImage->pWrappedModInfo);
6188 for (i = 0; i < pImage->cSymbols; i++)
6189 {
6190 if ( paSyms[i].offName + cchSymbol + 1 <= pImage->cbStrTab
6191 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cchSymbol + 1))
6192 {
6193 /*
6194 * Note! The int32_t is for native loading on solaris where the data
6195 * and text segments are in very different places.
6196 */
6197 *ppvValue = (uint8_t *)pImage->pvImage + (int32_t)paSyms[i].offSymbol;
6198 rc = VINF_SUCCESS;
6199 break;
6200 }
6201 }
6202 }
6203 return rc;
6204}
6205
6206
6207/**
6208 * Queries the address of a symbol in an open image.
6209 *
6210 * @returns IPRT status code.
6211 * @param pDevExt Device globals.
6212 * @param pSession Session data.
6213 * @param pReq The request buffer.
6214 */
6215static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
6216{
6217 PSUPDRVLDRIMAGE pImage;
6218 PSUPDRVLDRUSAGE pUsage;
6219 const size_t cchSymbol = strlen(pReq->u.In.szSymbol);
6220 void *pvSymbol = NULL;
6221 int rc;
6222 Log3(("supdrvIOCtl_LdrQuerySymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
6223
6224 /*
6225 * Find the ldr image.
6226 */
6227 supdrvLdrLock(pDevExt);
6228
6229 pUsage = pSession->pLdrUsage;
6230 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
6231 pUsage = pUsage->pNext;
6232 if (pUsage)
6233 {
6234 pImage = pUsage->pImage;
6235 if (pImage->uState == SUP_IOCTL_LDR_LOAD)
6236 {
6237 /*
6238 * Search the image exports / symbol strings.
6239 */
6240 rc = supdrvLdrQuerySymbolWorker(pDevExt, pImage, pReq->u.In.szSymbol, cchSymbol, &pvSymbol);
6241 }
6242 else
6243 {
6244 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", pImage->uState, pImage->uState));
6245 rc = VERR_WRONG_ORDER;
6246 }
6247 }
6248 else
6249 {
6250 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
6251 rc = VERR_INVALID_HANDLE;
6252 }
6253
6254 supdrvLdrUnlock(pDevExt);
6255
6256 pReq->u.Out.pvSymbol = pvSymbol;
6257 return rc;
6258}
6259
6260
6261/**
6262 * Gets the address of a symbol in an open image or the support driver.
6263 *
6264 * @returns VBox status code.
6265 * @param pDevExt Device globals.
6266 * @param pSession Session data.
6267 * @param pReq The request buffer.
6268 */
6269static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
6270{
6271 const char *pszSymbol = pReq->u.In.pszSymbol;
6272 const char *pszModule = pReq->u.In.pszModule;
6273 size_t cchSymbol;
6274 char const *pszEnd;
6275 uint32_t i;
6276 int rc;
6277
6278 /*
6279 * Input validation.
6280 */
6281 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
6282 pszEnd = RTStrEnd(pszSymbol, 512);
6283 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
6284 cchSymbol = pszEnd - pszSymbol;
6285
6286 if (pszModule)
6287 {
6288 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
6289 pszEnd = RTStrEnd(pszModule, 64);
6290 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
6291 }
6292 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
6293
6294 if ( !pszModule
6295 || !strcmp(pszModule, "SupDrv"))
6296 {
6297 /*
6298 * Search the support driver export table.
6299 */
6300 rc = VERR_SYMBOL_NOT_FOUND;
6301 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
6302 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
6303 {
6304 pReq->u.Out.pfnSymbol = (PFNRT)(uintptr_t)g_aFunctions[i].pfn;
6305 rc = VINF_SUCCESS;
6306 break;
6307 }
6308 }
6309 else
6310 {
6311 /*
6312 * Find the loader image.
6313 */
6314 PSUPDRVLDRIMAGE pImage;
6315
6316 supdrvLdrLock(pDevExt);
6317
6318 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
6319 if (!strcmp(pImage->szName, pszModule))
6320 break;
6321 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
6322 {
6323 /*
6324 * Search the image exports / symbol strings. Do usage counting on the session.
6325 */
6326 rc = supdrvLdrQuerySymbolWorker(pDevExt, pImage, pszSymbol, cchSymbol, (void **)&pReq->u.Out.pfnSymbol);
6327 if (RT_SUCCESS(rc))
6328 rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, true /*fRing3Usage*/);
6329 }
6330 else
6331 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
6332
6333 supdrvLdrUnlock(pDevExt);
6334 }
6335 return rc;
6336}
6337
6338
6339/**
6340 * Looks up a symbol in g_aFunctions, also handling the special g_SUPGlobalInfoPage symbol.
6341 *
6342 * @returns VINF_SUCCESS on success, VERR_SYMBOL_NOT_FOUND on failure.
6343 * @param pszSymbol The symbol to look up.
6344 * @param puValue Where to return the value.
6345 */
6346int VBOXCALL supdrvLdrGetExportedSymbol(const char *pszSymbol, uintptr_t *puValue)
6347{
6348 uint32_t i;
6349 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
6350 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
6351 {
6352 *puValue = (uintptr_t)g_aFunctions[i].pfn;
6353 return VINF_SUCCESS;
6354 }
6355
6356 if (!strcmp(pszSymbol, "g_SUPGlobalInfoPage"))
6357 {
6358 *puValue = (uintptr_t)g_pSUPGlobalInfoPage;
6359 return VINF_SUCCESS;
6360 }
6361
6362 return VERR_SYMBOL_NOT_FOUND;
6363}
6364
6365
6366/**
6367 * Adds a usage reference to an image in the specified session.
6368 *
6369 * Called while owning the loader semaphore.
6370 *
6371 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
6372 * @param pDevExt Pointer to device extension.
6373 * @param pSession Session in question.
6374 * @param pImage Image which the session is using.
6375 * @param fRing3Usage Set if it's ring-3 usage, clear if ring-0.
6376 */
6377static int supdrvLdrAddUsage(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage)
6378{
6379 PSUPDRVLDRUSAGE pUsage;
6380 LogFlow(("supdrvLdrAddUsage: pImage=%p %d\n", pImage, fRing3Usage));
6381
6382 /*
6383 * Referenced it already?
6384 */
6385 pUsage = pSession->pLdrUsage;
6386 while (pUsage)
6387 {
6388 if (pUsage->pImage == pImage)
6389 {
6390 if (fRing3Usage)
6391 pUsage->cRing3Usage++;
6392 else
6393 pUsage->cRing0Usage++;
6394 Assert(pImage->cImgUsage > 1 || !pImage->pWrappedModInfo);
6395 pImage->cImgUsage++;
6396 return VINF_SUCCESS;
6397 }
6398 pUsage = pUsage->pNext;
6399 }
6400
6401 /*
6402 * Allocate new usage record.
6403 */
6404 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
6405 AssertReturn(pUsage, VERR_NO_MEMORY);
6406 pUsage->cRing3Usage = fRing3Usage ? 1 : 0;
6407 pUsage->cRing0Usage = fRing3Usage ? 0 : 1;
6408 pUsage->pImage = pImage;
6409 pUsage->pNext = pSession->pLdrUsage;
6410 pSession->pLdrUsage = pUsage;
6411
6412 /*
6413 * Wrapped modules need to retain a native module reference.
6414 */
6415 pImage->cImgUsage++;
6416 if (pImage->cImgUsage == 2 && pImage->pWrappedModInfo)
6417 supdrvOSLdrRetainWrapperModule(pDevExt, pImage);
6418
6419 return VINF_SUCCESS;
6420}
6421
6422
6423/**
6424 * Frees a load image.
6425 *
6426 * @param pDevExt Pointer to device extension.
6427 * @param pImage Pointer to the image we're gonna free.
6428 * This image must exist!
6429 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
6430 */
6431static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
6432{
6433 unsigned cLoops;
6434 for (cLoops = 0; ; cLoops++)
6435 {
6436 PSUPDRVLDRIMAGE pImagePrev;
6437 PSUPDRVLDRIMAGE pImageImport;
6438 LogFlow(("supdrvLdrFree: pImage=%p %s [loop %u]\n", pImage, pImage->szName, cLoops));
6439 AssertBreak(cLoops < 2);
6440
6441 /*
6442 * Warn if we're releasing images while the image loader interface is
6443 * locked down -- we won't be able to reload them!
6444 */
6445 if (pDevExt->fLdrLockedDown)
6446 Log(("supdrvLdrFree: Warning: unloading '%s' image, while loader interface is locked down!\n", pImage->szName));
6447
6448 /* find it - arg. should've used doubly linked list. */
6449 Assert(pDevExt->pLdrImages);
6450 pImagePrev = NULL;
6451 if (pDevExt->pLdrImages != pImage)
6452 {
6453 pImagePrev = pDevExt->pLdrImages;
6454 while (pImagePrev->pNext != pImage)
6455 pImagePrev = pImagePrev->pNext;
6456 Assert(pImagePrev->pNext == pImage);
6457 }
6458
6459 /* unlink */
6460 if (pImagePrev)
6461 pImagePrev->pNext = pImage->pNext;
6462 else
6463 pDevExt->pLdrImages = pImage->pNext;
6464
6465 /* check if this is VMMR0.r0 and unset its entry point pointers. */
6466 if (pDevExt->pvVMMR0 == pImage->pvImage)
6467 {
6468 pDevExt->pvVMMR0 = NULL;
6469 pDevExt->pfnVMMR0EntryFast = NULL;
6470 pDevExt->pfnVMMR0EntryEx = NULL;
6471 }
6472
6473 /* check for objects with destructors in this image. (Shouldn't happen.) */
6474 if (pDevExt->pObjs)
6475 {
6476 unsigned cObjs = 0;
6477 PSUPDRVOBJ pObj;
6478 RTSpinlockAcquire(pDevExt->Spinlock);
6479 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
6480 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
6481 {
6482 pObj->pfnDestructor = NULL;
6483 cObjs++;
6484 }
6485 RTSpinlockRelease(pDevExt->Spinlock);
6486 if (cObjs)
6487 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
6488 }
6489
6490 /* call termination function if fully loaded. */
6491 if ( pImage->pfnModuleTerm
6492 && pImage->uState == SUP_IOCTL_LDR_LOAD)
6493 {
6494 LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
6495 pDevExt->hLdrTermThread = RTThreadNativeSelf();
6496 pImage->pfnModuleTerm(pImage);
6497 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
6498 }
6499
6500 /* Inform the tracing component. */
6501 supdrvTracerModuleUnloading(pDevExt, pImage);
6502
6503 /* Do native unload if appropriate, then inform the native code about the
6504 unloading (mainly for non-native loading case). */
6505 if (pImage->fNative)
6506 supdrvOSLdrUnload(pDevExt, pImage);
6507 supdrvOSLdrNotifyUnloaded(pDevExt, pImage);
6508
6509 /* free the image */
6510 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
6511 pImage->cImgUsage = 0;
6512 pImage->pDevExt = NULL;
6513 pImage->pNext = NULL;
6514 pImage->uState = SUP_IOCTL_LDR_FREE;
6515#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
6516 RTR0MemObjFree(pImage->hMemObjImage, true /*fMappings*/);
6517 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
6518#else
6519 RTMemExecFree(pImage->pvImageAlloc, pImage->cbImageBits + 31);
6520 pImage->pvImageAlloc = NULL;
6521#endif
6522 pImage->pvImage = NULL;
6523 RTMemFree(pImage->pachStrTab);
6524 pImage->pachStrTab = NULL;
6525 RTMemFree(pImage->paSymbols);
6526 pImage->paSymbols = NULL;
6527 RTMemFree(pImage->paSegments);
6528 pImage->paSegments = NULL;
6529
6530 pImageImport = pImage->pImageImport;
6531 pImage->pImageImport = NULL;
6532
6533 RTMemFree(pImage);
6534
6535 /*
6536 * Deal with any import image.
6537 */
6538 if (!pImageImport)
6539 break;
6540 if (pImageImport->cImgUsage > 1)
6541 {
6542 supdrvLdrSubtractUsage(pDevExt, pImageImport, 1);
6543 break;
6544 }
6545 pImage = pImageImport;
6546 }
6547}
6548
6549
6550/**
6551 * Acquires the loader lock.
6552 *
6553 * @returns IPRT status code.
6554 * @param pDevExt The device extension.
6555 * @note Not recursive on all platforms yet.
6556 */
6557DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt)
6558{
6559#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6560 int rc = RTSemMutexRequest(pDevExt->mtxLdr, RT_INDEFINITE_WAIT);
6561#else
6562 int rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
6563#endif
6564 AssertRC(rc);
6565 return rc;
6566}
6567
6568
6569/**
6570 * Releases the loader lock.
6571 *
6572 * @returns IPRT status code.
6573 * @param pDevExt The device extension.
6574 */
6575DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt)
6576{
6577#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6578 return RTSemMutexRelease(pDevExt->mtxLdr);
6579#else
6580 return RTSemFastMutexRelease(pDevExt->mtxLdr);
6581#endif
6582}
6583
6584
6585/**
6586 * Acquires the global loader lock.
6587 *
6588 * This can be useful when accessing structures that are being modified by the
6589 * ModuleInit and ModuleTerm callbacks. Use SUPR0LdrUnlock() to unlock.
6590 *
6591 * @returns VBox status code.
6592 * @param pSession The session doing the locking.
6593 *
6594 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6595 */
6596SUPR0DECL(int) SUPR0LdrLock(PSUPDRVSESSION pSession)
6597{
6598 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6599 return supdrvLdrLock(pSession->pDevExt);
6600}
6601SUPR0_EXPORT_SYMBOL(SUPR0LdrLock);
6602
6603
6604/**
6605 * Releases the global loader lock.
6606 *
6607 * Must correspond to a SUPR0LdrLock call!
6608 *
6609 * @returns VBox status code.
6610 * @param pSession The session doing the locking.
6611 *
6612 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6613 */
6614SUPR0DECL(int) SUPR0LdrUnlock(PSUPDRVSESSION pSession)
6615{
6616 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6617 return supdrvLdrUnlock(pSession->pDevExt);
6618}
6619SUPR0_EXPORT_SYMBOL(SUPR0LdrUnlock);
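
/*
 * Usage sketch (illustrative only, not part of the driver): a ring-0 caller
 * with a valid pSession would typically bracket access to structures that the
 * ModuleInit/ModuleTerm callbacks of other modules may modify like this:
 *
 *      int rc = SUPR0LdrLock(pSession);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... walk or update the shared structures here ...
 *          SUPR0LdrUnlock(pSession);
 *      }
 */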
6620
6621
6622/**
6623 * For checking lock ownership in Assert() statements during ModuleInit and
6624 * ModuleTerm.
6625 *
6626 * @returns Whether we own the loader lock or not.
6627 * @param hMod The module in question.
6628 * @param fWantToHear For hosts where it is difficult to know who owns the
6629 * lock, this will be returned instead.
6630 */
6631SUPR0DECL(bool) SUPR0LdrIsLockOwnerByMod(void *hMod, bool fWantToHear)
6632{
6633 PSUPDRVDEVEXT pDevExt;
6634 RTNATIVETHREAD hOwner;
6635
6636 PSUPDRVLDRIMAGE pImage = (PSUPDRVLDRIMAGE)hMod;
6637 AssertPtrReturn(pImage, fWantToHear);
6638 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, fWantToHear);
6639
6640 pDevExt = pImage->pDevExt;
6641 AssertPtrReturn(pDevExt, fWantToHear);
6642
6643 /*
6644 * Expecting this to be called at init/term time only, so this will be sufficient.
6645 */
6646 hOwner = pDevExt->hLdrInitThread;
6647 if (hOwner == NIL_RTNATIVETHREAD)
6648 hOwner = pDevExt->hLdrTermThread;
6649 if (hOwner != NIL_RTNATIVETHREAD)
6650 return hOwner == RTThreadNativeSelf();
6651
6652 /*
6653 * Neither of the two semaphore variants currently offers very good
6654 * introspection, so we wing it for now. This API is VBOX_STRICT only.
6655 */
6656#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6657 return RTSemMutexIsOwned(pDevExt->mtxLdr) && fWantToHear;
6658#else
6659 return fWantToHear;
6660#endif
6661}
6662SUPR0_EXPORT_SYMBOL(SUPR0LdrIsLockOwnerByMod);
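
/*
 * Usage sketch (illustrative only): the intended consumer is an Assert() in a
 * module's ModuleInit or ModuleTerm callback, with hMod being the module
 * handle handed to that callback:
 *
 *      Assert(SUPR0LdrIsLockOwnerByMod(hMod, true)); // fWantToHear=true
 */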
6663
6664
6665/**
6666 * Locates and retains the given module for ring-0 usage.
6667 *
6668 * @returns VBox status code.
6669 * @param pSession The session to associate the module reference with.
6670 * @param pszName The module name (no path).
6671 * @param phMod Where to return the module handle. The module is
6672 * referenced and a call to SUPR0LdrModRelease() is
6673 * necessary when done with it.
6674 */
6675SUPR0DECL(int) SUPR0LdrModByName(PSUPDRVSESSION pSession, const char *pszName, void **phMod)
6676{
6677 int rc;
6678 size_t cchName;
6679 PSUPDRVDEVEXT pDevExt;
6680
6681 /*
6682 * Validate input.
6683 */
6684 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
6685 *phMod = NULL;
6686 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6687 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
6688 cchName = strlen(pszName);
6689 AssertReturn(cchName > 0, VERR_EMPTY_STRING);
6690 AssertReturn(cchName < RT_SIZEOFMEMB(SUPDRVLDRIMAGE, szName), VERR_MODULE_NOT_FOUND);
6691
6692 /*
6693 * Do the lookup.
6694 */
6695 pDevExt = pSession->pDevExt;
6696 rc = supdrvLdrLock(pDevExt);
6697 if (RT_SUCCESS(rc))
6698 {
6699 PSUPDRVLDRIMAGE pImage;
6700 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
6701 {
6702 if ( pImage->szName[cchName] == '\0'
6703 && !memcmp(pImage->szName, pszName, cchName))
6704 {
6705 /*
6706 * Check the state and make sure we don't overflow the reference counter before returning it.
6707 */
6708 uint32_t uState = pImage->uState;
6709 if (uState == SUP_IOCTL_LDR_LOAD)
6710 {
6711 if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
6712 {
6713 supdrvLdrAddUsage(pDevExt, pSession, pImage, false /*fRing3Usage*/);
6714 *phMod = pImage;
6715 supdrvLdrUnlock(pDevExt);
6716 return VINF_SUCCESS;
6717 }
6718 supdrvLdrUnlock(pDevExt);
6719 Log(("SUPR0LdrModByName: Too many existing references to '%s'!\n", pszName));
6720 return VERR_TOO_MANY_REFERENCES;
6721 }
6722 supdrvLdrUnlock(pDevExt);
6723 Log(("SUPR0LdrModByName: Module '%s' is not in the loaded state (%d)!\n", pszName, uState));
6724 return VERR_INVALID_STATE;
6725 }
6726 }
6727 supdrvLdrUnlock(pDevExt);
6728 Log(("SUPR0LdrModByName: Module '%s' not found!\n", pszName));
6729 rc = VERR_MODULE_NOT_FOUND;
6730 }
6731 return rc;
6732}
6733SUPR0_EXPORT_SYMBOL(SUPR0LdrModByName);
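
/*
 * Usage sketch (illustrative only): looking a module up by name and pairing it
 * with SUPR0LdrModRelease() when done.  "SomeModule.r0" is a made-up name.
 *
 *      void *hMod = NULL;
 *      int rc = SUPR0LdrModByName(pSession, "SomeModule.r0", &hMod);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... resolve symbols in / call into the module ...
 *          SUPR0LdrModRelease(pSession, hMod);
 *      }
 */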
6734
6735
6736/**
6737 * Retains a ring-0 module reference.
6738 *
6739 * Release reference when done by calling SUPR0LdrModRelease().
6740 *
6741 * @returns VBox status code.
6742 * @param pSession The session to reference the module in. A usage
6743 * record is added if needed.
6744 * @param hMod The handle to the module to retain.
6745 */
6746SUPR0DECL(int) SUPR0LdrModRetain(PSUPDRVSESSION pSession, void *hMod)
6747{
6748 PSUPDRVDEVEXT pDevExt;
6749 PSUPDRVLDRIMAGE pImage;
6750 int rc;
6751
6752 /* Validate input a little. */
6753 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6754 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6755 pImage = (PSUPDRVLDRIMAGE)hMod;
6756 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6757
6758 /* Reference the module: */
6759 pDevExt = pSession->pDevExt;
6760 rc = supdrvLdrLock(pDevExt);
6761 if (RT_SUCCESS(rc))
6762 {
6763 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6764 {
6765 if (RT_LIKELY(pImage->cImgUsage < UINT32_MAX / 2U))
6766 rc = supdrvLdrAddUsage(pDevExt, pSession, pImage, false /*fRing3Usage*/);
6767 else
6768 AssertFailedStmt(rc = VERR_TOO_MANY_REFERENCES);
6769 }
6770 else
6771 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6772 supdrvLdrUnlock(pDevExt);
6773 }
6774 return rc;
6775}
6776SUPR0_EXPORT_SYMBOL(SUPR0LdrModRetain);
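
/*
 * Usage sketch (illustrative only): retaining a module handle received from
 * elsewhere (e.g. a registration callback) so it can be kept beyond the
 * current call; pMyState is a hypothetical per-driver structure and the
 * matching SUPR0LdrModRelease() belongs in the corresponding teardown path.
 *
 *      int rc = SUPR0LdrModRetain(pSession, hMod);
 *      if (RT_SUCCESS(rc))
 *          pMyState->hMod = hMod;
 */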
6777
6778
6779/**
6780 * Releases a ring-0 module reference retained by SUPR0LdrModByName() or
6781 * SUPR0LdrModRetain().
6782 *
6783 * @returns VBox status code.
6784 * @param pSession The session that the module was retained in.
6785 * @param hMod The module handle. NULL is silently ignored.
6786 */
6787SUPR0DECL(int) SUPR0LdrModRelease(PSUPDRVSESSION pSession, void *hMod)
6788{
6789 PSUPDRVDEVEXT pDevExt;
6790 PSUPDRVLDRIMAGE pImage;
6791 int rc;
6792
6793 /*
6794 * Validate input.
6795 */
6796 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6797 if (!hMod)
6798 return VINF_SUCCESS;
6799 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6800 pImage = (PSUPDRVLDRIMAGE)hMod;
6801 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6802
6803 /*
6804 * Take the loader lock and revalidate the module:
6805 */
6806 pDevExt = pSession->pDevExt;
6807 rc = supdrvLdrLock(pDevExt);
6808 if (RT_SUCCESS(rc))
6809 {
6810 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6811 {
6812 /*
6813 * Find the usage record for the module:
6814 */
6815 PSUPDRVLDRUSAGE pPrevUsage = NULL;
6816 PSUPDRVLDRUSAGE pUsage;
6817
6818 rc = VERR_MODULE_NOT_FOUND;
6819 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6820 {
6821 if (pUsage->pImage == pImage)
6822 {
6823 /*
6824 * Drop a ring-0 reference:
6825 */
6826 Assert(pImage->cImgUsage >= pUsage->cRing0Usage + pUsage->cRing3Usage);
6827 if (pUsage->cRing0Usage > 0)
6828 {
6829 if (pImage->cImgUsage > 1)
6830 {
6831 pUsage->cRing0Usage -= 1;
6832 supdrvLdrSubtractUsage(pDevExt, pImage, 1);
6833 rc = VINF_SUCCESS;
6834 }
6835 else
6836 {
6837 Assert(!pImage->pWrappedModInfo /* (The wrapper kmod has the last reference.) */);
6838 supdrvLdrFree(pDevExt, pImage);
6839
6840 if (pPrevUsage)
6841 pPrevUsage->pNext = pUsage->pNext;
6842 else
6843 pSession->pLdrUsage = pUsage->pNext;
6844 pUsage->pNext = NULL;
6845 pUsage->pImage = NULL;
6846 pUsage->cRing0Usage = 0;
6847 pUsage->cRing3Usage = 0;
6848 RTMemFree(pUsage);
6849
6850 rc = VINF_OBJECT_DESTROYED;
6851 }
6852 }
6853 else
6854 AssertFailedStmt(rc = VERR_CALLER_NO_REFERENCE);
6855 break;
6856 }
6857 pPrevUsage = pUsage;
6858 }
6859 }
6860 else
6861 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6862 supdrvLdrUnlock(pDevExt);
6863 }
6864 return rc;
6866}
6867SUPR0_EXPORT_SYMBOL(SUPR0LdrModRelease);
6868
6869
6870/**
6871 * Implements the service call request.
6872 *
6873 * @returns VBox status code.
6874 * @param pDevExt The device extension.
6875 * @param pSession The calling session.
6876 * @param pReq The request packet, valid.
6877 */
6878static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
6879{
6880#if !defined(RT_OS_WINDOWS) || defined(RT_ARCH_AMD64) || defined(DEBUG)
6881 int rc;
6882
6883 /*
6884 * Find the module among the modules referenced by the calling session.
6885 */
6886 rc = supdrvLdrLock(pDevExt);
6887 if (RT_SUCCESS(rc))
6888 {
6889 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
6890 PSUPDRVLDRUSAGE pUsage;
6891
6892 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6893 if ( pUsage->pImage->pfnServiceReqHandler
6894 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
6895 {
6896 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
6897 break;
6898 }
6899 supdrvLdrUnlock(pDevExt);
6900
6901 if (pfnServiceReqHandler)
6902 {
6903 /*
6904 * Call it.
6905 */
6906 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
6907 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
6908 else
6909 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
6910 }
6911 else
6912 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
6913 }
6914
6915 /* log it */
6916 if ( RT_FAILURE(rc)
6917 && rc != VERR_INTERRUPTED
6918 && rc != VERR_TIMEOUT)
6919 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6920 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6921 else
6922 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6923 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6924 return rc;
6925#else /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6926 RT_NOREF3(pDevExt, pSession, pReq);
6927 return VERR_NOT_IMPLEMENTED;
6928#endif /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6929}
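
/*
 * Illustrative sketch (not part of the driver): the rough shape of the service
 * request handler a client module would export so the lookup above can find it
 * via pImage->pfnServiceReqHandler.  The handler name and operation value are
 * made up; the exact callback type is declared in the support library headers.
 *
 *      static DECLCALLBACK(int) someSrvReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
 *                                                 uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
 *      {
 *          switch (uOperation)
 *          {
 *              case 1: // hypothetical parameterless operation
 *                  return pReqHdr ? VERR_INVALID_PARAMETER : VINF_SUCCESS;
 *              default:
 *                  return VERR_NOT_SUPPORTED;
 *          }
 *      }
 */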
6930
6931
6932/**
6933 * Implements the logger settings request.
6934 *
6935 * @returns VBox status code.
6936 * @param pReq The request.
6937 */
6938static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq)
6939{
6940 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
6941 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
6942 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
6943 PRTLOGGER pLogger = NULL;
6944 int rc;
6945
6946 /*
6947 * Some further validation.
6948 */
6949 switch (pReq->u.In.fWhat)
6950 {
6951 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6952 case SUPLOGGERSETTINGS_WHAT_CREATE:
6953 break;
6954
6955 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6956 if (*pszGroup || *pszFlags || *pszDest)
6957 return VERR_INVALID_PARAMETER;
6958 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
6959 return VERR_ACCESS_DENIED;
6960 break;
6961
6962 default:
6963 return VERR_INTERNAL_ERROR;
6964 }
6965
6966 /*
6967 * Get the logger.
6968 */
6969 switch (pReq->u.In.fWhich)
6970 {
6971 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6972 pLogger = RTLogGetDefaultInstance();
6973 break;
6974
6975 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6976 pLogger = RTLogRelGetDefaultInstance();
6977 break;
6978
6979 default:
6980 return VERR_INTERNAL_ERROR;
6981 }
6982
6983 /*
6984 * Do the job.
6985 */
6986 switch (pReq->u.In.fWhat)
6987 {
6988 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6989 if (pLogger)
6990 {
6991 rc = RTLogFlags(pLogger, pszFlags);
6992 if (RT_SUCCESS(rc))
6993 rc = RTLogGroupSettings(pLogger, pszGroup);
6994 NOREF(pszDest);
6995 }
6996 else
6997 rc = VERR_NOT_FOUND;
6998 break;
6999
7000 case SUPLOGGERSETTINGS_WHAT_CREATE:
7001 {
7002 if (pLogger)
7003 rc = VERR_ALREADY_EXISTS;
7004 else
7005 {
7006 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
7007
7008 rc = RTLogCreate(&pLogger,
7009 0 /* fFlags */,
7010 pszGroup,
7011 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
7012 ? "VBOX_LOG"
7013 : "VBOX_RELEASE_LOG",
7014 RT_ELEMENTS(s_apszGroups),
7015 s_apszGroups,
7016 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
7017 NULL);
7018 if (RT_SUCCESS(rc))
7019 {
7020 rc = RTLogFlags(pLogger, pszFlags);
7021 NOREF(pszDest);
7022 if (RT_SUCCESS(rc))
7023 {
7024 switch (pReq->u.In.fWhich)
7025 {
7026 case SUPLOGGERSETTINGS_WHICH_DEBUG:
7027 pLogger = RTLogSetDefaultInstance(pLogger);
7028 break;
7029 case SUPLOGGERSETTINGS_WHICH_RELEASE:
7030 pLogger = RTLogRelSetDefaultInstance(pLogger);
7031 break;
7032 }
7033 }
7034 RTLogDestroy(pLogger);
7035 }
7036 }
7037 break;
7038 }
7039
7040 case SUPLOGGERSETTINGS_WHAT_DESTROY:
7041 switch (pReq->u.In.fWhich)
7042 {
7043 case SUPLOGGERSETTINGS_WHICH_DEBUG:
7044 pLogger = RTLogSetDefaultInstance(NULL);
7045 break;
7046 case SUPLOGGERSETTINGS_WHICH_RELEASE:
7047 pLogger = RTLogRelSetDefaultInstance(NULL);
7048 break;
7049 }
7050 rc = RTLogDestroy(pLogger);
7051 break;
7052
7053 default:
7054 {
7055 rc = VERR_INTERNAL_ERROR;
7056 break;
7057 }
7058 }
7059
7060 return rc;
7061}
7062
7063
7064/**
7065 * Implements the MSR prober operations.
7066 *
7067 * @returns VBox status code.
7068 * @param pDevExt The device extension.
7069 * @param pReq The request.
7070 */
7071static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq)
7072{
7073#ifdef SUPDRV_WITH_MSR_PROBER
7074 RTCPUID const idCpu = pReq->u.In.idCpu == UINT32_MAX ? NIL_RTCPUID : pReq->u.In.idCpu;
7075 int rc;
7076
7077 switch (pReq->u.In.enmOp)
7078 {
7079 case SUPMSRPROBEROP_READ:
7080 {
7081 uint64_t uValue;
7082 rc = supdrvOSMsrProberRead(pReq->u.In.uMsr, idCpu, &uValue);
7083 if (RT_SUCCESS(rc))
7084 {
7085 pReq->u.Out.uResults.Read.uValue = uValue;
7086 pReq->u.Out.uResults.Read.fGp = false;
7087 }
7088 else if (rc == VERR_ACCESS_DENIED)
7089 {
7090 pReq->u.Out.uResults.Read.uValue = 0;
7091 pReq->u.Out.uResults.Read.fGp = true;
7092 rc = VINF_SUCCESS;
7093 }
7094 break;
7095 }
7096
7097 case SUPMSRPROBEROP_WRITE:
7098 rc = supdrvOSMsrProberWrite(pReq->u.In.uMsr, idCpu, pReq->u.In.uArgs.Write.uToWrite);
7099 if (RT_SUCCESS(rc))
7100 pReq->u.Out.uResults.Write.fGp = false;
7101 else if (rc == VERR_ACCESS_DENIED)
7102 {
7103 pReq->u.Out.uResults.Write.fGp = true;
7104 rc = VINF_SUCCESS;
7105 }
7106 break;
7107
7108 case SUPMSRPROBEROP_MODIFY:
7109 case SUPMSRPROBEROP_MODIFY_FASTER:
7110 rc = supdrvOSMsrProberModify(idCpu, pReq);
7111 break;
7112
7113 default:
7114 return VERR_INVALID_FUNCTION;
7115 }
7116 RT_NOREF1(pDevExt);
7117 return rc;
7118#else
7119 RT_NOREF2(pDevExt, pReq);
7120 return VERR_NOT_IMPLEMENTED;
7121#endif
7122}
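
/*
 * Conceptual sketch only (the real work is done by the platform specific
 * supdrvOSMsrProberRead/Write/Modify backends): a naive current-CPU read
 * without the #GP trapping those backends provide would look roughly like
 * this, and a faulting access is what the fGp=true / VERR_ACCESS_DENIED
 * path above reports back to the caller.
 *
 *      static int naiveMsrRead(uint32_t uMsr, uint64_t *puValue)
 *      {
 *          *puValue = ASMRdMsr(uMsr); // faults on invalid MSRs instead of failing gracefully
 *          return VINF_SUCCESS;
 *      }
 */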
7123
7124
7125/**
7126 * Resumes the built-in keyboard on MacBook Air and Pro hosts.
7127 * If there is no built-in keyboard device, success is returned anyway.
7128 *
7129 * @returns 0 on the Mac OS X platform, VERR_NOT_IMPLEMENTED on all other platforms.
7130 */
7131static int supdrvIOCtl_ResumeSuspendedKbds(void)
7132{
7133#if defined(RT_OS_DARWIN)
7134 return supdrvDarwinResumeSuspendedKbds();
7135#else
7136 return VERR_NOT_IMPLEMENTED;
7137#endif
7138}
7139