VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp @ 85542

Last change on this file since 85542 was 85542, checked in by vboxsync, 4 years ago

IPRT/mp-r0drv-linux.c: Move the cpu set allocation & initialization out of the block running with preemption disabled. This will hopefully fix being on the wrong CPU when re-enabling preemption (seen once this morning after running supdrvTscMeasureDeltaOne, possibly involving the calling CPU).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 255.5 KB
1/* $Id: SUPDrv.cpp 85542 2020-07-30 09:05:38Z vboxsync $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_SUP_DRV
32#define SUPDRV_AGNOSTIC
33#include "SUPDrvInternal.h"
34#ifndef PAGE_SHIFT
35# include <iprt/param.h>
36#endif
37#include <iprt/asm.h>
38#include <iprt/asm-amd64-x86.h>
39#include <iprt/asm-math.h>
40#include <iprt/cpuset.h>
41#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
42# include <iprt/dbg.h>
43#endif
44#include <iprt/handletable.h>
45#include <iprt/mem.h>
46#include <iprt/mp.h>
47#include <iprt/power.h>
48#include <iprt/process.h>
49#include <iprt/semaphore.h>
50#include <iprt/spinlock.h>
51#include <iprt/thread.h>
52#include <iprt/uuid.h>
53#include <iprt/net.h>
54#include <iprt/crc.h>
55#include <iprt/string.h>
56#include <iprt/timer.h>
57#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
58# include <iprt/rand.h>
59# include <iprt/path.h>
60#endif
61#include <iprt/uint128.h>
62#include <iprt/x86.h>
63
64#include <VBox/param.h>
65#include <VBox/log.h>
66#include <VBox/err.h>
67#include <VBox/vmm/hm_vmx.h>
68
69#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
70# include "dtrace/SUPDrv.h"
71#else
72# define VBOXDRV_SESSION_CREATE(pvSession, fUser) do { } while (0)
73# define VBOXDRV_SESSION_CLOSE(pvSession) do { } while (0)
74# define VBOXDRV_IOCTL_ENTRY(pvSession, uIOCtl, pvReqHdr) do { } while (0)
75# define VBOXDRV_IOCTL_RETURN(pvSession, uIOCtl, pvReqHdr, rcRet, rcReq) do { } while (0)
76#endif
77
78/*
79 * Logging assignments:
80 * Log - useful stuff, like failures.
81 * LogFlow - program flow, except the really noisy bits.
82 * Log2 - Cleanup.
83 * Log3 - Loader flow noise.
84 * Log4 - Call VMMR0 flow noise.
85 * Log5 - Native yet-to-be-defined noise.
86 * Log6 - Native ioctl flow noise.
87 *
88 * Logging requires KBUILD_TYPE=debug and possibly changes to the logger
89 * instantiation in log-vbox.c(pp).
90 */
91
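/* Illustrative sketch (not part of the original file): how the logging level
 * assignments above look at a call site. Log, LogFlow and Log2 are the usual
 * VBox/log.h variadic macros, hence the double parentheses; %Rrc formats an
 * IPRT status code. The function name is made up for the example. */
#if 0 /* example only */
static void supdrvExampleLogUsage(void *pvSession, int rc)
{
    LogFlow(("supdrvExampleLogUsage: pvSession=%p\n", pvSession)); /* program flow */
    if (RT_FAILURE(rc))
        Log(("supdrvExampleLogUsage: failed, rc=%Rrc\n", rc));     /* useful stuff, like failures */
    Log2(("supdrvExampleLogUsage: cleaning up %p\n", pvSession));  /* cleanup noise */
}
#endif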
92
93/*********************************************************************************************************************************
94* Defined Constants And Macros *
95*********************************************************************************************************************************/
96/** @def VBOX_SVN_REV
97 * The makefile should define this if it can. */
98#ifndef VBOX_SVN_REV
99# define VBOX_SVN_REV 0
100#endif
101
102/** @def SUPDRV_CHECK_SMAP_SETUP
103 * SMAP check setup. */
104/** @def SUPDRV_CHECK_SMAP_CHECK
105 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
106 * will be logged and @a a_BadExpr is executed. */
107#if (defined(RT_OS_DARWIN) || defined(RT_OS_LINUX)) && !defined(VBOX_WITHOUT_EFLAGS_AC_SET_IN_VBOXDRV)
108# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
109# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) \
110 do { \
111 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
112 { \
113 RTCCUINTREG fEfl = ASMGetFlags(); \
114 if (RT_LIKELY(fEfl & X86_EFL_AC)) \
115 { /* likely */ } \
116 else \
117 { \
118 supdrvBadContext(a_pDevExt, "SUPDrv.cpp", __LINE__, "EFLAGS.AC is 0!"); \
119 a_BadExpr; \
120 } \
121 } \
122 } while (0)
123#else
124# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
125# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) NOREF(fKernelFeatures)
126#endif
127
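/* Illustrative sketch (not part of the original file): typical use of the SMAP
 * macros defined above in a ring-0 entry point. SUPDRV_CHECK_SMAP_SETUP()
 * declares the fKernelFeatures local that the check macro relies on, and the
 * second argument of SUPDRV_CHECK_SMAP_CHECK is the statement executed when
 * EFLAGS.AC is unexpectedly clear (here a plain error return). The function
 * name and the exact error status are assumptions for the example. */
#if 0 /* example only */
static int supdrvExampleEntryPoint(PSUPDRVDEVEXT pDevExt)
{
    SUPDRV_CHECK_SMAP_SETUP();
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, return VERR_INVALID_CONTEXT);
    /* ... do the actual work ... */
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, return VERR_INVALID_CONTEXT);
    return VINF_SUCCESS;
}
#endif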
128
129/*********************************************************************************************************************************
130* Internal Functions *
131*********************************************************************************************************************************/
132static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
133static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
134static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
135static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
136static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
137static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
138static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
139static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt);
140static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
141static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
142static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
143static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
144static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage);
145static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
146DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt);
147DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt);
148static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
149static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq);
150static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq);
151static int supdrvIOCtl_ResumeSuspendedKbds(void);
152
153
154/*********************************************************************************************************************************
155* Global Variables *
156*********************************************************************************************************************************/
157/**
158 * Array of the R0 SUP API.
159 *
160 * While making changes to these exports, make sure to update the IOC
161 * minor version (SUPDRV_IOC_VERSION).
162 *
163 * @remarks This array is processed by SUPR0-def-pe.sed and SUPR0-def-lx.sed to
164 * produce definition files from which import libraries are generated.
165 * Take care when commenting things and especially with \#ifdef'ing.
166 */
167static SUPFUNC g_aFunctions[] =
168{
169/* SED: START */
170 /* name function */
171 /* Entries with absolute addresses determined at runtime, fixup
172 code makes ugly ASSUMPTIONS about the order here: */
173 { "SUPR0AbsIs64bit", (void *)0 },
174 { "SUPR0Abs64bitKernelCS", (void *)0 },
175 { "SUPR0Abs64bitKernelSS", (void *)0 },
176 { "SUPR0Abs64bitKernelDS", (void *)0 },
177 { "SUPR0AbsKernelCS", (void *)0 },
178 { "SUPR0AbsKernelSS", (void *)0 },
179 { "SUPR0AbsKernelDS", (void *)0 },
180 { "SUPR0AbsKernelES", (void *)0 },
181 { "SUPR0AbsKernelFS", (void *)0 },
182 { "SUPR0AbsKernelGS", (void *)0 },
183 /* Normal function pointers: */
184 { "g_pSUPGlobalInfoPage", (void *)&g_pSUPGlobalInfoPage }, /* SED: DATA */
185 { "SUPGetGIP", (void *)(uintptr_t)SUPGetGIP },
186 { "SUPReadTscWithDelta", (void *)(uintptr_t)SUPReadTscWithDelta },
187 { "SUPGetTscDeltaSlow", (void *)(uintptr_t)SUPGetTscDeltaSlow },
188 { "SUPGetCpuHzFromGipForAsyncMode", (void *)(uintptr_t)SUPGetCpuHzFromGipForAsyncMode },
189 { "SUPIsTscFreqCompatible", (void *)(uintptr_t)SUPIsTscFreqCompatible },
190 { "SUPIsTscFreqCompatibleEx", (void *)(uintptr_t)SUPIsTscFreqCompatibleEx },
191 { "SUPR0BadContext", (void *)(uintptr_t)SUPR0BadContext },
192 { "SUPR0ComponentDeregisterFactory", (void *)(uintptr_t)SUPR0ComponentDeregisterFactory },
193 { "SUPR0ComponentQueryFactory", (void *)(uintptr_t)SUPR0ComponentQueryFactory },
194 { "SUPR0ComponentRegisterFactory", (void *)(uintptr_t)SUPR0ComponentRegisterFactory },
195 { "SUPR0ContAlloc", (void *)(uintptr_t)SUPR0ContAlloc },
196 { "SUPR0ContFree", (void *)(uintptr_t)SUPR0ContFree },
197 { "SUPR0ChangeCR4", (void *)(uintptr_t)SUPR0ChangeCR4 },
198 { "SUPR0EnableVTx", (void *)(uintptr_t)SUPR0EnableVTx },
199 { "SUPR0SuspendVTxOnCpu", (void *)(uintptr_t)SUPR0SuspendVTxOnCpu },
200 { "SUPR0ResumeVTxOnCpu", (void *)(uintptr_t)SUPR0ResumeVTxOnCpu },
201 { "SUPR0GetCurrentGdtRw", (void *)(uintptr_t)SUPR0GetCurrentGdtRw },
202 { "SUPR0GetKernelFeatures", (void *)(uintptr_t)SUPR0GetKernelFeatures },
203 { "SUPR0GetHwvirtMsrs", (void *)(uintptr_t)SUPR0GetHwvirtMsrs },
204 { "SUPR0GetPagingMode", (void *)(uintptr_t)SUPR0GetPagingMode },
205 { "SUPR0GetSvmUsability", (void *)(uintptr_t)SUPR0GetSvmUsability },
206 { "SUPR0GetVTSupport", (void *)(uintptr_t)SUPR0GetVTSupport },
207 { "SUPR0GetVmxUsability", (void *)(uintptr_t)SUPR0GetVmxUsability },
208 { "SUPR0LdrIsLockOwnerByMod", (void *)(uintptr_t)SUPR0LdrIsLockOwnerByMod },
209 { "SUPR0LdrLock", (void *)(uintptr_t)SUPR0LdrLock },
210 { "SUPR0LdrUnlock", (void *)(uintptr_t)SUPR0LdrUnlock },
211 { "SUPR0LdrModByName", (void *)(uintptr_t)SUPR0LdrModByName },
212 { "SUPR0LdrModRelease", (void *)(uintptr_t)SUPR0LdrModRelease },
213 { "SUPR0LdrModRetain", (void *)(uintptr_t)SUPR0LdrModRetain },
214 { "SUPR0LockMem", (void *)(uintptr_t)SUPR0LockMem },
215 { "SUPR0LowAlloc", (void *)(uintptr_t)SUPR0LowAlloc },
216 { "SUPR0LowFree", (void *)(uintptr_t)SUPR0LowFree },
217 { "SUPR0MemAlloc", (void *)(uintptr_t)SUPR0MemAlloc },
218 { "SUPR0MemFree", (void *)(uintptr_t)SUPR0MemFree },
219 { "SUPR0MemGetPhys", (void *)(uintptr_t)SUPR0MemGetPhys },
220 { "SUPR0ObjAddRef", (void *)(uintptr_t)SUPR0ObjAddRef },
221 { "SUPR0ObjAddRefEx", (void *)(uintptr_t)SUPR0ObjAddRefEx },
222 { "SUPR0ObjRegister", (void *)(uintptr_t)SUPR0ObjRegister },
223 { "SUPR0ObjRelease", (void *)(uintptr_t)SUPR0ObjRelease },
224 { "SUPR0ObjVerifyAccess", (void *)(uintptr_t)SUPR0ObjVerifyAccess },
225 { "SUPR0PageAllocEx", (void *)(uintptr_t)SUPR0PageAllocEx },
226 { "SUPR0PageFree", (void *)(uintptr_t)SUPR0PageFree },
227 { "SUPR0PageMapKernel", (void *)(uintptr_t)SUPR0PageMapKernel },
228 { "SUPR0PageProtect", (void *)(uintptr_t)SUPR0PageProtect },
229#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
230 { "SUPR0HCPhysToVirt", (void *)(uintptr_t)SUPR0HCPhysToVirt }, /* only-linux, only solaris */
231#endif
232 { "SUPR0Printf", (void *)(uintptr_t)SUPR0Printf },
233 { "SUPR0GetSessionGVM", (void *)(uintptr_t)SUPR0GetSessionGVM },
234 { "SUPR0GetSessionVM", (void *)(uintptr_t)SUPR0GetSessionVM },
235 { "SUPR0SetSessionVM", (void *)(uintptr_t)SUPR0SetSessionVM },
236 { "SUPR0TscDeltaMeasureBySetIndex", (void *)(uintptr_t)SUPR0TscDeltaMeasureBySetIndex },
237 { "SUPR0TracerDeregisterDrv", (void *)(uintptr_t)SUPR0TracerDeregisterDrv },
238 { "SUPR0TracerDeregisterImpl", (void *)(uintptr_t)SUPR0TracerDeregisterImpl },
239 { "SUPR0TracerFireProbe", (void *)(uintptr_t)SUPR0TracerFireProbe },
240 { "SUPR0TracerRegisterDrv", (void *)(uintptr_t)SUPR0TracerRegisterDrv },
241 { "SUPR0TracerRegisterImpl", (void *)(uintptr_t)SUPR0TracerRegisterImpl },
242 { "SUPR0TracerRegisterModule", (void *)(uintptr_t)SUPR0TracerRegisterModule },
243 { "SUPR0TracerUmodProbeFire", (void *)(uintptr_t)SUPR0TracerUmodProbeFire },
244 { "SUPR0UnlockMem", (void *)(uintptr_t)SUPR0UnlockMem },
245#ifdef RT_OS_WINDOWS
246 { "SUPR0IoCtlSetupForHandle", (void *)(uintptr_t)SUPR0IoCtlSetupForHandle }, /* only-windows */
247 { "SUPR0IoCtlPerform", (void *)(uintptr_t)SUPR0IoCtlPerform }, /* only-windows */
248 { "SUPR0IoCtlCleanup", (void *)(uintptr_t)SUPR0IoCtlCleanup }, /* only-windows */
249#endif
250 { "SUPSemEventClose", (void *)(uintptr_t)SUPSemEventClose },
251 { "SUPSemEventCreate", (void *)(uintptr_t)SUPSemEventCreate },
252 { "SUPSemEventGetResolution", (void *)(uintptr_t)SUPSemEventGetResolution },
253 { "SUPSemEventMultiClose", (void *)(uintptr_t)SUPSemEventMultiClose },
254 { "SUPSemEventMultiCreate", (void *)(uintptr_t)SUPSemEventMultiCreate },
255 { "SUPSemEventMultiGetResolution", (void *)(uintptr_t)SUPSemEventMultiGetResolution },
256 { "SUPSemEventMultiReset", (void *)(uintptr_t)SUPSemEventMultiReset },
257 { "SUPSemEventMultiSignal", (void *)(uintptr_t)SUPSemEventMultiSignal },
258 { "SUPSemEventMultiWait", (void *)(uintptr_t)SUPSemEventMultiWait },
259 { "SUPSemEventMultiWaitNoResume", (void *)(uintptr_t)SUPSemEventMultiWaitNoResume },
260 { "SUPSemEventMultiWaitNsAbsIntr", (void *)(uintptr_t)SUPSemEventMultiWaitNsAbsIntr },
261 { "SUPSemEventMultiWaitNsRelIntr", (void *)(uintptr_t)SUPSemEventMultiWaitNsRelIntr },
262 { "SUPSemEventSignal", (void *)(uintptr_t)SUPSemEventSignal },
263 { "SUPSemEventWait", (void *)(uintptr_t)SUPSemEventWait },
264 { "SUPSemEventWaitNoResume", (void *)(uintptr_t)SUPSemEventWaitNoResume },
265 { "SUPSemEventWaitNsAbsIntr", (void *)(uintptr_t)SUPSemEventWaitNsAbsIntr },
266 { "SUPSemEventWaitNsRelIntr", (void *)(uintptr_t)SUPSemEventWaitNsRelIntr },
267
268 { "RTAssertAreQuiet", (void *)(uintptr_t)RTAssertAreQuiet },
269 { "RTAssertMayPanic", (void *)(uintptr_t)RTAssertMayPanic },
270 { "RTAssertMsg1", (void *)(uintptr_t)RTAssertMsg1 },
271 { "RTAssertMsg2AddV", (void *)(uintptr_t)RTAssertMsg2AddV },
272 { "RTAssertMsg2V", (void *)(uintptr_t)RTAssertMsg2V },
273 { "RTAssertSetMayPanic", (void *)(uintptr_t)RTAssertSetMayPanic },
274 { "RTAssertSetQuiet", (void *)(uintptr_t)RTAssertSetQuiet },
275 { "RTCrc32", (void *)(uintptr_t)RTCrc32 },
276 { "RTCrc32Finish", (void *)(uintptr_t)RTCrc32Finish },
277 { "RTCrc32Process", (void *)(uintptr_t)RTCrc32Process },
278 { "RTCrc32Start", (void *)(uintptr_t)RTCrc32Start },
279 { "RTErrConvertFromErrno", (void *)(uintptr_t)RTErrConvertFromErrno },
280 { "RTErrConvertToErrno", (void *)(uintptr_t)RTErrConvertToErrno },
281 { "RTHandleTableAllocWithCtx", (void *)(uintptr_t)RTHandleTableAllocWithCtx },
282 { "RTHandleTableCreate", (void *)(uintptr_t)RTHandleTableCreate },
283 { "RTHandleTableCreateEx", (void *)(uintptr_t)RTHandleTableCreateEx },
284 { "RTHandleTableDestroy", (void *)(uintptr_t)RTHandleTableDestroy },
285 { "RTHandleTableFreeWithCtx", (void *)(uintptr_t)RTHandleTableFreeWithCtx },
286 { "RTHandleTableLookupWithCtx", (void *)(uintptr_t)RTHandleTableLookupWithCtx },
287 { "RTLogDefaultInstance", (void *)(uintptr_t)RTLogDefaultInstance },
288 { "RTLogDefaultInstanceEx", (void *)(uintptr_t)RTLogDefaultInstanceEx },
289 { "RTLogGetDefaultInstance", (void *)(uintptr_t)RTLogGetDefaultInstance },
290 { "RTLogGetDefaultInstanceEx", (void *)(uintptr_t)RTLogGetDefaultInstanceEx },
291 { "SUPR0GetDefaultLogInstanceEx", (void *)(uintptr_t)SUPR0GetDefaultLogInstanceEx },
292 { "RTLogLoggerExV", (void *)(uintptr_t)RTLogLoggerExV },
293 { "RTLogPrintfV", (void *)(uintptr_t)RTLogPrintfV },
294 { "RTLogRelGetDefaultInstance", (void *)(uintptr_t)RTLogRelGetDefaultInstance },
295 { "RTLogRelGetDefaultInstanceEx", (void *)(uintptr_t)RTLogRelGetDefaultInstanceEx },
296 { "SUPR0GetDefaultLogRelInstanceEx", (void *)(uintptr_t)SUPR0GetDefaultLogRelInstanceEx },
297 { "RTLogSetDefaultInstanceThread", (void *)(uintptr_t)RTLogSetDefaultInstanceThread },
298 { "RTMemAllocExTag", (void *)(uintptr_t)RTMemAllocExTag },
299 { "RTMemAllocTag", (void *)(uintptr_t)RTMemAllocTag },
300 { "RTMemAllocVarTag", (void *)(uintptr_t)RTMemAllocVarTag },
301 { "RTMemAllocZTag", (void *)(uintptr_t)RTMemAllocZTag },
302 { "RTMemAllocZVarTag", (void *)(uintptr_t)RTMemAllocZVarTag },
303 { "RTMemDupExTag", (void *)(uintptr_t)RTMemDupExTag },
304 { "RTMemDupTag", (void *)(uintptr_t)RTMemDupTag },
305 { "RTMemFree", (void *)(uintptr_t)RTMemFree },
306 { "RTMemFreeEx", (void *)(uintptr_t)RTMemFreeEx },
307 { "RTMemReallocTag", (void *)(uintptr_t)RTMemReallocTag },
308 { "RTMpCpuId", (void *)(uintptr_t)RTMpCpuId },
309 { "RTMpCpuIdFromSetIndex", (void *)(uintptr_t)RTMpCpuIdFromSetIndex },
310 { "RTMpCpuIdToSetIndex", (void *)(uintptr_t)RTMpCpuIdToSetIndex },
311 { "RTMpCurSetIndex", (void *)(uintptr_t)RTMpCurSetIndex },
312 { "RTMpCurSetIndexAndId", (void *)(uintptr_t)RTMpCurSetIndexAndId },
313 { "RTMpGetArraySize", (void *)(uintptr_t)RTMpGetArraySize },
314 { "RTMpGetCount", (void *)(uintptr_t)RTMpGetCount },
315 { "RTMpGetMaxCpuId", (void *)(uintptr_t)RTMpGetMaxCpuId },
316 { "RTMpGetOnlineCount", (void *)(uintptr_t)RTMpGetOnlineCount },
317 { "RTMpGetOnlineSet", (void *)(uintptr_t)RTMpGetOnlineSet },
318 { "RTMpGetSet", (void *)(uintptr_t)RTMpGetSet },
319 { "RTMpIsCpuOnline", (void *)(uintptr_t)RTMpIsCpuOnline },
320 { "RTMpIsCpuPossible", (void *)(uintptr_t)RTMpIsCpuPossible },
321 { "RTMpIsCpuWorkPending", (void *)(uintptr_t)RTMpIsCpuWorkPending },
322 { "RTMpNotificationDeregister", (void *)(uintptr_t)RTMpNotificationDeregister },
323 { "RTMpNotificationRegister", (void *)(uintptr_t)RTMpNotificationRegister },
324 { "RTMpOnAll", (void *)(uintptr_t)RTMpOnAll },
325 { "RTMpOnOthers", (void *)(uintptr_t)RTMpOnOthers },
326 { "RTMpOnSpecific", (void *)(uintptr_t)RTMpOnSpecific },
327 { "RTMpPokeCpu", (void *)(uintptr_t)RTMpPokeCpu },
328 { "RTNetIPv4AddDataChecksum", (void *)(uintptr_t)RTNetIPv4AddDataChecksum },
329 { "RTNetIPv4AddTCPChecksum", (void *)(uintptr_t)RTNetIPv4AddTCPChecksum },
330 { "RTNetIPv4AddUDPChecksum", (void *)(uintptr_t)RTNetIPv4AddUDPChecksum },
331 { "RTNetIPv4FinalizeChecksum", (void *)(uintptr_t)RTNetIPv4FinalizeChecksum },
332 { "RTNetIPv4HdrChecksum", (void *)(uintptr_t)RTNetIPv4HdrChecksum },
333 { "RTNetIPv4IsDHCPValid", (void *)(uintptr_t)RTNetIPv4IsDHCPValid },
334 { "RTNetIPv4IsHdrValid", (void *)(uintptr_t)RTNetIPv4IsHdrValid },
335 { "RTNetIPv4IsTCPSizeValid", (void *)(uintptr_t)RTNetIPv4IsTCPSizeValid },
336 { "RTNetIPv4IsTCPValid", (void *)(uintptr_t)RTNetIPv4IsTCPValid },
337 { "RTNetIPv4IsUDPSizeValid", (void *)(uintptr_t)RTNetIPv4IsUDPSizeValid },
338 { "RTNetIPv4IsUDPValid", (void *)(uintptr_t)RTNetIPv4IsUDPValid },
339 { "RTNetIPv4PseudoChecksum", (void *)(uintptr_t)RTNetIPv4PseudoChecksum },
340 { "RTNetIPv4PseudoChecksumBits", (void *)(uintptr_t)RTNetIPv4PseudoChecksumBits },
341 { "RTNetIPv4TCPChecksum", (void *)(uintptr_t)RTNetIPv4TCPChecksum },
342 { "RTNetIPv4UDPChecksum", (void *)(uintptr_t)RTNetIPv4UDPChecksum },
343 { "RTNetIPv6PseudoChecksum", (void *)(uintptr_t)RTNetIPv6PseudoChecksum },
344 { "RTNetIPv6PseudoChecksumBits", (void *)(uintptr_t)RTNetIPv6PseudoChecksumBits },
345 { "RTNetIPv6PseudoChecksumEx", (void *)(uintptr_t)RTNetIPv6PseudoChecksumEx },
346 { "RTNetTCPChecksum", (void *)(uintptr_t)RTNetTCPChecksum },
347 { "RTNetUDPChecksum", (void *)(uintptr_t)RTNetUDPChecksum },
348 { "RTPowerNotificationDeregister", (void *)(uintptr_t)RTPowerNotificationDeregister },
349 { "RTPowerNotificationRegister", (void *)(uintptr_t)RTPowerNotificationRegister },
350 { "RTProcSelf", (void *)(uintptr_t)RTProcSelf },
351 { "RTR0AssertPanicSystem", (void *)(uintptr_t)RTR0AssertPanicSystem },
352#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
353 { "RTR0DbgKrnlInfoOpen", (void *)(uintptr_t)RTR0DbgKrnlInfoOpen }, /* only-darwin, only-solaris, only-windows */
354 { "RTR0DbgKrnlInfoQueryMember", (void *)(uintptr_t)RTR0DbgKrnlInfoQueryMember }, /* only-darwin, only-solaris, only-windows */
355# if defined(RT_OS_SOLARIS)
356 { "RTR0DbgKrnlInfoQuerySize", (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySize }, /* only-solaris */
357# endif
358 { "RTR0DbgKrnlInfoQuerySymbol", (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySymbol }, /* only-darwin, only-solaris, only-windows */
359 { "RTR0DbgKrnlInfoRelease", (void *)(uintptr_t)RTR0DbgKrnlInfoRelease }, /* only-darwin, only-solaris, only-windows */
360 { "RTR0DbgKrnlInfoRetain", (void *)(uintptr_t)RTR0DbgKrnlInfoRetain }, /* only-darwin, only-solaris, only-windows */
361#endif
362 { "RTR0MemAreKrnlAndUsrDifferent", (void *)(uintptr_t)RTR0MemAreKrnlAndUsrDifferent },
363 { "RTR0MemKernelIsValidAddr", (void *)(uintptr_t)RTR0MemKernelIsValidAddr },
364 { "RTR0MemKernelCopyFrom", (void *)(uintptr_t)RTR0MemKernelCopyFrom },
365 { "RTR0MemKernelCopyTo", (void *)(uintptr_t)RTR0MemKernelCopyTo },
366 { "RTR0MemObjAddress", (void *)(uintptr_t)RTR0MemObjAddress },
367 { "RTR0MemObjAddressR3", (void *)(uintptr_t)RTR0MemObjAddressR3 },
368 { "RTR0MemObjAllocContTag", (void *)(uintptr_t)RTR0MemObjAllocContTag },
369 { "RTR0MemObjAllocLowTag", (void *)(uintptr_t)RTR0MemObjAllocLowTag },
370 { "RTR0MemObjAllocPageTag", (void *)(uintptr_t)RTR0MemObjAllocPageTag },
371 { "RTR0MemObjAllocPhysExTag", (void *)(uintptr_t)RTR0MemObjAllocPhysExTag },
372 { "RTR0MemObjAllocPhysNCTag", (void *)(uintptr_t)RTR0MemObjAllocPhysNCTag },
373 { "RTR0MemObjAllocPhysTag", (void *)(uintptr_t)RTR0MemObjAllocPhysTag },
374 { "RTR0MemObjEnterPhysTag", (void *)(uintptr_t)RTR0MemObjEnterPhysTag },
375 { "RTR0MemObjFree", (void *)(uintptr_t)RTR0MemObjFree },
376 { "RTR0MemObjGetPagePhysAddr", (void *)(uintptr_t)RTR0MemObjGetPagePhysAddr },
377 { "RTR0MemObjIsMapping", (void *)(uintptr_t)RTR0MemObjIsMapping },
378 { "RTR0MemObjLockUserTag", (void *)(uintptr_t)RTR0MemObjLockUserTag },
379 { "RTR0MemObjMapKernelExTag", (void *)(uintptr_t)RTR0MemObjMapKernelExTag },
380 { "RTR0MemObjMapKernelTag", (void *)(uintptr_t)RTR0MemObjMapKernelTag },
381 { "RTR0MemObjMapUserTag", (void *)(uintptr_t)RTR0MemObjMapUserTag },
382 { "RTR0MemObjMapUserExTag", (void *)(uintptr_t)RTR0MemObjMapUserExTag },
383 { "RTR0MemObjProtect", (void *)(uintptr_t)RTR0MemObjProtect },
384 { "RTR0MemObjSize", (void *)(uintptr_t)RTR0MemObjSize },
385 { "RTR0MemUserCopyFrom", (void *)(uintptr_t)RTR0MemUserCopyFrom },
386 { "RTR0MemUserCopyTo", (void *)(uintptr_t)RTR0MemUserCopyTo },
387 { "RTR0MemUserIsValidAddr", (void *)(uintptr_t)RTR0MemUserIsValidAddr },
388 { "RTR0ProcHandleSelf", (void *)(uintptr_t)RTR0ProcHandleSelf },
389 { "RTSemEventCreate", (void *)(uintptr_t)RTSemEventCreate },
390 { "RTSemEventDestroy", (void *)(uintptr_t)RTSemEventDestroy },
391 { "RTSemEventGetResolution", (void *)(uintptr_t)RTSemEventGetResolution },
392 { "RTSemEventMultiCreate", (void *)(uintptr_t)RTSemEventMultiCreate },
393 { "RTSemEventMultiDestroy", (void *)(uintptr_t)RTSemEventMultiDestroy },
394 { "RTSemEventMultiGetResolution", (void *)(uintptr_t)RTSemEventMultiGetResolution },
395 { "RTSemEventMultiReset", (void *)(uintptr_t)RTSemEventMultiReset },
396 { "RTSemEventMultiSignal", (void *)(uintptr_t)RTSemEventMultiSignal },
397 { "RTSemEventMultiWait", (void *)(uintptr_t)RTSemEventMultiWait },
398 { "RTSemEventMultiWaitEx", (void *)(uintptr_t)RTSemEventMultiWaitEx },
399 { "RTSemEventMultiWaitExDebug", (void *)(uintptr_t)RTSemEventMultiWaitExDebug },
400 { "RTSemEventMultiWaitNoResume", (void *)(uintptr_t)RTSemEventMultiWaitNoResume },
401 { "RTSemEventSignal", (void *)(uintptr_t)RTSemEventSignal },
402 { "RTSemEventWait", (void *)(uintptr_t)RTSemEventWait },
403 { "RTSemEventWaitEx", (void *)(uintptr_t)RTSemEventWaitEx },
404 { "RTSemEventWaitExDebug", (void *)(uintptr_t)RTSemEventWaitExDebug },
405 { "RTSemEventWaitNoResume", (void *)(uintptr_t)RTSemEventWaitNoResume },
406 { "RTSemFastMutexCreate", (void *)(uintptr_t)RTSemFastMutexCreate },
407 { "RTSemFastMutexDestroy", (void *)(uintptr_t)RTSemFastMutexDestroy },
408 { "RTSemFastMutexRelease", (void *)(uintptr_t)RTSemFastMutexRelease },
409 { "RTSemFastMutexRequest", (void *)(uintptr_t)RTSemFastMutexRequest },
410 { "RTSemMutexCreate", (void *)(uintptr_t)RTSemMutexCreate },
411 { "RTSemMutexDestroy", (void *)(uintptr_t)RTSemMutexDestroy },
412 { "RTSemMutexRelease", (void *)(uintptr_t)RTSemMutexRelease },
413 { "RTSemMutexRequest", (void *)(uintptr_t)RTSemMutexRequest },
414 { "RTSemMutexRequestDebug", (void *)(uintptr_t)RTSemMutexRequestDebug },
415 { "RTSemMutexRequestNoResume", (void *)(uintptr_t)RTSemMutexRequestNoResume },
416 { "RTSemMutexRequestNoResumeDebug", (void *)(uintptr_t)RTSemMutexRequestNoResumeDebug },
417 { "RTSpinlockAcquire", (void *)(uintptr_t)RTSpinlockAcquire },
418 { "RTSpinlockCreate", (void *)(uintptr_t)RTSpinlockCreate },
419 { "RTSpinlockDestroy", (void *)(uintptr_t)RTSpinlockDestroy },
420 { "RTSpinlockRelease", (void *)(uintptr_t)RTSpinlockRelease },
421 { "RTStrCopy", (void *)(uintptr_t)RTStrCopy },
422 { "RTStrDupTag", (void *)(uintptr_t)RTStrDupTag },
423 { "RTStrFormat", (void *)(uintptr_t)RTStrFormat },
424 { "RTStrFormatNumber", (void *)(uintptr_t)RTStrFormatNumber },
425 { "RTStrFormatTypeDeregister", (void *)(uintptr_t)RTStrFormatTypeDeregister },
426 { "RTStrFormatTypeRegister", (void *)(uintptr_t)RTStrFormatTypeRegister },
427 { "RTStrFormatTypeSetUser", (void *)(uintptr_t)RTStrFormatTypeSetUser },
428 { "RTStrFormatV", (void *)(uintptr_t)RTStrFormatV },
429 { "RTStrFree", (void *)(uintptr_t)RTStrFree },
430 { "RTStrNCmp", (void *)(uintptr_t)RTStrNCmp },
431 { "RTStrPrintf", (void *)(uintptr_t)RTStrPrintf },
432 { "RTStrPrintfEx", (void *)(uintptr_t)RTStrPrintfEx },
433 { "RTStrPrintfExV", (void *)(uintptr_t)RTStrPrintfExV },
434 { "RTStrPrintfV", (void *)(uintptr_t)RTStrPrintfV },
435 { "RTThreadCreate", (void *)(uintptr_t)RTThreadCreate },
436 { "RTThreadCtxHookIsEnabled", (void *)(uintptr_t)RTThreadCtxHookIsEnabled },
437 { "RTThreadCtxHookCreate", (void *)(uintptr_t)RTThreadCtxHookCreate },
438 { "RTThreadCtxHookDestroy", (void *)(uintptr_t)RTThreadCtxHookDestroy },
439 { "RTThreadCtxHookDisable", (void *)(uintptr_t)RTThreadCtxHookDisable },
440 { "RTThreadCtxHookEnable", (void *)(uintptr_t)RTThreadCtxHookEnable },
441 { "RTThreadGetName", (void *)(uintptr_t)RTThreadGetName },
442 { "RTThreadGetNative", (void *)(uintptr_t)RTThreadGetNative },
443 { "RTThreadGetType", (void *)(uintptr_t)RTThreadGetType },
444 { "RTThreadIsInInterrupt", (void *)(uintptr_t)RTThreadIsInInterrupt },
445 { "RTThreadNativeSelf", (void *)(uintptr_t)RTThreadNativeSelf },
446 { "RTThreadPreemptDisable", (void *)(uintptr_t)RTThreadPreemptDisable },
447 { "RTThreadPreemptIsEnabled", (void *)(uintptr_t)RTThreadPreemptIsEnabled },
448 { "RTThreadPreemptIsPending", (void *)(uintptr_t)RTThreadPreemptIsPending },
449 { "RTThreadPreemptIsPendingTrusty", (void *)(uintptr_t)RTThreadPreemptIsPendingTrusty },
450 { "RTThreadPreemptIsPossible", (void *)(uintptr_t)RTThreadPreemptIsPossible },
451 { "RTThreadPreemptRestore", (void *)(uintptr_t)RTThreadPreemptRestore },
452 { "RTThreadSelf", (void *)(uintptr_t)RTThreadSelf },
453 { "RTThreadSelfName", (void *)(uintptr_t)RTThreadSelfName },
454 { "RTThreadSleep", (void *)(uintptr_t)RTThreadSleep },
455 { "RTThreadUserReset", (void *)(uintptr_t)RTThreadUserReset },
456 { "RTThreadUserSignal", (void *)(uintptr_t)RTThreadUserSignal },
457 { "RTThreadUserWait", (void *)(uintptr_t)RTThreadUserWait },
458 { "RTThreadUserWaitNoResume", (void *)(uintptr_t)RTThreadUserWaitNoResume },
459 { "RTThreadWait", (void *)(uintptr_t)RTThreadWait },
460 { "RTThreadWaitNoResume", (void *)(uintptr_t)RTThreadWaitNoResume },
461 { "RTThreadYield", (void *)(uintptr_t)RTThreadYield },
462 { "RTTimeNow", (void *)(uintptr_t)RTTimeNow },
463 { "RTTimerCanDoHighResolution", (void *)(uintptr_t)RTTimerCanDoHighResolution },
464 { "RTTimerChangeInterval", (void *)(uintptr_t)RTTimerChangeInterval },
465 { "RTTimerCreate", (void *)(uintptr_t)RTTimerCreate },
466 { "RTTimerCreateEx", (void *)(uintptr_t)RTTimerCreateEx },
467 { "RTTimerDestroy", (void *)(uintptr_t)RTTimerDestroy },
468 { "RTTimerGetSystemGranularity", (void *)(uintptr_t)RTTimerGetSystemGranularity },
469 { "RTTimerReleaseSystemGranularity", (void *)(uintptr_t)RTTimerReleaseSystemGranularity },
470 { "RTTimerRequestSystemGranularity", (void *)(uintptr_t)RTTimerRequestSystemGranularity },
471 { "RTTimerStart", (void *)(uintptr_t)RTTimerStart },
472 { "RTTimerStop", (void *)(uintptr_t)RTTimerStop },
473 { "RTTimeSystemMilliTS", (void *)(uintptr_t)RTTimeSystemMilliTS },
474 { "RTTimeSystemNanoTS", (void *)(uintptr_t)RTTimeSystemNanoTS },
475 { "RTUuidCompare", (void *)(uintptr_t)RTUuidCompare },
476 { "RTUuidCompareStr", (void *)(uintptr_t)RTUuidCompareStr },
477 { "RTUuidFromStr", (void *)(uintptr_t)RTUuidFromStr },
478/* SED: END */
479};
480
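/* Illustrative sketch (not part of the original file): how a symbol name could
 * be resolved against the export table above with a plain linear scan. The
 * szName member name is an assumption here; the real resolution happens in the
 * loader ioctl code further down in this file. */
#if 0 /* example only */
static void *supdrvExampleResolveSymbol(const char *pszSymbol)
{
    for (size_t i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
        if (!strcmp(g_aFunctions[i].szName, pszSymbol)) /* szName: assumed field name */
            return g_aFunctions[i].pfn;
    return NULL;
}
#endif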
481#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
482/**
483 * Drag in the rest of IPRT since we share it with the
484 * rest of the kernel modules on darwin.
485 */
486struct CLANG11WERIDNESS { PFNRT pfn; } g_apfnVBoxDrvIPRTDeps[] =
487{
488 /* VBoxNetAdp */
489 { (PFNRT)RTRandBytes },
490 /* VBoxUSB */
491 { (PFNRT)RTPathStripFilename },
492#if !defined(RT_OS_FREEBSD)
493 { (PFNRT)RTHandleTableAlloc },
494 { (PFNRT)RTStrPurgeEncoding },
495#endif
496 { NULL }
497};
498#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
499
500
501
502/**
503 * Initializes the device extension structure.
504 *
505 * @returns IPRT status code.
506 * @param pDevExt The device extension to initialize.
507 * @param cbSession The size of the session structure. The size of
508 * SUPDRVSESSION may be smaller when SUPDRV_AGNOSTIC is
509 * defined because we're skipping the OS specific members
510 * then.
511 */
512int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt, size_t cbSession)
513{
514 int rc;
515
516#ifdef SUPDRV_WITH_RELEASE_LOGGER
517 /*
518 * Create the release log.
519 */
520 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
521 PRTLOGGER pRelLogger;
522 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
523 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
524 if (RT_SUCCESS(rc))
525 RTLogRelSetDefaultInstance(pRelLogger);
526 /** @todo Add native hook for getting logger config parameters and setting
527 * them. On linux we should use the module parameter stuff... */
528#endif
529
530#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
531 /*
532 * Require SSE2 to be present.
533 */
534 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
535 {
536 SUPR0Printf("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1));
537 return VERR_UNSUPPORTED_CPU;
538 }
539#endif
540
541 /*
542 * Initialize it.
543 */
544 memset(pDevExt, 0, sizeof(*pDevExt)); /* Does not wipe OS specific tail section of the structure. */
545 pDevExt->Spinlock = NIL_RTSPINLOCK;
546 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
547 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
548#ifdef SUPDRV_USE_MUTEX_FOR_LDR
549 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
550#else
551 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
552#endif
553#ifdef SUPDRV_USE_MUTEX_FOR_GIP
554 pDevExt->mtxGip = NIL_RTSEMMUTEX;
555 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
556#else
557 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
558 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
559#endif
560
561 rc = RTSpinlockCreate(&pDevExt->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvDevExt");
562 if (RT_SUCCESS(rc))
563 rc = RTSpinlockCreate(&pDevExt->hGipSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvGip");
564 if (RT_SUCCESS(rc))
565 rc = RTSpinlockCreate(&pDevExt->hSessionHashTabSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvSession");
566
567 if (RT_SUCCESS(rc))
568#ifdef SUPDRV_USE_MUTEX_FOR_LDR
569 rc = RTSemMutexCreate(&pDevExt->mtxLdr);
570#else
571 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
572#endif
573 if (RT_SUCCESS(rc))
574#ifdef SUPDRV_USE_MUTEX_FOR_GIP
575 rc = RTSemMutexCreate(&pDevExt->mtxTscDelta);
576#else
577 rc = RTSemFastMutexCreate(&pDevExt->mtxTscDelta);
578#endif
579 if (RT_SUCCESS(rc))
580 {
581 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
582 if (RT_SUCCESS(rc))
583 {
584#ifdef SUPDRV_USE_MUTEX_FOR_GIP
585 rc = RTSemMutexCreate(&pDevExt->mtxGip);
586#else
587 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
588#endif
589 if (RT_SUCCESS(rc))
590 {
591 rc = supdrvGipCreate(pDevExt);
592 if (RT_SUCCESS(rc))
593 {
594 rc = supdrvTracerInit(pDevExt);
595 if (RT_SUCCESS(rc))
596 {
597 pDevExt->pLdrInitImage = NULL;
598 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
599 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
600 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
601 pDevExt->cbSession = (uint32_t)cbSession;
602
603 /*
604 * Fixup the absolute symbols.
605 *
606 * Because of the table indexing assumptions we'll have a little #ifdef orgy
607 * here rather than distributing this to OS specific files. At least for now.
608 */
609#ifdef RT_OS_DARWIN
610# if ARCH_BITS == 32
611 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
612 {
613 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
614 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
615 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
616 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
617 }
618 else
619 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
620 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
621 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
622 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
623 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
624 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
625 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
626# else /* 64-bit darwin: */
627 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
628 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
629 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
630 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
631 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
632 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
633 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
634 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
635 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
636 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
637
638# endif
639#else /* !RT_OS_DARWIN */
640# if ARCH_BITS == 64
641 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
642 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
643 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
644 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
645# else
646 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
647# endif
648 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
649 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
650 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
651 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
652 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
653 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
654#endif /* !RT_OS_DARWIN */
655 return VINF_SUCCESS;
656 }
657
658 supdrvGipDestroy(pDevExt);
659 }
660
661#ifdef SUPDRV_USE_MUTEX_FOR_GIP
662 RTSemMutexDestroy(pDevExt->mtxGip);
663 pDevExt->mtxGip = NIL_RTSEMMUTEX;
664#else
665 RTSemFastMutexDestroy(pDevExt->mtxGip);
666 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
667#endif
668 }
669 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
670 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
671 }
672 }
673
674#ifdef SUPDRV_USE_MUTEX_FOR_GIP
675 RTSemMutexDestroy(pDevExt->mtxTscDelta);
676 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
677#else
678 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
679 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
680#endif
681#ifdef SUPDRV_USE_MUTEX_FOR_LDR
682 RTSemMutexDestroy(pDevExt->mtxLdr);
683 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
684#else
685 RTSemFastMutexDestroy(pDevExt->mtxLdr);
686 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
687#endif
688 RTSpinlockDestroy(pDevExt->Spinlock);
689 pDevExt->Spinlock = NIL_RTSPINLOCK;
690 RTSpinlockDestroy(pDevExt->hGipSpinlock);
691 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
692 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
693 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
694
695#ifdef SUPDRV_WITH_RELEASE_LOGGER
696 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
697 RTLogDestroy(RTLogSetDefaultInstance(NULL));
698#endif
699
700 return rc;
701}
702
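/* Illustrative sketch (not part of the original file): how the OS specific glue
 * is expected to pair supdrvInitDevExt with supdrvDeleteDevExt over the life of
 * the driver. Passing sizeof(SUPDRVSESSION) for cbSession and the function
 * names are assumptions for the example; supdrvInitDevExt undoes its own work
 * on failure, so there is nothing extra to clean up in that case. */
#if 0 /* example only */
static int supdrvExampleModuleInit(PSUPDRVDEVEXT pDevExt)
{
    int rc = supdrvInitDevExt(pDevExt, sizeof(SUPDRVSESSION));
    if (RT_FAILURE(rc))
        return rc;
    /* ... create the device node / character device here ... */
    return VINF_SUCCESS;
}

static void supdrvExampleModuleTerm(PSUPDRVDEVEXT pDevExt)
{
    supdrvDeleteDevExt(pDevExt); /* tears down mutexes, spinlocks, the GIP and the loggers */
}
#endif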
703
704/**
705 * Delete the device extension (e.g. cleanup members).
706 *
707 * @param pDevExt The device extension to delete.
708 */
709void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
710{
711 PSUPDRVOBJ pObj;
712 PSUPDRVUSAGE pUsage;
713
714 /*
715 * Kill mutexes and spinlocks.
716 */
717#ifdef SUPDRV_USE_MUTEX_FOR_GIP
718 RTSemMutexDestroy(pDevExt->mtxGip);
719 pDevExt->mtxGip = NIL_RTSEMMUTEX;
720 RTSemMutexDestroy(pDevExt->mtxTscDelta);
721 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
722#else
723 RTSemFastMutexDestroy(pDevExt->mtxGip);
724 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
725 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
726 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
727#endif
728#ifdef SUPDRV_USE_MUTEX_FOR_LDR
729 RTSemMutexDestroy(pDevExt->mtxLdr);
730 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
731#else
732 RTSemFastMutexDestroy(pDevExt->mtxLdr);
733 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
734#endif
735 RTSpinlockDestroy(pDevExt->Spinlock);
736 pDevExt->Spinlock = NIL_RTSPINLOCK;
737 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
738 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
739 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
740 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
741
742 /*
743 * Free lists.
744 */
745 /* objects. */
746 pObj = pDevExt->pObjs;
747 Assert(!pObj); /* (can trigger on forced unloads) */
748 pDevExt->pObjs = NULL;
749 while (pObj)
750 {
751 void *pvFree = pObj;
752 pObj = pObj->pNext;
753 RTMemFree(pvFree);
754 }
755
756 /* usage records. */
757 pUsage = pDevExt->pUsageFree;
758 pDevExt->pUsageFree = NULL;
759 while (pUsage)
760 {
761 void *pvFree = pUsage;
762 pUsage = pUsage->pNext;
763 RTMemFree(pvFree);
764 }
765
766 /* kill the GIP. */
767 supdrvGipDestroy(pDevExt);
768 RTSpinlockDestroy(pDevExt->hGipSpinlock);
769 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
770
771 supdrvTracerTerm(pDevExt);
772
773#ifdef SUPDRV_WITH_RELEASE_LOGGER
774 /* destroy the loggers. */
775 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
776 RTLogDestroy(RTLogSetDefaultInstance(NULL));
777#endif
778}
779
780
781/**
782 * Create session.
783 *
784 * @returns IPRT status code.
785 * @param pDevExt Device extension.
786 * @param fUser Flag indicating whether this is a user or kernel
787 * session.
788 * @param fUnrestricted Unrestricted access (system) or restricted access
789 * (user)?
790 * @param ppSession Where to store the pointer to the session data.
791 */
792int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, bool fUnrestricted, PSUPDRVSESSION *ppSession)
793{
794 int rc;
795 PSUPDRVSESSION pSession;
796
797 if (!SUP_IS_DEVEXT_VALID(pDevExt))
798 return VERR_INVALID_PARAMETER;
799
800 /*
801 * Allocate memory for the session data.
802 */
803 pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(pDevExt->cbSession);
804 if (pSession)
805 {
806 /* Initialize session data. */
807 rc = RTSpinlockCreate(&pSession->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "SUPDrvSession");
808 if (!rc)
809 {
810 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
811 RTHANDLETABLE_FLAGS_LOCKED_IRQ_SAFE | RTHANDLETABLE_FLAGS_CONTEXT,
812 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
813 if (RT_SUCCESS(rc))
814 {
815 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
816 pSession->pDevExt = pDevExt;
817 pSession->u32Cookie = BIRD_INV;
818 pSession->fUnrestricted = fUnrestricted;
819 /*pSession->fInHashTable = false; */
820 pSession->cRefs = 1;
821 /*pSession->pCommonNextHash = NULL;
822 pSession->ppOsSessionPtr = NULL; */
823 if (fUser)
824 {
825 pSession->Process = RTProcSelf();
826 pSession->R0Process = RTR0ProcHandleSelf();
827 }
828 else
829 {
830 pSession->Process = NIL_RTPROCESS;
831 pSession->R0Process = NIL_RTR0PROCESS;
832 }
833 /*pSession->pLdrUsage = NULL;
834 pSession->pVM = NULL;
835 pSession->pUsage = NULL;
836 pSession->pGip = NULL;
837 pSession->fGipReferenced = false;
838 pSession->Bundle.cUsed = 0; */
839 pSession->Uid = NIL_RTUID;
840 pSession->Gid = NIL_RTGID;
841 /*pSession->uTracerData = 0;*/
842 pSession->hTracerCaller = NIL_RTNATIVETHREAD;
843 RTListInit(&pSession->TpProviders);
844 /*pSession->cTpProviders = 0;*/
845 /*pSession->cTpProbesFiring = 0;*/
846 RTListInit(&pSession->TpUmods);
847 /*RT_ZERO(pSession->apTpLookupTable);*/
848
849 VBOXDRV_SESSION_CREATE(pSession, fUser);
850 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
851 return VINF_SUCCESS;
852 }
853
854 RTSpinlockDestroy(pSession->Spinlock);
855 }
856 RTMemFree(pSession);
857 *ppSession = NULL;
858 Log(("Failed to create spinlock, rc=%d!\n", rc));
859 }
860 else
861 rc = VERR_NO_MEMORY;
862
863 return rc;
864}
865
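/* Illustrative sketch (not part of the original file): the session returned by
 * supdrvCreateSession starts with cRefs = 1, and that initial reference is what
 * the OS specific close path eventually gives back via supdrvSessionRelease,
 * which destroys the session when the count reaches zero. The function names
 * are assumptions for the example. */
#if 0 /* example only */
static int supdrvExampleOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION *ppSession)
{
    /* A restricted user session; the reference is owned by the file handle. */
    return supdrvCreateSession(pDevExt, true /*fUser*/, false /*fUnrestricted*/, ppSession);
}

static void supdrvExampleClose(PSUPDRVSESSION pSession)
{
    /* Dropping the last reference runs supdrvCleanupSession/supdrvDestroySession. */
    supdrvSessionRelease(pSession);
}
#endif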
866
867/**
868 * Cleans up the session in the context of the process to which it belongs; the
869 * caller will free the session and the session spinlock.
870 *
871 * This should normally occur when the session is closed or as the process
872 * exits. Careful reference counting in the OS specific code makes sure that
873 * there cannot be any races between process/handle cleanup callbacks and
874 * threads doing I/O control calls.
875 *
876 * @param pDevExt The device extension.
877 * @param pSession Session data.
878 */
879static void supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
880{
881 int rc;
882 PSUPDRVBUNDLE pBundle;
883 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
884
885 Assert(!pSession->fInHashTable);
886 Assert(!pSession->ppOsSessionPtr);
887 AssertLogRelMsg(pSession->R0Process == RTR0ProcHandleSelf() || pSession->R0Process == NIL_RTR0PROCESS,
888 ("R0Process=%p cur=%p; curpid=%u\n",
889 pSession->R0Process, RTR0ProcHandleSelf(), RTProcSelf()));
890
891 /*
892 * Remove logger instances related to this session.
893 */
894 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
895
896 /*
897 * Destroy the handle table.
898 */
899 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
900 AssertRC(rc);
901 pSession->hHandleTable = NIL_RTHANDLETABLE;
902
903 /*
904 * Release object references made in this session.
905 * In theory there should be no one racing us in this session.
906 */
907 Log2(("release objects - start\n"));
908 if (pSession->pUsage)
909 {
910 PSUPDRVUSAGE pUsage;
911 RTSpinlockAcquire(pDevExt->Spinlock);
912
913 while ((pUsage = pSession->pUsage) != NULL)
914 {
915 PSUPDRVOBJ pObj = pUsage->pObj;
916 pSession->pUsage = pUsage->pNext;
917
918 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
919 if (pUsage->cUsage < pObj->cUsage)
920 {
921 pObj->cUsage -= pUsage->cUsage;
922 RTSpinlockRelease(pDevExt->Spinlock);
923 }
924 else
925 {
926 /* Destroy the object and free the record. */
927 if (pDevExt->pObjs == pObj)
928 pDevExt->pObjs = pObj->pNext;
929 else
930 {
931 PSUPDRVOBJ pObjPrev;
932 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
933 if (pObjPrev->pNext == pObj)
934 {
935 pObjPrev->pNext = pObj->pNext;
936 break;
937 }
938 Assert(pObjPrev);
939 }
940 RTSpinlockRelease(pDevExt->Spinlock);
941
942 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
943 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
944 if (pObj->pfnDestructor)
945 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
946 RTMemFree(pObj);
947 }
948
949 /* free it and continue. */
950 RTMemFree(pUsage);
951
952 RTSpinlockAcquire(pDevExt->Spinlock);
953 }
954
955 RTSpinlockRelease(pDevExt->Spinlock);
956 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
957 }
958 Log2(("release objects - done\n"));
959
960 /*
961 * Make sure the associated VM pointers are NULL.
962 */
963 if (pSession->pSessionGVM || pSession->pSessionVM || pSession->pFastIoCtrlVM)
964 {
965 SUPR0Printf("supdrvCleanupSession: VM not disassociated! pSessionGVM=%p pSessionVM=%p pFastIoCtrlVM=%p\n",
966 pSession->pSessionGVM, pSession->pSessionVM, pSession->pFastIoCtrlVM);
967 pSession->pSessionGVM = NULL;
968 pSession->pSessionVM = NULL;
969 pSession->pFastIoCtrlVM = NULL;
970 }
971
972 /*
973 * Do tracer cleanups related to this session.
974 */
975 Log2(("release tracer stuff - start\n"));
976 supdrvTracerCleanupSession(pDevExt, pSession);
977 Log2(("release tracer stuff - end\n"));
978
979 /*
980 * Release memory allocated in the session.
981 *
982 * We do not serialize this as we assume that the application will
983 * not allocate memory while closing the file handle object.
984 */
985 Log2(("freeing memory:\n"));
986 pBundle = &pSession->Bundle;
987 while (pBundle)
988 {
989 PSUPDRVBUNDLE pToFree;
990 unsigned i;
991
992 /*
993 * Check and unlock all entries in the bundle.
994 */
995 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
996 {
997 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
998 {
999 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
1000 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
1001 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
1002 {
1003 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
1004 AssertRC(rc); /** @todo figure out how to handle this. */
1005 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
1006 }
1007 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
1008 AssertRC(rc); /** @todo figure out how to handle this. */
1009 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
1010 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
1011 }
1012 }
1013
1014 /*
1015 * Advance and free previous bundle.
1016 */
1017 pToFree = pBundle;
1018 pBundle = pBundle->pNext;
1019
1020 pToFree->pNext = NULL;
1021 pToFree->cUsed = 0;
1022 if (pToFree != &pSession->Bundle)
1023 RTMemFree(pToFree);
1024 }
1025 Log2(("freeing memory - done\n"));
1026
1027 /*
1028 * Deregister component factories.
1029 */
1030 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
1031 Log2(("deregistering component factories:\n"));
1032 if (pDevExt->pComponentFactoryHead)
1033 {
1034 PSUPDRVFACTORYREG pPrev = NULL;
1035 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
1036 while (pCur)
1037 {
1038 if (pCur->pSession == pSession)
1039 {
1040 /* unlink it */
1041 PSUPDRVFACTORYREG pNext = pCur->pNext;
1042 if (pPrev)
1043 pPrev->pNext = pNext;
1044 else
1045 pDevExt->pComponentFactoryHead = pNext;
1046
1047 /* free it */
1048 pCur->pNext = NULL;
1049 pCur->pSession = NULL;
1050 pCur->pFactory = NULL;
1051 RTMemFree(pCur);
1052
1053 /* next */
1054 pCur = pNext;
1055 }
1056 else
1057 {
1058 /* next */
1059 pPrev = pCur;
1060 pCur = pCur->pNext;
1061 }
1062 }
1063 }
1064 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
1065 Log2(("deregistering component factories - done\n"));
1066
1067 /*
1068 * Loaded images need to be dereferenced and possibly freed up.
1069 */
1070 supdrvLdrLock(pDevExt);
1071 Log2(("freeing images:\n"));
1072 if (pSession->pLdrUsage)
1073 {
1074 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
1075 pSession->pLdrUsage = NULL;
1076 while (pUsage)
1077 {
1078 void *pvFree = pUsage;
1079 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
1080 uint32_t cUsage = pUsage->cRing0Usage + pUsage->cRing3Usage;
1081 if (pImage->cUsage > cUsage)
1082 pImage->cUsage -= cUsage;
1083 else
1084 supdrvLdrFree(pDevExt, pImage);
1085 pUsage->pImage = NULL;
1086 pUsage = pUsage->pNext;
1087 RTMemFree(pvFree);
1088 }
1089 }
1090 supdrvLdrUnlock(pDevExt);
1091 Log2(("freeing images - done\n"));
1092
1093 /*
1094 * Unmap the GIP.
1095 */
1096 Log2(("umapping GIP:\n"));
1097 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
1098 {
1099 SUPR0GipUnmap(pSession);
1100 pSession->fGipReferenced = 0;
1101 }
1102 Log2(("umapping GIP - done\n"));
1103}
1104
1105
1106/**
1107 * Common code for freeing a session when the reference count reaches zero.
1108 *
1109 * @param pDevExt Device extension.
1110 * @param pSession Session data.
1111 * This data will be freed by this routine.
1112 */
1113static void supdrvDestroySession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1114{
1115 VBOXDRV_SESSION_CLOSE(pSession);
1116
1117 /*
1118 * Cleanup the session first.
1119 */
1120 supdrvCleanupSession(pDevExt, pSession);
1121 supdrvOSCleanupSession(pDevExt, pSession);
1122
1123 /*
1124 * Free the rest of the session stuff.
1125 */
1126 RTSpinlockDestroy(pSession->Spinlock);
1127 pSession->Spinlock = NIL_RTSPINLOCK;
1128 pSession->pDevExt = NULL;
1129 RTMemFree(pSession);
1130 LogFlow(("supdrvDestroySession: returns\n"));
1131}
1132
1133
1134/**
1135 * Inserts the session into the global hash table.
1136 *
1137 * @retval VINF_SUCCESS on success.
1138 * @retval VERR_WRONG_ORDER if the session was already inserted (asserted).
1139 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1140 * session (asserted).
1141 * @retval VERR_DUPLICATE if there is already a session for that pid.
1142 *
1143 * @param pDevExt The device extension.
1144 * @param pSession The session.
1145 * @param ppOsSessionPtr Pointer to the OS session pointer, if any is
1146 * available and used. This will be set to point to the
1147 * session while under the protection of the session
1148 * hash table spinlock. It will also be kept in
1149 * PSUPDRVSESSION::ppOsSessionPtr for lookup and
1150 * cleanup use.
1151 * @param pvUser Argument for supdrvOSSessionHashTabInserted.
1152 */
1153int VBOXCALL supdrvSessionHashTabInsert(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVSESSION *ppOsSessionPtr,
1154 void *pvUser)
1155{
1156 PSUPDRVSESSION pCur;
1157 unsigned iHash;
1158
1159 /*
1160 * Validate input.
1161 */
1162 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1163 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1164
1165 /*
1166 * Calculate the hash table index and acquire the spinlock.
1167 */
1168 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1169
1170 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1171
1172 /*
1173 * If there is a collision, we need to carefully check whether we got a
1174 * duplicate. There can only be one open session per process.
1175 */
1176 pCur = pDevExt->apSessionHashTab[iHash];
1177 if (pCur)
1178 {
1179 while (pCur && pCur->Process != pSession->Process)
1180 pCur = pCur->pCommonNextHash;
1181
1182 if (pCur)
1183 {
1184 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1185 if (pCur == pSession)
1186 {
1187 Assert(pSession->fInHashTable);
1188 AssertFailed();
1189 return VERR_WRONG_ORDER;
1190 }
1191 Assert(!pSession->fInHashTable);
1192 if (pCur->R0Process == pSession->R0Process)
1193 return VERR_RESOURCE_IN_USE;
1194 return VERR_DUPLICATE;
1195 }
1196 }
1197 Assert(!pSession->fInHashTable);
1198 Assert(!pSession->ppOsSessionPtr);
1199
1200 /*
1201 * Insert it, doing a callout to the OS specific code in case it has
1202 * anything it wishes to do while we're holding the spinlock.
1203 */
1204 pSession->pCommonNextHash = pDevExt->apSessionHashTab[iHash];
1205 pDevExt->apSessionHashTab[iHash] = pSession;
1206 pSession->fInHashTable = true;
1207 ASMAtomicIncS32(&pDevExt->cSessions);
1208
1209 pSession->ppOsSessionPtr = ppOsSessionPtr;
1210 if (ppOsSessionPtr)
1211 ASMAtomicWritePtr(ppOsSessionPtr, pSession);
1212
1213 supdrvOSSessionHashTabInserted(pDevExt, pSession, pvUser);
1214
1215 /*
1216 * Retain a reference for the pointer in the session table.
1217 */
1218 ASMAtomicIncU32(&pSession->cRefs);
1219
1220 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1221 return VINF_SUCCESS;
1222}
1223
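/* Illustrative sketch (not part of the original file): the insert/remove pair as
 * an OS specific open/close path would typically use it. The hash table takes
 * its own reference on insert and drops it on remove, so the caller keeps its
 * reference across both calls; passing NULL for ppOsSessionPtr and pvUser is an
 * assumption for the example. */
#if 0 /* example only */
static int supdrvExampleRegisterSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    return supdrvSessionHashTabInsert(pDevExt, pSession, NULL /*ppOsSessionPtr*/, NULL /*pvUser*/);
}

static void supdrvExampleUnregisterSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    supdrvSessionHashTabRemove(pDevExt, pSession, NULL /*pvUser*/);
}
#endif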
1224
1225/**
1226 * Removes the session from the global hash table.
1227 *
1228 * @retval VINF_SUCCESS on success.
1229 * @retval VERR_NOT_FOUND if the session was already removed (asserted).
1230 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1231 * session (asserted).
1232 *
1233 * @param pDevExt The device extension.
1234 * @param pSession The session. The caller is expected to have a reference
1235 * to this so it won't croak on us when we release the hash
1236 * table reference.
1237 * @param pvUser OS specific context value for the
1238 * supdrvOSSessionHashTabRemoved callback.
1239 */
1240int VBOXCALL supdrvSessionHashTabRemove(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, void *pvUser)
1241{
1242 PSUPDRVSESSION pCur;
1243 unsigned iHash;
1244 int32_t cRefs;
1245
1246 /*
1247 * Validate input.
1248 */
1249 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1250 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1251
1252 /*
1253 * Calculate the hash table index and acquire the spinlock.
1254 */
1255 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1256
1257 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1258
1259 /*
1260 * Unlink it.
1261 */
1262 pCur = pDevExt->apSessionHashTab[iHash];
1263 if (pCur == pSession)
1264 pDevExt->apSessionHashTab[iHash] = pSession->pCommonNextHash;
1265 else
1266 {
1267 PSUPDRVSESSION pPrev = pCur;
1268 while (pCur && pCur != pSession)
1269 {
1270 pPrev = pCur;
1271 pCur = pCur->pCommonNextHash;
1272 }
1273 if (pCur)
1274 pPrev->pCommonNextHash = pCur->pCommonNextHash;
1275 else
1276 {
1277 Assert(!pSession->fInHashTable);
1278 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1279 return VERR_NOT_FOUND;
1280 }
1281 }
1282
1283 pSession->pCommonNextHash = NULL;
1284 pSession->fInHashTable = false;
1285
1286 ASMAtomicDecS32(&pDevExt->cSessions);
1287
1288 /*
1289 * Clear OS specific session pointer if available and do the OS callback.
1290 */
1291 if (pSession->ppOsSessionPtr)
1292 {
1293 ASMAtomicCmpXchgPtr(pSession->ppOsSessionPtr, NULL, pSession);
1294 pSession->ppOsSessionPtr = NULL;
1295 }
1296
1297 supdrvOSSessionHashTabRemoved(pDevExt, pSession, pvUser);
1298
1299 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1300
1301 /*
1302 * Drop the reference the hash table had to the session. This shouldn't
1303 * be the last reference!
1304 */
1305 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1306 Assert(cRefs > 0 && cRefs < _1M);
1307 if (cRefs == 0)
1308 supdrvDestroySession(pDevExt, pSession);
1309
1310 return VINF_SUCCESS;
1311}
1312
1313
1314/**
1315 * Looks up the session for the current process in the global hash table or in
1316 * the OS specific session pointer.
1317 *
1318 * @returns Pointer to the session with a reference that the caller must
1319 * release. If no valid session was found, NULL is returned.
1320 *
1321 * @param pDevExt The device extension.
1322 * @param Process The process ID.
1323 * @param R0Process The ring-0 process handle.
1324 * @param ppOsSessionPtr The OS session pointer if available. If not NULL,
1325 * this is used instead of the hash table. For
1326 * additional safety it must then be equal to the
1327 * SUPDRVSESSION::ppOsSessionPtr member.
1328 * This can be NULL even if the OS has a session
1329 * pointer.
1330 */
1331PSUPDRVSESSION VBOXCALL supdrvSessionHashTabLookup(PSUPDRVDEVEXT pDevExt, RTPROCESS Process, RTR0PROCESS R0Process,
1332 PSUPDRVSESSION *ppOsSessionPtr)
1333{
1334 PSUPDRVSESSION pCur;
1335 unsigned iHash;
1336
1337 /*
1338 * Validate input.
1339 */
1340 AssertReturn(R0Process != NIL_RTR0PROCESS, NULL);
1341
1342 /*
1343 * Calculate the hash table index and acquire the spinlock.
1344 */
1345 iHash = SUPDRV_SESSION_HASH(Process);
1346
1347 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1348
1349 /*
1350 * If an OS session pointer is provided, always use it.
1351 */
1352 if (ppOsSessionPtr)
1353 {
1354 pCur = *ppOsSessionPtr;
1355 if ( pCur
1356 && ( pCur->ppOsSessionPtr != ppOsSessionPtr
1357 || pCur->Process != Process
1358 || pCur->R0Process != R0Process) )
1359 pCur = NULL;
1360 }
1361 else
1362 {
1363 /*
1364 * Otherwise, do the hash table lookup.
1365 */
1366 pCur = pDevExt->apSessionHashTab[iHash];
1367 while ( pCur
1368 && ( pCur->Process != Process
1369 || pCur->R0Process != R0Process) )
1370 pCur = pCur->pCommonNextHash;
1371 }
1372
1373 /*
1374 * Retain the session.
1375 */
1376 if (pCur)
1377 {
1378 uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
1379 NOREF(cRefs);
1380 Assert(cRefs > 1 && cRefs < _1M);
1381 }
1382
1383 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1384
1385 return pCur;
1386}
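/*
 * Illustrative sketch (not compiled in): the usual calling pattern.  The
 * returned session carries a reference which must be handed back via
 * supdrvSessionRelease() when the caller is done with it.
 *
 *     PSUPDRVSESSION pSession = supdrvSessionHashTabLookup(pDevExt, RTProcSelf(),
 *                                                          RTR0ProcHandleSelf(), NULL);
 *     if (pSession)
 *     {
 *         ... use the session ...
 *         supdrvSessionRelease(pSession);
 *     }
 */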
1387
1388
1389/**
1390 * Retain a session to make sure it doesn't go away while it is in use.
1391 *
1392 * @returns New reference count on success, UINT32_MAX on failure.
1393 * @param pSession Session data.
1394 */
1395uint32_t VBOXCALL supdrvSessionRetain(PSUPDRVSESSION pSession)
1396{
1397 uint32_t cRefs;
1398 AssertPtrReturn(pSession, UINT32_MAX);
1399 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1400
1401 cRefs = ASMAtomicIncU32(&pSession->cRefs);
1402 AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1403 return cRefs;
1404}
1405
1406
1407/**
1408 * Releases a given session.
1409 *
1410 * @returns New reference count on success (0 if closed), UINT32_MAX on failure.
1411 * @param pSession Session data.
1412 */
1413uint32_t VBOXCALL supdrvSessionRelease(PSUPDRVSESSION pSession)
1414{
1415 uint32_t cRefs;
1416 AssertPtrReturn(pSession, UINT32_MAX);
1417 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1418
1419 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1420 AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1421 if (cRefs == 0)
1422 supdrvDestroySession(pSession->pDevExt, pSession);
1423 return cRefs;
1424}
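/*
 * Illustrative sketch (not compiled in): keeping a session alive across a
 * deferred piece of work.  Retain before handing the pointer elsewhere and
 * release when that work is done; destruction happens automatically when the
 * last reference is dropped.
 *
 *     if (supdrvSessionRetain(pSession) != UINT32_MAX)
 *     {
 *         ... queue pSession for later processing ...
 *         supdrvSessionRelease(pSession);   // matching release once processed
 *     }
 */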
1425
1426
1427/**
1428 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1429 *
1430 * @returns IPRT status code, see SUPR0ObjAddRef.
1431 * @param hHandleTable The handle table handle. Ignored.
1432 * @param pvObj The object pointer.
1433 * @param pvCtx Context, the handle type. Ignored.
1434 * @param pvUser Session pointer.
1435 */
1436static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
1437{
1438 NOREF(pvCtx);
1439 NOREF(hHandleTable);
1440 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
1441}
1442
1443
1444/**
1445 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1446 *
1447 * @param hHandleTable The handle table handle. Ignored.
1448 * @param h The handle value. Ignored.
1449 * @param pvObj The object pointer.
1450 * @param pvCtx Context, the handle type. Ignored.
1451 * @param pvUser Session pointer.
1452 */
1453static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
1454{
1455 NOREF(pvCtx);
1456 NOREF(h);
1457 NOREF(hHandleTable);
1458 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
1459}
1460
1461
1462/**
1463 * Fast path I/O Control worker.
1464 *
1465 * @returns VBox status code that should be passed down to ring-3 unchanged.
1466 * @param uOperation SUP_VMMR0_DO_XXX (not the I/O control number!).
1467 * @param idCpu VMCPU id.
1468 * @param pDevExt Device extension.
1469 * @param pSession Session data.
1470 */
1471int VBOXCALL supdrvIOCtlFast(uintptr_t uOperation, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1472{
1473 /*
1474 * Validate input and check that the VM has a session.
1475 */
1476 if (RT_LIKELY(RT_VALID_PTR(pSession)))
1477 {
1478 PVM pVM = pSession->pSessionVM;
1479 PGVM pGVM = pSession->pSessionGVM;
1480 if (RT_LIKELY( pGVM != NULL
1481 && pVM != NULL
1482 && pVM == pSession->pFastIoCtrlVM))
1483 {
1484 if (RT_LIKELY(pDevExt->pfnVMMR0EntryFast))
1485 {
1486 /*
1487 * Make the call.
1488 */
1489 pDevExt->pfnVMMR0EntryFast(pGVM, pVM, idCpu, uOperation);
1490 return VINF_SUCCESS;
1491 }
1492
1493 SUPR0Printf("supdrvIOCtlFast: pfnVMMR0EntryFast is NULL\n");
1494 }
1495 else
1496 SUPR0Printf("supdrvIOCtlFast: Misconfig session: pGVM=%p pVM=%p pFastIoCtrlVM=%p\n",
1497 pGVM, pVM, pSession->pFastIoCtrlVM);
1498 }
1499 else
1500 SUPR0Printf("supdrvIOCtlFast: Bad session pointer %p\n", pSession);
1501 return VERR_INTERNAL_ERROR;
1502}
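/*
 * Illustrative sketch (not compiled in): the platform specific ioctl
 * dispatcher is expected to decode the fast ioctl number into a
 * SUP_VMMR0_DO_XXX operation and then simply forward the call; how that
 * decoding is done is OS specific and not shown here.
 *
 *     rcRet = supdrvIOCtlFast(uOperation, idCpu, pDevExt, pSession);
 *     return rcRet;   // passed back to ring-3 unchanged
 */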
1503
1504
1505/**
1506 * Helper for supdrvIOCtl used to validate module names passed to SUP_IOCTL_LDR_OPEN.
1507 *
1508 * Checks if pszName contains any invalid characters. We would use strpbrk
1509 * here if it were on the RedHat kABI white list, see
1510 * http://www.kerneldrivers.org/RHEL5.
1511 *
1512 * @returns true if fine, false if not.
1513 * @param pszName The module name to check.
1514 */
1515static bool supdrvIsLdrModuleNameValid(const char *pszName)
1516{
1517 int chCur;
1518 while ((chCur = *pszName++) != '\0')
1519 {
1520 static const char s_szInvalidChars[] = ";:()[]{}/\\|&*%#@!~`\"'";
1521 unsigned offInv = RT_ELEMENTS(s_szInvalidChars);
1522 while (offInv-- > 0)
1523 if (s_szInvalidChars[offInv] == chCur)
1524 return false;
1525 }
1526 return true;
1527}
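/*
 * Illustrative examples (not compiled in):
 *     supdrvIsLdrModuleNameValid("VMMR0.r0")    -> true
 *     supdrvIsLdrModuleNameValid("evil|module") -> false  ('|' is on the reject list)
 */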
1528
1529
1530
1531/**
1532 * I/O Control inner worker (split out of supdrvIOCtl for tracing reasons).
1533 *
1534 * @returns IPRT status code.
1535 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1536 *
1537 * @param uIOCtl Function number.
1538 * @param pDevExt Device extension.
1539 * @param pSession Session data.
1540 * @param pReqHdr The request header.
1541 */
1542static int supdrvIOCtlInnerUnrestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1543{
1544 /*
1545 * Validation macros
1546 */
1547#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1548 do { \
1549 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1550 { \
1551 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1552 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1553 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1554 } \
1555 } while (0)
1556
1557#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1558
1559#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1560 do { \
1561 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1562 { \
1563 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1564 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
1565 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1566 } \
1567 } while (0)
1568
1569#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1570 do { \
1571 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1572 { \
1573 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1574 (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1575 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1576 } \
1577 } while (0)
1578
1579#define REQ_CHECK_EXPR(Name, expr) \
1580 do { \
1581 if (RT_UNLIKELY(!(expr))) \
1582 { \
1583 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1584 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1585 } \
1586 } while (0)
1587
1588#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1589 do { \
1590 if (RT_UNLIKELY(!(expr))) \
1591 { \
1592 OSDBGPRINT( fmt ); \
1593 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1594 } \
1595 } while (0)
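/*
 * For reference, REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK) boils down to roughly
 * this (simplified, debug text trimmed):
 *
 *     if (RT_UNLIKELY(   pReqHdr->cbIn  != SUP_IOCTL_PAGE_UNLOCK_SIZE_IN
 *                     || pReqHdr->cbOut != SUP_IOCTL_PAGE_UNLOCK_SIZE_OUT))
 *     {
 *         OSDBGPRINT(("SUP_IOCTL_PAGE_UNLOCK: Invalid input/output sizes. ...\n"));
 *         return pReqHdr->rc = VERR_INVALID_PARAMETER;
 *     }
 */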
1596
1597 /*
1598 * The switch.
1599 */
1600 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1601 {
1602 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1603 {
1604 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1605 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1606 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1607 {
1608 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1609 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1610 return 0;
1611 }
1612
1613#if 0
1614 /*
1615 * Call out to the OS specific code and let it do permission checks on the
1616 * client process.
1617 */
1618 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1619 {
1620 pReq->u.Out.u32Cookie = 0xffffffff;
1621 pReq->u.Out.u32SessionCookie = 0xffffffff;
1622 pReq->u.Out.u32SessionVersion = 0xffffffff;
1623 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1624 pReq->u.Out.pSession = NULL;
1625 pReq->u.Out.cFunctions = 0;
1626 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1627 return 0;
1628 }
1629#endif
1630
1631 /*
1632 * Match the version.
1633 * The current logic is very simple, match the major interface version.
1634 */
1635 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1636 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1637 {
1638 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1639 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1640 pReq->u.Out.u32Cookie = 0xffffffff;
1641 pReq->u.Out.u32SessionCookie = 0xffffffff;
1642 pReq->u.Out.u32SessionVersion = 0xffffffff;
1643 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1644 pReq->u.Out.pSession = NULL;
1645 pReq->u.Out.cFunctions = 0;
1646 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1647 return 0;
1648 }
1649
1650 /*
1651 * Fill in return data and be gone.
1652 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1653 * u32SessionVersion <= u32ReqVersion!
1654 */
1655 /** @todo Somehow validate the client and negotiate a secure cookie... */
1656 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1657 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1658 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1659 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1660 pReq->u.Out.pSession = pSession;
1661 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1662 pReq->Hdr.rc = VINF_SUCCESS;
1663 return 0;
1664 }
1665
1666 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1667 {
1668 /* validate */
1669 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1670 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1671
1672 /* execute */
1673 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1674 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1675 pReq->Hdr.rc = VINF_SUCCESS;
1676 return 0;
1677 }
1678
1679 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1680 {
1681 /* validate */
1682 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1683 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1684 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1685 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1686 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1687
1688 /* execute */
1689 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1690 if (RT_FAILURE(pReq->Hdr.rc))
1691 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1692 return 0;
1693 }
1694
1695 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1696 {
1697 /* validate */
1698 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1699 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1700
1701 /* execute */
1702 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1703 return 0;
1704 }
1705
1706 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1707 {
1708 /* validate */
1709 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1710 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1711
1712 /* execute */
1713 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1714 if (RT_FAILURE(pReq->Hdr.rc))
1715 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1716 return 0;
1717 }
1718
1719 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1720 {
1721 /* validate */
1722 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1723 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1724
1725 /* execute */
1726 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1727 return 0;
1728 }
1729
1730 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1731 {
1732 /* validate */
1733 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1734 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1735 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything > 0);
1736 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything < 16*_1M);
1737 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1738 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithEverything);
1739 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1740 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
1741 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, supdrvIsLdrModuleNameValid(pReq->u.In.szName));
1742 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szFilename, sizeof(pReq->u.In.szFilename)));
1743
1744 /* execute */
1745 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1746 return 0;
1747 }
1748
1749 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1750 {
1751 /* validate */
1752 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1753 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= SUP_IOCTL_LDR_LOAD_SIZE_IN(32));
1754 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithEverything), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1755 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1756 || ( pReq->u.In.cSymbols <= 16384
1757 && pReq->u.In.offSymbols >= pReq->u.In.cbImageBits
1758 && pReq->u.In.offSymbols < pReq->u.In.cbImageWithEverything
1759 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithEverything),
1760 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSymbols,
1761 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithEverything));
1762 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1763 || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithEverything
1764 && pReq->u.In.offStrTab >= pReq->u.In.cbImageBits
1765 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything
1766 && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything),
1767 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offStrTab,
1768 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithEverything));
1769 REQ_CHECK_EXPR_FMT( pReq->u.In.cSegments >= 1
1770 && pReq->u.In.cSegments <= 128
1771 && pReq->u.In.cSegments <= pReq->u.In.cbImageBits / PAGE_SIZE
1772 && pReq->u.In.offSegments >= pReq->u.In.cbImageBits
1773 && pReq->u.In.offSegments < pReq->u.In.cbImageWithEverything
1774 && pReq->u.In.offSegments + pReq->u.In.cSegments * sizeof(SUPLDRSEG) <= pReq->u.In.cbImageWithEverything,
1775 ("SUP_IOCTL_LDR_LOAD: offSegments=%#lx cSegments=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSegments,
1776 (long)pReq->u.In.cSegments, (long)pReq->u.In.cbImageWithEverything));
1777
1778 if (pReq->u.In.cSymbols)
1779 {
1780 uint32_t i;
1781 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.abImage[pReq->u.In.offSymbols];
1782 for (i = 0; i < pReq->u.In.cSymbols; i++)
1783 {
1784 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithEverything,
1785 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithEverything));
1786 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1787 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbStrTab));
1788 REQ_CHECK_EXPR_FMT(RTStrEnd((char const *)&pReq->u.In.abImage[pReq->u.In.offStrTab + paSyms[i].offName],
1789 pReq->u.In.cbStrTab - paSyms[i].offName),
1790 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1791 }
1792 }
1793 {
1794 uint32_t i;
1795 uint32_t offPrevEnd = 0;
1796 PSUPLDRSEG paSegs = (PSUPLDRSEG)&pReq->u.In.abImage[pReq->u.In.offSegments];
1797 for (i = 0; i < pReq->u.In.cSegments; i++)
1798 {
1799 REQ_CHECK_EXPR_FMT(paSegs[i].off < pReq->u.In.cbImageBits && !(paSegs[i].off & PAGE_OFFSET_MASK),
1800 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)pReq->u.In.cbImageBits));
1801 REQ_CHECK_EXPR_FMT(paSegs[i].cb <= pReq->u.In.cbImageBits,
1802 ("SUP_IOCTL_LDR_LOAD: seg #%ld: cb %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].cb, (long)pReq->u.In.cbImageBits));
1803 REQ_CHECK_EXPR_FMT(paSegs[i].off + paSegs[i].cb <= pReq->u.In.cbImageBits,
1804 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx = %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb, (long)(paSegs[i].off + paSegs[i].cb), (long)pReq->u.In.cbImageBits));
1805 REQ_CHECK_EXPR_FMT(paSegs[i].fProt != 0,
1806 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb));
1807 REQ_CHECK_EXPR_FMT(paSegs[i].fUnused == 0, ("SUP_IOCTL_LDR_LOAD: seg #%ld: fUnused=1\n", (long)i));
1808 REQ_CHECK_EXPR_FMT(offPrevEnd == paSegs[i].off,
1809 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx offPrevEnd %#lx\n", (long)i, (long)paSegs[i].off, (long)offPrevEnd));
1810 offPrevEnd = paSegs[i].off + paSegs[i].cb;
1811 }
1812 REQ_CHECK_EXPR_FMT(offPrevEnd == pReq->u.In.cbImageBits,
1813 ("SUP_IOCTL_LDR_LOAD: offPrevEnd %#lx cbImageBits %#lx\n", (long)offPrevEnd, (long)pReq->u.In.cbImageBits));
1814 }
1815
1816 /* execute */
1817 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1818 return 0;
1819 }
1820
1821 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1822 {
1823 /* validate */
1824 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1825 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1826
1827 /* execute */
1828 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1829 return 0;
1830 }
1831
1832 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOCK_DOWN):
1833 {
1834 /* validate */
1835 REQ_CHECK_SIZES(SUP_IOCTL_LDR_LOCK_DOWN);
1836
1837 /* execute */
1838 pReqHdr->rc = supdrvIOCtl_LdrLockDown(pDevExt);
1839 return 0;
1840 }
1841
1842 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1843 {
1844 /* validate */
1845 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1846 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1847 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, RTStrEnd(pReq->u.In.szSymbol, sizeof(pReq->u.In.szSymbol)));
1848
1849 /* execute */
1850 pReq->Hdr.rc = supdrvIOCtl_LdrQuerySymbol(pDevExt, pSession, pReq);
1851 return 0;
1852 }
1853
1854 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_NO_SIZE()):
1855 {
1856 /* validate */
1857 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1858 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1859 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1860
1861 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1862 {
1863 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1864
1865 /* execute */
1866 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1867 {
1868 if (pReq->u.In.pVMR0 == NULL)
1869 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1870 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1871 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1872 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1873 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1874 else
1875 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1876 }
1877 else
1878 pReq->Hdr.rc = VERR_WRONG_ORDER;
1879 }
1880 else
1881 {
1882 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1883 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1884 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1885 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1886 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1887
1888 /* execute */
1889 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1890 {
1891 if (pReq->u.In.pVMR0 == NULL)
1892 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1893 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1894 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1895 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1896 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1897 else
1898 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1899 }
1900 else
1901 pReq->Hdr.rc = VERR_WRONG_ORDER;
1902 }
1903
1904 if ( RT_FAILURE(pReq->Hdr.rc)
1905 && pReq->Hdr.rc != VERR_INTERRUPTED
1906 && pReq->Hdr.rc != VERR_TIMEOUT)
1907 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1908 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1909 else
1910 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1911 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1912 return 0;
1913 }
1914
1915 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_BIG):
1916 {
1917 /* validate */
1918 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1919 PSUPVMMR0REQHDR pVMMReq;
1920 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1921 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1922
1923 pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1924 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR)),
1925 ("SUP_IOCTL_CALL_VMMR0_BIG: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR))));
1926 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0_BIG, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1927 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0_BIG, SUP_IOCTL_CALL_VMMR0_BIG_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_BIG_SIZE_OUT(pVMMReq->cbReq));
1928
1929 /* execute */
1930 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1931 {
1932 if (pReq->u.In.pVMR0 == NULL)
1933 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1934 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1935 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1936 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1937 else
1938 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1939 }
1940 else
1941 pReq->Hdr.rc = VERR_WRONG_ORDER;
1942
1943 if ( RT_FAILURE(pReq->Hdr.rc)
1944 && pReq->Hdr.rc != VERR_INTERRUPTED
1945 && pReq->Hdr.rc != VERR_TIMEOUT)
1946 Log(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1947 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1948 else
1949 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1950 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1951 return 0;
1952 }
1953
1954 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1955 {
1956 /* validate */
1957 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1958 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1959
1960 /* execute */
1961 pReq->Hdr.rc = VINF_SUCCESS;
1962 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1963 return 0;
1964 }
1965
1966 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1967 {
1968 /* validate */
1969 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1970 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1971 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1972
1973 /* execute */
1974 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1975 if (RT_FAILURE(pReq->Hdr.rc))
1976 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1977 return 0;
1978 }
1979
1980 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1981 {
1982 /* validate */
1983 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1984 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1985
1986 /* execute */
1987 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1988 return 0;
1989 }
1990
1991 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1992 {
1993 /* validate */
1994 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1995 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1996
1997 /* execute */
1998 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1999 if (RT_SUCCESS(pReq->Hdr.rc))
2000 pReq->u.Out.pGipR0 = pDevExt->pGip;
2001 return 0;
2002 }
2003
2004 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
2005 {
2006 /* validate */
2007 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
2008 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
2009
2010 /* execute */
2011 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
2012 return 0;
2013 }
2014
2015 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
2016 {
2017 /* validate */
2018 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
2019 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
2020 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
2021 || ( VALID_PTR(pReq->u.In.pVMR0)
2022 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
2023 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
2024
2025 /* execute */
2026 RTSpinlockAcquire(pDevExt->Spinlock);
2027 if (pSession->pSessionVM == pReq->u.In.pVMR0)
2028 {
2029 if (pSession->pFastIoCtrlVM == NULL)
2030 {
2031 pSession->pFastIoCtrlVM = pSession->pSessionVM;
2032 RTSpinlockRelease(pDevExt->Spinlock);
2033 pReq->Hdr.rc = VINF_SUCCESS;
2034 }
2035 else
2036 {
2037 RTSpinlockRelease(pDevExt->Spinlock);
2038 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pFastIoCtrlVM=%p! (pVMR0=%p)\n",
2039 pSession->pFastIoCtrlVM, pReq->u.In.pVMR0));
2040 pReq->Hdr.rc = VERR_ALREADY_EXISTS;
2041 }
2042 }
2043 else
2044 {
2045 RTSpinlockRelease(pDevExt->Spinlock);
2046 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pSessionVM=%p vs pVMR0=%p)\n",
2047 pSession->pSessionVM, pReq->u.In.pVMR0));
2048 pReq->Hdr.rc = pSession->pSessionVM ? VERR_ACCESS_DENIED : VERR_WRONG_ORDER;
2049 }
2050 return 0;
2051 }
2052
2053 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
2054 {
2055 /* validate */
2056 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
2057 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
2058 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
2059 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
2060 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
2061 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
2062 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
2063 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
2064 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
2065
2066 /* execute */
2067 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
2068 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
2069 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
2070 &pReq->u.Out.aPages[0]);
2071 if (RT_FAILURE(pReq->Hdr.rc))
2072 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2073 return 0;
2074 }
2075
2076 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
2077 {
2078 /* validate */
2079 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
2080 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
2081 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
2082 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
2083 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2084 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
2085
2086 /* execute */
2087 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
2088 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
2089 if (RT_FAILURE(pReq->Hdr.rc))
2090 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2091 return 0;
2092 }
2093
2094 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
2095 {
2096 /* validate */
2097 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
2098 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
2099 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
2100 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
2101 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
2102 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2103 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
2104
2105 /* execute */
2106 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
2107 return 0;
2108 }
2109
2110 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
2111 {
2112 /* validate */
2113 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
2114 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
2115
2116 /* execute */
2117 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
2118 return 0;
2119 }
2120
2121 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE_NO_SIZE()):
2122 {
2123 /* validate */
2124 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
2125 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2126 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2127
2128 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
2129 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
2130 else
2131 {
2132 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
2133 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
2134 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
2135 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
2136 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
2137 }
2138 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
2139
2140 /* execute */
2141 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
2142 return 0;
2143 }
2144
2145 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS_NO_SIZE()):
2146 {
2147 /* validate */
2148 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
2149 size_t cbStrTab;
2150 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
2151 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
2152 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
2153 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
2154 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
2155 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
2156 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
2157 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
2158 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
2159 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
2160 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
2161
2162 /* execute */
2163 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pReq);
2164 return 0;
2165 }
2166
2167 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP2):
2168 {
2169 /* validate */
2170 PSUPSEMOP2 pReq = (PSUPSEMOP2)pReqHdr;
2171 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP2, SUP_IOCTL_SEM_OP2_SIZE_IN, SUP_IOCTL_SEM_OP2_SIZE_OUT);
2172 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP2, pReq->u.In.uReserved == 0);
2173
2174 /* execute */
2175 switch (pReq->u.In.uType)
2176 {
2177 case SUP_SEM_TYPE_EVENT:
2178 {
2179 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2180 switch (pReq->u.In.uOp)
2181 {
2182 case SUPSEMOP2_WAIT_MS_REL:
2183 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.uArg.cRelMsTimeout);
2184 break;
2185 case SUPSEMOP2_WAIT_NS_ABS:
2186 pReq->Hdr.rc = SUPSemEventWaitNsAbsIntr(pSession, hEvent, pReq->u.In.uArg.uAbsNsTimeout);
2187 break;
2188 case SUPSEMOP2_WAIT_NS_REL:
2189 pReq->Hdr.rc = SUPSemEventWaitNsRelIntr(pSession, hEvent, pReq->u.In.uArg.cRelNsTimeout);
2190 break;
2191 case SUPSEMOP2_SIGNAL:
2192 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
2193 break;
2194 case SUPSEMOP2_CLOSE:
2195 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
2196 break;
2197 case SUPSEMOP2_RESET:
2198 default:
2199 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2200 break;
2201 }
2202 break;
2203 }
2204
2205 case SUP_SEM_TYPE_EVENT_MULTI:
2206 {
2207 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2208 switch (pReq->u.In.uOp)
2209 {
2210 case SUPSEMOP2_WAIT_MS_REL:
2211 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.uArg.cRelMsTimeout);
2212 break;
2213 case SUPSEMOP2_WAIT_NS_ABS:
2214 pReq->Hdr.rc = SUPSemEventMultiWaitNsAbsIntr(pSession, hEventMulti, pReq->u.In.uArg.uAbsNsTimeout);
2215 break;
2216 case SUPSEMOP2_WAIT_NS_REL:
2217 pReq->Hdr.rc = SUPSemEventMultiWaitNsRelIntr(pSession, hEventMulti, pReq->u.In.uArg.cRelNsTimeout);
2218 break;
2219 case SUPSEMOP2_SIGNAL:
2220 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
2221 break;
2222 case SUPSEMOP2_CLOSE:
2223 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
2224 break;
2225 case SUPSEMOP2_RESET:
2226 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
2227 break;
2228 default:
2229 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2230 break;
2231 }
2232 break;
2233 }
2234
2235 default:
2236 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2237 break;
2238 }
2239 return 0;
2240 }
2241
2242 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP3):
2243 {
2244 /* validate */
2245 PSUPSEMOP3 pReq = (PSUPSEMOP3)pReqHdr;
2246 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP3, SUP_IOCTL_SEM_OP3_SIZE_IN, SUP_IOCTL_SEM_OP3_SIZE_OUT);
2247 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, pReq->u.In.u32Reserved == 0 && pReq->u.In.u64Reserved == 0);
2248
2249 /* execute */
2250 switch (pReq->u.In.uType)
2251 {
2252 case SUP_SEM_TYPE_EVENT:
2253 {
2254 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2255 switch (pReq->u.In.uOp)
2256 {
2257 case SUPSEMOP3_CREATE:
2258 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2259 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
2260 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
2261 break;
2262 case SUPSEMOP3_GET_RESOLUTION:
2263 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2264 pReq->Hdr.rc = VINF_SUCCESS;
2265 pReq->Hdr.cbOut = sizeof(*pReq);
2266 pReq->u.Out.cNsResolution = SUPSemEventGetResolution(pSession);
2267 break;
2268 default:
2269 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2270 break;
2271 }
2272 break;
2273 }
2274
2275 case SUP_SEM_TYPE_EVENT_MULTI:
2276 {
2277 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2278 switch (pReq->u.In.uOp)
2279 {
2280 case SUPSEMOP3_CREATE:
2281 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2282 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
2283 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
2284 break;
2285 case SUPSEMOP3_GET_RESOLUTION:
2286 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2287 pReq->Hdr.rc = VINF_SUCCESS;
2288 pReq->u.Out.cNsResolution = SUPSemEventMultiGetResolution(pSession);
2289 break;
2290 default:
2291 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2292 break;
2293 }
2294 break;
2295 }
2296
2297 default:
2298 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2299 break;
2300 }
2301 return 0;
2302 }
2303
2304 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2305 {
2306 /* validate */
2307 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2308 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2309
2310 /* execute */
2311 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2312 if (RT_FAILURE(pReq->Hdr.rc))
2313 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2314 return 0;
2315 }
2316
2317 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_OPEN):
2318 {
2319 /* validate */
2320 PSUPTRACEROPEN pReq = (PSUPTRACEROPEN)pReqHdr;
2321 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_OPEN);
2322
2323 /* execute */
2324 pReq->Hdr.rc = supdrvIOCtl_TracerOpen(pDevExt, pSession, pReq->u.In.uCookie, pReq->u.In.uArg);
2325 return 0;
2326 }
2327
2328 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_CLOSE):
2329 {
2330 /* validate */
2331 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_CLOSE);
2332
2333 /* execute */
2334 pReqHdr->rc = supdrvIOCtl_TracerClose(pDevExt, pSession);
2335 return 0;
2336 }
2337
2338 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_IOCTL):
2339 {
2340 /* validate */
2341 PSUPTRACERIOCTL pReq = (PSUPTRACERIOCTL)pReqHdr;
2342 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_IOCTL);
2343
2344 /* execute */
2345 pReqHdr->rc = supdrvIOCtl_TracerIOCtl(pDevExt, pSession, pReq->u.In.uCmd, pReq->u.In.uArg, &pReq->u.Out.iRetVal);
2346 return 0;
2347 }
2348
2349 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_REG):
2350 {
2351 /* validate */
2352 PSUPTRACERUMODREG pReq = (PSUPTRACERUMODREG)pReqHdr;
2353 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_REG);
2354 if (!RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)))
2355 return VERR_INVALID_PARAMETER;
2356
2357 /* execute */
2358 pReqHdr->rc = supdrvIOCtl_TracerUmodRegister(pDevExt, pSession,
2359 pReq->u.In.R3PtrVtgHdr, pReq->u.In.uVtgHdrAddr,
2360 pReq->u.In.R3PtrStrTab, pReq->u.In.cbStrTab,
2361 pReq->u.In.szName, pReq->u.In.fFlags);
2362 return 0;
2363 }
2364
2365 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_DEREG):
2366 {
2367 /* validate */
2368 PSUPTRACERUMODDEREG pReq = (PSUPTRACERUMODDEREG)pReqHdr;
2369 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_DEREG);
2370
2371 /* execute */
2372 pReqHdr->rc = supdrvIOCtl_TracerUmodDeregister(pDevExt, pSession, pReq->u.In.pVtgHdr);
2373 return 0;
2374 }
2375
2376 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE):
2377 {
2378 /* validate */
2379 PSUPTRACERUMODFIREPROBE pReq = (PSUPTRACERUMODFIREPROBE)pReqHdr;
2380 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE);
2381
2382 supdrvIOCtl_TracerUmodProbeFire(pDevExt, pSession, &pReq->u.In);
2383 pReqHdr->rc = VINF_SUCCESS;
2384 return 0;
2385 }
2386
2387 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_MSR_PROBER):
2388 {
2389 /* validate */
2390 PSUPMSRPROBER pReq = (PSUPMSRPROBER)pReqHdr;
2391 REQ_CHECK_SIZES(SUP_IOCTL_MSR_PROBER);
2392 REQ_CHECK_EXPR(SUP_IOCTL_MSR_PROBER,
2393 pReq->u.In.enmOp > SUPMSRPROBEROP_INVALID && pReq->u.In.enmOp < SUPMSRPROBEROP_END);
2394
2395 pReqHdr->rc = supdrvIOCtl_MsrProber(pDevExt, pReq);
2396 return 0;
2397 }
2398
2399 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_RESUME_SUSPENDED_KBDS):
2400 {
2401 /* validate */
2402 REQ_CHECK_SIZES(SUP_IOCTL_RESUME_SUSPENDED_KBDS);
2403
2404 pReqHdr->rc = supdrvIOCtl_ResumeSuspendedKbds();
2405 return 0;
2406 }
2407
2408 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_DELTA_MEASURE):
2409 {
2410 /* validate */
2411 PSUPTSCDELTAMEASURE pReq = (PSUPTSCDELTAMEASURE)pReqHdr;
2412 REQ_CHECK_SIZES(SUP_IOCTL_TSC_DELTA_MEASURE);
2413
2414 pReqHdr->rc = supdrvIOCtl_TscDeltaMeasure(pDevExt, pSession, pReq);
2415 return 0;
2416 }
2417
2418 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_READ):
2419 {
2420 /* validate */
2421 PSUPTSCREAD pReq = (PSUPTSCREAD)pReqHdr;
2422 REQ_CHECK_SIZES(SUP_IOCTL_TSC_READ);
2423
2424 pReqHdr->rc = supdrvIOCtl_TscRead(pDevExt, pSession, pReq);
2425 return 0;
2426 }
2427
2428 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_SET_FLAGS):
2429 {
2430 /* validate */
2431 PSUPGIPSETFLAGS pReq = (PSUPGIPSETFLAGS)pReqHdr;
2432 REQ_CHECK_SIZES(SUP_IOCTL_GIP_SET_FLAGS);
2433
2434 pReqHdr->rc = supdrvIOCtl_GipSetFlags(pDevExt, pSession, pReq->u.In.fOrMask, pReq->u.In.fAndMask);
2435 return 0;
2436 }
2437
2438 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_UCODE_REV):
2439 {
2440 /* validate */
2441 PSUPUCODEREV pReq = (PSUPUCODEREV)pReqHdr;
2442 REQ_CHECK_SIZES(SUP_IOCTL_UCODE_REV);
2443
2444 /* execute */
2445 pReq->Hdr.rc = SUPR0QueryUcodeRev(pSession, &pReq->u.Out.MicrocodeRev);
2446 if (RT_FAILURE(pReq->Hdr.rc))
2447 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2448 return 0;
2449 }
2450
2451 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_HWVIRT_MSRS):
2452 {
2453 /* validate */
2454 PSUPGETHWVIRTMSRS pReq = (PSUPGETHWVIRTMSRS)pReqHdr;
2455 REQ_CHECK_SIZES(SUP_IOCTL_GET_HWVIRT_MSRS);
2456 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1 && !pReq->u.In.fReserved2,
2457 ("SUP_IOCTL_GET_HWVIRT_MSRS: fReserved0=%d fReserved1=%d fReserved2=%d\n", pReq->u.In.fReserved0,
2458 pReq->u.In.fReserved1, pReq->u.In.fReserved2));
2459
2460 /* execute */
2461 pReq->Hdr.rc = SUPR0GetHwvirtMsrs(&pReq->u.Out.HwvirtMsrs, 0 /* fCaps */, pReq->u.In.fForce);
2462 if (RT_FAILURE(pReq->Hdr.rc))
2463 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2464 return 0;
2465 }
2466
2467 default:
2468 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2469 break;
2470 }
2471 return VERR_GENERAL_FAILURE;
2472}
2473
2474
2475/**
2476 * I/O Control inner worker for the restricted operations.
2477 *
2478 * @returns IPRT status code.
2479 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2480 *
2481 * @param uIOCtl Function number.
2482 * @param pDevExt Device extension.
2483 * @param pSession Session data.
2484 * @param pReqHdr The request header.
2485 */
2486static int supdrvIOCtlInnerRestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
2487{
2488 /*
2489 * The switch.
2490 */
2491 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
2492 {
2493 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
2494 {
2495 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
2496 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
2497 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
2498 {
2499 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
2500 pReq->Hdr.rc = VERR_INVALID_MAGIC;
2501 return 0;
2502 }
2503
2504 /*
2505 * Match the version.
2506 * The current logic is very simple, match the major interface version.
2507 */
2508 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
2509 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
2510 {
2511 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2512 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
2513 pReq->u.Out.u32Cookie = 0xffffffff;
2514 pReq->u.Out.u32SessionCookie = 0xffffffff;
2515 pReq->u.Out.u32SessionVersion = 0xffffffff;
2516 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2517 pReq->u.Out.pSession = NULL;
2518 pReq->u.Out.cFunctions = 0;
2519 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2520 return 0;
2521 }
2522
2523 /*
2524 * Fill in return data and be gone.
2525 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
2526 * u32SessionVersion <= u32ReqVersion!
2527 */
2528 /** @todo Somehow validate the client and negotiate a secure cookie... */
2529 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
2530 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
2531 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
2532 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2533 pReq->u.Out.pSession = pSession;
2534 pReq->u.Out.cFunctions = 0;
2535 pReq->Hdr.rc = VINF_SUCCESS;
2536 return 0;
2537 }
2538
2539 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2540 {
2541 /* validate */
2542 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2543 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2544
2545 /* execute */
2546 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2547 if (RT_FAILURE(pReq->Hdr.rc))
2548 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2549 return 0;
2550 }
2551
2552 default:
2553 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2554 break;
2555 }
2556 return VERR_GENERAL_FAILURE;
2557}
2558
2559
2560/**
2561 * I/O Control worker.
2562 *
2563 * @returns IPRT status code.
2564 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2565 *
2566 * @param uIOCtl Function number.
2567 * @param pDevExt Device extension.
2568 * @param pSession Session data.
2569 * @param pReqHdr The request header.
2570 * @param cbReq The size of the request buffer.
2571 */
2572int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr, size_t cbReq)
2573{
2574 int rc;
2575 VBOXDRV_IOCTL_ENTRY(pSession, uIOCtl, pReqHdr);
2576
2577 /*
2578 * Validate the request.
2579 */
2580 if (RT_UNLIKELY(cbReq < sizeof(*pReqHdr)))
2581 {
2582 OSDBGPRINT(("vboxdrv: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
2583 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2584 return VERR_INVALID_PARAMETER;
2585 }
2586 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
2587 || pReqHdr->cbIn < sizeof(*pReqHdr)
2588 || pReqHdr->cbIn > cbReq
2589 || pReqHdr->cbOut < sizeof(*pReqHdr)
2590 || pReqHdr->cbOut > cbReq))
2591 {
2592 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
2593 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
2594 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2595 return VERR_INVALID_PARAMETER;
2596 }
2597 if (RT_UNLIKELY(!RT_VALID_PTR(pSession)))
2598 {
2599 OSDBGPRINT(("vboxdrv: Invalid pSession value %p (ioctl=%p)\n", pSession, (void *)uIOCtl));
2600 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2601 return VERR_INVALID_PARAMETER;
2602 }
2603 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
2604 {
2605 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
2606 {
2607 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
2608 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2609 return VERR_INVALID_PARAMETER;
2610 }
2611 }
2612 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
2613 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
2614 {
2615 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
2616 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2617 return VERR_INVALID_PARAMETER;
2618 }
2619
2620 /*
2621 * Hand it to an inner function to avoid lots of unnecessary return tracepoints.
2622 */
2623 if (pSession->fUnrestricted)
2624 rc = supdrvIOCtlInnerUnrestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2625 else
2626 rc = supdrvIOCtlInnerRestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2627
2628 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, pReqHdr->rc, rc);
2629 return rc;
2630}
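/*
 * Illustrative sketch (not compiled in): the minimum request header setup a
 * caller must provide for a non-cookie request to get past the validation in
 * supdrvIOCtl() above.  The cookie values come from a prior SUP_IOCTL_COOKIE
 * call; everything else is request specific.
 *
 *     SUPREQHDR Hdr;
 *     Hdr.u32Cookie        = u32DriverCookie;          // pDevExt->u32Cookie from SUP_IOCTL_COOKIE
 *     Hdr.u32SessionCookie = u32SessionCookie;         // pSession->u32Cookie from SUP_IOCTL_COOKIE
 *     Hdr.fFlags           = SUPREQHDR_FLAGS_MAGIC;    // plus any request specific flag bits
 *     Hdr.cbIn             = sizeof(Hdr);              // >= sizeof(SUPREQHDR) and <= cbReq
 *     Hdr.cbOut            = sizeof(Hdr);              // ditto
 *     Hdr.rc               = VERR_INTERNAL_ERROR;
 */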
2631
2632
2633/**
2634 * Inter-Driver Communication (IDC) worker.
2635 *
2636 * @returns VBox status code.
2637 * @retval VINF_SUCCESS on success.
2638 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2639 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
2640 *
2641 * @param uReq The request (function) code.
2642 * @param pDevExt Device extension.
2643 * @param pSession Session data.
2644 * @param pReqHdr The request header.
2645 */
2646int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
2647{
2648 /*
2649 * The OS specific code has already validated the pSession
2650 * pointer, and that the request size is greater than or equal to
2651 * the size of the header.
2652 *
2653 * So, just check that pSession is a kernel context session.
2654 */
2655 if (RT_UNLIKELY( pSession
2656 && pSession->R0Process != NIL_RTR0PROCESS))
2657 return VERR_INVALID_PARAMETER;
2658
2659/*
2660 * Validation macro.
2661 */
2662#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
2663 do { \
2664 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
2665 { \
2666 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
2667 (long)pReqHdr->cb, (long)(cbExpect))); \
2668 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
2669 } \
2670 } while (0)
2671
2672 switch (uReq)
2673 {
2674 case SUPDRV_IDC_REQ_CONNECT:
2675 {
2676 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
2677 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
2678
2679 /*
2680 * Validate the cookie and other input.
2681 */
2682 if (pReq->Hdr.pSession != NULL)
2683 {
2684 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Hdr.pSession=%p expected NULL!\n", pReq->Hdr.pSession));
2685 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2686 }
2687 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
2688 {
2689 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
2690 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
2691 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2692 }
2693 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
2694 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
2695 {
2696 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uReqVersion=%#x doesn't match!\n",
2697 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
2698 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2699 }
2700 if (pSession != NULL)
2701 {
2702 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pSession));
2703 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2704 }
2705
2706 /*
2707 * Match the version.
2708 * The current logic is very simple, match the major interface version.
2709 */
2710 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
2711 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
2712 {
2713 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2714 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
2715 pReq->u.Out.pSession = NULL;
2716 pReq->u.Out.uSessionVersion = 0xffffffff;
2717 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2718 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2719 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2720 return VINF_SUCCESS;
2721 }
2722
2723 pReq->u.Out.pSession = NULL;
2724 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
2725 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2726 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2727
2728 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, true /*fUnrestricted*/, &pSession);
2729 if (RT_FAILURE(pReq->Hdr.rc))
2730 {
2731 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
2732 return VINF_SUCCESS;
2733 }
2734
2735 pReq->u.Out.pSession = pSession;
2736 pReq->Hdr.pSession = pSession;
2737
2738 return VINF_SUCCESS;
2739 }
2740
2741 case SUPDRV_IDC_REQ_DISCONNECT:
2742 {
2743 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
2744
2745 supdrvSessionRelease(pSession);
2746 return pReqHdr->rc = VINF_SUCCESS;
2747 }
2748
2749 case SUPDRV_IDC_REQ_GET_SYMBOL:
2750 {
2751 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
2752 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
2753
2754 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
2755 return VINF_SUCCESS;
2756 }
2757
2758 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
2759 {
2760 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
2761 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
2762
2763 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
2764 return VINF_SUCCESS;
2765 }
2766
2767 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
2768 {
2769 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
2770 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
2771
2772 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
2773 return VINF_SUCCESS;
2774 }
2775
2776 default:
2777 Log(("Unknown IDC %#lx\n", (long)uReq));
2778 break;
2779 }
2780
2781#undef REQ_CHECK_IDC_SIZE
2782 return VERR_NOT_SUPPORTED;
2783}
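/*
 * Illustrative sketch (not compiled in): a kernel component connecting over
 * the IDC interface.  Real clients normally go through the IDC wrappers; this
 * merely shows which fields the CONNECT handler above expects.
 *
 *     SUPDRVIDCREQCONNECT Req;
 *     Req.Hdr.cb              = sizeof(Req);
 *     Req.Hdr.rc              = VERR_INTERNAL_ERROR;
 *     Req.Hdr.pSession        = NULL;                      // must be NULL for CONNECT
 *     Req.u.In.u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE;
 *     Req.u.In.uMinVersion    = SUPDRV_IDC_VERSION & UINT32_C(0xffff0000);
 *     Req.u.In.uReqVersion    = SUPDRV_IDC_VERSION;
 *     rc = supdrvIDC(SUPDRV_IDC_REQ_CONNECT, pDevExt, NULL, &Req.Hdr);
 *     // on success Req.u.Out.pSession is the kernel session for later requests
 */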
2784
2785
2786/**
2787 * Register an object for reference counting.
2788 * The object is registered with one reference in the specified session.
2789 *
2790 * @returns Unique identifier on success (pointer).
2791 * All future references must use this identifier.
2792 * @returns NULL on failure.
2793 * @param pSession The caller's session.
2794 * @param enmType The object type.
2795 * @param pfnDestructor The destructor function which will be called when the reference count reaches 0.
2796 * @param pvUser1 The first user argument.
2797 * @param pvUser2 The second user argument.
2798 */
2799SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
2800{
2801 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2802 PSUPDRVOBJ pObj;
2803 PSUPDRVUSAGE pUsage;
2804
2805 /*
2806 * Validate the input.
2807 */
2808 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
2809 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
2810 AssertPtrReturn(pfnDestructor, NULL);
2811
2812 /*
2813 * Allocate and initialize the object.
2814 */
2815 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
2816 if (!pObj)
2817 return NULL;
2818 pObj->u32Magic = SUPDRVOBJ_MAGIC;
2819 pObj->enmType = enmType;
2820 pObj->pNext = NULL;
2821 pObj->cUsage = 1;
2822 pObj->pfnDestructor = pfnDestructor;
2823 pObj->pvUser1 = pvUser1;
2824 pObj->pvUser2 = pvUser2;
2825 pObj->CreatorUid = pSession->Uid;
2826 pObj->CreatorGid = pSession->Gid;
2827 pObj->CreatorProcess= pSession->Process;
2828 supdrvOSObjInitCreator(pObj, pSession);
2829
2830 /*
2831 * Allocate the usage record.
2832 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
2833 */
2834 RTSpinlockAcquire(pDevExt->Spinlock);
2835
2836 pUsage = pDevExt->pUsageFree;
2837 if (pUsage)
2838 pDevExt->pUsageFree = pUsage->pNext;
2839 else
2840 {
2841 RTSpinlockRelease(pDevExt->Spinlock);
2842 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
2843 if (!pUsage)
2844 {
2845 RTMemFree(pObj);
2846 return NULL;
2847 }
2848 RTSpinlockAcquire(pDevExt->Spinlock);
2849 }
2850
2851 /*
2852 * Insert the object and create the session usage record.
2853 */
2854 /* The object. */
2855 pObj->pNext = pDevExt->pObjs;
2856 pDevExt->pObjs = pObj;
2857
2858 /* The session record. */
2859 pUsage->cUsage = 1;
2860 pUsage->pObj = pObj;
2861 pUsage->pNext = pSession->pUsage;
2862 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
2863 pSession->pUsage = pUsage;
2864
2865 RTSpinlockRelease(pDevExt->Spinlock);
2866
2867 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser2=%p)\n", pObj, pvUser1, pvUser2));
2868 return pObj;
2869}
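/*
 * Illustrative sketch (not compiled in): typical life cycle of a shared
 * object.  The destructor name, the pMyData pointer and the object type are
 * made-up placeholders; the destructor prototype follows PFNSUPDRVDESTRUCTOR.
 *
 *     static DECLCALLBACK(void) myObjDestructor(void *pvObj, void *pvUser1, void *pvUser2)
 *     {
 *         RTMemFree(pvUser1);   // pvUser1 = pMyData in this sketch
 *     }
 *     ...
 *     void *pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, myObjDestructor, pMyData, NULL);
 *     if (pvObj)
 *     {
 *         ... other sessions gain access via SUPR0ObjAddRef(pvObj, pOtherSession) ...
 *         SUPR0ObjRelease(pvObj, pSession);   // drops the creator's reference
 *     }
 */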
2870
2871
2872/**
2873 * Increment the reference counter for the object associating the reference
2874 * with the specified session.
2875 *
2876 * @returns IPRT status code.
2877 * @param pvObj The identifier returned by SUPR0ObjRegister().
2878 * @param pSession The session which is referencing the object.
2879 *
2880 * @remarks The caller should not own any spinlocks and must carefully protect
2881 * itself against potential race with the destructor so freed memory
2882 * itself against a potential race with the destructor so freed memory
2883 */
2884SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
2885{
2886 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
2887}
2888
2889
2890/**
2891 * Increment the reference counter for the object associating the reference
2892 * with the specified session.
2893 *
2894 * @returns IPRT status code.
2895 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
2896 * couldn't be allocated. (If you see this you're not doing the right
2897 * thing and it won't ever work reliably.)
2898 *
2899 * @param pvObj The identifier returned by SUPR0ObjRegister().
2900 * @param pSession The session which is referencing the object.
2901 * @param fNoBlocking Set if it's not OK to block. Never try to make the
2902 * first reference to an object in a session with this
2903 * argument set.
2904 *
2905 * @remarks The caller should not own any spinlocks and must carefully protect
2906 * itself against potential race with the destructor so freed memory
2907 * isn't accessed here.
2908 */
2909SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
2910{
2911 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2912 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2913 int rc = VINF_SUCCESS;
2914 PSUPDRVUSAGE pUsagePre;
2915 PSUPDRVUSAGE pUsage;
2916
2917 /*
2918 * Validate the input.
2919 * Be ready for the destruction race (someone might be stuck in the
2920     * destructor waiting on a lock we own).
2921 */
2922 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2923 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
2924 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
2925 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
2926 VERR_INVALID_PARAMETER);
2927
2928 RTSpinlockAcquire(pDevExt->Spinlock);
2929
2930 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2931 {
2932 RTSpinlockRelease(pDevExt->Spinlock);
2933
2934 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2935 return VERR_WRONG_ORDER;
2936 }
2937
2938 /*
2939 * Preallocate the usage record if we can.
2940 */
2941 pUsagePre = pDevExt->pUsageFree;
2942 if (pUsagePre)
2943 pDevExt->pUsageFree = pUsagePre->pNext;
2944 else if (!fNoBlocking)
2945 {
2946 RTSpinlockRelease(pDevExt->Spinlock);
2947 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2948 if (!pUsagePre)
2949 return VERR_NO_MEMORY;
2950
2951 RTSpinlockAcquire(pDevExt->Spinlock);
2952 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2953 {
2954 RTSpinlockRelease(pDevExt->Spinlock);
2955
2956 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2957 return VERR_WRONG_ORDER;
2958 }
2959 }
2960
2961 /*
2962 * Reference the object.
2963 */
2964 pObj->cUsage++;
2965
2966 /*
2967 * Look for the session record.
2968 */
2969 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
2970 {
2971 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
2972 if (pUsage->pObj == pObj)
2973 break;
2974 }
2975 if (pUsage)
2976 pUsage->cUsage++;
2977 else if (pUsagePre)
2978 {
2979 /* create a new session record. */
2980 pUsagePre->cUsage = 1;
2981 pUsagePre->pObj = pObj;
2982 pUsagePre->pNext = pSession->pUsage;
2983 pSession->pUsage = pUsagePre;
2984 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
2985
2986 pUsagePre = NULL;
2987 }
2988 else
2989 {
2990 pObj->cUsage--;
2991 rc = VERR_TRY_AGAIN;
2992 }
2993
2994 /*
2995     * Put any unused usage record into the free list.
2996 */
2997 if (pUsagePre)
2998 {
2999 pUsagePre->pNext = pDevExt->pUsageFree;
3000 pDevExt->pUsageFree = pUsagePre;
3001 }
3002
3003 RTSpinlockRelease(pDevExt->Spinlock);
3004
3005 return rc;
3006}
3007
3008
3009/**
3010 * Decrement / destroy a reference counter record for an object.
3011 *
3012 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
3013 *
3014 * @returns IPRT status code.
3015 * @retval VINF_SUCCESS if not destroyed.
3016 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
3017 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
3018 *          strict builds.
3019 *
3020 * @param pvObj The identifier returned by SUPR0ObjRegister().
3021 * @param pSession The session which is referencing the object.
3022 */
3023SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
3024{
3025 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3026 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3027 int rc = VERR_INVALID_PARAMETER;
3028 PSUPDRVUSAGE pUsage;
3029 PSUPDRVUSAGE pUsagePrev;
3030
3031 /*
3032 * Validate the input.
3033 */
3034 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3035    AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3036 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3037 VERR_INVALID_PARAMETER);
3038
3039 /*
3040 * Acquire the spinlock and look for the usage record.
3041 */
3042 RTSpinlockAcquire(pDevExt->Spinlock);
3043
3044 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
3045 pUsage;
3046 pUsagePrev = pUsage, pUsage = pUsage->pNext)
3047 {
3048 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3049 if (pUsage->pObj == pObj)
3050 {
3051 rc = VINF_SUCCESS;
3052 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
3053 if (pUsage->cUsage > 1)
3054 {
3055 pObj->cUsage--;
3056 pUsage->cUsage--;
3057 }
3058 else
3059 {
3060 /*
3061 * Free the session record.
3062 */
3063 if (pUsagePrev)
3064 pUsagePrev->pNext = pUsage->pNext;
3065 else
3066 pSession->pUsage = pUsage->pNext;
3067 pUsage->pNext = pDevExt->pUsageFree;
3068 pDevExt->pUsageFree = pUsage;
3069
3070 /* What about the object? */
3071 if (pObj->cUsage > 1)
3072 pObj->cUsage--;
3073 else
3074 {
3075 /*
3076 * Object is to be destroyed, unlink it.
3077 */
3078 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
3079 rc = VINF_OBJECT_DESTROYED;
3080 if (pDevExt->pObjs == pObj)
3081 pDevExt->pObjs = pObj->pNext;
3082 else
3083 {
3084 PSUPDRVOBJ pObjPrev;
3085 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
3086 if (pObjPrev->pNext == pObj)
3087 {
3088 pObjPrev->pNext = pObj->pNext;
3089 break;
3090 }
3091 Assert(pObjPrev);
3092 }
3093 }
3094 }
3095 break;
3096 }
3097 }
3098
3099 RTSpinlockRelease(pDevExt->Spinlock);
3100
3101 /*
3102 * Call the destructor and free the object if required.
3103 */
3104 if (rc == VINF_OBJECT_DESTROYED)
3105 {
3106 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
3107 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
3108 if (pObj->pfnDestructor)
3109 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
3110 RTMemFree(pObj);
3111 }
3112
3113 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
3114 return rc;
3115}
3116
3117
3118/**
3119 * Verifies that the current process can access the specified object.
3120 *
3121 * @returns The following IPRT status code:
3122 * @retval VINF_SUCCESS if access was granted.
3123 * @retval VERR_PERMISSION_DENIED if denied access.
3124 * @retval VERR_INVALID_PARAMETER if invalid parameter.
3125 *
3126 * @param pvObj The identifier returned by SUPR0ObjRegister().
3127 * @param pSession The session which wishes to access the object.
3128 * @param pszObjName Object string name. This is optional and depends on the object type.
3129 *
3130 * @remark The caller is responsible for making sure the object isn't removed while
3131 * we're inside this function. If uncertain about this, just call AddRef before calling us.
3132 */
3133SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
3134{
3135 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3136 int rc;
3137
3138 /*
3139 * Validate the input.
3140 */
3141 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3142 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3143                    ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3144 VERR_INVALID_PARAMETER);
3145
3146 /*
3147 * Check access. (returns true if a decision has been made.)
3148 */
3149 rc = VERR_INTERNAL_ERROR;
3150 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
3151 return rc;
3152
3153 /*
3154 * Default policy is to allow the user to access his own
3155 * stuff but nothing else.
3156 */
3157 if (pObj->CreatorUid == pSession->Uid)
3158 return VINF_SUCCESS;
3159 return VERR_PERMISSION_DENIED;
3160}
3161
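/* Illustrative sketch (not part of the driver): a component that registered a named
 * object could gate access from another session like this.  pvObj, pOtherSession and
 * the name "MyNamedObject" are hypothetical; per the remark above, the caller keeps a
 * reference so the object cannot vanish during the check.
 *
 * @code
 *  int rc = SUPR0ObjVerifyAccess(pvObj, pOtherSession, "MyNamedObject");
 *  if (RT_SUCCESS(rc))
 *  {
 *      // same UID as the creator (or the OS backend granted access) - safe to hand out the object
 *  }
 * @endcode
 */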
3162
3163/**
3164 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionVM member.
3165 *
3166 * @returns The associated VM pointer.
3167 * @param pSession The session of the current thread.
3168 */
3169SUPR0DECL(PVM) SUPR0GetSessionVM(PSUPDRVSESSION pSession)
3170{
3171 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3172 return pSession->pSessionVM;
3173}
3174
3175
3176/**
3177 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionGVM member.
3178 *
3179 * @returns The associated GVM pointer.
3180 * @param pSession The session of the current thread.
3181 */
3182SUPR0DECL(PGVM) SUPR0GetSessionGVM(PSUPDRVSESSION pSession)
3183{
3184 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3185 return pSession->pSessionGVM;
3186}
3187
3188
3189/**
3190 * API for the VMMR0 module to set the SUPDRVSESSION::pSessionVM and pSessionGVM members.
3191 *
3192 * This will fail if there is already a VM associated with the session and pVM
3193 * isn't NULL.
3194 *
3195 * @retval VINF_SUCCESS
3196 * @retval VERR_ALREADY_EXISTS if there already is a VM associated with the
3197 * session.
3198 * @retval  VERR_INVALID_PARAMETER if only one of the parameters is NULL or if
3199 * the session is invalid.
3200 *
3201 * @param pSession The session of the current thread.
3202 * @param pGVM The GVM to associate with the session. Pass NULL to
3203 *                  disassociate.
3204 * @param pVM The VM to associate with the session. Pass NULL to
3205 *                  disassociate.
3206 */
3207SUPR0DECL(int) SUPR0SetSessionVM(PSUPDRVSESSION pSession, PGVM pGVM, PVM pVM)
3208{
3209 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3210 AssertReturn((pGVM != NULL) == (pVM != NULL), VERR_INVALID_PARAMETER);
3211
3212 RTSpinlockAcquire(pSession->pDevExt->Spinlock);
3213 if (pGVM)
3214 {
3215 if (!pSession->pSessionGVM)
3216 {
3217 pSession->pSessionGVM = pGVM;
3218 pSession->pSessionVM = pVM;
3219 pSession->pFastIoCtrlVM = NULL;
3220 }
3221 else
3222 {
3223 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3224            SUPR0Printf("SUPR0SetSessionVM: Unable to associate GVM/VM %p/%p with session %p as it has %p/%p already!\n",
3225 pGVM, pVM, pSession, pSession->pSessionGVM, pSession->pSessionVM);
3226 return VERR_ALREADY_EXISTS;
3227 }
3228 }
3229 else
3230 {
3231 pSession->pSessionGVM = NULL;
3232 pSession->pSessionVM = NULL;
3233 pSession->pFastIoCtrlVM = NULL;
3234 }
3235 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3236 return VINF_SUCCESS;
3237}
3238
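/* Illustrative sketch (hypothetical caller, not part of this file): VMMR0-style code
 * would associate a freshly created GVM/VM pair with the session and tear the
 * association down again before destroying the VM.  pGVM/pVM are assumed to be a
 * valid, matching pair for this session.
 *
 * @code
 *  int rc = SUPR0SetSessionVM(pSession, pGVM, pVM);
 *  if (RT_SUCCESS(rc))
 *  {
 *      Assert(SUPR0GetSessionGVM(pSession) == pGVM);
 *      // ... run the VM ...
 *      rc = SUPR0SetSessionVM(pSession, NULL, NULL);    // disassociate on teardown
 *  }
 * @endcode
 */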
3239
3240/** @copydoc RTLogGetDefaultInstanceEx
3241 * @remarks To allow overriding RTLogGetDefaultInstanceEx locally. */
3242SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3243{
3244 return RTLogGetDefaultInstanceEx(fFlagsAndGroup);
3245}
3246
3247
3248/** @copydoc RTLogRelGetDefaultInstanceEx
3249 * @remarks To allow overriding RTLogRelGetDefaultInstanceEx locally. */
3250SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogRelInstanceEx(uint32_t fFlagsAndGroup)
3251{
3252 return RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
3253}
3254
3255
3256/**
3257 * Lock pages.
3258 *
3259 * @returns IPRT status code.
3260 * @param pSession Session to which the locked memory should be associated.
3261 * @param pvR3 Start of the memory range to lock.
3262 * This must be page aligned.
3263 * @param cPages Number of pages to lock.
3264 * @param paPages Where to put the physical addresses of locked memory.
3265 */
3266SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
3267{
3268 int rc;
3269 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3270 const size_t cb = (size_t)cPages << PAGE_SHIFT;
3271 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
3272
3273 /*
3274 * Verify input.
3275 */
3276 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3277 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
3278 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
3279 || !pvR3)
3280 {
3281 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
3282 return VERR_INVALID_PARAMETER;
3283 }
3284
3285 /*
3286 * Let IPRT do the job.
3287 */
3288 Mem.eType = MEMREF_TYPE_LOCKED;
3289 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3290 if (RT_SUCCESS(rc))
3291 {
3292 uint32_t iPage = cPages;
3293 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
3294 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
3295
3296 while (iPage-- > 0)
3297 {
3298 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3299 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
3300 {
3301 AssertMsgFailed(("iPage=%d\n", iPage));
3302 rc = VERR_INTERNAL_ERROR;
3303 break;
3304 }
3305 }
3306 if (RT_SUCCESS(rc))
3307 rc = supdrvMemAdd(&Mem, pSession);
3308 if (RT_FAILURE(rc))
3309 {
3310 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
3311 AssertRC(rc2);
3312 }
3313 }
3314
3315 return rc;
3316}
3317
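/* Illustrative sketch (not part of the driver): lock a page-aligned, two page ring-3
 * buffer, inspect the physical page addresses, and unlock it again.  pvR3 is assumed
 * to be a valid, page-aligned user mapping supplied by the caller.
 *
 * @code
 *  RTHCPHYS aPhysPages[2];
 *  int rc = SUPR0LockMem(pSession, pvR3, RT_ELEMENTS(aPhysPages), &aPhysPages[0]);
 *  if (RT_SUCCESS(rc))
 *  {
 *      Log(("page 0 at %RHp, page 1 at %RHp\n", aPhysPages[0], aPhysPages[1]));
 *      rc = SUPR0UnlockMem(pSession, pvR3);
 *      AssertRC(rc);
 *  }
 * @endcode
 */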
3318
3319/**
3320 * Unlocks the memory pointed to by pv.
3321 *
3322 * @returns IPRT status code.
3323 * @param pSession Session to which the memory was locked.
3324 * @param pvR3 Memory to unlock.
3325 */
3326SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3327{
3328 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3329 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3330 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
3331}
3332
3333
3334/**
3335 * Allocates a chunk of page aligned memory with contiguous and fixed physical
3336 * backing.
3337 *
3338 * @returns IPRT status code.
3339 * @param pSession Session data.
3340 * @param cPages Number of pages to allocate.
3341 * @param   ppvR0       Where to put the address of the Ring-0 mapping of the allocated memory.
3342 * @param   ppvR3       Where to put the address of the Ring-3 mapping of the allocated memory.
3343 * @param pHCPhys Where to put the physical address of allocated memory.
3344 */
3345SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
3346{
3347 int rc;
3348 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3349 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
3350
3351 /*
3352 * Validate input.
3353 */
3354 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3355 if (!ppvR3 || !ppvR0 || !pHCPhys)
3356 {
3357 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
3358 pSession, ppvR0, ppvR3, pHCPhys));
3359 return VERR_INVALID_PARAMETER;
3360
3361 }
3362 if (cPages < 1 || cPages >= 256)
3363 {
3364 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3365 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3366 }
3367
3368 /*
3369 * Let IPRT do the job.
3370 */
3371 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
3372 if (RT_SUCCESS(rc))
3373 {
3374 int rc2;
3375 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3376 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3377 if (RT_SUCCESS(rc))
3378 {
3379 Mem.eType = MEMREF_TYPE_CONT;
3380 rc = supdrvMemAdd(&Mem, pSession);
3381 if (!rc)
3382 {
3383 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3384 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3385 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
3386 return 0;
3387 }
3388
3389 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3390 AssertRC(rc2);
3391 }
3392 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3393 AssertRC(rc2);
3394 }
3395
3396 return rc;
3397}
3398
3399
3400/**
3401 * Frees memory allocated using SUPR0ContAlloc().
3402 *
3403 * @returns IPRT status code.
3404 * @param pSession The session to which the memory was allocated.
3405 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3406 */
3407SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3408{
3409 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3410 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3411 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
3412}
3413
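/* Illustrative sketch (not part of the driver): allocating a small physically
 * contiguous buffer visible to both ring-0 and the calling ring-3 process, then
 * freeing it by one of its addresses.  The page count must stay below 256, as
 * validated in SUPR0ContAlloc() above.
 *
 * @code
 *  RTR0PTR pvR0; RTR3PTR pvR3; RTHCPHYS HCPhys;
 *  int rc = SUPR0ContAlloc(pSession, 4, &pvR0, &pvR3, &HCPhys);   // 4 pages
 *  if (RT_SUCCESS(rc))
 *  {
 *      memset((void *)pvR0, 0, 4 * PAGE_SIZE);                    // ring-0 view of the buffer
 *      // HCPhys is the physical address of the first page; the rest follow contiguously.
 *      rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pvR0);           // the ring-3 address would work too
 *      AssertRC(rc);
 *  }
 * @endcode
 */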
3414
3415/**
3416 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
3417 *
3418 * The memory isn't zeroed.
3419 *
3420 * @returns IPRT status code.
3421 * @param pSession Session data.
3422 * @param cPages Number of pages to allocate.
3423 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
3424 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
3425 * @param paPages Where to put the physical addresses of allocated memory.
3426 */
3427SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
3428{
3429 unsigned iPage;
3430 int rc;
3431 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3432 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
3433
3434 /*
3435 * Validate input.
3436 */
3437 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3438 if (!ppvR3 || !ppvR0 || !paPages)
3439 {
3440 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
3441 pSession, ppvR3, ppvR0, paPages));
3442 return VERR_INVALID_PARAMETER;
3443
3444 }
3445 if (cPages < 1 || cPages >= 256)
3446 {
3447 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3448 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3449 }
3450
3451 /*
3452 * Let IPRT do the work.
3453 */
3454 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
3455 if (RT_SUCCESS(rc))
3456 {
3457 int rc2;
3458 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3459 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3460 if (RT_SUCCESS(rc))
3461 {
3462 Mem.eType = MEMREF_TYPE_LOW;
3463 rc = supdrvMemAdd(&Mem, pSession);
3464 if (!rc)
3465 {
3466 for (iPage = 0; iPage < cPages; iPage++)
3467 {
3468 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3469                    AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
3470 }
3471 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3472 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3473 return 0;
3474 }
3475
3476 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3477 AssertRC(rc2);
3478 }
3479
3480 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3481 AssertRC(rc2);
3482 }
3483
3484 return rc;
3485}
3486
3487
3488/**
3489 * Frees memory allocated using SUPR0LowAlloc().
3490 *
3491 * @returns IPRT status code.
3492 * @param pSession The session to which the memory was allocated.
3493 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3494 */
3495SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3496{
3497 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3498 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3499 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
3500}
3501
3502
3503
3504/**
3505 * Allocates a chunk of memory with both R0 and R3 mappings.
3506 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
3507 *
3508 * @returns IPRT status code.
3509 * @param   pSession    The session to associate the allocation with.
3510 * @param cb Number of bytes to allocate.
3511 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3512 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3513 */
3514SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
3515{
3516 int rc;
3517 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3518 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
3519
3520 /*
3521 * Validate input.
3522 */
3523 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3524 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
3525 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3526 if (cb < 1 || cb >= _4M)
3527 {
3528 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
3529 return VERR_INVALID_PARAMETER;
3530 }
3531
3532 /*
3533 * Let IPRT do the work.
3534 */
3535 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
3536 if (RT_SUCCESS(rc))
3537 {
3538 int rc2;
3539 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3540 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3541 if (RT_SUCCESS(rc))
3542 {
3543 Mem.eType = MEMREF_TYPE_MEM;
3544 rc = supdrvMemAdd(&Mem, pSession);
3545 if (!rc)
3546 {
3547 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3548 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3549 return VINF_SUCCESS;
3550 }
3551
3552 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3553 AssertRC(rc2);
3554 }
3555
3556 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3557 AssertRC(rc2);
3558 }
3559
3560 return rc;
3561}
3562
3563
3564/**
3565 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
3566 *
3567 * @returns IPRT status code.
3568 * @param pSession The session to which the memory was allocated.
3569 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3570 * @param paPages Where to store the physical addresses.
3571 */
3572SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
3573{
3574 PSUPDRVBUNDLE pBundle;
3575 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
3576
3577 /*
3578 * Validate input.
3579 */
3580 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3581 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
3582 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
3583
3584 /*
3585 * Search for the address.
3586 */
3587 RTSpinlockAcquire(pSession->Spinlock);
3588 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3589 {
3590 if (pBundle->cUsed > 0)
3591 {
3592 unsigned i;
3593 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3594 {
3595 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
3596 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3597 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3598 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3599 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
3600 )
3601 )
3602 {
3603 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
3604 size_t iPage;
3605 for (iPage = 0; iPage < cPages; iPage++)
3606 {
3607 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
3608 paPages[iPage].uReserved = 0;
3609 }
3610 RTSpinlockRelease(pSession->Spinlock);
3611 return VINF_SUCCESS;
3612 }
3613 }
3614 }
3615 }
3616 RTSpinlockRelease(pSession->Spinlock);
3617 Log(("Failed to find %p!!!\n", (void *)uPtr));
3618 return VERR_INVALID_PARAMETER;
3619}
3620
3621
3622/**
3623 * Free memory allocated by SUPR0MemAlloc().
3624 *
3625 * @returns IPRT status code.
3626 * @param pSession The session owning the allocation.
3627 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3628 */
3629SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3630{
3631 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3632 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3633 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
3634}
3635
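/* Illustrative sketch (not part of the driver): a dual-mapped scratch buffer whose
 * page addresses are queried afterwards.  The SUPPAGE array size matches the two
 * pages requested; all names are local to the example.
 *
 * @code
 *  RTR0PTR pvR0; RTR3PTR pvR3;
 *  int rc = SUPR0MemAlloc(pSession, 2 * PAGE_SIZE, &pvR0, &pvR3);
 *  if (RT_SUCCESS(rc))
 *  {
 *      SUPPAGE aPages[2];
 *      rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
 *      if (RT_SUCCESS(rc))
 *          Log(("first page at %RHp\n", aPages[0].Phys));
 *      SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
 *  }
 * @endcode
 */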
3636
3637/**
3638 * Allocates a chunk of memory with a kernel and/or a user mode mapping.
3639 *
3640 * The memory is fixed and it's possible to query the physical addresses using
3641 * SUPR0MemGetPhys().
3642 *
3643 * @returns IPRT status code.
3644 * @param   pSession    The session to associate the allocation with.
3645 * @param cPages The number of pages to allocate.
3646 * @param fFlags Flags, reserved for the future. Must be zero.
3647 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3648 * NULL if no ring-3 mapping.
3649 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3650 * NULL if no ring-0 mapping.
3651 * @param paPages Where to store the addresses of the pages. Optional.
3652 */
3653SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
3654{
3655 int rc;
3656 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3657    LogFlow(("SUPR0PageAllocEx: pSession=%p cPages=%d ppvR3=%p\n", pSession, cPages, ppvR3));
3658
3659 /*
3660 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3661 */
3662 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3663 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
3664 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3665 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
3666 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3667 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
3668 {
3669        Log(("SUPR0PageAllocEx: Illegal request cPages=%u; must be greater than 0 and smaller than %uMB (VBOX_MAX_ALLOC_PAGE_COUNT pages).\n", cPages, VBOX_MAX_ALLOC_PAGE_COUNT * (_1M / _4K)));
3670 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3671 }
3672
3673 /*
3674 * Let IPRT do the work.
3675 */
3676 if (ppvR0)
3677 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
3678 else
3679 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
3680 if (RT_SUCCESS(rc))
3681 {
3682 int rc2;
3683 if (ppvR3)
3684 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3685 else
3686 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
3687 if (RT_SUCCESS(rc))
3688 {
3689 Mem.eType = MEMREF_TYPE_PAGE;
3690 rc = supdrvMemAdd(&Mem, pSession);
3691 if (!rc)
3692 {
3693 if (ppvR3)
3694 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3695 if (ppvR0)
3696 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3697 if (paPages)
3698 {
3699 uint32_t iPage = cPages;
3700 while (iPage-- > 0)
3701 {
3702 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
3703 Assert(paPages[iPage] != NIL_RTHCPHYS);
3704 }
3705 }
3706 return VINF_SUCCESS;
3707 }
3708
3709 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3710 AssertRC(rc2);
3711 }
3712
3713 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3714 AssertRC(rc2);
3715 }
3716 return rc;
3717}
3718
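/* Illustrative sketch (not part of the driver): allocate three pages with both
 * mappings and collect the physical addresses, then free the allocation via the
 * ring-3 address as SUPR0PageFree() below expects.
 *
 * @code
 *  RTR3PTR pvR3; RTR0PTR pvR0;
 *  RTHCPHYS aPhys[3];
 *  int rc = SUPR0PageAllocEx(pSession, 3, 0, &pvR3, &pvR0, &aPhys[0]);   // fFlags must be zero
 *  if (RT_SUCCESS(rc))
 *  {
 *      // aPhys[] now holds one physical address per page (not necessarily contiguous).
 *      rc = SUPR0PageFree(pSession, pvR3);
 *      AssertRC(rc);
 *  }
 * @endcode
 */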
3719
3720/**
3721 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
3722 * space.
3723 *
3724 * @returns IPRT status code.
3725 * @param   pSession    The session to associate the allocation with.
3726 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3727 * @param offSub Where to start mapping. Must be page aligned.
3728 * @param cbSub How much to map. Must be page aligned.
3729 * @param fFlags Flags, MBZ.
3730 * @param ppvR0 Where to return the address of the ring-0 mapping on
3731 * success.
3732 */
3733SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
3734 uint32_t fFlags, PRTR0PTR ppvR0)
3735{
3736 int rc;
3737 PSUPDRVBUNDLE pBundle;
3738 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
3739 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
3740
3741 /*
3742     * Validate input.
3743 */
3744 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3745 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3746 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3747 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3748 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3749 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3750
3751 /*
3752 * Find the memory object.
3753 */
3754 RTSpinlockAcquire(pSession->Spinlock);
3755 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3756 {
3757 if (pBundle->cUsed > 0)
3758 {
3759 unsigned i;
3760 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3761 {
3762 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3763 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3764 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3765 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
3766 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
3767 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3768 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
3769 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
3770 {
3771 hMemObj = pBundle->aMem[i].MemObj;
3772 break;
3773 }
3774 }
3775 }
3776 }
3777 RTSpinlockRelease(pSession->Spinlock);
3778
3779 rc = VERR_INVALID_PARAMETER;
3780 if (hMemObj != NIL_RTR0MEMOBJ)
3781 {
3782 /*
3783 * Do some further input validations before calling IPRT.
3784 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
3785 */
3786 size_t cbMemObj = RTR0MemObjSize(hMemObj);
3787 if ( offSub < cbMemObj
3788 && cbSub <= cbMemObj
3789 && offSub + cbSub <= cbMemObj)
3790 {
3791 RTR0MEMOBJ hMapObj;
3792 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
3793 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
3794 if (RT_SUCCESS(rc))
3795 *ppvR0 = RTR0MemObjAddress(hMapObj);
3796 }
3797 else
3798 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3799
3800 }
3801 return rc;
3802}
3803
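/* Illustrative sketch (not part of the driver): map the second page of a
 * SUPR0PageAllocEx() allocation (ring-3 address pvR3, assumed valid) into kernel
 * space.  Offset and size must be page aligned, as validated above.
 *
 * @code
 *  RTR0PTR pvR0Sub;
 *  int rc = SUPR0PageMapKernel(pSession, pvR3, PAGE_SIZE, PAGE_SIZE, 0, &pvR0Sub);   // offSub=cbSub=PAGE_SIZE, fFlags=0
 *  if (RT_SUCCESS(rc))
 *  {
 *      // pvR0Sub now addresses that single page; the mapping is torn down together
 *      // with the underlying allocation (RTR0MemObjFree includes mappings).
 *  }
 * @endcode
 */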
3804
3805/**
3806 * Changes the page level protection of one or more pages previously allocated
3807 * by SUPR0PageAllocEx.
3808 *
3809 * @returns IPRT status code.
3810 * @param   pSession    The session to associate the allocation with.
3811 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3812 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
3813 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
3814 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
3815 * @param offSub Where to start changing. Must be page aligned.
3816 * @param cbSub How much to change. Must be page aligned.
3817 * @param fProt The new page level protection, see RTMEM_PROT_*.
3818 */
3819SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
3820{
3821 int rc;
3822 PSUPDRVBUNDLE pBundle;
3823 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
3824 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
3825 LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt-%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
3826
3827 /*
3828     * Validate input.
3829 */
3830 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3831 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
3832 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3833 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3834 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3835
3836 /*
3837 * Find the memory object.
3838 */
3839 RTSpinlockAcquire(pSession->Spinlock);
3840 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3841 {
3842 if (pBundle->cUsed > 0)
3843 {
3844 unsigned i;
3845 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3846 {
3847 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3848 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3849 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3850 || pvR3 == NIL_RTR3PTR)
3851 && ( pvR0 == NIL_RTR0PTR
3852 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
3853 && ( pvR3 == NIL_RTR3PTR
3854 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
3855 {
3856 if (pvR0 != NIL_RTR0PTR)
3857 hMemObjR0 = pBundle->aMem[i].MemObj;
3858 if (pvR3 != NIL_RTR3PTR)
3859 hMemObjR3 = pBundle->aMem[i].MapObjR3;
3860 break;
3861 }
3862 }
3863 }
3864 }
3865 RTSpinlockRelease(pSession->Spinlock);
3866
3867 rc = VERR_INVALID_PARAMETER;
3868 if ( hMemObjR0 != NIL_RTR0MEMOBJ
3869 || hMemObjR3 != NIL_RTR0MEMOBJ)
3870 {
3871 /*
3872 * Do some further input validations before calling IPRT.
3873 */
3874 size_t cbMemObj = hMemObjR0 != NIL_RTR0PTR ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
3875 if ( offSub < cbMemObj
3876 && cbSub <= cbMemObj
3877 && offSub + cbSub <= cbMemObj)
3878 {
3879 rc = VINF_SUCCESS;
3880 if (hMemObjR3 != NIL_RTR0PTR)
3881 rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
3882 if (hMemObjR0 != NIL_RTR0PTR && RT_SUCCESS(rc))
3883 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
3884 }
3885 else
3886        SUPR0Printf("SUPR0PageProtect: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3887
3888 }
3889 return rc;
3890
3891}
3892
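/* Illustrative sketch (not part of the driver): make the first page of a
 * SUPR0PageAllocEx() allocation read-only in both mappings and then restore
 * read/write access.  pvR3/pvR0 are assumed to be the addresses returned by the
 * allocator.
 *
 * @code
 *  int rc = SUPR0PageProtect(pSession, pvR3, pvR0, 0, PAGE_SIZE, RTMEM_PROT_READ);
 *  if (RT_SUCCESS(rc))
 *      rc = SUPR0PageProtect(pSession, pvR3, pvR0, 0, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
 * @endcode
 */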
3893
3894/**
3895 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
3896 *
3897 * @returns IPRT status code.
3898 * @param pSession The session owning the allocation.
3899 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
3900 * SUPR0PageAllocEx().
3901 */
3902SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3903{
3904 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3905 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3906 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
3907}
3908
3909
3910/**
3911 * Reports a bad context; currently that means EFLAGS.AC is 0 instead of 1.
3912 *
3913 * @param pDevExt The device extension.
3914 * @param pszFile The source file where the caller detected the bad
3915 * context.
3916 * @param uLine The line number in @a pszFile.
3917 * @param pszExtra Optional additional message to give further hints.
3918 */
3919void VBOXCALL supdrvBadContext(PSUPDRVDEVEXT pDevExt, const char *pszFile, uint32_t uLine, const char *pszExtra)
3920{
3921 uint32_t cCalls;
3922
3923 /*
3924 * Shorten the filename before displaying the message.
3925 */
3926 for (;;)
3927 {
3928 const char *pszTmp = strchr(pszFile, '/');
3929 if (!pszTmp)
3930 pszTmp = strchr(pszFile, '\\');
3931 if (!pszTmp)
3932 break;
3933 pszFile = pszTmp + 1;
3934 }
3935 if (RT_VALID_PTR(pszExtra) && *pszExtra)
3936 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s: %s\n", uLine, pszFile, pszExtra);
3937 else
3938 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s!\n", uLine, pszFile);
3939
3940 /*
3941 * Record the incident so that we stand a chance of blocking I/O controls
3942     * before panicking the system.
3943 */
3944 cCalls = ASMAtomicIncU32(&pDevExt->cBadContextCalls);
3945 if (cCalls > UINT32_MAX - _1K)
3946 ASMAtomicWriteU32(&pDevExt->cBadContextCalls, UINT32_MAX - _1K);
3947}
3948
3949
3950/**
3951 * Reports a bad context; currently that means EFLAGS.AC is 0 instead of 1.
3952 *
3953 * @param pSession The session of the caller.
3954 * @param pszFile The source file where the caller detected the bad
3955 * context.
3956 * @param uLine The line number in @a pszFile.
3957 * @param pszExtra Optional additional message to give further hints.
3958 */
3959SUPR0DECL(void) SUPR0BadContext(PSUPDRVSESSION pSession, const char *pszFile, uint32_t uLine, const char *pszExtra)
3960{
3961 PSUPDRVDEVEXT pDevExt;
3962
3963 AssertReturnVoid(SUP_IS_SESSION_VALID(pSession));
3964 pDevExt = pSession->pDevExt;
3965
3966 supdrvBadContext(pDevExt, pszFile, uLine, pszExtra);
3967}
3968
3969
3970/**
3971 * Gets the paging mode of the current CPU.
3972 *
3973 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
3974 */
3975SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
3976{
3977 SUPPAGINGMODE enmMode;
3978
3979 RTR0UINTREG cr0 = ASMGetCR0();
3980 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3981 enmMode = SUPPAGINGMODE_INVALID;
3982 else
3983 {
3984 RTR0UINTREG cr4 = ASMGetCR4();
3985 uint32_t fNXEPlusLMA = 0;
3986 if (cr4 & X86_CR4_PAE)
3987 {
3988 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
3989 if (fExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
3990 {
3991 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
3992 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
3993 fNXEPlusLMA |= RT_BIT(0);
3994 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
3995 fNXEPlusLMA |= RT_BIT(1);
3996 }
3997 }
3998
3999 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
4000 {
4001 case 0:
4002 enmMode = SUPPAGINGMODE_32_BIT;
4003 break;
4004
4005 case X86_CR4_PGE:
4006 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
4007 break;
4008
4009 case X86_CR4_PAE:
4010 enmMode = SUPPAGINGMODE_PAE;
4011 break;
4012
4013 case X86_CR4_PAE | RT_BIT(0):
4014 enmMode = SUPPAGINGMODE_PAE_NX;
4015 break;
4016
4017 case X86_CR4_PAE | X86_CR4_PGE:
4018 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4019 break;
4020
4021 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4022                enmMode = SUPPAGINGMODE_PAE_GLOBAL_NX;
4023 break;
4024
4025 case RT_BIT(1) | X86_CR4_PAE:
4026 enmMode = SUPPAGINGMODE_AMD64;
4027 break;
4028
4029 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
4030 enmMode = SUPPAGINGMODE_AMD64_NX;
4031 break;
4032
4033 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
4034 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
4035 break;
4036
4037 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4038 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4039 break;
4040
4041 default:
4042 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4043 enmMode = SUPPAGINGMODE_INVALID;
4044 break;
4045 }
4046 }
4047 return enmMode;
4048}
4049
4050
4051/**
4052 * Change CR4 and take care of the kernel CR4 shadow if applicable.
4053 *
4054 * CR4 shadow handling is required for Linux >= 4.0. Calling this function
4055 * instead of ASMSetCR4() is only necessary for semi-permanent CR4 changes
4056 * for code with interrupts enabled.
4057 *
4058 * @returns the old CR4 value.
4059 *
4060 * @param fOrMask bits to be set in CR4.
4061 * @param   fAndMask    mask of CR4 bits to keep; bits not in this mask are cleared.
4062 *
4063 * @remarks Must be called with preemption/interrupts disabled.
4064 */
4065SUPR0DECL(RTCCUINTREG) SUPR0ChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
4066{
4067#ifdef RT_OS_LINUX
4068 return supdrvOSChangeCR4(fOrMask, fAndMask);
4069#else
4070 RTCCUINTREG uOld = ASMGetCR4();
4071 RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
4072 if (uNew != uOld)
4073 ASMSetCR4(uNew);
4074 return uOld;
4075#endif
4076}
4077
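/* Illustrative sketch (not part of the driver): setting CR4.VMXE the way the remarks
 * above prescribe, i.e. with preemption disabled and using the OR/AND mask pair
 * (an all-ones AND mask keeps every other bit).  The VMXON work in the middle is
 * elided.
 *
 * @code
 *  RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
 *  RTThreadPreemptDisable(&PreemptState);
 *  RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, ~(RTCCUINTREG)0);   // OR in VMXE, keep the rest
 *  // ... do the VMXON work ...
 *  if (!(uOldCr4 & X86_CR4_VMXE))
 *      SUPR0ChangeCR4(0, ~(RTCCUINTREG)X86_CR4_VMXE);                           // restore the previous state
 *  RTThreadPreemptRestore(&PreemptState);
 * @endcode
 */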
4078
4079/**
4080 * Enables or disables hardware virtualization extensions using native OS APIs.
4081 *
4082 * @returns VBox status code.
4083 * @retval VINF_SUCCESS on success.
4084 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4085 *
4086 * @param fEnable Whether to enable or disable.
4087 */
4088SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4089{
4090#ifdef RT_OS_DARWIN
4091 return supdrvOSEnableVTx(fEnable);
4092#else
4093 RT_NOREF1(fEnable);
4094 return VERR_NOT_SUPPORTED;
4095#endif
4096}
4097
4098
4099/**
4100 * Suspends hardware virtualization extensions using the native OS API.
4101 *
4102 * This is called prior to entering raw-mode context.
4103 *
4104 * @returns @c true if suspended, @c false if not.
4105 */
4106SUPR0DECL(bool) SUPR0SuspendVTxOnCpu(void)
4107{
4108#ifdef RT_OS_DARWIN
4109 return supdrvOSSuspendVTxOnCpu();
4110#else
4111 return false;
4112#endif
4113}
4114
4115
4116/**
4117 * Resumes hardware virtualization extensions using the native OS API.
4118 *
4119 * This is called after returning from raw-mode context.
4120 *
4121 * @param fSuspended The return value of SUPR0SuspendVTxOnCpu.
4122 */
4123SUPR0DECL(void) SUPR0ResumeVTxOnCpu(bool fSuspended)
4124{
4125#ifdef RT_OS_DARWIN
4126 supdrvOSResumeVTxOnCpu(fSuspended);
4127#else
4128 RT_NOREF1(fSuspended);
4129 Assert(!fSuspended);
4130#endif
4131}
4132
4133
4134SUPR0DECL(int) SUPR0GetCurrentGdtRw(RTHCUINTPTR *pGdtRw)
4135{
4136#ifdef RT_OS_LINUX
4137 return supdrvOSGetCurrentGdtRw(pGdtRw);
4138#else
4139 NOREF(pGdtRw);
4140 return VERR_NOT_IMPLEMENTED;
4141#endif
4142}
4143
4144
4145/**
4146 * Gets AMD-V and VT-x support for the calling CPU.
4147 *
4148 * @returns VBox status code.
4149 * @param pfCaps Where to store whether VT-x (SUPVTCAPS_VT_X) or AMD-V
4150 * (SUPVTCAPS_AMD_V) is supported.
4151 */
4152SUPR0DECL(int) SUPR0GetVTSupport(uint32_t *pfCaps)
4153{
4154 Assert(pfCaps);
4155 *pfCaps = 0;
4156
4157 /* Check if the CPU even supports CPUID (extremely ancient CPUs). */
4158 if (ASMHasCpuId())
4159 {
4160 /* Check the range of standard CPUID leafs. */
4161 uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
4162 ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
4163 if (ASMIsValidStdRange(uMaxLeaf))
4164 {
4165 /* Query the standard CPUID leaf. */
4166 uint32_t fFeatEcx, fFeatEdx, uDummy;
4167 ASMCpuId(1, &uDummy, &uDummy, &fFeatEcx, &fFeatEdx);
4168
4169 /* Check if the vendor is Intel (or compatible). */
4170 if ( ASMIsIntelCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4171 || ASMIsViaCentaurCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4172 || ASMIsShanghaiCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4173 {
4174 /* Check VT-x support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4175 if ( (fFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
4176 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4177 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4178 {
4179 *pfCaps = SUPVTCAPS_VT_X;
4180 return VINF_SUCCESS;
4181 }
4182 return VERR_VMX_NO_VMX;
4183 }
4184
4185 /* Check if the vendor is AMD (or compatible). */
4186 if ( ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4187 || ASMIsHygonCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4188 {
4189 uint32_t fExtFeatEcx, uExtMaxId;
4190 ASMCpuId(0x80000000, &uExtMaxId, &uDummy, &uDummy, &uDummy);
4191 ASMCpuId(0x80000001, &uDummy, &uDummy, &fExtFeatEcx, &uDummy);
4192
4193 /* Check AMD-V support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4194 if ( ASMIsValidExtRange(uExtMaxId)
4195 && uExtMaxId >= 0x8000000a
4196 && (fExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
4197 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4198 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4199 {
4200 *pfCaps = SUPVTCAPS_AMD_V;
4201 return VINF_SUCCESS;
4202 }
4203 return VERR_SVM_NO_SVM;
4204 }
4205 }
4206 }
4207 return VERR_UNSUPPORTED_CPU;
4208}
4209
4210
4211/**
4212 * Checks if Intel VT-x feature is usable on this CPU.
4213 *
4214 * @returns VBox status code.
4215 * @param pfIsSmxModeAmbiguous Where to return whether the SMX mode causes
4216 * ambiguity that makes us unsure whether we
4217 * really can use VT-x or not.
4218 *
4219 * @remarks Must be called with preemption disabled.
4220 * The caller is also expected to check that the CPU is an Intel (or
4221 * VIA/Shanghai) CPU -and- that it supports VT-x. Otherwise, this
4222 * function might throw a \#GP fault as it tries to read/write MSRs
4223 * that may not be present!
4224 */
4225SUPR0DECL(int) SUPR0GetVmxUsability(bool *pfIsSmxModeAmbiguous)
4226{
4227 uint64_t fFeatMsr;
4228 bool fMaybeSmxMode;
4229 bool fMsrLocked;
4230 bool fSmxVmxAllowed;
4231 bool fVmxAllowed;
4232 bool fIsSmxModeAmbiguous;
4233 int rc;
4234
4235 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4236
4237 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4238 fMaybeSmxMode = RT_BOOL(ASMGetCR4() & X86_CR4_SMXE);
4239 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4240 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4241 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4242 fIsSmxModeAmbiguous = false;
4243 rc = VERR_INTERNAL_ERROR_5;
4244
4245 /* Check if the LOCK bit is set but excludes the required VMXON bit. */
4246 if (fMsrLocked)
4247 {
4248 if (fVmxAllowed && fSmxVmxAllowed)
4249 rc = VINF_SUCCESS;
4250 else if (!fVmxAllowed && !fSmxVmxAllowed)
4251 rc = VERR_VMX_MSR_ALL_VMX_DISABLED;
4252 else if (!fMaybeSmxMode)
4253 {
4254 if (fVmxAllowed)
4255 rc = VINF_SUCCESS;
4256 else
4257 rc = VERR_VMX_MSR_VMX_DISABLED;
4258 }
4259 else
4260 {
4261 /*
4262 * CR4.SMXE is set but this doesn't mean the CPU is necessarily in SMX mode. We shall assume
4263 * that it is -not- and that it is a stupid BIOS/OS setting CR4.SMXE for no good reason.
4264 * See @bugref{6873}.
4265 */
4266 Assert(fMaybeSmxMode == true);
4267 fIsSmxModeAmbiguous = true;
4268 rc = VINF_SUCCESS;
4269 }
4270 }
4271 else
4272 {
4273 /*
4274 * MSR is not yet locked; we can change it ourselves here. Once the lock bit is set,
4275 * this MSR can no longer be modified.
4276 *
4277 * Set both the VMX and SMX_VMX bits (if supported) as we can't determine SMX mode
4278 * accurately. See @bugref{6873}.
4279 *
4280 * We need to check for SMX hardware support here, before writing the MSR as
4281 * otherwise we will #GP fault on CPUs that do not support it. Callers do not check
4282 * for it.
4283 */
4284 uint32_t fFeaturesECX, uDummy;
4285#ifdef VBOX_STRICT
4286 /* Callers should have verified these at some point. */
4287 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4288 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4289 Assert(ASMIsValidStdRange(uMaxId));
4290 Assert( ASMIsIntelCpuEx( uVendorEBX, uVendorECX, uVendorEDX)
4291 || ASMIsViaCentaurCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4292 || ASMIsShanghaiCpuEx( uVendorEBX, uVendorECX, uVendorEDX));
4293#endif
4294 ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
4295 bool fSmxVmxHwSupport = false;
4296 if ( (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
4297 && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
4298 fSmxVmxHwSupport = true;
4299
4300 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
4301 | MSR_IA32_FEATURE_CONTROL_VMXON;
4302 if (fSmxVmxHwSupport)
4303 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;
4304
4305 /*
4306 * Commit.
4307 */
4308 ASMWrMsr(MSR_IA32_FEATURE_CONTROL, fFeatMsr);
4309
4310 /*
4311 * Verify.
4312 */
4313 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4314 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4315 if (fMsrLocked)
4316 {
4317 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4318 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4319 if ( fVmxAllowed
4320 && ( !fSmxVmxHwSupport
4321 || fSmxVmxAllowed))
4322 rc = VINF_SUCCESS;
4323 else
4324 rc = !fSmxVmxHwSupport ? VERR_VMX_MSR_VMX_ENABLE_FAILED : VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED;
4325 }
4326 else
4327 rc = VERR_VMX_MSR_LOCKING_FAILED;
4328 }
4329
4330 if (pfIsSmxModeAmbiguous)
4331 *pfIsSmxModeAmbiguous = fIsSmxModeAmbiguous;
4332
4333 return rc;
4334}
4335
4336
4337/**
4338 * Checks if AMD-V SVM feature is usable on this CPU.
4339 *
4340 * @returns VBox status code.
4341 * @param fInitSvm If usable, try to initialize SVM on this CPU.
4342 *
4343 * @remarks Must be called with preemption disabled.
4344 */
4345SUPR0DECL(int) SUPR0GetSvmUsability(bool fInitSvm)
4346{
4347 int rc;
4348 uint64_t fVmCr;
4349 uint64_t fEfer;
4350
4351 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4352 fVmCr = ASMRdMsr(MSR_K8_VM_CR);
4353 if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
4354 {
4355 rc = VINF_SUCCESS;
4356 if (fInitSvm)
4357 {
4358 /* Turn on SVM in the EFER MSR. */
4359 fEfer = ASMRdMsr(MSR_K6_EFER);
4360 if (fEfer & MSR_K6_EFER_SVME)
4361 rc = VERR_SVM_IN_USE;
4362 else
4363 {
4364 ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
4365
4366 /* Paranoia. */
4367 fEfer = ASMRdMsr(MSR_K6_EFER);
4368 if (fEfer & MSR_K6_EFER_SVME)
4369 {
4370 /* Restore previous value. */
4371 ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
4372 }
4373 else
4374 rc = VERR_SVM_ILLEGAL_EFER_MSR;
4375 }
4376 }
4377 }
4378 else
4379 rc = VERR_SVM_DISABLED;
4380 return rc;
4381}
4382
4383
4384/**
4385 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4386 *
4387 * @returns VBox status code.
4388 * @retval VERR_VMX_NO_VMX
4389 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4390 * @retval VERR_VMX_MSR_VMX_DISABLED
4391 * @retval VERR_VMX_MSR_LOCKING_FAILED
4392 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4393 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4394 * @retval VERR_SVM_NO_SVM
4395 * @retval VERR_SVM_DISABLED
4396 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4397 * (centaur)/Shanghai CPU.
4398 *
4399 * @param pfCaps Where to store the capabilities.
4400 */
4401int VBOXCALL supdrvQueryVTCapsInternal(uint32_t *pfCaps)
4402{
4403 int rc = VERR_UNSUPPORTED_CPU;
4404 bool fIsSmxModeAmbiguous = false;
4405 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4406
4407 /*
4408 * Input validation.
4409 */
4410 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4411 *pfCaps = 0;
4412
4413 /* We may modify MSRs and re-read them, disable preemption so we make sure we don't migrate CPUs. */
4414 RTThreadPreemptDisable(&PreemptState);
4415
4416 /* Check if VT-x/AMD-V is supported. */
4417 rc = SUPR0GetVTSupport(pfCaps);
4418 if (RT_SUCCESS(rc))
4419 {
4420 /* Check if VT-x is supported. */
4421 if (*pfCaps & SUPVTCAPS_VT_X)
4422 {
4423 /* Check if VT-x is usable. */
4424 rc = SUPR0GetVmxUsability(&fIsSmxModeAmbiguous);
4425 if (RT_SUCCESS(rc))
4426 {
4427 /* Query some basic VT-x capabilities (mainly required by our GUI). */
4428 VMXCTLSMSR vtCaps;
4429 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4430 if (vtCaps.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4431 {
4432 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4433 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_EPT)
4434 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4435 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
4436 *pfCaps |= SUPVTCAPS_VTX_UNRESTRICTED_GUEST;
4437 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4438 *pfCaps |= SUPVTCAPS_VTX_VMCS_SHADOWING;
4439 }
4440 }
4441 }
4442 /* Check if AMD-V is supported. */
4443 else if (*pfCaps & SUPVTCAPS_AMD_V)
4444 {
4445            /* Check if SVM is usable. */
4446 rc = SUPR0GetSvmUsability(false /* fInitSvm */);
4447 if (RT_SUCCESS(rc))
4448 {
4449 /* Query some basic AMD-V capabilities (mainly required by our GUI). */
4450 uint32_t uDummy, fSvmFeatures;
4451 ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
4452 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
4453 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4454 }
4455 }
4456 }
4457
4458 /* Restore preemption. */
4459 RTThreadPreemptRestore(&PreemptState);
4460
4461 /* After restoring preemption, if we may be in SMX mode, print a warning as it's difficult to debug such problems. */
4462 if (fIsSmxModeAmbiguous)
4463        SUPR0Printf("WARNING! CR4 hints SMX mode but your CPU is too secretive. Proceeding anyway... We wish you good luck!\n");
4464
4465 return rc;
4466}
4467
4468
4469/**
4470 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4471 *
4472 * @returns VBox status code.
4473 * @retval VERR_VMX_NO_VMX
4474 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4475 * @retval VERR_VMX_MSR_VMX_DISABLED
4476 * @retval VERR_VMX_MSR_LOCKING_FAILED
4477 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4478 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4479 * @retval VERR_SVM_NO_SVM
4480 * @retval VERR_SVM_DISABLED
4481 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4482 * (centaur)/Shanghai CPU.
4483 *
4484 * @param pSession The session handle.
4485 * @param pfCaps Where to store the capabilities.
4486 */
4487SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
4488{
4489 /*
4490 * Input validation.
4491 */
4492 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4493 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4494
4495 /*
4496 * Call common worker.
4497 */
4498 return supdrvQueryVTCapsInternal(pfCaps);
4499}
4500
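/* Illustrative sketch (not part of the driver): a ring-0 client checking what the
 * CPU offers before deciding on an execution engine.  pSession is assumed valid.
 *
 * @code
 *  uint32_t fCaps = 0;
 *  int rc = SUPR0QueryVTCaps(pSession, &fCaps);
 *  if (RT_SUCCESS(rc))
 *  {
 *      if (fCaps & SUPVTCAPS_VT_X)
 *          Log(("VT-x%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " with EPT" : ""));
 *      else if (fCaps & SUPVTCAPS_AMD_V)
 *          Log(("AMD-V%s\n", fCaps & SUPVTCAPS_NESTED_PAGING ? " with nested paging" : ""));
 *  }
 * @endcode
 */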
4501
4502/**
4503 * Queries the CPU microcode revision.
4504 *
4505 * @returns VBox status code.
4506 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4507 * readable microcode rev.
4508 *
4509 * @param puRevision Where to store the microcode revision.
4510 */
4511static int VBOXCALL supdrvQueryUcodeRev(uint32_t *puRevision)
4512{
4513 int rc = VERR_UNSUPPORTED_CPU;
4514 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4515
4516 /*
4517 * Input validation.
4518 */
4519 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4520
4521 *puRevision = 0;
4522
4523 /* Disable preemption so we make sure we don't migrate CPUs, just in case. */
4524 /* NB: We assume that there aren't mismatched microcode revs in the system. */
4525 RTThreadPreemptDisable(&PreemptState);
4526
4527 if (ASMHasCpuId())
4528 {
4529 uint32_t uDummy, uTFMSEAX;
4530 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4531
4532 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4533 ASMCpuId(1, &uTFMSEAX, &uDummy, &uDummy, &uDummy);
4534
4535 if (ASMIsValidStdRange(uMaxId))
4536 {
4537 uint64_t uRevMsr;
4538 if (ASMIsIntelCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4539 {
4540 /* Architectural MSR available on Pentium Pro and later. */
4541 if (ASMGetCpuFamily(uTFMSEAX) >= 6)
4542 {
4543 /* Revision is in the high dword. */
4544 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
4545 *puRevision = RT_HIDWORD(uRevMsr);
4546 rc = VINF_SUCCESS;
4547 }
4548 }
4549 else if ( ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4550 || ASMIsHygonCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4551 {
4552 /* Not well documented, but at least all AMD64 CPUs support this. */
4553 if (ASMGetCpuFamily(uTFMSEAX) >= 15)
4554 {
4555 /* Revision is in the low dword. */
4556 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID); /* Same MSR as Intel. */
4557 *puRevision = RT_LODWORD(uRevMsr);
4558 rc = VINF_SUCCESS;
4559 }
4560 }
4561 }
4562 }
4563
4564 RTThreadPreemptRestore(&PreemptState);
4565
4566 return rc;
4567}
4568
4569/**
4570 * Queries the CPU microcode revision.
4571 *
4572 * @returns VBox status code.
4573 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4574 * readable microcode rev.
4575 *
4576 * @param pSession The session handle.
4577 * @param puRevision Where to store the microcode revision.
4578 */
4579SUPR0DECL(int) SUPR0QueryUcodeRev(PSUPDRVSESSION pSession, uint32_t *puRevision)
4580{
4581 /*
4582 * Input validation.
4583 */
4584 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4585 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4586
4587 /*
4588 * Call common worker.
4589 */
4590 return supdrvQueryUcodeRev(puRevision);
4591}
4592
4593
4594/**
4595 * Gets hardware-virtualization MSRs of the calling CPU.
4596 *
4597 * @returns VBox status code.
4598 * @param pMsrs Where to store the hardware-virtualization MSRs.
4599 * @param fCaps Hardware virtualization capabilities (SUPVTCAPS_XXX). Pass 0
4600 * to explicitly check for the presence of VT-x/AMD-V before
4601 * querying MSRs.
4602 * @param fForce Force querying of MSRs from the hardware.
4603 */
4604SUPR0DECL(int) SUPR0GetHwvirtMsrs(PSUPHWVIRTMSRS pMsrs, uint32_t fCaps, bool fForce)
4605{
4606 NOREF(fForce);
4607
4608 int rc;
4609 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4610
4611 /*
4612 * Input validation.
4613 */
4614 AssertPtrReturn(pMsrs, VERR_INVALID_POINTER);
4615
4616 /*
4617 * Disable preemption so we make sure we don't migrate CPUs and because
4618 * we access global data.
4619 */
4620 RTThreadPreemptDisable(&PreemptState);
4621
4622 /*
4623 * Query the MSRs from the hardware.
4624 */
4625 /** @todo Cache MSR values so future accesses can avoid querying the hardware as
4626 * it may be expensive (esp. in nested virtualization scenarios). Do this
4627 * with proper locking and race safety. */
4628 SUPHWVIRTMSRS Msrs;
4629 RT_ZERO(Msrs);
4630
4631    /* If the caller claims VT-x/AMD-V is supported, we don't need to recheck it. */
4632 if (!(fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V)))
4633 rc = SUPR0GetVTSupport(&fCaps);
4634 else
4635 rc = VINF_SUCCESS;
4636 if (RT_SUCCESS(rc))
4637 {
4638 if (fCaps & SUPVTCAPS_VT_X)
4639 {
4640 Msrs.u.vmx.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4641 Msrs.u.vmx.u64Basic = ASMRdMsr(MSR_IA32_VMX_BASIC);
4642 Msrs.u.vmx.u64PinCtls = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
4643 Msrs.u.vmx.u64ProcCtls = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4644 Msrs.u.vmx.u64ExitCtls = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
4645 Msrs.u.vmx.u64EntryCtls = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
4646 Msrs.u.vmx.u64Misc = ASMRdMsr(MSR_IA32_VMX_MISC);
4647 Msrs.u.vmx.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
4648 Msrs.u.vmx.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
4649 Msrs.u.vmx.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
4650 Msrs.u.vmx.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
4651 Msrs.u.vmx.u64VmcsEnum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
4652
4653 if (RT_BF_GET(Msrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
4654 {
4655 Msrs.u.vmx.u64TruePinCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
4656 Msrs.u.vmx.u64TrueProcCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
4657 Msrs.u.vmx.u64TrueEntryCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
4658 Msrs.u.vmx.u64TrueExitCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
4659 }
4660
4661 uint32_t const fProcCtlsAllowed1 = RT_HI_U32(Msrs.u.vmx.u64ProcCtls);
4662 if (fProcCtlsAllowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4663 {
4664 Msrs.u.vmx.u64ProcCtls2 = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4665
4666 uint32_t const fProcCtls2Allowed1 = RT_HI_U32(Msrs.u.vmx.u64ProcCtls2);
4667 if (fProcCtls2Allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
4668 Msrs.u.vmx.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
4669
4670 if (fProcCtls2Allowed1 & VMX_PROC_CTLS2_VMFUNC)
4671 Msrs.u.vmx.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
4672 }
4673 }
4674 else if (fCaps & SUPVTCAPS_AMD_V)
4675 {
4676 Msrs.u.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
4677 Msrs.u.svm.u64MsrSmmAddr = ASMRdMsr(MSR_K7_SMM_ADDR);
4678 Msrs.u.svm.u64MsrSmmMask = ASMRdMsr(MSR_K7_SMM_MASK);
4679 }
4680 else
4681 {
4682 RTThreadPreemptRestore(&PreemptState);
4683 AssertMsgFailedReturn(("SUPR0GetVTSupport returns success but neither VT-x nor AMD-V reported!\n"),
4684 VERR_INTERNAL_ERROR_2);
4685 }
4686
4687 /*
4688 * Copy the MSRs out.
4689 */
4690 memcpy(pMsrs, &Msrs, sizeof(*pMsrs));
4691 }
4692
4693 RTThreadPreemptRestore(&PreemptState);
4694
4695 return rc;
4696}
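
/*
 * Usage sketch (illustration only, not compiled into the driver): querying the
 * hardware-virtualization MSRs after first establishing which technology is
 * present.  Passing 0 for fCaps would make SUPR0GetHwvirtMsrs do that check
 * itself; here the capabilities are fetched up front so the sketch knows which
 * union member to read afterwards.
 */
#if 0
static void exampleGetHwvirtMsrs(void)
{
    uint32_t      fCaps = 0;
    SUPHWVIRTMSRS Msrs;
    int rc = SUPR0GetVTSupport(&fCaps);
    if (RT_SUCCESS(rc))
        rc = SUPR0GetHwvirtMsrs(&Msrs, fCaps, false /*fForce*/);
    if (RT_SUCCESS(rc))
    {
        if (fCaps & SUPVTCAPS_VT_X)
            SUPR0Printf("example: VMX basic MSR=%#RX64\n", Msrs.u.vmx.u64Basic);
        else if (fCaps & SUPVTCAPS_AMD_V)
            SUPR0Printf("example: SVM HWCR MSR=%#RX64\n", Msrs.u.svm.u64MsrHwcr);
    }
}
#endif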
4697
4698
4699/**
4700 * Register a component factory with the support driver.
4701 *
4702 * This is currently restricted to kernel sessions only.
4703 *
4704 * @returns VBox status code.
4705 * @retval VINF_SUCCESS on success.
4706 * @retval VERR_NO_MEMORY if we're out of memory.
4707 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
4708 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4709 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4710 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4711 *
4712 * @param pSession The SUPDRV session (must be a ring-0 session).
4713 * @param pFactory Pointer to the component factory registration structure.
4714 *
4715 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
4716 */
4717SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4718{
4719 PSUPDRVFACTORYREG pNewReg;
4720 const char *psz;
4721 int rc;
4722
4723 /*
4724 * Validate parameters.
4725 */
4726 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4727 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4728 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4729 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
4730 psz = RTStrEnd(pFactory->szName, sizeof(pFactory->szName));
4731 AssertReturn(psz, VERR_INVALID_PARAMETER);
4732
4733 /*
4734 * Allocate and initialize a new registration structure.
4735 */
4736 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
4737 if (pNewReg)
4738 {
4739 pNewReg->pNext = NULL;
4740 pNewReg->pFactory = pFactory;
4741 pNewReg->pSession = pSession;
4742 pNewReg->cchName = psz - &pFactory->szName[0];
4743
4744 /*
4745 * Add it to the tail of the list after checking for prior registration.
4746 */
4747 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4748 if (RT_SUCCESS(rc))
4749 {
4750 PSUPDRVFACTORYREG pPrev = NULL;
4751 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4752 while (pCur && pCur->pFactory != pFactory)
4753 {
4754 pPrev = pCur;
4755 pCur = pCur->pNext;
4756 }
4757 if (!pCur)
4758 {
4759 if (pPrev)
4760 pPrev->pNext = pNewReg;
4761 else
4762 pSession->pDevExt->pComponentFactoryHead = pNewReg;
4763 rc = VINF_SUCCESS;
4764 }
4765 else
4766 rc = VERR_ALREADY_EXISTS;
4767
4768 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4769 }
4770
4771 if (RT_FAILURE(rc))
4772 RTMemFree(pNewReg);
4773 }
4774 else
4775 rc = VERR_NO_MEMORY;
4776 return rc;
4777}
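
/*
 * Usage sketch (illustration only, not compiled into the driver): a kernel
 * component registering a factory so that other code can look it up by name
 * via SUPR0ComponentQueryFactory.  The component name and callback body are
 * placeholders, and the callback signature below is an assumption that merely
 * mirrors how pfnQueryFactoryInterface is invoked in this file.  Note that
 * registration requires a ring-0 (kernel) session, otherwise it fails with
 * VERR_ACCESS_DENIED.
 */
#if 0
static DECLCALLBACK(void *) exampleQueryFactoryInterface(PCSUPDRVFACTORY pSupDrvFactory, PSUPDRVSESSION pSession,
                                                         const char *pszInterfaceUuid)
{
    RT_NOREF(pSupDrvFactory, pSession, pszInterfaceUuid);
    return NULL; /* This sketch exports no interfaces. */
}

static SUPDRVFACTORY g_ExampleFactory;

static int exampleRegisterFactory(PSUPDRVSESSION pSession)
{
    RT_ZERO(g_ExampleFactory);
    RTStrCopy(g_ExampleFactory.szName, sizeof(g_ExampleFactory.szName), "ExampleComponent");
    g_ExampleFactory.pfnQueryFactoryInterface = exampleQueryFactoryInterface;
    return SUPR0ComponentRegisterFactory(pSession, &g_ExampleFactory);
}
#endif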
4778
4779
4780/**
4781 * Deregister a component factory.
4782 *
4783 * @returns VBox status code.
4784 * @retval VINF_SUCCESS on success.
4785 * @retval VERR_NOT_FOUND if the factory wasn't registered.
4786 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4787 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4788 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4789 *
4790 * @param pSession The SUPDRV session (must be a ring-0 session).
4791 * @param pFactory Pointer to the component factory registration structure
4792 * previously passed to SUPR0ComponentRegisterFactory().
4793 *
4794 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
4795 */
4796SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4797{
4798 int rc;
4799
4800 /*
4801 * Validate parameters.
4802 */
4803 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4804 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4805 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4806
4807 /*
4808 * Take the lock and look for the registration record.
4809 */
4810 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4811 if (RT_SUCCESS(rc))
4812 {
4813 PSUPDRVFACTORYREG pPrev = NULL;
4814 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4815 while (pCur && pCur->pFactory != pFactory)
4816 {
4817 pPrev = pCur;
4818 pCur = pCur->pNext;
4819 }
4820 if (pCur)
4821 {
4822 if (!pPrev)
4823 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
4824 else
4825 pPrev->pNext = pCur->pNext;
4826
4827 pCur->pNext = NULL;
4828 pCur->pFactory = NULL;
4829 pCur->pSession = NULL;
4830 rc = VINF_SUCCESS;
4831 }
4832 else
4833 rc = VERR_NOT_FOUND;
4834
4835 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4836
4837 RTMemFree(pCur);
4838 }
4839 return rc;
4840}
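
/*
 * Usage sketch (illustration only, not compiled into the driver): the matching
 * cleanup for the registration sketch above -- the component deregisters its
 * factory (same structure it registered with) before unloading.
 */
#if 0
static void exampleDeregisterFactory(PSUPDRVSESSION pSession)
{
    int rc = SUPR0ComponentDeregisterFactory(pSession, &g_ExampleFactory);
    AssertRC(rc); /* VERR_NOT_FOUND if it was never registered. */
}
#endif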
4841
4842
4843/**
4844 * Queries a component factory.
4845 *
4846 * @returns VBox status code.
4847 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4848 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4849 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
4850 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
4851 *
4852 * @param pSession The SUPDRV session.
4853 * @param pszName The name of the component factory.
4854 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
4855 * @param ppvFactoryIf Where to store the factory interface.
4856 */
4857SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
4858{
4859 const char *pszEnd;
4860 size_t cchName;
4861 int rc;
4862
4863 /*
4864 * Validate parameters.
4865 */
4866 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4867
4868 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
4869 pszEnd = RTStrEnd(pszName, RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
4870 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4871 cchName = pszEnd - pszName;
4872
4873 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
4874 pszEnd = RTStrEnd(pszInterfaceUuid, RTUUID_STR_LENGTH);
4875 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4876
4877 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
4878 *ppvFactoryIf = NULL;
4879
4880 /*
4881 * Take the lock and try all factories by this name.
4882 */
4883 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4884 if (RT_SUCCESS(rc))
4885 {
4886 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4887 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
4888 while (pCur)
4889 {
4890 if ( pCur->cchName == cchName
4891 && !memcmp(pCur->pFactory->szName, pszName, cchName))
4892 {
4893 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
4894 if (pvFactory)
4895 {
4896 *ppvFactoryIf = pvFactory;
4897 rc = VINF_SUCCESS;
4898 break;
4899 }
4900 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
4901 }
4902
4903 /* next */
4904 pCur = pCur->pNext;
4905 }
4906
4907 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4908 }
4909 return rc;
4910}
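
/*
 * Usage sketch (illustration only, not compiled into the driver): looking up a
 * factory by name and asking it for an interface identified by a stringified
 * UUID.  The component name and UUID are placeholders.
 */
#if 0
static void exampleQueryFactory(PSUPDRVSESSION pSession)
{
    void *pvIf = NULL;
    int rc = SUPR0ComponentQueryFactory(pSession, "ExampleComponent",
                                        "12345678-1234-1234-1234-123456789abc", &pvIf);
    if (RT_SUCCESS(rc))
    {
        /* Cast pvIf to the interface structure agreed upon for that UUID and use it. */
    }
    else if (rc == VERR_SUPDRV_COMPONENT_NOT_FOUND)
        SUPR0Printf("example: no component by that name is registered\n");
}
#endif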
4911
4912
4913/**
4914 * Adds a memory object to the session.
4915 *
4916 * @returns IPRT status code.
4917 * @param pMem Memory tracking structure containing the
4918 * information to track.
4919 * @param pSession The session.
4920 */
4921static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
4922{
4923 PSUPDRVBUNDLE pBundle;
4924
4925 /*
4926 * Find free entry and record the allocation.
4927 */
4928 RTSpinlockAcquire(pSession->Spinlock);
4929 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4930 {
4931 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
4932 {
4933 unsigned i;
4934 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
4935 {
4936 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
4937 {
4938 pBundle->cUsed++;
4939 pBundle->aMem[i] = *pMem;
4940 RTSpinlockRelease(pSession->Spinlock);
4941 return VINF_SUCCESS;
4942 }
4943 }
4944 AssertFailed(); /* !!this can't be happening!!! */
4945 }
4946 }
4947 RTSpinlockRelease(pSession->Spinlock);
4948
4949 /*
4950 * Need to allocate a new bundle and
4951 * insert the allocation into its last entry.
4952 */
4953 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
4954 if (!pBundle)
4955 return VERR_NO_MEMORY;
4956
4957 /* take last entry. */
4958 pBundle->cUsed++;
4959 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
4960
4961 /* insert into list. */
4962 RTSpinlockAcquire(pSession->Spinlock);
4963 pBundle->pNext = pSession->Bundle.pNext;
4964 pSession->Bundle.pNext = pBundle;
4965 RTSpinlockRelease(pSession->Spinlock);
4966
4967 return VINF_SUCCESS;
4968}
4969
4970
4971/**
4972 * Releases a memory object referenced by pointer and type.
4973 *
4974 * @returns IPRT status code.
4975 * @param pSession Session data.
4976 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
4977 * @param eType Memory type.
4978 */
4979static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
4980{
4981 PSUPDRVBUNDLE pBundle;
4982
4983 /*
4984 * Validate input.
4985 */
4986 if (!uPtr)
4987 {
4988 Log(("Illegal address %p\n", (void *)uPtr));
4989 return VERR_INVALID_PARAMETER;
4990 }
4991
4992 /*
4993 * Search for the address.
4994 */
4995 RTSpinlockAcquire(pSession->Spinlock);
4996 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4997 {
4998 if (pBundle->cUsed > 0)
4999 {
5000 unsigned i;
5001 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
5002 {
5003 if ( pBundle->aMem[i].eType == eType
5004 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
5005 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
5006 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
5007 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
5008 )
5009 {
5010 /* Make a copy of it and release it outside the spinlock. */
5011 SUPDRVMEMREF Mem = pBundle->aMem[i];
5012 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
5013 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
5014 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
5015 RTSpinlockRelease(pSession->Spinlock);
5016
5017 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
5018 {
5019 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
5020 AssertRC(rc); /** @todo figure out how to handle this. */
5021 }
5022 if (Mem.MemObj != NIL_RTR0MEMOBJ)
5023 {
5024 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
5025 AssertRC(rc); /** @todo figure out how to handle this. */
5026 }
5027 return VINF_SUCCESS;
5028 }
5029 }
5030 }
5031 }
5032 RTSpinlockRelease(pSession->Spinlock);
5033 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
5034 return VERR_INVALID_PARAMETER;
5035}
5036
5037
5038/**
5039 * Opens an image. If it's the first time it's opened, the caller must upload
5040 * the bits using the supdrvIOCtl_LdrLoad() / SUP_IOCTL_LDR_LOAD function.
5041 *
5042 * This is the 1st step of the loading.
5043 *
5044 * @returns IPRT status code.
5045 * @param pDevExt Device globals.
5046 * @param pSession Session data.
5047 * @param pReq The open request.
5048 */
5049static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
5050{
5051 int rc;
5052 PSUPDRVLDRIMAGE pImage;
5053 void *pv;
5054 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
5055 SUPDRV_CHECK_SMAP_SETUP();
5056 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5057 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithEverything=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithEverything));
5058
5059 /*
5060 * Check if we got an instance of the image already.
5061 */
5062 supdrvLdrLock(pDevExt);
5063 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5064 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5065 {
5066 if ( pImage->szName[cchName] == '\0'
5067 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
5068 {
5069 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
5070 {
5071 /** @todo check cbImageBits and cbImageWithEverything here; if they differ,
5072 * that indicates that the images are different. */
5073 pImage->cUsage++;
5074 pReq->u.Out.pvImageBase = pImage->pvImage;
5075 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
5076 pReq->u.Out.fNativeLoader = pImage->fNative;
5077 supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5078 supdrvLdrUnlock(pDevExt);
5079 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5080 return VINF_SUCCESS;
5081 }
5082 supdrvLdrUnlock(pDevExt);
5083 Log(("supdrvIOCtl_LdrOpen: Too many existing references to '%s'!\n", pReq->u.In.szName));
5084 return VERR_TOO_MANY_REFERENCES;
5085 }
5086 }
5087 /* (not found - add it!) */
5088
5089 /* If the loader interface is locked down, make userland fail early */
5090 if (pDevExt->fLdrLockedDown)
5091 {
5092 supdrvLdrUnlock(pDevExt);
5093 Log(("supdrvIOCtl_LdrOpen: Not adding '%s' to image list, loader interface is locked down!\n", pReq->u.In.szName));
5094 return VERR_PERMISSION_DENIED;
5095 }
5096
5097 /*
5098 * Allocate memory.
5099 */
5100 Assert(cchName < sizeof(pImage->szName));
5101 pv = RTMemAlloc(sizeof(SUPDRVLDRIMAGE));
5102 if (!pv)
5103 {
5104 supdrvLdrUnlock(pDevExt);
5105 Log(("supdrvIOCtl_LdrOpen: RTMemAlloc() failed\n"));
5106 return /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_2;
5107 }
5108 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5109
5110 /*
5111 * Setup and link in the LDR stuff.
5112 */
5113 pImage = (PSUPDRVLDRIMAGE)pv;
5114 pImage->pvImage = NULL;
5115#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5116 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
5117#else
5118 pImage->pvImageAlloc = NULL;
5119#endif
5120 pImage->cbImageWithEverything = pReq->u.In.cbImageWithEverything;
5121 pImage->cbImageBits = pReq->u.In.cbImageBits;
5122 pImage->cSymbols = 0;
5123 pImage->paSymbols = NULL;
5124 pImage->pachStrTab = NULL;
5125 pImage->cbStrTab = 0;
5126 pImage->cSegments = 0;
5127 pImage->paSegments = NULL;
5128 pImage->pfnModuleInit = NULL;
5129 pImage->pfnModuleTerm = NULL;
5130 pImage->pfnServiceReqHandler = NULL;
5131 pImage->uState = SUP_IOCTL_LDR_OPEN;
5132 pImage->cUsage = 1;
5133 pImage->pDevExt = pDevExt;
5134 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC;
5135 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
5136
5137 /*
5138 * Try to load it using the native loader; if that isn't supported, fall back
5139 * on the older method.
5140 */
5141 pImage->fNative = true;
5142 rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
5143 if (rc == VERR_NOT_SUPPORTED)
5144 {
5145#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5146 rc = RTR0MemObjAllocPage(&pImage->hMemObjImage, pImage->cbImageBits, true /*fExecutable*/);
5147 if (RT_SUCCESS(rc))
5148 {
5149 pImage->pvImage = RTR0MemObjAddress(pImage->hMemObjImage);
5150 pImage->fNative = false;
5151 }
5152#else
5153 pImage->pvImageAlloc = RTMemExecAlloc(pImage->cbImageBits + 31);
5154 pImage->pvImage = RT_ALIGN_P(pImage->pvImageAlloc, 32);
5155 pImage->fNative = false;
5156 rc = pImage->pvImageAlloc ? VINF_SUCCESS : VERR_NO_EXEC_MEMORY;
5157#endif
5158 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5159 }
5160 if (RT_FAILURE(rc))
5161 {
5162 supdrvLdrUnlock(pDevExt);
5163 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
5164 RTMemFree(pImage);
5165 Log(("supdrvIOCtl_LdrOpen(%s): failed - %Rrc\n", pReq->u.In.szName, rc));
5166 return rc;
5167 }
5168 Assert(VALID_PTR(pImage->pvImage) || RT_FAILURE(rc));
5169
5170 /*
5171 * Link it.
5172 */
5173 pImage->pNext = pDevExt->pLdrImages;
5174 pDevExt->pLdrImages = pImage;
5175
5176 supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5177
5178 pReq->u.Out.pvImageBase = pImage->pvImage;
5179 pReq->u.Out.fNeedsLoading = true;
5180 pReq->u.Out.fNativeLoader = pImage->fNative;
5181 supdrvOSLdrNotifyOpened(pDevExt, pImage, pReq->u.In.szFilename);
5182
5183 supdrvLdrUnlock(pDevExt);
5184 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5185 return VINF_SUCCESS;
5186}
5187
5188
5189/**
5190 * Formats a load error message.
5191 *
5192 * @returns @a rc
5193 * @param rc Return code.
5194 * @param pReq The request.
5195 * @param pszFormat The error message format string.
5196 * @param ... Argument to the format string.
5197 */
5198int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
5199{
5200 va_list va;
5201 va_start(va, pszFormat);
5202 pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
5203 RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
5204 va_end(va);
5205 Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
5206 return rc;
5207}
5208
5209
5210/**
5211 * Worker that validates a pointer to an image entrypoint.
5212 *
5213 * Calls supdrvLdrLoadError on error.
5214 *
5215 * @returns IPRT status code.
5216 * @param pDevExt The device globals.
5217 * @param pImage The loader image.
5218 * @param pv The pointer into the image.
5219 * @param fMayBeNull Whether it may be NULL.
5220 * @param pszSymbol The entrypoint name or log name. If the symbol is
5221 * capitalized it signifies a specific symbol, otherwise it
5222 * is only used for logging.
5223 * @param pbImageBits The image bits prepared by ring-3.
5224 * @param pReq The request for passing to supdrvLdrLoadError.
5225 *
5226 * @note Will leave the loader lock on failure!
5227 */
5228static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv, bool fMayBeNull,
5229 const uint8_t *pbImageBits, const char *pszSymbol, PSUPLDRLOAD pReq)
5230{
5231 if (!fMayBeNull || pv)
5232 {
5233 uint32_t iSeg;
5234
5235 /* Must be within the image bits: */
5236 uintptr_t const uRva = (uintptr_t)pv - (uintptr_t)pImage->pvImage;
5237 if (uRva >= pImage->cbImageBits)
5238 {
5239 supdrvLdrUnlock(pDevExt);
5240 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5241 "Invalid entry point address %p given for %s: RVA %#zx, image size %#zx",
5242 pv, pszSymbol, uRva, pImage->cbImageBits);
5243 }
5244
5245 /* Must be in an executable segment: */
5246 for (iSeg = 0; iSeg < pImage->cSegments; iSeg++)
5247 if (uRva - pImage->paSegments[iSeg].off < (uintptr_t)pImage->paSegments[iSeg].cb)
5248 {
5249 if (pImage->paSegments[iSeg].fProt & SUPLDR_PROT_EXEC)
5250 break;
5251 supdrvLdrUnlock(pDevExt);
5252 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5253 "Bad entry point %p given for %s: not executable (seg #%u: %#RX32 LB %#RX32 prot %#x)",
5254 pv, pszSymbol, iSeg, pImage->paSegments[iSeg].off, pImage->paSegments[iSeg].cb,
5255 pImage->paSegments[iSeg].fProt);
5256 }
5257 if (iSeg >= pImage->cSegments)
5258 {
5259 supdrvLdrUnlock(pDevExt);
5260 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5261 "Bad entry point %p given for %s: no matching segment found (RVA %#zx)!",
5262 pv, pszSymbol, uRva);
5263 }
5264
5265 if (pImage->fNative)
5266 {
5267 /** @todo pass pReq along to the native code. */
5268 int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits, pszSymbol);
5269 if (RT_FAILURE(rc))
5270 {
5271 supdrvLdrUnlock(pDevExt);
5272 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5273 "Bad entry point address %p for %s: rc=%Rrc\n", pv, pszSymbol, rc);
5274 }
5275 }
5276 }
5277 return VINF_SUCCESS;
5278}
5279
5280
5281/**
5282 * Loads the image bits.
5283 *
5284 * This is the 2nd step of the loading.
5285 *
5286 * @returns IPRT status code.
5287 * @param pDevExt Device globals.
5288 * @param pSession Session data.
5289 * @param pReq The request.
5290 */
5291static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
5292{
5293 PSUPDRVLDRUSAGE pUsage;
5294 PSUPDRVLDRIMAGE pImage;
5295 int rc;
5296 SUPDRV_CHECK_SMAP_SETUP();
5297 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithEverything=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithEverything));
5298 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5299
5300 /*
5301 * Find the ldr image.
5302 */
5303 supdrvLdrLock(pDevExt);
5304 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5305
5306 pUsage = pSession->pLdrUsage;
5307 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5308 pUsage = pUsage->pNext;
5309 if (!pUsage)
5310 {
5311 supdrvLdrUnlock(pDevExt);
5312 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image not found");
5313 }
5314 pImage = pUsage->pImage;
5315
5316 /*
5317 * Validate input.
5318 */
5319 if ( pImage->cbImageWithEverything != pReq->u.In.cbImageWithEverything
5320 || pImage->cbImageBits != pReq->u.In.cbImageBits)
5321 {
5322 supdrvLdrUnlock(pDevExt);
5323 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %u(prep) != %u(load) or %u != %u",
5324 pImage->cbImageWithEverything, pReq->u.In.cbImageWithEverything, pImage->cbImageBits, pReq->u.In.cbImageBits);
5325 }
5326
5327 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
5328 {
5329 unsigned uState = pImage->uState;
5330 supdrvLdrUnlock(pDevExt);
5331 if (uState != SUP_IOCTL_LDR_LOAD)
5332 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
5333 pReq->u.Out.uErrorMagic = 0;
5334 return VERR_ALREADY_LOADED;
5335 }
5336
5337 /* If the loader interface is locked down, don't load new images */
5338 if (pDevExt->fLdrLockedDown)
5339 {
5340 supdrvLdrUnlock(pDevExt);
5341 return supdrvLdrLoadError(VERR_PERMISSION_DENIED, pReq, "Loader is locked down");
5342 }
5343
5344 /*
5345 * Copy the segments before we start using supdrvLdrValidatePointer for entrypoint validation.
5346 */
5347 pImage->cSegments = pReq->u.In.cSegments;
5348 {
5349 size_t cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
5350 pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSegments], cbSegments);
5351 if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
5352 pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
5353 else
5354 {
5355 supdrvLdrUnlock(pDevExt);
5356 return supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
5357 }
5358 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5359 }
5360
5361 /*
5362 * Validate entrypoints.
5363 */
5364 switch (pReq->u.In.eEPType)
5365 {
5366 case SUPLDRLOADEP_NOTHING:
5367 break;
5368
5369 case SUPLDRLOADEP_VMMR0:
5370 if (pReq->u.In.EP.VMMR0.pvVMMR0 != pImage->pvImage)
5371 {
5372 supdrvLdrUnlock(pDevExt);
5373 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid pvVMMR0 pointer: %p, expected %p", pReq->u.In.EP.VMMR0.pvVMMR0, pImage->pvImage);
5374 }
5375 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, pReq->u.In.abImage, "VMMR0EntryFast", pReq);
5376 if (RT_FAILURE(rc))
5377 return rc;
5378 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, pReq->u.In.abImage, "VMMR0EntryEx", pReq);
5379 if (RT_FAILURE(rc))
5380 return rc;
5381 break;
5382
5383 case SUPLDRLOADEP_SERVICE:
5384 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, pReq->u.In.abImage, "pfnServiceReq", pReq);
5385 if (RT_FAILURE(rc))
5386 return rc;
5387 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
5388 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
5389 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
5390 {
5391 supdrvLdrUnlock(pDevExt);
5392 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "apvReserved={%p,%p,%p} MBZ!",
5393 pReq->u.In.EP.Service.apvReserved[0], pReq->u.In.EP.Service.apvReserved[1],
5394 pReq->u.In.EP.Service.apvReserved[2]);
5395 }
5396 break;
5397
5398 default:
5399 supdrvLdrUnlock(pDevExt);
5400 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid eEPType=%d", pReq->u.In.eEPType);
5401 }
5402
5403 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, pReq->u.In.abImage, "ModuleInit", pReq);
5404 if (RT_FAILURE(rc))
5405 return rc;
5406 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, pReq->u.In.abImage, "ModuleTerm", pReq);
5407 if (RT_FAILURE(rc))
5408 return rc;
5409 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5410
5411 /*
5412 * Allocate and copy the tables if non-native.
5413 * (No need to do try/except as this is a buffered request.)
5414 */
5415 if (!pImage->fNative)
5416 {
5417 pImage->cbStrTab = pReq->u.In.cbStrTab;
5418 if (pImage->cbStrTab)
5419 {
5420 pImage->pachStrTab = (char *)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offStrTab], pImage->cbStrTab);
5421 if (!pImage->pachStrTab)
5422 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for string table: %#x", pImage->cbStrTab);
5423 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5424 }
5425
5426 pImage->cSymbols = pReq->u.In.cSymbols;
5427 if (RT_SUCCESS(rc) && pImage->cSymbols)
5428 {
5429 size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
5430 pImage->paSymbols = (PSUPLDRSYM)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSymbols], cbSymbols);
5431 if (!pImage->paSymbols)
5432 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
5433 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5434 }
5435 }
5436
5437 /*
5438 * Copy the bits and apply permissions / complete native loading.
5439 */
5440 if (RT_SUCCESS(rc))
5441 {
5442 pImage->uState = SUP_IOCTL_LDR_LOAD;
5443 pImage->pfnModuleInit = (PFNR0MODULEINIT)(uintptr_t)pReq->u.In.pfnModuleInit;
5444 pImage->pfnModuleTerm = (PFNR0MODULETERM)(uintptr_t)pReq->u.In.pfnModuleTerm;
5445
5446 if (pImage->fNative)
5447 rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.abImage, pReq);
5448 else
5449 {
5450#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5451 uint32_t i;
5452 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5453
5454 for (i = 0; i < pImage->cSegments; i++)
5455 {
5456 rc = RTR0MemObjProtect(pImage->hMemObjImage, pImage->paSegments[i].off, pImage->paSegments[i].cb,
5457 pImage->paSegments[i].fProt);
5458 if (RT_SUCCESS(rc))
5459 continue;
5460 if (rc == VERR_NOT_SUPPORTED)
5461 rc = VINF_SUCCESS;
5462 else
5463 rc = supdrvLdrLoadError(rc, pReq, "RTR0MemObjProtect failed on seg#%u %#RX32 LB %#RX32 fProt=%#x",
5464 i, pImage->paSegments[i].off, pImage->paSegments[i].cb, pImage->paSegments[i].fProt);
5465 break;
5466 }
5467#else
5468 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5469#endif
5470 Log(("vboxdrv: Loaded '%s' at %p\n", pImage->szName, pImage->pvImage));
5471 }
5472 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5473 }
5474
5475 /*
5476 * Update any entry points.
5477 */
5478 if (RT_SUCCESS(rc))
5479 {
5480 switch (pReq->u.In.eEPType)
5481 {
5482 default:
5483 case SUPLDRLOADEP_NOTHING:
5484 rc = VINF_SUCCESS;
5485 break;
5486 case SUPLDRLOADEP_VMMR0:
5487 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0,
5488 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
5489 break;
5490 case SUPLDRLOADEP_SERVICE:
5491 pImage->pfnServiceReqHandler = (PFNSUPR0SERVICEREQHANDLER)(uintptr_t)pReq->u.In.EP.Service.pfnServiceReq;
5492 rc = VINF_SUCCESS;
5493 break;
5494 }
5495 }
5496
5497 /*
5498 * On success call the module initialization.
5499 */
5500 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
5501 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
5502 {
5503 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
5504 pDevExt->pLdrInitImage = pImage;
5505 pDevExt->hLdrInitThread = RTThreadNativeSelf();
5506 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5507 rc = pImage->pfnModuleInit(pImage);
5508 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5509 pDevExt->pLdrInitImage = NULL;
5510 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
5511 if (RT_FAILURE(rc))
5512 {
5513 if (pDevExt->pvVMMR0 == pImage->pvImage)
5514 supdrvLdrUnsetVMMR0EPs(pDevExt);
5515 supdrvLdrLoadError(rc, pReq, "ModuleInit failed: %Rrc", rc);
5516 }
5517 }
5518 if (RT_SUCCESS(rc))
5519 {
5520 SUPR0Printf("vboxdrv: %RKv %s\n", pImage->pvImage, pImage->szName);
5521 pReq->u.Out.uErrorMagic = 0;
5522 pReq->u.Out.szError[0] = '\0';
5523 }
5524 else
5525 {
5526 /* Inform the tracing component in case ModuleInit registered TPs. */
5527 supdrvTracerModuleUnloading(pDevExt, pImage);
5528
5529 pImage->uState = SUP_IOCTL_LDR_OPEN;
5530 pImage->pfnModuleInit = NULL;
5531 pImage->pfnModuleTerm = NULL;
5532 pImage->pfnServiceReqHandler= NULL;
5533 pImage->cbStrTab = 0;
5534 RTMemFree(pImage->pachStrTab);
5535 pImage->pachStrTab = NULL;
5536 RTMemFree(pImage->paSymbols);
5537 pImage->paSymbols = NULL;
5538 pImage->cSymbols = 0;
5539 }
5540
5541 supdrvLdrUnlock(pDevExt);
5542 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5543 return rc;
5544}
5545
5546
5547/**
5548 * Frees a previously loaded (prep'ed) image.
5549 *
5550 * @returns IPRT status code.
5551 * @param pDevExt Device globals.
5552 * @param pSession Session data.
5553 * @param pReq The request.
5554 */
5555static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
5556{
5557 int rc;
5558 PSUPDRVLDRUSAGE pUsagePrev;
5559 PSUPDRVLDRUSAGE pUsage;
5560 PSUPDRVLDRIMAGE pImage;
5561 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
5562
5563 /*
5564 * Find the ldr image.
5565 */
5566 supdrvLdrLock(pDevExt);
5567 pUsagePrev = NULL;
5568 pUsage = pSession->pLdrUsage;
5569 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5570 {
5571 pUsagePrev = pUsage;
5572 pUsage = pUsage->pNext;
5573 }
5574 if (!pUsage)
5575 {
5576 supdrvLdrUnlock(pDevExt);
5577 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
5578 return VERR_INVALID_HANDLE;
5579 }
5580 if (pUsage->cRing3Usage == 0)
5581 {
5582 supdrvLdrUnlock(pDevExt);
5583 Log(("SUP_IOCTL_LDR_FREE: No ring-3 reference to the image!\n"));
5584 return VERR_CALLER_NO_REFERENCE;
5585 }
5586
5587 /*
5588 * Check if we can remove anything.
5589 */
5590 rc = VINF_SUCCESS;
5591 pImage = pUsage->pImage;
5592 if (pImage->cUsage <= 1 || pUsage->cRing3Usage + pUsage->cRing0Usage <= 1)
5593 {
5594 /*
5595 * Check if there are any objects with destructors in the image; if
5596 * so, leave it for the session cleanup routine so we get a chance to
5597 * clean things up in the right order and not leave them all dangling.
5598 */
5599 RTSpinlockAcquire(pDevExt->Spinlock);
5600 if (pImage->cUsage <= 1)
5601 {
5602 PSUPDRVOBJ pObj;
5603 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
5604 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5605 {
5606 rc = VERR_DANGLING_OBJECTS;
5607 break;
5608 }
5609 }
5610 else
5611 {
5612 PSUPDRVUSAGE pGenUsage;
5613 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
5614 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5615 {
5616 rc = VERR_DANGLING_OBJECTS;
5617 break;
5618 }
5619 }
5620 RTSpinlockRelease(pDevExt->Spinlock);
5621 if (rc == VINF_SUCCESS)
5622 {
5623 /* unlink it */
5624 if (pUsagePrev)
5625 pUsagePrev->pNext = pUsage->pNext;
5626 else
5627 pSession->pLdrUsage = pUsage->pNext;
5628
5629 /* free it */
5630 pUsage->pImage = NULL;
5631 pUsage->pNext = NULL;
5632 RTMemFree(pUsage);
5633
5634 /*
5635 * Dereference the image.
5636 */
5637 if (pImage->cUsage <= 1)
5638 supdrvLdrFree(pDevExt, pImage);
5639 else
5640 pImage->cUsage--;
5641 }
5642 else
5643 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
5644 }
5645 else
5646 {
5647 /*
5648 * Dereference both image and usage.
5649 */
5650 pImage->cUsage--;
5651 pUsage->cRing3Usage--;
5652 }
5653
5654 supdrvLdrUnlock(pDevExt);
5655 return rc;
5656}
5657
5658
5659/**
5660 * Lock down the image loader interface.
5661 *
5662 * @returns IPRT status code.
5663 * @param pDevExt Device globals.
5664 */
5665static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt)
5666{
5667 LogFlow(("supdrvIOCtl_LdrLockDown:\n"));
5668
5669 supdrvLdrLock(pDevExt);
5670 if (!pDevExt->fLdrLockedDown)
5671 {
5672 pDevExt->fLdrLockedDown = true;
5673 Log(("supdrvIOCtl_LdrLockDown: Image loader interface locked down\n"));
5674 }
5675 supdrvLdrUnlock(pDevExt);
5676
5677 return VINF_SUCCESS;
5678}
5679
5680
5681/**
5682 * Queries the address of a symbol in an open image.
5683 *
5684 * @returns IPRT status code.
5685 * @param pDevExt Device globals.
5686 * @param pSession Session data.
5687 * @param pReq The request buffer.
5688 */
5689static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
5690{
5691 PSUPDRVLDRIMAGE pImage;
5692 PSUPDRVLDRUSAGE pUsage;
5693 uint32_t i;
5694 PSUPLDRSYM paSyms;
5695 const char *pchStrings;
5696 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
5697 void *pvSymbol = NULL;
5698 int rc = VERR_SYMBOL_NOT_FOUND;
5699 Log3(("supdrvIOCtl_LdrQuerySymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
5700
5701 /*
5702 * Find the ldr image.
5703 */
5704 supdrvLdrLock(pDevExt);
5705 pUsage = pSession->pLdrUsage;
5706 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5707 pUsage = pUsage->pNext;
5708 if (!pUsage)
5709 {
5710 supdrvLdrUnlock(pDevExt);
5711 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
5712 return VERR_INVALID_HANDLE;
5713 }
5714 pImage = pUsage->pImage;
5715 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
5716 {
5717 unsigned uState = pImage->uState;
5718 supdrvLdrUnlock(pDevExt);
5719 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
5720 return VERR_ALREADY_LOADED;
5721 }
5722
5723 /*
5724 * Search the image exports / symbol strings.
5725 *
5726 * Note! The int32_t is for native loading on solaris where the data
5727 * and text segments are in very different places.
5728 */
5729 if (pImage->fNative)
5730 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pReq->u.In.szSymbol, cbSymbol - 1, &pvSymbol);
5731 else
5732 {
5733 pchStrings = pImage->pachStrTab;
5734 paSyms = pImage->paSymbols;
5735 for (i = 0; i < pImage->cSymbols; i++)
5736 {
5737 if ( paSyms[i].offName + cbSymbol <= pImage->cbStrTab
5738 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
5739 {
5740 pvSymbol = (uint8_t *)pImage->pvImage + (int32_t)paSyms[i].offSymbol;
5741 rc = VINF_SUCCESS;
5742 break;
5743 }
5744 }
5745 }
5746 supdrvLdrUnlock(pDevExt);
5747 pReq->u.Out.pvSymbol = pvSymbol;
5748 return rc;
5749}
5750
5751
5752/**
5753 * Gets the address of a symbol in an open image or the support driver.
5754 *
5755 * @returns VINF_SUCCESS on success.
5756 * @returns VBox error status on failure (VERR_SYMBOL_NOT_FOUND, VERR_MODULE_NOT_FOUND, etc.).
5757 * @param pDevExt Device globals.
5758 * @param pSession Session data.
5759 * @param pReq The request buffer.
5760 */
5761static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
5762{
5763 int rc = VINF_SUCCESS;
5764 const char *pszSymbol = pReq->u.In.pszSymbol;
5765 const char *pszModule = pReq->u.In.pszModule;
5766 size_t cbSymbol;
5767 char const *pszEnd;
5768 uint32_t i;
5769
5770 /*
5771 * Input validation.
5772 */
5773 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
5774 pszEnd = RTStrEnd(pszSymbol, 512);
5775 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5776 cbSymbol = pszEnd - pszSymbol + 1;
5777
5778 if (pszModule)
5779 {
5780 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
5781 pszEnd = RTStrEnd(pszModule, 64);
5782 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5783 }
5784 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
5785
5786
5787 if ( !pszModule
5788 || !strcmp(pszModule, "SupDrv"))
5789 {
5790 /*
5791 * Search the support driver export table.
5792 */
5793 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
5794 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
5795 {
5796 pReq->u.Out.pfnSymbol = (PFNRT)(uintptr_t)g_aFunctions[i].pfn;
5797 break;
5798 }
5799 }
5800 else
5801 {
5802 /*
5803 * Find the loader image.
5804 */
5805 PSUPDRVLDRIMAGE pImage;
5806
5807 supdrvLdrLock(pDevExt);
5808
5809 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5810 if (!strcmp(pImage->szName, pszModule))
5811 break;
5812 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
5813 {
5814 /*
5815 * Search the image exports / symbol strings.
5816 */
5817 if (pImage->fNative)
5818 {
5819 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pszSymbol, cbSymbol - 1, (void **)&pReq->u.Out.pfnSymbol);
5820 if (RT_SUCCESS(rc))
5821 rc = supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5822 }
5823 else
5824 {
5825 const char *pchStrings = pImage->pachStrTab;
5826 PCSUPLDRSYM paSyms = pImage->paSymbols;
5827 rc = VERR_SYMBOL_NOT_FOUND;
5828 for (i = 0; i < pImage->cSymbols; i++)
5829 {
5830 if ( paSyms[i].offName + cbSymbol <= pImage->cbStrTab
5831 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
5832 {
5833 /*
5834 * Found it! Calc the symbol address and add a reference to the module.
5835 */
5836 pReq->u.Out.pfnSymbol = (PFNRT)((uintptr_t)pImage->pvImage + (int32_t)paSyms[i].offSymbol);
5837 rc = supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5838 break;
5839 }
5840 }
5841 }
5842 }
5843 else
5844 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
5845
5846 supdrvLdrUnlock(pDevExt);
5847 }
5848 return rc;
5849}
5850
5851
5852/**
5853 * Looks up a symbol in g_aFunctions
5854 *
5855 * @returns VINF_SUCCESS on success, VERR_SYMBOL_NOT_FOUND on failure.
5856 * @param pszSymbol The symbol to look up.
5857 * @param puValue Where to return the value.
5858 */
5859int VBOXCALL supdrvLdrGetExportedSymbol(const char *pszSymbol, uintptr_t *puValue)
5860{
5861 uint32_t i;
5862 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
5863 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
5864 {
5865 *puValue = (uintptr_t)g_aFunctions[i].pfn;
5866 return VINF_SUCCESS;
5867 }
5868
5869 if (!strcmp(pszSymbol, "g_SUPGlobalInfoPage"))
5870 {
5871 *puValue = (uintptr_t)g_pSUPGlobalInfoPage;
5872 return VINF_SUCCESS;
5873 }
5874
5875 return VERR_SYMBOL_NOT_FOUND;
5876}
5877
5878
5879/**
5880 * Updates the VMMR0 entry point pointers.
5881 *
5882 * @returns IPRT status code.
5883 * @param pDevExt Device globals.
5884 * @param pvVMMR0 VMMR0 image handle.
5885 * @param pvVMMR0EntryFast VMMR0EntryFast address.
5886 * @param pvVMMR0EntryEx VMMR0EntryEx address.
5887 * @remark Caller must own the loader mutex.
5888 */
5889static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
5890{
5891 int rc = VINF_SUCCESS;
5892 LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryFast=%p\n", pvVMMR0, pvVMMR0EntryFast));
5893
5894
5895 /*
5896 * Check if not yet set.
5897 */
5898 if (!pDevExt->pvVMMR0)
5899 {
5900 pDevExt->pvVMMR0 = pvVMMR0;
5901 *(void **)&pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
5902 *(void **)&pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
5903 ASMCompilerBarrier(); /* the above isn't nice, so be careful... */
5904 }
5905 else
5906 {
5907 /*
5908 * Return failure or success depending on whether the values match or not.
5909 */
5910 if ( pDevExt->pvVMMR0 != pvVMMR0
5911 || (uintptr_t)pDevExt->pfnVMMR0EntryFast != (uintptr_t)pvVMMR0EntryFast
5912 || (uintptr_t)pDevExt->pfnVMMR0EntryEx != (uintptr_t)pvVMMR0EntryEx)
5913 {
5914 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
5915 rc = VERR_INVALID_PARAMETER;
5916 }
5917 }
5918 return rc;
5919}
5920
5921
5922/**
5923 * Unsets the VMMR0 entry point installed by supdrvLdrSetR0EP.
5924 *
5925 * @param pDevExt Device globals.
5926 */
5927static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
5928{
5929 pDevExt->pvVMMR0 = NULL;
5930 pDevExt->pfnVMMR0EntryFast = NULL;
5931 pDevExt->pfnVMMR0EntryEx = NULL;
5932}
5933
5934
5935/**
5936 * Adds a usage reference in the specified session of an image.
5937 *
5938 * Called while owning the loader semaphore.
5939 *
5940 * @returns VINF_SUCCESS on success and VERR_INTERNAL_ERROR_5 (in place of VERR_NO_MEMORY) on allocation failure.
5941 * @param pSession Session in question.
5942 * @param pImage Image which the session is using.
5943 * @param fRing3Usage Set if it's ring-3 usage, clear if ring-0.
5944 */
5945static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage)
5946{
5947 PSUPDRVLDRUSAGE pUsage;
5948 LogFlow(("supdrvLdrAddUsage: pImage=%p %d\n", pImage, fRing3Usage));
5949
5950 /*
5951 * Referenced it already?
5952 */
5953 pUsage = pSession->pLdrUsage;
5954 while (pUsage)
5955 {
5956 if (pUsage->pImage == pImage)
5957 {
5958 if (fRing3Usage)
5959 pUsage->cRing3Usage++;
5960 else
5961 pUsage->cRing0Usage++;
5962 return VINF_SUCCESS;
5963 }
5964 pUsage = pUsage->pNext;
5965 }
5966
5967 /*
5968 * Allocate new usage record.
5969 */
5970 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
5971 AssertReturn(pUsage, /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_5);
5972 pUsage->cRing3Usage = fRing3Usage ? 1 : 0;
5973 pUsage->cRing0Usage = fRing3Usage ? 0 : 1;
5974 pUsage->pImage = pImage;
5975 pUsage->pNext = pSession->pLdrUsage;
5976 pSession->pLdrUsage = pUsage;
5977 return VINF_SUCCESS;
5978}
5979
5980
5981/**
5982 * Frees a load image.
5983 *
5984 * @param pDevExt Pointer to device extension.
5985 * @param pImage Pointer to the image we're going to free.
5986 * This image must exist!
5987 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
5988 */
5989static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
5990{
5991 PSUPDRVLDRIMAGE pImagePrev;
5992 LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));
5993
5994 /*
5995 * Warn if we're releasing images while the image loader interface is
5996 * locked down -- we won't be able to reload them!
5997 */
5998 if (pDevExt->fLdrLockedDown)
5999 Log(("supdrvLdrFree: Warning: unloading '%s' image, while loader interface is locked down!\n", pImage->szName));
6000
6001 /* find it - arg. should've used doubly linked list. */
6002 Assert(pDevExt->pLdrImages);
6003 pImagePrev = NULL;
6004 if (pDevExt->pLdrImages != pImage)
6005 {
6006 pImagePrev = pDevExt->pLdrImages;
6007 while (pImagePrev->pNext != pImage)
6008 pImagePrev = pImagePrev->pNext;
6009 Assert(pImagePrev->pNext == pImage);
6010 }
6011
6012 /* unlink */
6013 if (pImagePrev)
6014 pImagePrev->pNext = pImage->pNext;
6015 else
6016 pDevExt->pLdrImages = pImage->pNext;
6017
6018 /* check if this is VMMR0.r0 and, if so, unset its entry point pointers. */
6019 if (pDevExt->pvVMMR0 == pImage->pvImage)
6020 supdrvLdrUnsetVMMR0EPs(pDevExt);
6021
6022 /* check for objects with destructors in this image. (Shouldn't happen.) */
6023 if (pDevExt->pObjs)
6024 {
6025 unsigned cObjs = 0;
6026 PSUPDRVOBJ pObj;
6027 RTSpinlockAcquire(pDevExt->Spinlock);
6028 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
6029 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
6030 {
6031 pObj->pfnDestructor = NULL;
6032 cObjs++;
6033 }
6034 RTSpinlockRelease(pDevExt->Spinlock);
6035 if (cObjs)
6036 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
6037 }
6038
6039 /* call termination function if fully loaded. */
6040 if ( pImage->pfnModuleTerm
6041 && pImage->uState == SUP_IOCTL_LDR_LOAD)
6042 {
6043 LogFlow(("supdrvLdrFree: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
6044 pDevExt->hLdrTermThread = RTThreadNativeSelf();
6045 pImage->pfnModuleTerm(pImage);
6046 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
6047 }
6048
6049 /* Inform the tracing component. */
6050 supdrvTracerModuleUnloading(pDevExt, pImage);
6051
6052 /* Do native unload if appropriate, then inform the native code about the
6053 unloading (mainly for the non-native loading case). */
6054 if (pImage->fNative)
6055 supdrvOSLdrUnload(pDevExt, pImage);
6056 supdrvOSLdrNotifyUnloaded(pDevExt, pImage);
6057
6058 /* free the image */
6059 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
6060 pImage->cUsage = 0;
6061 pImage->pDevExt = NULL;
6062 pImage->pNext = NULL;
6063 pImage->uState = SUP_IOCTL_LDR_FREE;
6064#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
6065 RTR0MemObjFree(pImage->hMemObjImage, true /*fMappings*/);
6066 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
6067#else
6068 RTMemExecFree(pImage->pvImageAlloc, pImage->cbImageBits + 31);
6069 pImage->pvImageAlloc = NULL;
6070#endif
6071 pImage->pvImage = NULL;
6072 RTMemFree(pImage->pachStrTab);
6073 pImage->pachStrTab = NULL;
6074 RTMemFree(pImage->paSymbols);
6075 pImage->paSymbols = NULL;
6076 RTMemFree(pImage->paSegments);
6077 pImage->paSegments = NULL;
6078 RTMemFree(pImage);
6079}
6080
6081
6082/**
6083 * Acquires the loader lock.
6084 *
6085 * @returns IPRT status code.
6086 * @param pDevExt The device extension.
6087 * @note Not recursive on all platforms yet.
6088 */
6089DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt)
6090{
6091#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6092 int rc = RTSemMutexRequest(pDevExt->mtxLdr, RT_INDEFINITE_WAIT);
6093#else
6094 int rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
6095#endif
6096 AssertRC(rc);
6097 return rc;
6098}
6099
6100
6101/**
6102 * Releases the loader lock.
6103 *
6104 * @returns IPRT status code.
6105 * @param pDevExt The device extension.
6106 */
6107DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt)
6108{
6109#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6110 return RTSemMutexRelease(pDevExt->mtxLdr);
6111#else
6112 return RTSemFastMutexRelease(pDevExt->mtxLdr);
6113#endif
6114}
6115
6116
6117/**
6118 * Acquires the global loader lock.
6119 *
6120 * This can be useful when accessing structures being modified by the ModuleInit
6121 * and ModuleTerm. Use SUPR0LdrUnlock() to unlock.
6122 *
6123 * @returns VBox status code.
6124 * @param pSession The session doing the locking.
6125 *
6126 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6127 */
6128SUPR0DECL(int) SUPR0LdrLock(PSUPDRVSESSION pSession)
6129{
6130 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6131 return supdrvLdrLock(pSession->pDevExt);
6132}
6133
6134
6135/**
6136 * Releases the global loader lock.
6137 *
6138 * Must correspond to a SUPR0LdrLock call!
6139 *
6140 * @returns VBox status code.
6141 * @param pSession The session doing the locking.
6142 *
6143 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6144 */
6145SUPR0DECL(int) SUPR0LdrUnlock(PSUPDRVSESSION pSession)
6146{
6147 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6148 return supdrvLdrUnlock(pSession->pDevExt);
6149}
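
/*
 * Usage sketch (illustration only, not compiled into the driver): bracketing
 * access to structures that a module's ModuleInit/ModuleTerm may also touch.
 * As noted above, this must not be called from within ModuleInit or ModuleTerm
 * themselves.
 */
#if 0
static int exampleWithLdrLock(PSUPDRVSESSION pSession)
{
    int rc = SUPR0LdrLock(pSession);
    if (RT_SUCCESS(rc))
    {
        /* ... inspect or update state shared with ModuleInit/ModuleTerm ... */
        rc = SUPR0LdrUnlock(pSession);
    }
    return rc;
}
#endif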
6150
6151
6152/**
6153 * For checking lock ownership in Assert() statements during ModuleInit and
6154 * ModuleTerm.
6155 *
6156 * @returns Whether we own the loader lock or not.
6157 * @param hMod The module in question.
6158 * @param fWantToHear For hosts where it is difficult to know who owns the
6159 * lock, this will be returned instead.
6160 */
6161SUPR0DECL(bool) SUPR0LdrIsLockOwnerByMod(void *hMod, bool fWantToHear)
6162{
6163 PSUPDRVDEVEXT pDevExt;
6164 RTNATIVETHREAD hOwner;
6165
6166 PSUPDRVLDRIMAGE pImage = (PSUPDRVLDRIMAGE)hMod;
6167 AssertPtrReturn(pImage, fWantToHear);
6168 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, fWantToHear);
6169
6170 pDevExt = pImage->pDevExt;
6171 AssertPtrReturn(pDevExt, fWantToHear);
6172
6173 /*
6174 * Expecting this to be called at init/term time only, so this will be sufficient.
6175 */
6176 hOwner = pDevExt->hLdrInitThread;
6177 if (hOwner == NIL_RTNATIVETHREAD)
6178 hOwner = pDevExt->hLdrTermThread;
6179 if (hOwner != NIL_RTNATIVETHREAD)
6180 return hOwner == RTThreadNativeSelf();
6181
6182 /*
6183 * Neither of the two semaphore variants currently offers very good
6184 * introspection, so we wing it for now. This API is VBOX_STRICT only.
6185 */
6186#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6187 return RTSemMutexIsOwned(pDevExt->mtxLdr) && fWantToHear;
6188#else
6189 return fWantToHear;
6190#endif
6191}
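
/*
 * Usage sketch (illustration only, not compiled into the driver): the intended
 * pattern is a strict-build assertion in a module's init/term code path, with
 * fWantToHear supplying the answer on hosts where ownership cannot be told.
 */
#if 0
static void exampleAssertLdrLockOwner(void *hMod /* module handle, e.g. as seen by its ModuleInit */)
{
    Assert(SUPR0LdrIsLockOwnerByMod(hMod, true /*fWantToHear*/));
}
#endif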
6192
6193
6194/**
6195 * Locates and retains the given module for ring-0 usage.
6196 *
6197 * @returns VBox status code.
6198 * @param pSession The session to associate the module reference with.
6199 * @param pszName The module name (no path).
6200 * @param phMod Where to return the module handle. The module is
6201 * referenced and a call to SUPR0LdrModRelease() is
6202 * necessary when done with it.
6203 */
6204SUPR0DECL(int) SUPR0LdrModByName(PSUPDRVSESSION pSession, const char *pszName, void **phMod)
6205{
6206 int rc;
6207 size_t cchName;
6208 PSUPDRVDEVEXT pDevExt;
6209
6210 /*
6211 * Validate input.
6212 */
6213 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
6214 *phMod = NULL;
6215 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6216 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
6217 cchName = strlen(pszName);
6218 AssertReturn(cchName > 0, VERR_EMPTY_STRING);
6219 AssertReturn(cchName < RT_SIZEOFMEMB(SUPDRVLDRIMAGE, szName), VERR_MODULE_NOT_FOUND);
6220
6221 /*
6222 * Do the lookup.
6223 */
6224 pDevExt = pSession->pDevExt;
6225 rc = supdrvLdrLock(pDevExt);
6226 if (RT_SUCCESS(rc))
6227 {
6228 PSUPDRVLDRIMAGE pImage;
6229 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
6230 {
6231 if ( pImage->szName[cchName] == '\0'
6232 && !memcmp(pImage->szName, pszName, cchName))
6233 {
6234 /*
6235 * Check the state and make sure we don't overflow the reference counter before returning it.
6236 */
6237 uint32_t uState = pImage->uState;
6238 if (uState == SUP_IOCTL_LDR_LOAD)
6239 {
6240 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
6241 {
6242 pImage->cUsage++;
6243 supdrvLdrAddUsage(pSession, pImage, false /*fRing3Usage*/);
6244 *phMod = pImage;
6245 supdrvLdrUnlock(pDevExt);
6246 return VINF_SUCCESS;
6247 }
6248 supdrvLdrUnlock(pDevExt);
6249 Log(("SUPR0LdrModByName: Too many existing references to '%s'!\n", pszName));
6250 return VERR_TOO_MANY_REFERENCES;
6251 }
6252 supdrvLdrUnlock(pDevExt);
6253 Log(("SUPR0LdrModByName: Module '%s' is not in the loaded state (%d)!\n", pszName, uState));
6254 return VERR_INVALID_STATE;
6255 }
6256 }
6257 supdrvLdrUnlock(pDevExt);
6258 Log(("SUPR0LdrModByName: Module '%s' not found!\n", pszName));
6259 rc = VERR_MODULE_NOT_FOUND;
6260 }
6261 return rc;
6262}
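
/*
 * Usage sketch (illustration only, not compiled into the driver): retaining a
 * loaded module by name, using it, and dropping the reference again.  The
 * module name is a placeholder; VERR_MODULE_NOT_FOUND is returned if nothing
 * by that name is currently loaded.
 */
#if 0
static int exampleUseModuleByName(PSUPDRVSESSION pSession)
{
    void *hMod = NULL;
    int rc = SUPR0LdrModByName(pSession, "ExampleModule.r0", &hMod);
    if (RT_SUCCESS(rc))
    {
        /* ... resolve symbols in / call into the module here ... */
        rc = SUPR0LdrModRelease(pSession, hMod);
    }
    return rc;
}
#endif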
6263
6264
6265/**
6266 * Retains a ring-0 module reference.
6267 *
6268 * Release reference when done by calling SUPR0LdrModRelease().
6269 *
6270 * @returns VBox status code.
6271 * @param pSession The session to reference the module in. A usage
6272 * record is added if needed.
6273 * @param hMod The handle to the module to retain.
6274 */
6275SUPR0DECL(int) SUPR0LdrModRetain(PSUPDRVSESSION pSession, void *hMod)
6276{
6277 PSUPDRVDEVEXT pDevExt;
6278 PSUPDRVLDRIMAGE pImage;
6279 int rc;
6280
6281 /* Validate input a little. */
6282 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6283 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6284 pImage = (PSUPDRVLDRIMAGE)hMod;
6285 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6286
6287 /* Reference the module: */
6288 pDevExt = pSession->pDevExt;
6289 rc = supdrvLdrLock(pDevExt);
6290 if (RT_SUCCESS(rc))
6291 {
6292 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6293 {
6294 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
6295 {
6296 rc = supdrvLdrAddUsage(pSession, pImage, false /*fRing3Usage*/);
6297 if (RT_SUCCESS(rc))
6298 {
6299 pImage->cUsage++;
6300 rc = VINF_SUCCESS;
6301 }
6302 }
6303 else
6304 AssertFailedStmt(rc = VERR_TOO_MANY_REFERENCES);
6305 }
6306 else
6307 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6308 supdrvLdrUnlock(pDevExt);
6309 }
6310 return rc;
6311}
6312
6313
6314/**
6315 * Releases a ring-0 module reference retained by SUPR0LdrModByName() or
6316 * SUPR0LdrModRetain().
6317 *
6318 * @returns VBox status code.
6319 * @param pSession The session that the module was retained in.
6320 * @param hMod The module handle. NULL is silently ignored.
6321 */
6322SUPR0DECL(int) SUPR0LdrModRelease(PSUPDRVSESSION pSession, void *hMod)
6323{
6324 PSUPDRVDEVEXT pDevExt;
6325 PSUPDRVLDRIMAGE pImage;
6326 int rc;
6327
6328 /*
6329 * Validate input.
6330 */
6331 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6332 if (!hMod)
6333 return VINF_SUCCESS;
6334 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6335 pImage = (PSUPDRVLDRIMAGE)hMod;
6336 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6337
6338 /*
6339 * Take the loader lock and revalidate the module:
6340 */
6341 pDevExt = pSession->pDevExt;
6342 rc = supdrvLdrLock(pDevExt);
6343 if (RT_SUCCESS(rc))
6344 {
6345 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6346 {
6347 /*
6348 * Find the usage record for the module:
6349 */
6350 PSUPDRVLDRUSAGE pPrevUsage = NULL;
6351 PSUPDRVLDRUSAGE pUsage;
6352
6353 rc = VERR_MODULE_NOT_FOUND;
6354 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6355 {
6356 if (pUsage->pImage == pImage)
6357 {
6358 /*
6359 * Drop a ring-0 reference:
6360 */
6361 Assert(pImage->cUsage >= pUsage->cRing0Usage + pUsage->cRing3Usage);
6362 if (pUsage->cRing0Usage > 0)
6363 {
6364 if (pImage->cUsage > 1)
6365 {
6366 pImage->cUsage -= 1;
6367 pUsage->cRing0Usage -= 1;
6368 rc = VINF_SUCCESS;
6369 }
6370 else
6371 {
6372 supdrvLdrFree(pDevExt, pImage);
6373
6374 if (pPrevUsage)
6375 pPrevUsage->pNext = pUsage->pNext;
6376 else
6377 pSession->pLdrUsage = pUsage->pNext;
6378 pUsage->pNext = NULL;
6379 pUsage->pImage = NULL;
6380 pUsage->cRing0Usage = 0;
6381 pUsage->cRing3Usage = 0;
6382 RTMemFree(pUsage);
6383
6384 rc = VINF_OBJECT_DESTROYED;
6385 }
6386 }
6387 else
6388 AssertFailedStmt(rc = VERR_CALLER_NO_REFERENCE);
6389 break;
6390 }
6391 pPrevUsage = pUsage;
6392 }
6393 }
6394 else
6395 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6396 supdrvLdrUnlock(pDevExt);
6397 }
6398 return rc;
6399
6400}
6401
6402
6403/**
6404 * Implements the service call request.
6405 *
6406 * @returns VBox status code.
6407 * @param pDevExt The device extension.
6408 * @param pSession The calling session.
6409 * @param pReq The request packet, valid.
6410 */
6411static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
6412{
6413#if !defined(RT_OS_WINDOWS) || defined(RT_ARCH_AMD64) || defined(DEBUG)
6414 int rc;
6415
6416 /*
6417 * Find the module first in the module referenced by the calling session.
6418 */
6419 rc = supdrvLdrLock(pDevExt);
6420 if (RT_SUCCESS(rc))
6421 {
6422 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
6423 PSUPDRVLDRUSAGE pUsage;
6424
6425 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6426 if ( pUsage->pImage->pfnServiceReqHandler
6427 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
6428 {
6429 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
6430 break;
6431 }
6432 supdrvLdrUnlock(pDevExt);
6433
6434 if (pfnServiceReqHandler)
6435 {
6436 /*
6437 * Call it.
6438 */
6439 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
6440 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
6441 else
6442 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
6443 }
6444 else
6445 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
6446 }
6447
6448 /* log it */
6449 if ( RT_FAILURE(rc)
6450 && rc != VERR_INTERRUPTED
6451 && rc != VERR_TIMEOUT)
6452 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6453 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6454 else
6455 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6456 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6457 return rc;
6458#else /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6459 RT_NOREF3(pDevExt, pSession, pReq);
6460 return VERR_NOT_IMPLEMENTED;
6461#endif /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6462}
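

/*
 * Illustrative sketch (for documentation only, not part of the driver): the
 * general shape of a service request handler that the dispatcher above would
 * invoke.  The parameter list mirrors the pfnServiceReqHandler call made in
 * supdrvIOCtl_CallServiceModule(); the handler name and the operation numbers
 * are hypothetical.
 *
 * @code
 *  static DECLCALLBACK(int) exampleServiceReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
 *                                                    uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
 *  {
 *      RT_NOREF(pSession, u64Arg);
 *      switch (uOperation)
 *      {
 *          case 0: // operation without a packet: pReqHdr is NULL, u64Arg carries the argument
 *              return VINF_SUCCESS;
 *
 *          case 1: // operation with a packet: validate pReqHdr before touching the body
 *              AssertPtrReturn(pReqHdr, VERR_INVALID_PARAMETER);
 *              return VINF_SUCCESS;
 *
 *          default:
 *              return VERR_NOT_SUPPORTED;
 *      }
 *  }
 * @endcode
 */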
6463
6464
6465/**
6466 * Implements the logger settings request.
6467 *
6468 * @returns VBox status code.
6469 * @param pReq The request.
6470 */
6471static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq)
6472{
6473 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
6474 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
6475 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
6476 PRTLOGGER pLogger = NULL;
6477 int rc;
6478
6479 /*
6480 * Some further validation.
6481 */
6482 switch (pReq->u.In.fWhat)
6483 {
6484 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6485 case SUPLOGGERSETTINGS_WHAT_CREATE:
6486 break;
6487
6488 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6489 if (*pszGroup || *pszFlags || *pszDest)
6490 return VERR_INVALID_PARAMETER;
6491 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
6492 return VERR_ACCESS_DENIED;
6493 break;
6494
6495 default:
6496 return VERR_INTERNAL_ERROR;
6497 }
6498
6499 /*
6500 * Get the logger.
6501 */
6502 switch (pReq->u.In.fWhich)
6503 {
6504 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6505 pLogger = RTLogGetDefaultInstance();
6506 break;
6507
6508 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6509 pLogger = RTLogRelGetDefaultInstance();
6510 break;
6511
6512 default:
6513 return VERR_INTERNAL_ERROR;
6514 }
6515
6516 /*
6517 * Do the job.
6518 */
6519 switch (pReq->u.In.fWhat)
6520 {
6521 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6522 if (pLogger)
6523 {
6524 rc = RTLogFlags(pLogger, pszFlags);
6525 if (RT_SUCCESS(rc))
6526 rc = RTLogGroupSettings(pLogger, pszGroup);
6527 NOREF(pszDest);
6528 }
6529 else
6530 rc = VERR_NOT_FOUND;
6531 break;
6532
6533 case SUPLOGGERSETTINGS_WHAT_CREATE:
6534 {
6535 if (pLogger)
6536 rc = VERR_ALREADY_EXISTS;
6537 else
6538 {
6539 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
6540
6541 rc = RTLogCreate(&pLogger,
6542 0 /* fFlags */,
6543 pszGroup,
6544 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
6545 ? "VBOX_LOG"
6546 : "VBOX_RELEASE_LOG",
6547 RT_ELEMENTS(s_apszGroups),
6548 s_apszGroups,
6549 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
6550 NULL);
6551 if (RT_SUCCESS(rc))
6552 {
6553 rc = RTLogFlags(pLogger, pszFlags);
6554 NOREF(pszDest);
6555 if (RT_SUCCESS(rc))
6556 {
6557 switch (pReq->u.In.fWhich)
6558 {
6559 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6560 pLogger = RTLogSetDefaultInstance(pLogger);
6561 break;
6562 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6563 pLogger = RTLogRelSetDefaultInstance(pLogger);
6564 break;
6565 }
6566 }
6567 RTLogDestroy(pLogger);
6568 }
6569 }
6570 break;
6571 }
6572
6573 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6574 switch (pReq->u.In.fWhich)
6575 {
6576 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6577 pLogger = RTLogSetDefaultInstance(NULL);
6578 break;
6579 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6580 pLogger = RTLogRelSetDefaultInstance(NULL);
6581 break;
6582 }
6583 rc = RTLogDestroy(pLogger);
6584 break;
6585
6586 default:
6587 {
6588 rc = VERR_INTERNAL_ERROR;
6589 break;
6590 }
6591 }
6592
6593 return rc;
6594}
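

/*
 * Illustrative sketch (for documentation only, not part of the driver): how a
 * caller could pack the three setting strings into the szStrings[] blob that
 * supdrvIOCtl_LoggerSettings() consumes above.  Only fields referenced by the
 * code above are used; the request header setup, buffer sizing and the actual
 * ioctl are omitted, and the string values are just examples.
 *
 * @code
 *  pReq->u.In.fWhich         = SUPLOGGERSETTINGS_WHICH_DEBUG;
 *  pReq->u.In.fWhat          = SUPLOGGERSETTINGS_WHAT_SETTINGS;
 *  pReq->u.In.offGroups      = 0;
 *  strcpy(&pReq->u.In.szStrings[pReq->u.In.offGroups], "all.e.l.f");
 *  pReq->u.In.offFlags       = pReq->u.In.offGroups + sizeof("all.e.l.f");
 *  strcpy(&pReq->u.In.szStrings[pReq->u.In.offFlags], "enabled");
 *  pReq->u.In.offDestination = pReq->u.In.offFlags + sizeof("enabled");
 *  strcpy(&pReq->u.In.szStrings[pReq->u.In.offDestination], ""); // destination not used for WHAT_SETTINGS
 * @endcode
 */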
6595
6596
6597/**
6598 * Implements the MSR prober operations.
6599 *
6600 * @returns VBox status code.
6601 * @param pDevExt The device extension.
6602 * @param pReq The request.
6603 */
6604static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq)
6605{
6606#ifdef SUPDRV_WITH_MSR_PROBER
6607 RTCPUID const idCpu = pReq->u.In.idCpu == UINT32_MAX ? NIL_RTCPUID : pReq->u.In.idCpu;
6608 int rc;
6609
6610 switch (pReq->u.In.enmOp)
6611 {
6612 case SUPMSRPROBEROP_READ:
6613 {
6614 uint64_t uValue;
6615 rc = supdrvOSMsrProberRead(pReq->u.In.uMsr, idCpu, &uValue);
6616 if (RT_SUCCESS(rc))
6617 {
6618 pReq->u.Out.uResults.Read.uValue = uValue;
6619 pReq->u.Out.uResults.Read.fGp = false;
6620 }
6621 else if (rc == VERR_ACCESS_DENIED)
6622 {
6623 pReq->u.Out.uResults.Read.uValue = 0;
6624 pReq->u.Out.uResults.Read.fGp = true;
6625 rc = VINF_SUCCESS;
6626 }
6627 break;
6628 }
6629
6630 case SUPMSRPROBEROP_WRITE:
6631 rc = supdrvOSMsrProberWrite(pReq->u.In.uMsr, idCpu, pReq->u.In.uArgs.Write.uToWrite);
6632 if (RT_SUCCESS(rc))
6633 pReq->u.Out.uResults.Write.fGp = false;
6634 else if (rc == VERR_ACCESS_DENIED)
6635 {
6636 pReq->u.Out.uResults.Write.fGp = true;
6637 rc = VINF_SUCCESS;
6638 }
6639 break;
6640
6641 case SUPMSRPROBEROP_MODIFY:
6642 case SUPMSRPROBEROP_MODIFY_FASTER:
6643 rc = supdrvOSMsrProberModify(idCpu, pReq);
6644 break;
6645
6646 default:
6647 return VERR_INVALID_FUNCTION;
6648 }
6649 RT_NOREF1(pDevExt);
6650 return rc;
6651#else
6652 RT_NOREF2(pDevExt, pReq);
6653 return VERR_NOT_IMPLEMENTED;
6654#endif
6655}
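

/*
 * Illustrative sketch (for documentation only, not part of the driver): filling
 * a SUPMSRPROBER read request and interpreting the result the way the handler
 * above produces it.  The MSR number (IA32_TSC, 0x10) is just an example, and
 * only fields referenced by the code above are used; note that a faulting
 * RDMSR is reported as VINF_SUCCESS with fGp set, not as an error status.
 *
 * @code
 *  pReq->u.In.enmOp = SUPMSRPROBEROP_READ;
 *  pReq->u.In.uMsr  = 0x10;          // IA32_TSC, for instance
 *  pReq->u.In.idCpu = UINT32_MAX;    // UINT32_MAX maps to NIL_RTCPUID, i.e. whichever CPU is convenient
 *  // ... submit the request to the driver ...
 *  if (pReq->u.Out.uResults.Read.fGp)
 *      LogRel(("MSR %#x #GP'ed\n", pReq->u.In.uMsr));
 *  else
 *      LogRel(("MSR %#x = %#RX64\n", pReq->u.In.uMsr, pReq->u.Out.uResults.Read.uValue));
 * @endcode
 */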
6656
6657
6658/**
6659 * Resumes the built-in keyboard on MacBook Air and Pro hosts.
6660 * If there is no built-in keyboard device, success is returned anyway.
6661 *
6662 * @returns 0 on Mac OS X hosts, VERR_NOT_IMPLEMENTED on all other platforms.
6663 */
6664static int supdrvIOCtl_ResumeSuspendedKbds(void)
6665{
6666#if defined(RT_OS_DARWIN)
6667 return supdrvDarwinResumeSuspendedKbds();
6668#else
6669 return VERR_NOT_IMPLEMENTED;
6670#endif
6671}
6672