VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp@86214

Last change on this file since 86214 was 85748, checked in by vboxsync, 4 years ago

SUPDrv: Added flag for detecting Virtualized VMSAVE/VMLOAD feature on AMD CPUs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 255.6 KB
1/* $Id: SUPDrv.cpp 85748 2020-08-13 10:20:38Z vboxsync $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_SUP_DRV
32#define SUPDRV_AGNOSTIC
33#include "SUPDrvInternal.h"
34#ifndef PAGE_SHIFT
35# include <iprt/param.h>
36#endif
37#include <iprt/asm.h>
38#include <iprt/asm-amd64-x86.h>
39#include <iprt/asm-math.h>
40#include <iprt/cpuset.h>
41#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
42# include <iprt/dbg.h>
43#endif
44#include <iprt/handletable.h>
45#include <iprt/mem.h>
46#include <iprt/mp.h>
47#include <iprt/power.h>
48#include <iprt/process.h>
49#include <iprt/semaphore.h>
50#include <iprt/spinlock.h>
51#include <iprt/thread.h>
52#include <iprt/uuid.h>
53#include <iprt/net.h>
54#include <iprt/crc.h>
55#include <iprt/string.h>
56#include <iprt/timer.h>
57#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
58# include <iprt/rand.h>
59# include <iprt/path.h>
60#endif
61#include <iprt/uint128.h>
62#include <iprt/x86.h>
63
64#include <VBox/param.h>
65#include <VBox/log.h>
66#include <VBox/err.h>
67#include <VBox/vmm/hm_vmx.h>
68
69#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
70# include "dtrace/SUPDrv.h"
71#else
72# define VBOXDRV_SESSION_CREATE(pvSession, fUser) do { } while (0)
73# define VBOXDRV_SESSION_CLOSE(pvSession) do { } while (0)
74# define VBOXDRV_IOCTL_ENTRY(pvSession, uIOCtl, pvReqHdr) do { } while (0)
75# define VBOXDRV_IOCTL_RETURN(pvSession, uIOCtl, pvReqHdr, rcRet, rcReq) do { } while (0)
76#endif
77
78/*
79 * Logging assignments:
80 * Log - useful stuff, like failures.
81 * LogFlow - program flow, except the really noisy bits.
82 * Log2 - Cleanup.
83 * Log3 - Loader flow noise.
84 * Log4 - Call VMMR0 flow noise.
85 * Log5 - Native yet-to-be-defined noise.
86 * Log6 - Native ioctl flow noise.
87 *
88 * Logging requires KBUILD_TYPE=debug and possibly changes to the logger
89 * instantiation in log-vbox.c(pp).
90 */
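/* Illustrative only, not part of this file: a hedged sketch of how the group
 * assignments above are exercised with the standard VBox logging macros. The
 * function names, variables and message texts here are hypothetical.
 *
 *     Log(("supdrvFoo: request failed, rc=%Rrc\n", rc));                    - failures, useful stuff
 *     LogFlow(("supdrvFoo: pDevExt=%p pSession=%p\n", pDevExt, pSession));  - program flow
 *     Log4(("supdrvFoo: calling VMMR0 entry point\n"));                     - VMMR0 call flow noise
 *     Log6(("supdrvFoo: native ioctl %#x\n", uIOCtl));                      - native ioctl flow noise
 */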
91
92
93/*********************************************************************************************************************************
94* Defined Constants And Macros *
95*********************************************************************************************************************************/
96/** @def VBOX_SVN_REV
97 * The makefile should define this if it can. */
98#ifndef VBOX_SVN_REV
99# define VBOX_SVN_REV 0
100#endif
101
102/** @def SUPDRV_CHECK_SMAP_SETUP
103 * SMAP check setup. */
104/** @def SUPDRV_CHECK_SMAP_CHECK
105 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
106 * will be logged and @a a_BadExpr is executed. */
107#if (defined(RT_OS_DARWIN) || defined(RT_OS_LINUX)) && !defined(VBOX_WITHOUT_EFLAGS_AC_SET_IN_VBOXDRV)
108# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
109# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) \
110 do { \
111 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
112 { \
113 RTCCUINTREG fEfl = ASMGetFlags(); \
114 if (RT_LIKELY(fEfl & X86_EFL_AC)) \
115 { /* likely */ } \
116 else \
117 { \
118 supdrvBadContext(a_pDevExt, "SUPDrv.cpp", __LINE__, "EFLAGS.AC is 0!"); \
119 a_BadExpr; \
120 } \
121 } \
122 } while (0)
123#else
124# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
125# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) NOREF(fKernelFeatures)
126#endif
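/* Illustrative only, not part of this file: a minimal sketch (with a
 * hypothetical handler) of how the two macros above are meant to be paired.
 * SUPDRV_CHECK_SMAP_SETUP() declares the fKernelFeatures local the check
 * macro relies on; SUPDRV_CHECK_SMAP_CHECK() then logs a bad-context event
 * and runs a_BadExpr whenever EFLAGS.AC is unexpectedly clear on a SMAP host. */
static int supdrvSampleSmapCheckSketch(PSUPDRVDEVEXT pDevExt)
{
    SUPDRV_CHECK_SMAP_SETUP();
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);                 /* check on entry, no recovery action */
    /* ... do the work that must run with EFLAGS.AC set ... */
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, return VERR_INTERNAL_ERROR); /* bail out if AC was lost */
    return VINF_SUCCESS;
}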
127
128
129/*********************************************************************************************************************************
130* Internal Functions *
131*********************************************************************************************************************************/
132static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
133static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
134static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
135static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
136static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
137static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
138static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
139static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt);
140static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
141static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
142static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
143static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
144static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage);
145static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
146DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt);
147DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt);
148static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
149static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq);
150static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq);
151static int supdrvIOCtl_ResumeSuspendedKbds(void);
152
153
154/*********************************************************************************************************************************
155* Global Variables *
156*********************************************************************************************************************************/
157/**
158 * Array of the R0 SUP API.
159 *
160 * While making changes to these exports, make sure to update the IOC
161 * minor version (SUPDRV_IOC_VERSION).
162 *
163 * @remarks This array is processed by SUPR0-def-pe.sed and SUPR0-def-lx.sed to
164 * produce definition files from which import libraries are generated.
165 * Take care when commenting things and especially with \#ifdef'ing.
166 */
167static SUPFUNC g_aFunctions[] =
168{
169/* SED: START */
170 /* name function */
171 /* Entries with absolute addresses determined at runtime; the fixup
172 code makes ugly ASSUMPTIONS about the order here: */
173 { "SUPR0AbsIs64bit", (void *)0 },
174 { "SUPR0Abs64bitKernelCS", (void *)0 },
175 { "SUPR0Abs64bitKernelSS", (void *)0 },
176 { "SUPR0Abs64bitKernelDS", (void *)0 },
177 { "SUPR0AbsKernelCS", (void *)0 },
178 { "SUPR0AbsKernelSS", (void *)0 },
179 { "SUPR0AbsKernelDS", (void *)0 },
180 { "SUPR0AbsKernelES", (void *)0 },
181 { "SUPR0AbsKernelFS", (void *)0 },
182 { "SUPR0AbsKernelGS", (void *)0 },
183 /* Normal function pointers: */
184 { "g_pSUPGlobalInfoPage", (void *)&g_pSUPGlobalInfoPage }, /* SED: DATA */
185 { "SUPGetGIP", (void *)(uintptr_t)SUPGetGIP },
186 { "SUPReadTscWithDelta", (void *)(uintptr_t)SUPReadTscWithDelta },
187 { "SUPGetTscDeltaSlow", (void *)(uintptr_t)SUPGetTscDeltaSlow },
188 { "SUPGetCpuHzFromGipForAsyncMode", (void *)(uintptr_t)SUPGetCpuHzFromGipForAsyncMode },
189 { "SUPIsTscFreqCompatible", (void *)(uintptr_t)SUPIsTscFreqCompatible },
190 { "SUPIsTscFreqCompatibleEx", (void *)(uintptr_t)SUPIsTscFreqCompatibleEx },
191 { "SUPR0BadContext", (void *)(uintptr_t)SUPR0BadContext },
192 { "SUPR0ComponentDeregisterFactory", (void *)(uintptr_t)SUPR0ComponentDeregisterFactory },
193 { "SUPR0ComponentQueryFactory", (void *)(uintptr_t)SUPR0ComponentQueryFactory },
194 { "SUPR0ComponentRegisterFactory", (void *)(uintptr_t)SUPR0ComponentRegisterFactory },
195 { "SUPR0ContAlloc", (void *)(uintptr_t)SUPR0ContAlloc },
196 { "SUPR0ContFree", (void *)(uintptr_t)SUPR0ContFree },
197 { "SUPR0ChangeCR4", (void *)(uintptr_t)SUPR0ChangeCR4 },
198 { "SUPR0EnableVTx", (void *)(uintptr_t)SUPR0EnableVTx },
199 { "SUPR0SuspendVTxOnCpu", (void *)(uintptr_t)SUPR0SuspendVTxOnCpu },
200 { "SUPR0ResumeVTxOnCpu", (void *)(uintptr_t)SUPR0ResumeVTxOnCpu },
201 { "SUPR0GetCurrentGdtRw", (void *)(uintptr_t)SUPR0GetCurrentGdtRw },
202 { "SUPR0GetKernelFeatures", (void *)(uintptr_t)SUPR0GetKernelFeatures },
203 { "SUPR0GetHwvirtMsrs", (void *)(uintptr_t)SUPR0GetHwvirtMsrs },
204 { "SUPR0GetPagingMode", (void *)(uintptr_t)SUPR0GetPagingMode },
205 { "SUPR0GetSvmUsability", (void *)(uintptr_t)SUPR0GetSvmUsability },
206 { "SUPR0GetVTSupport", (void *)(uintptr_t)SUPR0GetVTSupport },
207 { "SUPR0GetVmxUsability", (void *)(uintptr_t)SUPR0GetVmxUsability },
208 { "SUPR0LdrIsLockOwnerByMod", (void *)(uintptr_t)SUPR0LdrIsLockOwnerByMod },
209 { "SUPR0LdrLock", (void *)(uintptr_t)SUPR0LdrLock },
210 { "SUPR0LdrUnlock", (void *)(uintptr_t)SUPR0LdrUnlock },
211 { "SUPR0LdrModByName", (void *)(uintptr_t)SUPR0LdrModByName },
212 { "SUPR0LdrModRelease", (void *)(uintptr_t)SUPR0LdrModRelease },
213 { "SUPR0LdrModRetain", (void *)(uintptr_t)SUPR0LdrModRetain },
214 { "SUPR0LockMem", (void *)(uintptr_t)SUPR0LockMem },
215 { "SUPR0LowAlloc", (void *)(uintptr_t)SUPR0LowAlloc },
216 { "SUPR0LowFree", (void *)(uintptr_t)SUPR0LowFree },
217 { "SUPR0MemAlloc", (void *)(uintptr_t)SUPR0MemAlloc },
218 { "SUPR0MemFree", (void *)(uintptr_t)SUPR0MemFree },
219 { "SUPR0MemGetPhys", (void *)(uintptr_t)SUPR0MemGetPhys },
220 { "SUPR0ObjAddRef", (void *)(uintptr_t)SUPR0ObjAddRef },
221 { "SUPR0ObjAddRefEx", (void *)(uintptr_t)SUPR0ObjAddRefEx },
222 { "SUPR0ObjRegister", (void *)(uintptr_t)SUPR0ObjRegister },
223 { "SUPR0ObjRelease", (void *)(uintptr_t)SUPR0ObjRelease },
224 { "SUPR0ObjVerifyAccess", (void *)(uintptr_t)SUPR0ObjVerifyAccess },
225 { "SUPR0PageAllocEx", (void *)(uintptr_t)SUPR0PageAllocEx },
226 { "SUPR0PageFree", (void *)(uintptr_t)SUPR0PageFree },
227 { "SUPR0PageMapKernel", (void *)(uintptr_t)SUPR0PageMapKernel },
228 { "SUPR0PageProtect", (void *)(uintptr_t)SUPR0PageProtect },
229#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
230 { "SUPR0HCPhysToVirt", (void *)(uintptr_t)SUPR0HCPhysToVirt }, /* only-linux, only solaris */
231#endif
232 { "SUPR0Printf", (void *)(uintptr_t)SUPR0Printf },
233 { "SUPR0GetSessionGVM", (void *)(uintptr_t)SUPR0GetSessionGVM },
234 { "SUPR0GetSessionVM", (void *)(uintptr_t)SUPR0GetSessionVM },
235 { "SUPR0SetSessionVM", (void *)(uintptr_t)SUPR0SetSessionVM },
236 { "SUPR0TscDeltaMeasureBySetIndex", (void *)(uintptr_t)SUPR0TscDeltaMeasureBySetIndex },
237 { "SUPR0TracerDeregisterDrv", (void *)(uintptr_t)SUPR0TracerDeregisterDrv },
238 { "SUPR0TracerDeregisterImpl", (void *)(uintptr_t)SUPR0TracerDeregisterImpl },
239 { "SUPR0TracerFireProbe", (void *)(uintptr_t)SUPR0TracerFireProbe },
240 { "SUPR0TracerRegisterDrv", (void *)(uintptr_t)SUPR0TracerRegisterDrv },
241 { "SUPR0TracerRegisterImpl", (void *)(uintptr_t)SUPR0TracerRegisterImpl },
242 { "SUPR0TracerRegisterModule", (void *)(uintptr_t)SUPR0TracerRegisterModule },
243 { "SUPR0TracerUmodProbeFire", (void *)(uintptr_t)SUPR0TracerUmodProbeFire },
244 { "SUPR0UnlockMem", (void *)(uintptr_t)SUPR0UnlockMem },
245#ifdef RT_OS_WINDOWS
246 { "SUPR0IoCtlSetupForHandle", (void *)(uintptr_t)SUPR0IoCtlSetupForHandle }, /* only-windows */
247 { "SUPR0IoCtlPerform", (void *)(uintptr_t)SUPR0IoCtlPerform }, /* only-windows */
248 { "SUPR0IoCtlCleanup", (void *)(uintptr_t)SUPR0IoCtlCleanup }, /* only-windows */
249#endif
250 { "SUPSemEventClose", (void *)(uintptr_t)SUPSemEventClose },
251 { "SUPSemEventCreate", (void *)(uintptr_t)SUPSemEventCreate },
252 { "SUPSemEventGetResolution", (void *)(uintptr_t)SUPSemEventGetResolution },
253 { "SUPSemEventMultiClose", (void *)(uintptr_t)SUPSemEventMultiClose },
254 { "SUPSemEventMultiCreate", (void *)(uintptr_t)SUPSemEventMultiCreate },
255 { "SUPSemEventMultiGetResolution", (void *)(uintptr_t)SUPSemEventMultiGetResolution },
256 { "SUPSemEventMultiReset", (void *)(uintptr_t)SUPSemEventMultiReset },
257 { "SUPSemEventMultiSignal", (void *)(uintptr_t)SUPSemEventMultiSignal },
258 { "SUPSemEventMultiWait", (void *)(uintptr_t)SUPSemEventMultiWait },
259 { "SUPSemEventMultiWaitNoResume", (void *)(uintptr_t)SUPSemEventMultiWaitNoResume },
260 { "SUPSemEventMultiWaitNsAbsIntr", (void *)(uintptr_t)SUPSemEventMultiWaitNsAbsIntr },
261 { "SUPSemEventMultiWaitNsRelIntr", (void *)(uintptr_t)SUPSemEventMultiWaitNsRelIntr },
262 { "SUPSemEventSignal", (void *)(uintptr_t)SUPSemEventSignal },
263 { "SUPSemEventWait", (void *)(uintptr_t)SUPSemEventWait },
264 { "SUPSemEventWaitNoResume", (void *)(uintptr_t)SUPSemEventWaitNoResume },
265 { "SUPSemEventWaitNsAbsIntr", (void *)(uintptr_t)SUPSemEventWaitNsAbsIntr },
266 { "SUPSemEventWaitNsRelIntr", (void *)(uintptr_t)SUPSemEventWaitNsRelIntr },
267
268 { "RTAssertAreQuiet", (void *)(uintptr_t)RTAssertAreQuiet },
269 { "RTAssertMayPanic", (void *)(uintptr_t)RTAssertMayPanic },
270 { "RTAssertMsg1", (void *)(uintptr_t)RTAssertMsg1 },
271 { "RTAssertMsg2AddV", (void *)(uintptr_t)RTAssertMsg2AddV },
272 { "RTAssertMsg2V", (void *)(uintptr_t)RTAssertMsg2V },
273 { "RTAssertSetMayPanic", (void *)(uintptr_t)RTAssertSetMayPanic },
274 { "RTAssertSetQuiet", (void *)(uintptr_t)RTAssertSetQuiet },
275 { "RTCrc32", (void *)(uintptr_t)RTCrc32 },
276 { "RTCrc32Finish", (void *)(uintptr_t)RTCrc32Finish },
277 { "RTCrc32Process", (void *)(uintptr_t)RTCrc32Process },
278 { "RTCrc32Start", (void *)(uintptr_t)RTCrc32Start },
279 { "RTErrConvertFromErrno", (void *)(uintptr_t)RTErrConvertFromErrno },
280 { "RTErrConvertToErrno", (void *)(uintptr_t)RTErrConvertToErrno },
281 { "RTHandleTableAllocWithCtx", (void *)(uintptr_t)RTHandleTableAllocWithCtx },
282 { "RTHandleTableCreate", (void *)(uintptr_t)RTHandleTableCreate },
283 { "RTHandleTableCreateEx", (void *)(uintptr_t)RTHandleTableCreateEx },
284 { "RTHandleTableDestroy", (void *)(uintptr_t)RTHandleTableDestroy },
285 { "RTHandleTableFreeWithCtx", (void *)(uintptr_t)RTHandleTableFreeWithCtx },
286 { "RTHandleTableLookupWithCtx", (void *)(uintptr_t)RTHandleTableLookupWithCtx },
287 { "RTLogDefaultInstance", (void *)(uintptr_t)RTLogDefaultInstance },
288 { "RTLogDefaultInstanceEx", (void *)(uintptr_t)RTLogDefaultInstanceEx },
289 { "RTLogGetDefaultInstance", (void *)(uintptr_t)RTLogGetDefaultInstance },
290 { "RTLogGetDefaultInstanceEx", (void *)(uintptr_t)RTLogGetDefaultInstanceEx },
291 { "SUPR0GetDefaultLogInstanceEx", (void *)(uintptr_t)SUPR0GetDefaultLogInstanceEx },
292 { "RTLogLoggerExV", (void *)(uintptr_t)RTLogLoggerExV },
293 { "RTLogPrintfV", (void *)(uintptr_t)RTLogPrintfV },
294 { "RTLogRelGetDefaultInstance", (void *)(uintptr_t)RTLogRelGetDefaultInstance },
295 { "RTLogRelGetDefaultInstanceEx", (void *)(uintptr_t)RTLogRelGetDefaultInstanceEx },
296 { "SUPR0GetDefaultLogRelInstanceEx", (void *)(uintptr_t)SUPR0GetDefaultLogRelInstanceEx },
297 { "RTLogSetDefaultInstanceThread", (void *)(uintptr_t)RTLogSetDefaultInstanceThread },
298 { "RTMemAllocExTag", (void *)(uintptr_t)RTMemAllocExTag },
299 { "RTMemAllocTag", (void *)(uintptr_t)RTMemAllocTag },
300 { "RTMemAllocVarTag", (void *)(uintptr_t)RTMemAllocVarTag },
301 { "RTMemAllocZTag", (void *)(uintptr_t)RTMemAllocZTag },
302 { "RTMemAllocZVarTag", (void *)(uintptr_t)RTMemAllocZVarTag },
303 { "RTMemDupExTag", (void *)(uintptr_t)RTMemDupExTag },
304 { "RTMemDupTag", (void *)(uintptr_t)RTMemDupTag },
305 { "RTMemFree", (void *)(uintptr_t)RTMemFree },
306 { "RTMemFreeEx", (void *)(uintptr_t)RTMemFreeEx },
307 { "RTMemReallocTag", (void *)(uintptr_t)RTMemReallocTag },
308 { "RTMpCpuId", (void *)(uintptr_t)RTMpCpuId },
309 { "RTMpCpuIdFromSetIndex", (void *)(uintptr_t)RTMpCpuIdFromSetIndex },
310 { "RTMpCpuIdToSetIndex", (void *)(uintptr_t)RTMpCpuIdToSetIndex },
311 { "RTMpCurSetIndex", (void *)(uintptr_t)RTMpCurSetIndex },
312 { "RTMpCurSetIndexAndId", (void *)(uintptr_t)RTMpCurSetIndexAndId },
313 { "RTMpGetArraySize", (void *)(uintptr_t)RTMpGetArraySize },
314 { "RTMpGetCount", (void *)(uintptr_t)RTMpGetCount },
315 { "RTMpGetMaxCpuId", (void *)(uintptr_t)RTMpGetMaxCpuId },
316 { "RTMpGetOnlineCount", (void *)(uintptr_t)RTMpGetOnlineCount },
317 { "RTMpGetOnlineSet", (void *)(uintptr_t)RTMpGetOnlineSet },
318 { "RTMpGetSet", (void *)(uintptr_t)RTMpGetSet },
319 { "RTMpIsCpuOnline", (void *)(uintptr_t)RTMpIsCpuOnline },
320 { "RTMpIsCpuPossible", (void *)(uintptr_t)RTMpIsCpuPossible },
321 { "RTMpIsCpuWorkPending", (void *)(uintptr_t)RTMpIsCpuWorkPending },
322 { "RTMpNotificationDeregister", (void *)(uintptr_t)RTMpNotificationDeregister },
323 { "RTMpNotificationRegister", (void *)(uintptr_t)RTMpNotificationRegister },
324 { "RTMpOnAll", (void *)(uintptr_t)RTMpOnAll },
325 { "RTMpOnOthers", (void *)(uintptr_t)RTMpOnOthers },
326 { "RTMpOnSpecific", (void *)(uintptr_t)RTMpOnSpecific },
327 { "RTMpPokeCpu", (void *)(uintptr_t)RTMpPokeCpu },
328 { "RTNetIPv4AddDataChecksum", (void *)(uintptr_t)RTNetIPv4AddDataChecksum },
329 { "RTNetIPv4AddTCPChecksum", (void *)(uintptr_t)RTNetIPv4AddTCPChecksum },
330 { "RTNetIPv4AddUDPChecksum", (void *)(uintptr_t)RTNetIPv4AddUDPChecksum },
331 { "RTNetIPv4FinalizeChecksum", (void *)(uintptr_t)RTNetIPv4FinalizeChecksum },
332 { "RTNetIPv4HdrChecksum", (void *)(uintptr_t)RTNetIPv4HdrChecksum },
333 { "RTNetIPv4IsDHCPValid", (void *)(uintptr_t)RTNetIPv4IsDHCPValid },
334 { "RTNetIPv4IsHdrValid", (void *)(uintptr_t)RTNetIPv4IsHdrValid },
335 { "RTNetIPv4IsTCPSizeValid", (void *)(uintptr_t)RTNetIPv4IsTCPSizeValid },
336 { "RTNetIPv4IsTCPValid", (void *)(uintptr_t)RTNetIPv4IsTCPValid },
337 { "RTNetIPv4IsUDPSizeValid", (void *)(uintptr_t)RTNetIPv4IsUDPSizeValid },
338 { "RTNetIPv4IsUDPValid", (void *)(uintptr_t)RTNetIPv4IsUDPValid },
339 { "RTNetIPv4PseudoChecksum", (void *)(uintptr_t)RTNetIPv4PseudoChecksum },
340 { "RTNetIPv4PseudoChecksumBits", (void *)(uintptr_t)RTNetIPv4PseudoChecksumBits },
341 { "RTNetIPv4TCPChecksum", (void *)(uintptr_t)RTNetIPv4TCPChecksum },
342 { "RTNetIPv4UDPChecksum", (void *)(uintptr_t)RTNetIPv4UDPChecksum },
343 { "RTNetIPv6PseudoChecksum", (void *)(uintptr_t)RTNetIPv6PseudoChecksum },
344 { "RTNetIPv6PseudoChecksumBits", (void *)(uintptr_t)RTNetIPv6PseudoChecksumBits },
345 { "RTNetIPv6PseudoChecksumEx", (void *)(uintptr_t)RTNetIPv6PseudoChecksumEx },
346 { "RTNetTCPChecksum", (void *)(uintptr_t)RTNetTCPChecksum },
347 { "RTNetUDPChecksum", (void *)(uintptr_t)RTNetUDPChecksum },
348 { "RTPowerNotificationDeregister", (void *)(uintptr_t)RTPowerNotificationDeregister },
349 { "RTPowerNotificationRegister", (void *)(uintptr_t)RTPowerNotificationRegister },
350 { "RTProcSelf", (void *)(uintptr_t)RTProcSelf },
351 { "RTR0AssertPanicSystem", (void *)(uintptr_t)RTR0AssertPanicSystem },
352#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
353 { "RTR0DbgKrnlInfoOpen", (void *)(uintptr_t)RTR0DbgKrnlInfoOpen }, /* only-darwin, only-solaris, only-windows */
354 { "RTR0DbgKrnlInfoQueryMember", (void *)(uintptr_t)RTR0DbgKrnlInfoQueryMember }, /* only-darwin, only-solaris, only-windows */
355# if defined(RT_OS_SOLARIS)
356 { "RTR0DbgKrnlInfoQuerySize", (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySize }, /* only-solaris */
357# endif
358 { "RTR0DbgKrnlInfoQuerySymbol", (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySymbol }, /* only-darwin, only-solaris, only-windows */
359 { "RTR0DbgKrnlInfoRelease", (void *)(uintptr_t)RTR0DbgKrnlInfoRelease }, /* only-darwin, only-solaris, only-windows */
360 { "RTR0DbgKrnlInfoRetain", (void *)(uintptr_t)RTR0DbgKrnlInfoRetain }, /* only-darwin, only-solaris, only-windows */
361#endif
362 { "RTR0MemAreKrnlAndUsrDifferent", (void *)(uintptr_t)RTR0MemAreKrnlAndUsrDifferent },
363 { "RTR0MemKernelIsValidAddr", (void *)(uintptr_t)RTR0MemKernelIsValidAddr },
364 { "RTR0MemKernelCopyFrom", (void *)(uintptr_t)RTR0MemKernelCopyFrom },
365 { "RTR0MemKernelCopyTo", (void *)(uintptr_t)RTR0MemKernelCopyTo },
366 { "RTR0MemObjAddress", (void *)(uintptr_t)RTR0MemObjAddress },
367 { "RTR0MemObjAddressR3", (void *)(uintptr_t)RTR0MemObjAddressR3 },
368 { "RTR0MemObjAllocContTag", (void *)(uintptr_t)RTR0MemObjAllocContTag },
369 { "RTR0MemObjAllocLowTag", (void *)(uintptr_t)RTR0MemObjAllocLowTag },
370 { "RTR0MemObjAllocPageTag", (void *)(uintptr_t)RTR0MemObjAllocPageTag },
371 { "RTR0MemObjAllocPhysExTag", (void *)(uintptr_t)RTR0MemObjAllocPhysExTag },
372 { "RTR0MemObjAllocPhysNCTag", (void *)(uintptr_t)RTR0MemObjAllocPhysNCTag },
373 { "RTR0MemObjAllocPhysTag", (void *)(uintptr_t)RTR0MemObjAllocPhysTag },
374 { "RTR0MemObjEnterPhysTag", (void *)(uintptr_t)RTR0MemObjEnterPhysTag },
375 { "RTR0MemObjFree", (void *)(uintptr_t)RTR0MemObjFree },
376 { "RTR0MemObjGetPagePhysAddr", (void *)(uintptr_t)RTR0MemObjGetPagePhysAddr },
377 { "RTR0MemObjIsMapping", (void *)(uintptr_t)RTR0MemObjIsMapping },
378 { "RTR0MemObjLockUserTag", (void *)(uintptr_t)RTR0MemObjLockUserTag },
379 { "RTR0MemObjMapKernelExTag", (void *)(uintptr_t)RTR0MemObjMapKernelExTag },
380 { "RTR0MemObjMapKernelTag", (void *)(uintptr_t)RTR0MemObjMapKernelTag },
381 { "RTR0MemObjMapUserTag", (void *)(uintptr_t)RTR0MemObjMapUserTag },
382 { "RTR0MemObjMapUserExTag", (void *)(uintptr_t)RTR0MemObjMapUserExTag },
383 { "RTR0MemObjProtect", (void *)(uintptr_t)RTR0MemObjProtect },
384 { "RTR0MemObjSize", (void *)(uintptr_t)RTR0MemObjSize },
385 { "RTR0MemUserCopyFrom", (void *)(uintptr_t)RTR0MemUserCopyFrom },
386 { "RTR0MemUserCopyTo", (void *)(uintptr_t)RTR0MemUserCopyTo },
387 { "RTR0MemUserIsValidAddr", (void *)(uintptr_t)RTR0MemUserIsValidAddr },
388 { "RTR0ProcHandleSelf", (void *)(uintptr_t)RTR0ProcHandleSelf },
389 { "RTSemEventCreate", (void *)(uintptr_t)RTSemEventCreate },
390 { "RTSemEventDestroy", (void *)(uintptr_t)RTSemEventDestroy },
391 { "RTSemEventGetResolution", (void *)(uintptr_t)RTSemEventGetResolution },
392 { "RTSemEventMultiCreate", (void *)(uintptr_t)RTSemEventMultiCreate },
393 { "RTSemEventMultiDestroy", (void *)(uintptr_t)RTSemEventMultiDestroy },
394 { "RTSemEventMultiGetResolution", (void *)(uintptr_t)RTSemEventMultiGetResolution },
395 { "RTSemEventMultiReset", (void *)(uintptr_t)RTSemEventMultiReset },
396 { "RTSemEventMultiSignal", (void *)(uintptr_t)RTSemEventMultiSignal },
397 { "RTSemEventMultiWait", (void *)(uintptr_t)RTSemEventMultiWait },
398 { "RTSemEventMultiWaitEx", (void *)(uintptr_t)RTSemEventMultiWaitEx },
399 { "RTSemEventMultiWaitExDebug", (void *)(uintptr_t)RTSemEventMultiWaitExDebug },
400 { "RTSemEventMultiWaitNoResume", (void *)(uintptr_t)RTSemEventMultiWaitNoResume },
401 { "RTSemEventSignal", (void *)(uintptr_t)RTSemEventSignal },
402 { "RTSemEventWait", (void *)(uintptr_t)RTSemEventWait },
403 { "RTSemEventWaitEx", (void *)(uintptr_t)RTSemEventWaitEx },
404 { "RTSemEventWaitExDebug", (void *)(uintptr_t)RTSemEventWaitExDebug },
405 { "RTSemEventWaitNoResume", (void *)(uintptr_t)RTSemEventWaitNoResume },
406 { "RTSemFastMutexCreate", (void *)(uintptr_t)RTSemFastMutexCreate },
407 { "RTSemFastMutexDestroy", (void *)(uintptr_t)RTSemFastMutexDestroy },
408 { "RTSemFastMutexRelease", (void *)(uintptr_t)RTSemFastMutexRelease },
409 { "RTSemFastMutexRequest", (void *)(uintptr_t)RTSemFastMutexRequest },
410 { "RTSemMutexCreate", (void *)(uintptr_t)RTSemMutexCreate },
411 { "RTSemMutexDestroy", (void *)(uintptr_t)RTSemMutexDestroy },
412 { "RTSemMutexRelease", (void *)(uintptr_t)RTSemMutexRelease },
413 { "RTSemMutexRequest", (void *)(uintptr_t)RTSemMutexRequest },
414 { "RTSemMutexRequestDebug", (void *)(uintptr_t)RTSemMutexRequestDebug },
415 { "RTSemMutexRequestNoResume", (void *)(uintptr_t)RTSemMutexRequestNoResume },
416 { "RTSemMutexRequestNoResumeDebug", (void *)(uintptr_t)RTSemMutexRequestNoResumeDebug },
417 { "RTSpinlockAcquire", (void *)(uintptr_t)RTSpinlockAcquire },
418 { "RTSpinlockCreate", (void *)(uintptr_t)RTSpinlockCreate },
419 { "RTSpinlockDestroy", (void *)(uintptr_t)RTSpinlockDestroy },
420 { "RTSpinlockRelease", (void *)(uintptr_t)RTSpinlockRelease },
421 { "RTStrCopy", (void *)(uintptr_t)RTStrCopy },
422 { "RTStrDupTag", (void *)(uintptr_t)RTStrDupTag },
423 { "RTStrFormat", (void *)(uintptr_t)RTStrFormat },
424 { "RTStrFormatNumber", (void *)(uintptr_t)RTStrFormatNumber },
425 { "RTStrFormatTypeDeregister", (void *)(uintptr_t)RTStrFormatTypeDeregister },
426 { "RTStrFormatTypeRegister", (void *)(uintptr_t)RTStrFormatTypeRegister },
427 { "RTStrFormatTypeSetUser", (void *)(uintptr_t)RTStrFormatTypeSetUser },
428 { "RTStrFormatV", (void *)(uintptr_t)RTStrFormatV },
429 { "RTStrFree", (void *)(uintptr_t)RTStrFree },
430 { "RTStrNCmp", (void *)(uintptr_t)RTStrNCmp },
431 { "RTStrPrintf", (void *)(uintptr_t)RTStrPrintf },
432 { "RTStrPrintfEx", (void *)(uintptr_t)RTStrPrintfEx },
433 { "RTStrPrintfExV", (void *)(uintptr_t)RTStrPrintfExV },
434 { "RTStrPrintfV", (void *)(uintptr_t)RTStrPrintfV },
435 { "RTThreadCreate", (void *)(uintptr_t)RTThreadCreate },
436 { "RTThreadCtxHookIsEnabled", (void *)(uintptr_t)RTThreadCtxHookIsEnabled },
437 { "RTThreadCtxHookCreate", (void *)(uintptr_t)RTThreadCtxHookCreate },
438 { "RTThreadCtxHookDestroy", (void *)(uintptr_t)RTThreadCtxHookDestroy },
439 { "RTThreadCtxHookDisable", (void *)(uintptr_t)RTThreadCtxHookDisable },
440 { "RTThreadCtxHookEnable", (void *)(uintptr_t)RTThreadCtxHookEnable },
441 { "RTThreadGetName", (void *)(uintptr_t)RTThreadGetName },
442 { "RTThreadGetNative", (void *)(uintptr_t)RTThreadGetNative },
443 { "RTThreadGetType", (void *)(uintptr_t)RTThreadGetType },
444 { "RTThreadIsInInterrupt", (void *)(uintptr_t)RTThreadIsInInterrupt },
445 { "RTThreadNativeSelf", (void *)(uintptr_t)RTThreadNativeSelf },
446 { "RTThreadPreemptDisable", (void *)(uintptr_t)RTThreadPreemptDisable },
447 { "RTThreadPreemptIsEnabled", (void *)(uintptr_t)RTThreadPreemptIsEnabled },
448 { "RTThreadPreemptIsPending", (void *)(uintptr_t)RTThreadPreemptIsPending },
449 { "RTThreadPreemptIsPendingTrusty", (void *)(uintptr_t)RTThreadPreemptIsPendingTrusty },
450 { "RTThreadPreemptIsPossible", (void *)(uintptr_t)RTThreadPreemptIsPossible },
451 { "RTThreadPreemptRestore", (void *)(uintptr_t)RTThreadPreemptRestore },
452 { "RTThreadSelf", (void *)(uintptr_t)RTThreadSelf },
453 { "RTThreadSelfName", (void *)(uintptr_t)RTThreadSelfName },
454 { "RTThreadSleep", (void *)(uintptr_t)RTThreadSleep },
455 { "RTThreadUserReset", (void *)(uintptr_t)RTThreadUserReset },
456 { "RTThreadUserSignal", (void *)(uintptr_t)RTThreadUserSignal },
457 { "RTThreadUserWait", (void *)(uintptr_t)RTThreadUserWait },
458 { "RTThreadUserWaitNoResume", (void *)(uintptr_t)RTThreadUserWaitNoResume },
459 { "RTThreadWait", (void *)(uintptr_t)RTThreadWait },
460 { "RTThreadWaitNoResume", (void *)(uintptr_t)RTThreadWaitNoResume },
461 { "RTThreadYield", (void *)(uintptr_t)RTThreadYield },
462 { "RTTimeNow", (void *)(uintptr_t)RTTimeNow },
463 { "RTTimerCanDoHighResolution", (void *)(uintptr_t)RTTimerCanDoHighResolution },
464 { "RTTimerChangeInterval", (void *)(uintptr_t)RTTimerChangeInterval },
465 { "RTTimerCreate", (void *)(uintptr_t)RTTimerCreate },
466 { "RTTimerCreateEx", (void *)(uintptr_t)RTTimerCreateEx },
467 { "RTTimerDestroy", (void *)(uintptr_t)RTTimerDestroy },
468 { "RTTimerGetSystemGranularity", (void *)(uintptr_t)RTTimerGetSystemGranularity },
469 { "RTTimerReleaseSystemGranularity", (void *)(uintptr_t)RTTimerReleaseSystemGranularity },
470 { "RTTimerRequestSystemGranularity", (void *)(uintptr_t)RTTimerRequestSystemGranularity },
471 { "RTTimerStart", (void *)(uintptr_t)RTTimerStart },
472 { "RTTimerStop", (void *)(uintptr_t)RTTimerStop },
473 { "RTTimeSystemMilliTS", (void *)(uintptr_t)RTTimeSystemMilliTS },
474 { "RTTimeSystemNanoTS", (void *)(uintptr_t)RTTimeSystemNanoTS },
475 { "RTUuidCompare", (void *)(uintptr_t)RTUuidCompare },
476 { "RTUuidCompareStr", (void *)(uintptr_t)RTUuidCompareStr },
477 { "RTUuidFromStr", (void *)(uintptr_t)RTUuidFromStr },
478/* SED: END */
479};
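/* Illustrative only, not part of this file: a hedged sketch of how a ring-0
 * client module, linked against the import library generated from the table
 * above, calls the exports directly by name. The function and tag names are
 * hypothetical; RTMemAllocZTag, SUPR0Printf and RTMemFree are entries from
 * the table. */
static int vboxSampleClientSketch(void)
{
    void *pvScratch = RTMemAllocZTag(_4K, "sample-scratch"); /* resolved via the generated import library */
    if (!pvScratch)
        return VERR_NO_MEMORY;
    SUPR0Printf("sample: scratch buffer at %p\n", pvScratch);
    RTMemFree(pvScratch);
    return VINF_SUCCESS;
}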
480
481#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
482/**
483 * Drag in the rest of IPRT since we share it with the
484 * rest of the kernel modules on darwin, solaris and freebsd.
485 */
486struct CLANG11WERIDNESS { PFNRT pfn; } g_apfnVBoxDrvIPRTDeps[] =
487{
488 /* VBoxNetAdp */
489 { (PFNRT)RTRandBytes },
490 /* VBoxUSB */
491 { (PFNRT)RTPathStripFilename },
492#if !defined(RT_OS_FREEBSD)
493 { (PFNRT)RTHandleTableAlloc },
494 { (PFNRT)RTStrPurgeEncoding },
495#endif
496 { NULL }
497};
498#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
499
500
501
502/**
503 * Initializes the device extension structure.
504 *
505 * @returns IPRT status code.
506 * @param pDevExt The device extension to initialize.
507 * @param cbSession The size of the session structure. The size of
508 * SUPDRVSESSION may be smaller when SUPDRV_AGNOSTIC is
509 * defined because we're skipping the OS specific members
510 * then.
511 */
512int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt, size_t cbSession)
513{
514 int rc;
515
516#ifdef SUPDRV_WITH_RELEASE_LOGGER
517 /*
518 * Create the release log.
519 */
520 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
521 PRTLOGGER pRelLogger;
522 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
523 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
524 if (RT_SUCCESS(rc))
525 RTLogRelSetDefaultInstance(pRelLogger);
526 /** @todo Add native hook for getting logger config parameters and setting
527 * them. On linux we should use the module parameter stuff... */
528#endif
529
530#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
531 /*
532 * Require SSE2 to be present.
533 */
534 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
535 {
536 SUPR0Printf("vboxdrv: Requires SSE2 (cpuid(1).EDX=%#x)\n", ASMCpuId_EDX(1));
537 return VERR_UNSUPPORTED_CPU;
538 }
539#endif
540
541 /*
542 * Initialize it.
543 */
544 memset(pDevExt, 0, sizeof(*pDevExt)); /* Does not wipe OS specific tail section of the structure. */
545 pDevExt->Spinlock = NIL_RTSPINLOCK;
546 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
547 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
548#ifdef SUPDRV_USE_MUTEX_FOR_LDR
549 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
550#else
551 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
552#endif
553#ifdef SUPDRV_USE_MUTEX_FOR_GIP
554 pDevExt->mtxGip = NIL_RTSEMMUTEX;
555 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
556#else
557 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
558 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
559#endif
560
561 rc = RTSpinlockCreate(&pDevExt->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvDevExt");
562 if (RT_SUCCESS(rc))
563 rc = RTSpinlockCreate(&pDevExt->hGipSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvGip");
564 if (RT_SUCCESS(rc))
565 rc = RTSpinlockCreate(&pDevExt->hSessionHashTabSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvSession");
566
567 if (RT_SUCCESS(rc))
568#ifdef SUPDRV_USE_MUTEX_FOR_LDR
569 rc = RTSemMutexCreate(&pDevExt->mtxLdr);
570#else
571 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
572#endif
573 if (RT_SUCCESS(rc))
574#ifdef SUPDRV_USE_MUTEX_FOR_GIP
575 rc = RTSemMutexCreate(&pDevExt->mtxTscDelta);
576#else
577 rc = RTSemFastMutexCreate(&pDevExt->mtxTscDelta);
578#endif
579 if (RT_SUCCESS(rc))
580 {
581 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
582 if (RT_SUCCESS(rc))
583 {
584#ifdef SUPDRV_USE_MUTEX_FOR_GIP
585 rc = RTSemMutexCreate(&pDevExt->mtxGip);
586#else
587 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
588#endif
589 if (RT_SUCCESS(rc))
590 {
591 rc = supdrvGipCreate(pDevExt);
592 if (RT_SUCCESS(rc))
593 {
594 rc = supdrvTracerInit(pDevExt);
595 if (RT_SUCCESS(rc))
596 {
597 pDevExt->pLdrInitImage = NULL;
598 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
599 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
600 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
601 pDevExt->cbSession = (uint32_t)cbSession;
602
603 /*
604 * Fixup the absolute symbols.
605 *
606 * Because of the table indexing assumptions we'll have a little #ifdef orgy
607 * here rather than distributing this to OS specific files. At least for now.
608 */
609#ifdef RT_OS_DARWIN
610# if ARCH_BITS == 32
611 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
612 {
613 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
614 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
615 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
616 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
617 }
618 else
619 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
620 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
621 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
622 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
623 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
624 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
625 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
626# else /* 64-bit darwin: */
627 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
628 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
629 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
630 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
631 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
632 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
633 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
634 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
635 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
636 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
637
638# endif
639#else /* !RT_OS_DARWIN */
640# if ARCH_BITS == 64
641 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
642 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
643 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
644 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
645# else
646 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
647# endif
648 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
649 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
650 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
651 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
652 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
653 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
654#endif /* !RT_OS_DARWIN */
655 return VINF_SUCCESS;
656 }
657
658 supdrvGipDestroy(pDevExt);
659 }
660
661#ifdef SUPDRV_USE_MUTEX_FOR_GIP
662 RTSemMutexDestroy(pDevExt->mtxGip);
663 pDevExt->mtxGip = NIL_RTSEMMUTEX;
664#else
665 RTSemFastMutexDestroy(pDevExt->mtxGip);
666 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
667#endif
668 }
669 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
670 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
671 }
672 }
673
674#ifdef SUPDRV_USE_MUTEX_FOR_GIP
675 RTSemMutexDestroy(pDevExt->mtxTscDelta);
676 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
677#else
678 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
679 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
680#endif
681#ifdef SUPDRV_USE_MUTEX_FOR_LDR
682 RTSemMutexDestroy(pDevExt->mtxLdr);
683 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
684#else
685 RTSemFastMutexDestroy(pDevExt->mtxLdr);
686 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
687#endif
688 RTSpinlockDestroy(pDevExt->Spinlock);
689 pDevExt->Spinlock = NIL_RTSPINLOCK;
690 RTSpinlockDestroy(pDevExt->hGipSpinlock);
691 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
692 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
693 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
694
695#ifdef SUPDRV_WITH_RELEASE_LOGGER
696 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
697 RTLogDestroy(RTLogSetDefaultInstance(NULL));
698#endif
699
700 return rc;
701}
702
703
704/**
705 * Delete the device extension (e.g. cleanup members).
706 *
707 * @param pDevExt The device extension to delete.
708 */
709void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
710{
711 PSUPDRVOBJ pObj;
712 PSUPDRVUSAGE pUsage;
713
714 /*
715 * Kill mutexes and spinlocks.
716 */
717#ifdef SUPDRV_USE_MUTEX_FOR_GIP
718 RTSemMutexDestroy(pDevExt->mtxGip);
719 pDevExt->mtxGip = NIL_RTSEMMUTEX;
720 RTSemMutexDestroy(pDevExt->mtxTscDelta);
721 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
722#else
723 RTSemFastMutexDestroy(pDevExt->mtxGip);
724 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
725 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
726 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
727#endif
728#ifdef SUPDRV_USE_MUTEX_FOR_LDR
729 RTSemMutexDestroy(pDevExt->mtxLdr);
730 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
731#else
732 RTSemFastMutexDestroy(pDevExt->mtxLdr);
733 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
734#endif
735 RTSpinlockDestroy(pDevExt->Spinlock);
736 pDevExt->Spinlock = NIL_RTSPINLOCK;
737 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
738 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
739 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
740 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
741
742 /*
743 * Free lists.
744 */
745 /* objects. */
746 pObj = pDevExt->pObjs;
747 Assert(!pObj); /* (can trigger on forced unloads) */
748 pDevExt->pObjs = NULL;
749 while (pObj)
750 {
751 void *pvFree = pObj;
752 pObj = pObj->pNext;
753 RTMemFree(pvFree);
754 }
755
756 /* usage records. */
757 pUsage = pDevExt->pUsageFree;
758 pDevExt->pUsageFree = NULL;
759 while (pUsage)
760 {
761 void *pvFree = pUsage;
762 pUsage = pUsage->pNext;
763 RTMemFree(pvFree);
764 }
765
766 /* kill the GIP. */
767 supdrvGipDestroy(pDevExt);
768 RTSpinlockDestroy(pDevExt->hGipSpinlock);
769 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
770
771 supdrvTracerTerm(pDevExt);
772
773#ifdef SUPDRV_WITH_RELEASE_LOGGER
774 /* destroy the loggers. */
775 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
776 RTLogDestroy(RTLogSetDefaultInstance(NULL));
777#endif
778}
779
780
781/**
782 * Create session.
783 *
784 * @returns IPRT status code.
785 * @param pDevExt Device extension.
786 * @param fUser Flag indicating whether this is a user or kernel
787 * session.
788 * @param fUnrestricted Unrestricted access (system) or restricted access
789 * (user)?
790 * @param ppSession Where to store the pointer to the session data.
791 */
792int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, bool fUnrestricted, PSUPDRVSESSION *ppSession)
793{
794 int rc;
795 PSUPDRVSESSION pSession;
796
797 if (!SUP_IS_DEVEXT_VALID(pDevExt))
798 return VERR_INVALID_PARAMETER;
799
800 /*
801 * Allocate memory for the session data.
802 */
803 pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(pDevExt->cbSession);
804 if (pSession)
805 {
806 /* Initialize session data. */
807 rc = RTSpinlockCreate(&pSession->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "SUPDrvSession");
808 if (!rc)
809 {
810 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
811 RTHANDLETABLE_FLAGS_LOCKED_IRQ_SAFE | RTHANDLETABLE_FLAGS_CONTEXT,
812 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
813 if (RT_SUCCESS(rc))
814 {
815 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
816 pSession->pDevExt = pDevExt;
817 pSession->u32Cookie = BIRD_INV;
818 pSession->fUnrestricted = fUnrestricted;
819 /*pSession->fInHashTable = false; */
820 pSession->cRefs = 1;
821 /*pSession->pCommonNextHash = NULL;
822 pSession->ppOsSessionPtr = NULL; */
823 if (fUser)
824 {
825 pSession->Process = RTProcSelf();
826 pSession->R0Process = RTR0ProcHandleSelf();
827 }
828 else
829 {
830 pSession->Process = NIL_RTPROCESS;
831 pSession->R0Process = NIL_RTR0PROCESS;
832 }
833 /*pSession->pLdrUsage = NULL;
834 pSession->pVM = NULL;
835 pSession->pUsage = NULL;
836 pSession->pGip = NULL;
837 pSession->fGipReferenced = false;
838 pSession->Bundle.cUsed = 0; */
839 pSession->Uid = NIL_RTUID;
840 pSession->Gid = NIL_RTGID;
841 /*pSession->uTracerData = 0;*/
842 pSession->hTracerCaller = NIL_RTNATIVETHREAD;
843 RTListInit(&pSession->TpProviders);
844 /*pSession->cTpProviders = 0;*/
845 /*pSession->cTpProbesFiring = 0;*/
846 RTListInit(&pSession->TpUmods);
847 /*RT_ZERO(pSession->apTpLookupTable);*/
848
849 VBOXDRV_SESSION_CREATE(pSession, fUser);
850 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
851 return VINF_SUCCESS;
852 }
853
854 RTSpinlockDestroy(pSession->Spinlock);
855 }
856 RTMemFree(pSession);
857 *ppSession = NULL;
858 Log(("Failed to create spinlock, rc=%d!\n", rc));
859 }
860 else
861 rc = VERR_NO_MEMORY;
862
863 return rc;
864}
865
866
867/**
868 * Cleans up the session in the context of the process to which it belongs; the
869 * caller will free the session and the session spinlock.
870 *
871 * This should normally occur when the session is closed or as the process
872 * exits. Careful reference counting in the OS specific code makes sure that
873 * there cannot be any races between process/handle cleanup callbacks and
874 * threads doing I/O control calls.
875 *
876 * @param pDevExt The device extension.
877 * @param pSession Session data.
878 */
879static void supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
880{
881 int rc;
882 PSUPDRVBUNDLE pBundle;
883 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
884
885 Assert(!pSession->fInHashTable);
886 Assert(!pSession->ppOsSessionPtr);
887 AssertLogRelMsg(pSession->R0Process == RTR0ProcHandleSelf() || pSession->R0Process == NIL_RTR0PROCESS,
888 ("R0Process=%p cur=%p; curpid=%u\n",
889 pSession->R0Process, RTR0ProcHandleSelf(), RTProcSelf()));
890
891 /*
892 * Remove logger instances related to this session.
893 */
894 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
895
896 /*
897 * Destroy the handle table.
898 */
899 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
900 AssertRC(rc);
901 pSession->hHandleTable = NIL_RTHANDLETABLE;
902
903 /*
904 * Release object references made in this session.
905 * In theory there should be no one racing us in this session.
906 */
907 Log2(("release objects - start\n"));
908 if (pSession->pUsage)
909 {
910 PSUPDRVUSAGE pUsage;
911 RTSpinlockAcquire(pDevExt->Spinlock);
912
913 while ((pUsage = pSession->pUsage) != NULL)
914 {
915 PSUPDRVOBJ pObj = pUsage->pObj;
916 pSession->pUsage = pUsage->pNext;
917
918 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
919 if (pUsage->cUsage < pObj->cUsage)
920 {
921 pObj->cUsage -= pUsage->cUsage;
922 RTSpinlockRelease(pDevExt->Spinlock);
923 }
924 else
925 {
926 /* Destroy the object and free the record. */
927 if (pDevExt->pObjs == pObj)
928 pDevExt->pObjs = pObj->pNext;
929 else
930 {
931 PSUPDRVOBJ pObjPrev;
932 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
933 if (pObjPrev->pNext == pObj)
934 {
935 pObjPrev->pNext = pObj->pNext;
936 break;
937 }
938 Assert(pObjPrev);
939 }
940 RTSpinlockRelease(pDevExt->Spinlock);
941
942 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
943 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
944 if (pObj->pfnDestructor)
945 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
946 RTMemFree(pObj);
947 }
948
949 /* free it and continue. */
950 RTMemFree(pUsage);
951
952 RTSpinlockAcquire(pDevExt->Spinlock);
953 }
954
955 RTSpinlockRelease(pDevExt->Spinlock);
956 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
957 }
958 Log2(("release objects - done\n"));
959
960 /*
961 * Make sure the associated VM pointers are NULL.
962 */
963 if (pSession->pSessionGVM || pSession->pSessionVM || pSession->pFastIoCtrlVM)
964 {
965 SUPR0Printf("supdrvCleanupSession: VM not disassociated! pSessionGVM=%p pSessionVM=%p pFastIoCtrlVM=%p\n",
966 pSession->pSessionGVM, pSession->pSessionVM, pSession->pFastIoCtrlVM);
967 pSession->pSessionGVM = NULL;
968 pSession->pSessionVM = NULL;
969 pSession->pFastIoCtrlVM = NULL;
970 }
971
972 /*
973 * Do tracer cleanups related to this session.
974 */
975 Log2(("release tracer stuff - start\n"));
976 supdrvTracerCleanupSession(pDevExt, pSession);
977 Log2(("release tracer stuff - end\n"));
978
979 /*
980 * Release memory allocated in the session.
981 *
982 * We do not serialize this as we assume that the application will
983 * not allocate memory while closing the file handle object.
984 */
985 Log2(("freeing memory:\n"));
986 pBundle = &pSession->Bundle;
987 while (pBundle)
988 {
989 PSUPDRVBUNDLE pToFree;
990 unsigned i;
991
992 /*
993 * Check and unlock all entries in the bundle.
994 */
995 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
996 {
997 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
998 {
999 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
1000 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
1001 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
1002 {
1003 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
1004 AssertRC(rc); /** @todo figure out how to handle this. */
1005 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
1006 }
1007 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
1008 AssertRC(rc); /** @todo figure out how to handle this. */
1009 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
1010 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
1011 }
1012 }
1013
1014 /*
1015 * Advance and free previous bundle.
1016 */
1017 pToFree = pBundle;
1018 pBundle = pBundle->pNext;
1019
1020 pToFree->pNext = NULL;
1021 pToFree->cUsed = 0;
1022 if (pToFree != &pSession->Bundle)
1023 RTMemFree(pToFree);
1024 }
1025 Log2(("freeing memory - done\n"));
1026
1027 /*
1028 * Deregister component factories.
1029 */
1030 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
1031 Log2(("deregistering component factories:\n"));
1032 if (pDevExt->pComponentFactoryHead)
1033 {
1034 PSUPDRVFACTORYREG pPrev = NULL;
1035 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
1036 while (pCur)
1037 {
1038 if (pCur->pSession == pSession)
1039 {
1040 /* unlink it */
1041 PSUPDRVFACTORYREG pNext = pCur->pNext;
1042 if (pPrev)
1043 pPrev->pNext = pNext;
1044 else
1045 pDevExt->pComponentFactoryHead = pNext;
1046
1047 /* free it */
1048 pCur->pNext = NULL;
1049 pCur->pSession = NULL;
1050 pCur->pFactory = NULL;
1051 RTMemFree(pCur);
1052
1053 /* next */
1054 pCur = pNext;
1055 }
1056 else
1057 {
1058 /* next */
1059 pPrev = pCur;
1060 pCur = pCur->pNext;
1061 }
1062 }
1063 }
1064 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
1065 Log2(("deregistering component factories - done\n"));
1066
1067 /*
1068 * Loaded images need to be dereferenced and possibly freed up.
1069 */
1070 supdrvLdrLock(pDevExt);
1071 Log2(("freeing images:\n"));
1072 if (pSession->pLdrUsage)
1073 {
1074 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
1075 pSession->pLdrUsage = NULL;
1076 while (pUsage)
1077 {
1078 void *pvFree = pUsage;
1079 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
1080 uint32_t cUsage = pUsage->cRing0Usage + pUsage->cRing3Usage;
1081 if (pImage->cUsage > cUsage)
1082 pImage->cUsage -= cUsage;
1083 else
1084 supdrvLdrFree(pDevExt, pImage);
1085 pUsage->pImage = NULL;
1086 pUsage = pUsage->pNext;
1087 RTMemFree(pvFree);
1088 }
1089 }
1090 supdrvLdrUnlock(pDevExt);
1091 Log2(("freeing images - done\n"));
1092
1093 /*
1094 * Unmap the GIP.
1095 */
1096 Log2(("unmapping GIP:\n"));
1097 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
1098 {
1099 SUPR0GipUnmap(pSession);
1100 pSession->fGipReferenced = 0;
1101 }
1102 Log2(("unmapping GIP - done\n"));
1103}
1104
1105
1106/**
1107 * Common code for freeing a session when the reference count reaches zero.
1108 *
1109 * @param pDevExt Device extension.
1110 * @param pSession Session data.
1111 * This data will be freed by this routine.
1112 */
1113static void supdrvDestroySession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1114{
1115 VBOXDRV_SESSION_CLOSE(pSession);
1116
1117 /*
1118 * Cleanup the session first.
1119 */
1120 supdrvCleanupSession(pDevExt, pSession);
1121 supdrvOSCleanupSession(pDevExt, pSession);
1122
1123 /*
1124 * Free the rest of the session stuff.
1125 */
1126 RTSpinlockDestroy(pSession->Spinlock);
1127 pSession->Spinlock = NIL_RTSPINLOCK;
1128 pSession->pDevExt = NULL;
1129 RTMemFree(pSession);
1130 LogFlow(("supdrvDestroySession: returns\n"));
1131}
1132
1133
1134/**
1135 * Inserts the session into the global hash table.
1136 *
1137 * @retval VINF_SUCCESS on success.
1138 * @retval VERR_WRONG_ORDER if the session was already inserted (asserted).
1139 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1140 * session (asserted).
1141 * @retval VERR_DUPLICATE if there is already a session for that pid, or VERR_RESOURCE_IN_USE if that session also shares the ring-0 process handle.
1142 *
1143 * @param pDevExt The device extension.
1144 * @param pSession The session.
1145 * @param ppOsSessionPtr Pointer to the OS session pointer, if any is
1146 * available and used. This will be set to point to the
1147 * session while under the protection of the session
1148 * hash table spinlock. It will also be kept in
1149 * PSUPDRVSESSION::ppOsSessionPtr for lookup and
1150 * cleanup use.
1151 * @param pvUser Argument for supdrvOSSessionHashTabInserted.
1152 */
1153int VBOXCALL supdrvSessionHashTabInsert(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVSESSION *ppOsSessionPtr,
1154 void *pvUser)
1155{
1156 PSUPDRVSESSION pCur;
1157 unsigned iHash;
1158
1159 /*
1160 * Validate input.
1161 */
1162 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1163 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1164
1165 /*
1166 * Calculate the hash table index and acquire the spinlock.
1167 */
1168 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1169
1170 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1171
1172 /*
1173 * If there is a collision, we need to carefully check whether we got a
1174 * duplicate. There can only be one open session per process.
1175 */
1176 pCur = pDevExt->apSessionHashTab[iHash];
1177 if (pCur)
1178 {
1179 while (pCur && pCur->Process != pSession->Process)
1180 pCur = pCur->pCommonNextHash;
1181
1182 if (pCur)
1183 {
1184 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1185 if (pCur == pSession)
1186 {
1187 Assert(pSession->fInHashTable);
1188 AssertFailed();
1189 return VERR_WRONG_ORDER;
1190 }
1191 Assert(!pSession->fInHashTable);
1192 if (pCur->R0Process == pSession->R0Process)
1193 return VERR_RESOURCE_IN_USE;
1194 return VERR_DUPLICATE;
1195 }
1196 }
1197 Assert(!pSession->fInHashTable);
1198 Assert(!pSession->ppOsSessionPtr);
1199
1200 /*
1201 * Insert it, doing a callout to the OS specific code in case it has
1202 * anything it wishes to do while we're holding the spinlock.
1203 */
1204 pSession->pCommonNextHash = pDevExt->apSessionHashTab[iHash];
1205 pDevExt->apSessionHashTab[iHash] = pSession;
1206 pSession->fInHashTable = true;
1207 ASMAtomicIncS32(&pDevExt->cSessions);
1208
1209 pSession->ppOsSessionPtr = ppOsSessionPtr;
1210 if (ppOsSessionPtr)
1211 ASMAtomicWritePtr(ppOsSessionPtr, pSession);
1212
1213 supdrvOSSessionHashTabInserted(pDevExt, pSession, pvUser);
1214
1215 /*
1216 * Retain a reference for the pointer in the session table.
1217 */
1218 ASMAtomicIncU32(&pSession->cRefs);
1219
1220 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1221 return VINF_SUCCESS;
1222}
1223
1224
1225/**
1226 * Removes the session from the global hash table.
1227 *
1228 * @retval VINF_SUCCESS on success.
1229 * @retval VERR_NOT_FOUND if the session was already removed (asserted).
1230 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1231 * session (asserted).
1232 *
1233 * @param pDevExt The device extension.
1234 * @param pSession The session. The caller is expected to have a reference
1235 * to this so it won't croak on us when we release the hash
1236 * table reference.
1237 * @param pvUser OS specific context value for the
1238 * supdrvOSSessionHashTabRemoved callback.
1239 */
1240int VBOXCALL supdrvSessionHashTabRemove(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, void *pvUser)
1241{
1242 PSUPDRVSESSION pCur;
1243 unsigned iHash;
1244 int32_t cRefs;
1245
1246 /*
1247 * Validate input.
1248 */
1249 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1250 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1251
1252 /*
1253 * Calculate the hash table index and acquire the spinlock.
1254 */
1255 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1256
1257 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1258
1259 /*
1260 * Unlink it.
1261 */
1262 pCur = pDevExt->apSessionHashTab[iHash];
1263 if (pCur == pSession)
1264 pDevExt->apSessionHashTab[iHash] = pSession->pCommonNextHash;
1265 else
1266 {
1267 PSUPDRVSESSION pPrev = pCur;
1268 while (pCur && pCur != pSession)
1269 {
1270 pPrev = pCur;
1271 pCur = pCur->pCommonNextHash;
1272 }
1273 if (pCur)
1274 pPrev->pCommonNextHash = pCur->pCommonNextHash;
1275 else
1276 {
1277 Assert(!pSession->fInHashTable);
1278 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1279 return VERR_NOT_FOUND;
1280 }
1281 }
1282
1283 pSession->pCommonNextHash = NULL;
1284 pSession->fInHashTable = false;
1285
1286 ASMAtomicDecS32(&pDevExt->cSessions);
1287
1288 /*
1289 * Clear OS specific session pointer if available and do the OS callback.
1290 */
1291 if (pSession->ppOsSessionPtr)
1292 {
1293 ASMAtomicCmpXchgPtr(pSession->ppOsSessionPtr, NULL, pSession);
1294 pSession->ppOsSessionPtr = NULL;
1295 }
1296
1297 supdrvOSSessionHashTabRemoved(pDevExt, pSession, pvUser);
1298
1299 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1300
1301 /*
1302 * Drop the reference the hash table had to the session. This shouldn't
1303 * be the last reference!
1304 */
1305 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1306 Assert(cRefs > 0 && cRefs < _1M);
1307 if (cRefs == 0)
1308 supdrvDestroySession(pDevExt, pSession);
1309
1310 return VINF_SUCCESS;
1311}
1312
1313
1314/**
1315 * Looks up the session for the current process in the global hash table or in
1316 * OS specific pointer.
1317 *
1318 * @returns Pointer to the session with a reference that the caller must
1319 * release. If no valid session was found, NULL is returned.
1320 *
1321 * @param pDevExt The device extension.
1322 * @param Process The process ID.
1323 * @param R0Process The ring-0 process handle.
1324 * @param ppOsSessionPtr The OS session pointer if available. If not NULL,
1325 * this is used instead of the hash table. For
1326 * additional safety it must then be equal to the
1327 * SUPDRVSESSION::ppOsSessionPtr member.
1328 * This can be NULL even if the OS has a session
1329 * pointer.
1330 */
1331PSUPDRVSESSION VBOXCALL supdrvSessionHashTabLookup(PSUPDRVDEVEXT pDevExt, RTPROCESS Process, RTR0PROCESS R0Process,
1332 PSUPDRVSESSION *ppOsSessionPtr)
1333{
1334 PSUPDRVSESSION pCur;
1335 unsigned iHash;
1336
1337 /*
1338 * Validate input.
1339 */
1340 AssertReturn(R0Process != NIL_RTR0PROCESS, NULL);
1341
1342 /*
1343 * Calculate the hash table index and acquire the spinlock.
1344 */
1345 iHash = SUPDRV_SESSION_HASH(Process);
1346
1347 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1348
1349 /*
1350 * If an OS session pointer is provided, always use it.
1351 */
1352 if (ppOsSessionPtr)
1353 {
1354 pCur = *ppOsSessionPtr;
1355 if ( pCur
1356 && ( pCur->ppOsSessionPtr != ppOsSessionPtr
1357 || pCur->Process != Process
1358 || pCur->R0Process != R0Process) )
1359 pCur = NULL;
1360 }
1361 else
1362 {
1363 /*
1364 * Otherwise, do the hash table lookup.
1365 */
1366 pCur = pDevExt->apSessionHashTab[iHash];
1367 while ( pCur
1368 && ( pCur->Process != Process
1369 || pCur->R0Process != R0Process) )
1370 pCur = pCur->pCommonNextHash;
1371 }
1372
1373 /*
1374 * Retain the session.
1375 */
1376 if (pCur)
1377 {
1378 uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
1379 NOREF(cRefs);
1380 Assert(cRefs > 1 && cRefs < _1M);
1381 }
1382
1383 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1384
1385 return pCur;
1386}
1387
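/*
 * Illustrative sketch only (disabled): the typical lookup/release pairing a
 * ring-0 caller would use with the helpers above. The function name and the
 * use of RTProcSelf()/RTR0ProcHandleSelf() for the current process are
 * assumptions made for the example, not existing driver code.
 */
#if 0
static int supdrvExampleUseCurrentSession(PSUPDRVDEVEXT pDevExt)
{
    /* Look up the session of the calling process; this adds a reference. */
    PSUPDRVSESSION pSession = supdrvSessionHashTabLookup(pDevExt, RTProcSelf(), RTR0ProcHandleSelf(),
                                                         NULL /* ppOsSessionPtr */);
    if (!pSession)
        return VERR_NOT_FOUND;

    /* ... use pSession here ... */

    /* Drop the reference obtained by the lookup. */
    supdrvSessionRelease(pSession);
    return VINF_SUCCESS;
}
#endif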
1388
1389/**
1390 * Retain a session to make sure it doesn't go away while it is in use.
1391 *
1392 * @returns New reference count on success, UINT32_MAX on failure.
1393 * @param pSession Session data.
1394 */
1395uint32_t VBOXCALL supdrvSessionRetain(PSUPDRVSESSION pSession)
1396{
1397 uint32_t cRefs;
1398 AssertPtrReturn(pSession, UINT32_MAX);
1399 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1400
1401 cRefs = ASMAtomicIncU32(&pSession->cRefs);
1402 AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1403 return cRefs;
1404}
1405
1406
1407/**
1408 * Releases a given session.
1409 *
1410 * @returns New reference count on success (0 if closed), UINT32_MAX on failure.
1411 * @param pSession Session data.
1412 */
1413uint32_t VBOXCALL supdrvSessionRelease(PSUPDRVSESSION pSession)
1414{
1415 uint32_t cRefs;
1416 AssertPtrReturn(pSession, UINT32_MAX);
1417 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1418
1419 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1420 AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1421 if (cRefs == 0)
1422 supdrvDestroySession(pSession->pDevExt, pSession);
1423 return cRefs;
1424}
1425
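/*
 * Illustrative sketch only (disabled): keeping a session alive while it is
 * handed to deferred work. The worker function and the way it receives the
 * session pointer are assumptions made for the example.
 */
#if 0
static void supdrvExampleDeferredWorker(PSUPDRVSESSION pSession)
{
    /* ... perform the deferred work ... */
    supdrvSessionRelease(pSession); /* pairs with the retain done when queuing */
}

static void supdrvExampleQueueWork(PSUPDRVSESSION pSession)
{
    if (supdrvSessionRetain(pSession) != UINT32_MAX)
    {
        /* Hand pSession to supdrvExampleDeferredWorker on a worker thread. */
    }
}
#endif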
1426
1427/**
1428 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1429 *
1430 * @returns IPRT status code, see SUPR0ObjAddRef.
1431 * @param hHandleTable The handle table handle. Ignored.
1432 * @param pvObj The object pointer.
1433 * @param pvCtx Context, the handle type. Ignored.
1434 * @param pvUser Session pointer.
1435 */
1436static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
1437{
1438 NOREF(pvCtx);
1439 NOREF(hHandleTable);
1440 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
1441}
1442
1443
1444/**
1445 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1446 *
1447 * @param hHandleTable The handle table handle. Ignored.
1448 * @param h The handle value. Ignored.
1449 * @param pvObj The object pointer.
1450 * @param pvCtx Context, the handle type. Ignored.
1451 * @param pvUser Session pointer.
1452 */
1453static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
1454{
1455 NOREF(pvCtx);
1456 NOREF(h);
1457 NOREF(hHandleTable);
1458 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
1459}
1460
1461
1462/**
1463 * Fast path I/O Control worker.
1464 *
1465 * @returns VBox status code that should be passed down to ring-3 unchanged.
1466 * @param uOperation SUP_VMMR0_DO_XXX (not the I/O control number!).
1467 * @param idCpu VMCPU id.
1468 * @param pDevExt Device extension.
1469 * @param pSession Session data.
1470 */
1471int VBOXCALL supdrvIOCtlFast(uintptr_t uOperation, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1472{
1473 /*
1474 * Validate input and check that the VM has a session.
1475 */
1476 if (RT_LIKELY(RT_VALID_PTR(pSession)))
1477 {
1478 PVM pVM = pSession->pSessionVM;
1479 PGVM pGVM = pSession->pSessionGVM;
1480 if (RT_LIKELY( pGVM != NULL
1481 && pVM != NULL
1482 && pVM == pSession->pFastIoCtrlVM))
1483 {
1484 if (RT_LIKELY(pDevExt->pfnVMMR0EntryFast))
1485 {
1486 /*
1487 * Make the call.
1488 */
1489 pDevExt->pfnVMMR0EntryFast(pGVM, pVM, idCpu, uOperation);
1490 return VINF_SUCCESS;
1491 }
1492
1493 SUPR0Printf("supdrvIOCtlFast: pfnVMMR0EntryFast is NULL\n");
1494 }
1495 else
1496 SUPR0Printf("supdrvIOCtlFast: Misconfig session: pGVM=%p pVM=%p pFastIoCtrlVM=%p\n",
1497 pGVM, pVM, pSession->pFastIoCtrlVM);
1498 }
1499 else
1500 SUPR0Printf("supdrvIOCtlFast: Bad session pointer %p\n", pSession);
1501 return VERR_INTERNAL_ERROR;
1502}
1503
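/*
 * Note: the fast path above only succeeds after the client has associated its
 * VM with the session via SUP_IOCTL_SET_VM_FOR_FAST (handled further down),
 * which is what sets pSession->pFastIoCtrlVM.
 */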
1504
1505/**
1506 * Helper for supdrvIOCtl used to validate module names passed to SUP_IOCTL_LDR_OPEN.
1507 *
1508 * Checks whether pszName contains any of the characters considered invalid. We
1509 * would use strpbrk here if it were on the RedHat kABI whitelist, see
1510 * http://www.kerneldrivers.org/RHEL5.
1511 *
1512 * @returns true if fine, false if not.
1513 * @param pszName The module name to check.
1514 */
1515static bool supdrvIsLdrModuleNameValid(const char *pszName)
1516{
1517 int chCur;
1518 while ((chCur = *pszName++) != '\0')
1519 {
1520 static const char s_szInvalidChars[] = ";:()[]{}/\\|&*%#@!~`\"'";
1521 unsigned offInv = RT_ELEMENTS(s_szInvalidChars);
1522 while (offInv-- > 0)
1523 if (s_szInvalidChars[offInv] == chCur)
1524 return false;
1525 }
1526 return true;
1527}
1528
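/*
 * Informal examples: names like "VMMR0" or "VBoxDDR0" pass this check, while
 * something like "VMMR0;reboot" is rejected because ';' is one of the
 * characters in s_szInvalidChars.
 */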
1529
1530
1531/**
1532 * I/O Control inner worker (split out for tracing reasons).
1533 *
1534 * @returns IPRT status code.
1535 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1536 *
1537 * @param uIOCtl Function number.
1538 * @param pDevExt Device extension.
1539 * @param pSession Session data.
1540 * @param pReqHdr The request header.
1541 */
1542static int supdrvIOCtlInnerUnrestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1543{
1544 /*
1545 * Validation macros
1546 */
1547#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1548 do { \
1549 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1550 { \
1551 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1552 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1553 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1554 } \
1555 } while (0)
1556
1557#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1558
1559#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1560 do { \
1561 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1562 { \
1563 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1564 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
1565 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1566 } \
1567 } while (0)
1568
1569#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1570 do { \
1571 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1572 { \
1573 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1574 (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1575 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1576 } \
1577 } while (0)
1578
1579#define REQ_CHECK_EXPR(Name, expr) \
1580 do { \
1581 if (RT_UNLIKELY(!(expr))) \
1582 { \
1583 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1584 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1585 } \
1586 } while (0)
1587
1588#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1589 do { \
1590 if (RT_UNLIKELY(!(expr))) \
1591 { \
1592 OSDBGPRINT( fmt ); \
1593 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1594 } \
1595 } while (0)
1596
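    /*
     * Usage sketch for the macros above: REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE)
     * compares pReqHdr->cbIn/cbOut against SUP_IOCTL_LDR_FREE_SIZE_IN/_OUT and,
     * on mismatch, logs both values and returns VERR_INVALID_PARAMETER after
     * storing it in pReqHdr->rc.
     */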
1597 /*
1598 * The switch.
1599 */
1600 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1601 {
1602 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1603 {
1604 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1605 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1606 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1607 {
1608 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1609 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1610 return 0;
1611 }
1612
1613#if 0
1614 /*
1615 * Call out to the OS specific code and let it do permission checks on the
1616 * client process.
1617 */
1618 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1619 {
1620 pReq->u.Out.u32Cookie = 0xffffffff;
1621 pReq->u.Out.u32SessionCookie = 0xffffffff;
1622 pReq->u.Out.u32SessionVersion = 0xffffffff;
1623 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1624 pReq->u.Out.pSession = NULL;
1625 pReq->u.Out.cFunctions = 0;
1626 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1627 return 0;
1628 }
1629#endif
1630
1631 /*
1632 * Match the version.
1633 * The current logic is very simple, match the major interface version.
1634 */
1635 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1636 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1637 {
1638 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1639 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1640 pReq->u.Out.u32Cookie = 0xffffffff;
1641 pReq->u.Out.u32SessionCookie = 0xffffffff;
1642 pReq->u.Out.u32SessionVersion = 0xffffffff;
1643 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1644 pReq->u.Out.pSession = NULL;
1645 pReq->u.Out.cFunctions = 0;
1646 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1647 return 0;
1648 }
1649
1650 /*
1651 * Fill in return data and be gone.
1652 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1653 * u32SessionVersion <= u32ReqVersion!
1654 */
1655 /** @todo Somehow validate the client and negotiate a secure cookie... */
1656 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1657 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1658 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1659 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1660 pReq->u.Out.pSession = pSession;
1661 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1662 pReq->Hdr.rc = VINF_SUCCESS;
1663 return 0;
1664 }
1665
1666 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1667 {
1668 /* validate */
1669 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1670 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1671
1672 /* execute */
1673 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1674 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1675 pReq->Hdr.rc = VINF_SUCCESS;
1676 return 0;
1677 }
1678
1679 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1680 {
1681 /* validate */
1682 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1683 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1684 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1685 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1686 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1687
1688 /* execute */
1689 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1690 if (RT_FAILURE(pReq->Hdr.rc))
1691 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1692 return 0;
1693 }
1694
1695 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1696 {
1697 /* validate */
1698 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1699 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1700
1701 /* execute */
1702 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1703 return 0;
1704 }
1705
1706 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1707 {
1708 /* validate */
1709 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1710 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1711
1712 /* execute */
1713 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1714 if (RT_FAILURE(pReq->Hdr.rc))
1715 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1716 return 0;
1717 }
1718
1719 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1720 {
1721 /* validate */
1722 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1723 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1724
1725 /* execute */
1726 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1727 return 0;
1728 }
1729
1730 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1731 {
1732 /* validate */
1733 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1734 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1735 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything > 0);
1736 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything < 16*_1M);
1737 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1738 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithEverything);
1739 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1740 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
1741 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, supdrvIsLdrModuleNameValid(pReq->u.In.szName));
1742 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szFilename, sizeof(pReq->u.In.szFilename)));
1743
1744 /* execute */
1745 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1746 return 0;
1747 }
1748
1749 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1750 {
1751 /* validate */
1752 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1753 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= SUP_IOCTL_LDR_LOAD_SIZE_IN(32));
1754 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithEverything), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1755 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1756 || ( pReq->u.In.cSymbols <= 16384
1757 && pReq->u.In.offSymbols >= pReq->u.In.cbImageBits
1758 && pReq->u.In.offSymbols < pReq->u.In.cbImageWithEverything
1759 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithEverything),
1760 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSymbols,
1761 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithEverything));
1762 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1763 || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithEverything
1764 && pReq->u.In.offStrTab >= pReq->u.In.cbImageBits
1765 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything
1766 && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything),
1767 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offStrTab,
1768 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithEverything));
1769 REQ_CHECK_EXPR_FMT( pReq->u.In.cSegments >= 1
1770 && pReq->u.In.cSegments <= 128
1771 && pReq->u.In.cSegments <= pReq->u.In.cbImageBits / PAGE_SIZE
1772 && pReq->u.In.offSegments >= pReq->u.In.cbImageBits
1773 && pReq->u.In.offSegments < pReq->u.In.cbImageWithEverything
1774 && pReq->u.In.offSegments + pReq->u.In.cSegments * sizeof(SUPLDRSEG) <= pReq->u.In.cbImageWithEverything,
1775 ("SUP_IOCTL_LDR_LOAD: offSegments=%#lx cSegments=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSegments,
1776 (long)pReq->u.In.cSegments, (long)pReq->u.In.cbImageWithEverything));
1777
1778 if (pReq->u.In.cSymbols)
1779 {
1780 uint32_t i;
1781 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.abImage[pReq->u.In.offSymbols];
1782 for (i = 0; i < pReq->u.In.cSymbols; i++)
1783 {
1784 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithEverything,
1785 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithEverything));
1786 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1787 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbStrTab));
1788 REQ_CHECK_EXPR_FMT(RTStrEnd((char const *)&pReq->u.In.abImage[pReq->u.In.offStrTab + paSyms[i].offName],
1789 pReq->u.In.cbStrTab - paSyms[i].offName),
1790 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1791 }
1792 }
1793 {
1794 uint32_t i;
1795 uint32_t offPrevEnd = 0;
1796 PSUPLDRSEG paSegs = (PSUPLDRSEG)&pReq->u.In.abImage[pReq->u.In.offSegments];
1797 for (i = 0; i < pReq->u.In.cSegments; i++)
1798 {
1799 REQ_CHECK_EXPR_FMT(paSegs[i].off < pReq->u.In.cbImageBits && !(paSegs[i].off & PAGE_OFFSET_MASK),
1800 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)pReq->u.In.cbImageBits));
1801 REQ_CHECK_EXPR_FMT(paSegs[i].cb <= pReq->u.In.cbImageBits,
1802 ("SUP_IOCTL_LDR_LOAD: seg #%ld: cb %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].cb, (long)pReq->u.In.cbImageBits));
1803 REQ_CHECK_EXPR_FMT(paSegs[i].off + paSegs[i].cb <= pReq->u.In.cbImageBits,
1804 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx = %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb, (long)(paSegs[i].off + paSegs[i].cb), (long)pReq->u.In.cbImageBits));
1805 REQ_CHECK_EXPR_FMT(paSegs[i].fProt != 0,
1806 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb));
1807 REQ_CHECK_EXPR_FMT(paSegs[i].fUnused == 0, ("SUP_IOCTL_LDR_LOAD: seg #%ld: fUnused=1\n", (long)i));
1808 REQ_CHECK_EXPR_FMT(offPrevEnd == paSegs[i].off,
1809 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx offPrevEnd %#lx\n", (long)i, (long)paSegs[i].off, (long)offPrevEnd));
1810 offPrevEnd = paSegs[i].off + paSegs[i].cb;
1811 }
1812 REQ_CHECK_EXPR_FMT(offPrevEnd == pReq->u.In.cbImageBits,
1813 ("SUP_IOCTL_LDR_LOAD: offPrevEnd %#lx cbImageBits %#lx\n", (long)offPrevEnd, (long)pReq->u.In.cbImageBits));
1814 }
1815
1816 /* execute */
1817 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1818 return 0;
1819 }
1820
1821 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1822 {
1823 /* validate */
1824 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1825 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1826
1827 /* execute */
1828 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1829 return 0;
1830 }
1831
1832 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOCK_DOWN):
1833 {
1834 /* validate */
1835 REQ_CHECK_SIZES(SUP_IOCTL_LDR_LOCK_DOWN);
1836
1837 /* execute */
1838 pReqHdr->rc = supdrvIOCtl_LdrLockDown(pDevExt);
1839 return 0;
1840 }
1841
1842 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1843 {
1844 /* validate */
1845 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1846 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1847 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, RTStrEnd(pReq->u.In.szSymbol, sizeof(pReq->u.In.szSymbol)));
1848
1849 /* execute */
1850 pReq->Hdr.rc = supdrvIOCtl_LdrQuerySymbol(pDevExt, pSession, pReq);
1851 return 0;
1852 }
1853
1854 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_NO_SIZE()):
1855 {
1856 /* validate */
1857 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1858 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1859 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1860
1861 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1862 {
1863 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1864
1865 /* execute */
1866 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1867 {
1868 if (pReq->u.In.pVMR0 == NULL)
1869 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1870 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1871 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1872 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1873 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1874 else
1875 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1876 }
1877 else
1878 pReq->Hdr.rc = VERR_WRONG_ORDER;
1879 }
1880 else
1881 {
1882 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1883 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1884 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1885 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1886 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1887
1888 /* execute */
1889 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1890 {
1891 if (pReq->u.In.pVMR0 == NULL)
1892 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1893 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1894 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1895 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1896 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1897 else
1898 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1899 }
1900 else
1901 pReq->Hdr.rc = VERR_WRONG_ORDER;
1902 }
1903
1904 if ( RT_FAILURE(pReq->Hdr.rc)
1905 && pReq->Hdr.rc != VERR_INTERRUPTED
1906 && pReq->Hdr.rc != VERR_TIMEOUT)
1907 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1908 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1909 else
1910 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1911 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1912 return 0;
1913 }
1914
1915 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_BIG):
1916 {
1917 /* validate */
1918 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1919 PSUPVMMR0REQHDR pVMMReq;
1920 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1921 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1922
1923 pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1924 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR)),
1925 ("SUP_IOCTL_CALL_VMMR0_BIG: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR))));
1926 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0_BIG, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1927 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0_BIG, SUP_IOCTL_CALL_VMMR0_BIG_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_BIG_SIZE_OUT(pVMMReq->cbReq));
1928
1929 /* execute */
1930 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1931 {
1932 if (pReq->u.In.pVMR0 == NULL)
1933 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1934 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1935 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1936 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1937 else
1938 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1939 }
1940 else
1941 pReq->Hdr.rc = VERR_WRONG_ORDER;
1942
1943 if ( RT_FAILURE(pReq->Hdr.rc)
1944 && pReq->Hdr.rc != VERR_INTERRUPTED
1945 && pReq->Hdr.rc != VERR_TIMEOUT)
1946 Log(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1947 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1948 else
1949 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1950 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1951 return 0;
1952 }
1953
1954 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1955 {
1956 /* validate */
1957 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1958 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1959
1960 /* execute */
1961 pReq->Hdr.rc = VINF_SUCCESS;
1962 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1963 return 0;
1964 }
1965
1966 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1967 {
1968 /* validate */
1969 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1970 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1971 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1972
1973 /* execute */
1974 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1975 if (RT_FAILURE(pReq->Hdr.rc))
1976 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1977 return 0;
1978 }
1979
1980 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1981 {
1982 /* validate */
1983 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1984 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1985
1986 /* execute */
1987 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1988 return 0;
1989 }
1990
1991 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1992 {
1993 /* validate */
1994 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1995 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1996
1997 /* execute */
1998 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1999 if (RT_SUCCESS(pReq->Hdr.rc))
2000 pReq->u.Out.pGipR0 = pDevExt->pGip;
2001 return 0;
2002 }
2003
2004 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
2005 {
2006 /* validate */
2007 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
2008 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
2009
2010 /* execute */
2011 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
2012 return 0;
2013 }
2014
2015 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
2016 {
2017 /* validate */
2018 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
2019 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
2020 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
2021 || ( VALID_PTR(pReq->u.In.pVMR0)
2022 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
2023 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
2024
2025 /* execute */
2026 RTSpinlockAcquire(pDevExt->Spinlock);
2027 if (pSession->pSessionVM == pReq->u.In.pVMR0)
2028 {
2029 if (pSession->pFastIoCtrlVM == NULL)
2030 {
2031 pSession->pFastIoCtrlVM = pSession->pSessionVM;
2032 RTSpinlockRelease(pDevExt->Spinlock);
2033 pReq->Hdr.rc = VINF_SUCCESS;
2034 }
2035 else
2036 {
2037 RTSpinlockRelease(pDevExt->Spinlock);
2038 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pFastIoCtrlVM=%p! (pVMR0=%p)\n",
2039 pSession->pFastIoCtrlVM, pReq->u.In.pVMR0));
2040 pReq->Hdr.rc = VERR_ALREADY_EXISTS;
2041 }
2042 }
2043 else
2044 {
2045 RTSpinlockRelease(pDevExt->Spinlock);
2046 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pSessionVM=%p vs pVMR0=%p)\n",
2047 pSession->pSessionVM, pReq->u.In.pVMR0));
2048 pReq->Hdr.rc = pSession->pSessionVM ? VERR_ACCESS_DENIED : VERR_WRONG_ORDER;
2049 }
2050 return 0;
2051 }
2052
2053 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
2054 {
2055 /* validate */
2056 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
2057 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
2058 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
2059 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
2060 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
2061 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
2062 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
2063 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
2064 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
2065
2066 /* execute */
2067 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
2068 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
2069 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
2070 &pReq->u.Out.aPages[0]);
2071 if (RT_FAILURE(pReq->Hdr.rc))
2072 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2073 return 0;
2074 }
2075
2076 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
2077 {
2078 /* validate */
2079 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
2080 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
2081 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
2082 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
2083 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2084 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
2085
2086 /* execute */
2087 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
2088 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
2089 if (RT_FAILURE(pReq->Hdr.rc))
2090 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2091 return 0;
2092 }
2093
2094 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
2095 {
2096 /* validate */
2097 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
2098 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
2099 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
2100 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
2101 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
2102 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2103 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
2104
2105 /* execute */
2106 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
2107 return 0;
2108 }
2109
2110 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
2111 {
2112 /* validate */
2113 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
2114 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
2115
2116 /* execute */
2117 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
2118 return 0;
2119 }
2120
2121 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE_NO_SIZE()):
2122 {
2123 /* validate */
2124 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
2125 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2126 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2127
2128 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
2129 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
2130 else
2131 {
2132 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
2133 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
2134 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
2135 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
2136 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
2137 }
2138 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
2139
2140 /* execute */
2141 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
2142 return 0;
2143 }
2144
2145 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS_NO_SIZE()):
2146 {
2147 /* validate */
2148 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
2149 size_t cbStrTab;
2150 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
2151 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
2152 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
2153 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
2154 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
2155 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
2156 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
2157 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
2158 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
2159 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
2160 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
2161
2162 /* execute */
2163 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pReq);
2164 return 0;
2165 }
2166
2167 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP2):
2168 {
2169 /* validate */
2170 PSUPSEMOP2 pReq = (PSUPSEMOP2)pReqHdr;
2171 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP2, SUP_IOCTL_SEM_OP2_SIZE_IN, SUP_IOCTL_SEM_OP2_SIZE_OUT);
2172 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP2, pReq->u.In.uReserved == 0);
2173
2174 /* execute */
2175 switch (pReq->u.In.uType)
2176 {
2177 case SUP_SEM_TYPE_EVENT:
2178 {
2179 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2180 switch (pReq->u.In.uOp)
2181 {
2182 case SUPSEMOP2_WAIT_MS_REL:
2183 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.uArg.cRelMsTimeout);
2184 break;
2185 case SUPSEMOP2_WAIT_NS_ABS:
2186 pReq->Hdr.rc = SUPSemEventWaitNsAbsIntr(pSession, hEvent, pReq->u.In.uArg.uAbsNsTimeout);
2187 break;
2188 case SUPSEMOP2_WAIT_NS_REL:
2189 pReq->Hdr.rc = SUPSemEventWaitNsRelIntr(pSession, hEvent, pReq->u.In.uArg.cRelNsTimeout);
2190 break;
2191 case SUPSEMOP2_SIGNAL:
2192 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
2193 break;
2194 case SUPSEMOP2_CLOSE:
2195 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
2196 break;
2197 case SUPSEMOP2_RESET:
2198 default:
2199 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2200 break;
2201 }
2202 break;
2203 }
2204
2205 case SUP_SEM_TYPE_EVENT_MULTI:
2206 {
2207 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2208 switch (pReq->u.In.uOp)
2209 {
2210 case SUPSEMOP2_WAIT_MS_REL:
2211 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.uArg.cRelMsTimeout);
2212 break;
2213 case SUPSEMOP2_WAIT_NS_ABS:
2214 pReq->Hdr.rc = SUPSemEventMultiWaitNsAbsIntr(pSession, hEventMulti, pReq->u.In.uArg.uAbsNsTimeout);
2215 break;
2216 case SUPSEMOP2_WAIT_NS_REL:
2217 pReq->Hdr.rc = SUPSemEventMultiWaitNsRelIntr(pSession, hEventMulti, pReq->u.In.uArg.cRelNsTimeout);
2218 break;
2219 case SUPSEMOP2_SIGNAL:
2220 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
2221 break;
2222 case SUPSEMOP2_CLOSE:
2223 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
2224 break;
2225 case SUPSEMOP2_RESET:
2226 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
2227 break;
2228 default:
2229 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2230 break;
2231 }
2232 break;
2233 }
2234
2235 default:
2236 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2237 break;
2238 }
2239 return 0;
2240 }
2241
2242 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP3):
2243 {
2244 /* validate */
2245 PSUPSEMOP3 pReq = (PSUPSEMOP3)pReqHdr;
2246 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP3, SUP_IOCTL_SEM_OP3_SIZE_IN, SUP_IOCTL_SEM_OP3_SIZE_OUT);
2247 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, pReq->u.In.u32Reserved == 0 && pReq->u.In.u64Reserved == 0);
2248
2249 /* execute */
2250 switch (pReq->u.In.uType)
2251 {
2252 case SUP_SEM_TYPE_EVENT:
2253 {
2254 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2255 switch (pReq->u.In.uOp)
2256 {
2257 case SUPSEMOP3_CREATE:
2258 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2259 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
2260 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
2261 break;
2262 case SUPSEMOP3_GET_RESOLUTION:
2263 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2264 pReq->Hdr.rc = VINF_SUCCESS;
2265 pReq->Hdr.cbOut = sizeof(*pReq);
2266 pReq->u.Out.cNsResolution = SUPSemEventGetResolution(pSession);
2267 break;
2268 default:
2269 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2270 break;
2271 }
2272 break;
2273 }
2274
2275 case SUP_SEM_TYPE_EVENT_MULTI:
2276 {
2277 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2278 switch (pReq->u.In.uOp)
2279 {
2280 case SUPSEMOP3_CREATE:
2281 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2282 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
2283 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
2284 break;
2285 case SUPSEMOP3_GET_RESOLUTION:
2286 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2287 pReq->Hdr.rc = VINF_SUCCESS;
2288 pReq->u.Out.cNsResolution = SUPSemEventMultiGetResolution(pSession);
2289 break;
2290 default:
2291 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2292 break;
2293 }
2294 break;
2295 }
2296
2297 default:
2298 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2299 break;
2300 }
2301 return 0;
2302 }
2303
2304 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2305 {
2306 /* validate */
2307 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2308 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2309
2310 /* execute */
2311 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2312 if (RT_FAILURE(pReq->Hdr.rc))
2313 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2314 return 0;
2315 }
2316
2317 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_OPEN):
2318 {
2319 /* validate */
2320 PSUPTRACEROPEN pReq = (PSUPTRACEROPEN)pReqHdr;
2321 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_OPEN);
2322
2323 /* execute */
2324 pReq->Hdr.rc = supdrvIOCtl_TracerOpen(pDevExt, pSession, pReq->u.In.uCookie, pReq->u.In.uArg);
2325 return 0;
2326 }
2327
2328 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_CLOSE):
2329 {
2330 /* validate */
2331 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_CLOSE);
2332
2333 /* execute */
2334 pReqHdr->rc = supdrvIOCtl_TracerClose(pDevExt, pSession);
2335 return 0;
2336 }
2337
2338 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_IOCTL):
2339 {
2340 /* validate */
2341 PSUPTRACERIOCTL pReq = (PSUPTRACERIOCTL)pReqHdr;
2342 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_IOCTL);
2343
2344 /* execute */
2345 pReqHdr->rc = supdrvIOCtl_TracerIOCtl(pDevExt, pSession, pReq->u.In.uCmd, pReq->u.In.uArg, &pReq->u.Out.iRetVal);
2346 return 0;
2347 }
2348
2349 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_REG):
2350 {
2351 /* validate */
2352 PSUPTRACERUMODREG pReq = (PSUPTRACERUMODREG)pReqHdr;
2353 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_REG);
2354 if (!RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)))
2355 return VERR_INVALID_PARAMETER;
2356
2357 /* execute */
2358 pReqHdr->rc = supdrvIOCtl_TracerUmodRegister(pDevExt, pSession,
2359 pReq->u.In.R3PtrVtgHdr, pReq->u.In.uVtgHdrAddr,
2360 pReq->u.In.R3PtrStrTab, pReq->u.In.cbStrTab,
2361 pReq->u.In.szName, pReq->u.In.fFlags);
2362 return 0;
2363 }
2364
2365 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_DEREG):
2366 {
2367 /* validate */
2368 PSUPTRACERUMODDEREG pReq = (PSUPTRACERUMODDEREG)pReqHdr;
2369 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_DEREG);
2370
2371 /* execute */
2372 pReqHdr->rc = supdrvIOCtl_TracerUmodDeregister(pDevExt, pSession, pReq->u.In.pVtgHdr);
2373 return 0;
2374 }
2375
2376 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE):
2377 {
2378 /* validate */
2379 PSUPTRACERUMODFIREPROBE pReq = (PSUPTRACERUMODFIREPROBE)pReqHdr;
2380 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE);
2381
2382 supdrvIOCtl_TracerUmodProbeFire(pDevExt, pSession, &pReq->u.In);
2383 pReqHdr->rc = VINF_SUCCESS;
2384 return 0;
2385 }
2386
2387 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_MSR_PROBER):
2388 {
2389 /* validate */
2390 PSUPMSRPROBER pReq = (PSUPMSRPROBER)pReqHdr;
2391 REQ_CHECK_SIZES(SUP_IOCTL_MSR_PROBER);
2392 REQ_CHECK_EXPR(SUP_IOCTL_MSR_PROBER,
2393 pReq->u.In.enmOp > SUPMSRPROBEROP_INVALID && pReq->u.In.enmOp < SUPMSRPROBEROP_END);
2394
2395 pReqHdr->rc = supdrvIOCtl_MsrProber(pDevExt, pReq);
2396 return 0;
2397 }
2398
2399 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_RESUME_SUSPENDED_KBDS):
2400 {
2401 /* validate */
2402 REQ_CHECK_SIZES(SUP_IOCTL_RESUME_SUSPENDED_KBDS);
2403
2404 pReqHdr->rc = supdrvIOCtl_ResumeSuspendedKbds();
2405 return 0;
2406 }
2407
2408 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_DELTA_MEASURE):
2409 {
2410 /* validate */
2411 PSUPTSCDELTAMEASURE pReq = (PSUPTSCDELTAMEASURE)pReqHdr;
2412 REQ_CHECK_SIZES(SUP_IOCTL_TSC_DELTA_MEASURE);
2413
2414 pReqHdr->rc = supdrvIOCtl_TscDeltaMeasure(pDevExt, pSession, pReq);
2415 return 0;
2416 }
2417
2418 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_READ):
2419 {
2420 /* validate */
2421 PSUPTSCREAD pReq = (PSUPTSCREAD)pReqHdr;
2422 REQ_CHECK_SIZES(SUP_IOCTL_TSC_READ);
2423
2424 pReqHdr->rc = supdrvIOCtl_TscRead(pDevExt, pSession, pReq);
2425 return 0;
2426 }
2427
2428 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_SET_FLAGS):
2429 {
2430 /* validate */
2431 PSUPGIPSETFLAGS pReq = (PSUPGIPSETFLAGS)pReqHdr;
2432 REQ_CHECK_SIZES(SUP_IOCTL_GIP_SET_FLAGS);
2433
2434 pReqHdr->rc = supdrvIOCtl_GipSetFlags(pDevExt, pSession, pReq->u.In.fOrMask, pReq->u.In.fAndMask);
2435 return 0;
2436 }
2437
2438 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_UCODE_REV):
2439 {
2440 /* validate */
2441 PSUPUCODEREV pReq = (PSUPUCODEREV)pReqHdr;
2442 REQ_CHECK_SIZES(SUP_IOCTL_UCODE_REV);
2443
2444 /* execute */
2445 pReq->Hdr.rc = SUPR0QueryUcodeRev(pSession, &pReq->u.Out.MicrocodeRev);
2446 if (RT_FAILURE(pReq->Hdr.rc))
2447 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2448 return 0;
2449 }
2450
2451 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_HWVIRT_MSRS):
2452 {
2453 /* validate */
2454 PSUPGETHWVIRTMSRS pReq = (PSUPGETHWVIRTMSRS)pReqHdr;
2455 REQ_CHECK_SIZES(SUP_IOCTL_GET_HWVIRT_MSRS);
2456 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1 && !pReq->u.In.fReserved2,
2457 ("SUP_IOCTL_GET_HWVIRT_MSRS: fReserved0=%d fReserved1=%d fReserved2=%d\n", pReq->u.In.fReserved0,
2458 pReq->u.In.fReserved1, pReq->u.In.fReserved2));
2459
2460 /* execute */
2461 pReq->Hdr.rc = SUPR0GetHwvirtMsrs(&pReq->u.Out.HwvirtMsrs, 0 /* fCaps */, pReq->u.In.fForce);
2462 if (RT_FAILURE(pReq->Hdr.rc))
2463 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2464 return 0;
2465 }
2466
2467 default:
2468 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2469 break;
2470 }
2471 return VERR_GENERAL_FAILURE;
2472}
2473
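/*
 * Illustrative sketch only (disabled): the ring-0 event semaphore API that the
 * SUP_IOCTL_SEM_OP2/SEM_OP3 cases above drive on behalf of ring-3. The helper
 * name and the 10 ms timeout are assumptions made for the example.
 */
#if 0
static int supdrvExampleSemEvent(PSUPDRVSESSION pSession)
{
    SUPSEMEVENT hEvent = NIL_SUPSEMEVENT;
    int rc = SUPSemEventCreate(pSession, &hEvent);
    if (RT_SUCCESS(rc))
    {
        rc = SUPSemEventSignal(pSession, hEvent);
        if (RT_SUCCESS(rc))
            rc = SUPSemEventWaitNoResume(pSession, hEvent, 10 /* ms */);
        SUPSemEventClose(pSession, hEvent);
    }
    return rc;
}
#endif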
2474
2475/**
2476 * I/O Control inner worker for the restricted operations.
2477 *
2478 * @returns IPRT status code.
2479 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2480 *
2481 * @param uIOCtl Function number.
2482 * @param pDevExt Device extension.
2483 * @param pSession Session data.
2484 * @param pReqHdr The request header.
2485 */
2486static int supdrvIOCtlInnerRestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
2487{
2488 /*
2489 * The switch.
2490 */
2491 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
2492 {
2493 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
2494 {
2495 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
2496 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
2497 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
2498 {
2499 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
2500 pReq->Hdr.rc = VERR_INVALID_MAGIC;
2501 return 0;
2502 }
2503
2504 /*
2505 * Match the version.
2506 * The current logic is very simple, match the major interface version.
2507 */
2508 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
2509 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
2510 {
2511 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2512 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
2513 pReq->u.Out.u32Cookie = 0xffffffff;
2514 pReq->u.Out.u32SessionCookie = 0xffffffff;
2515 pReq->u.Out.u32SessionVersion = 0xffffffff;
2516 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2517 pReq->u.Out.pSession = NULL;
2518 pReq->u.Out.cFunctions = 0;
2519 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2520 return 0;
2521 }
2522
2523 /*
2524 * Fill in return data and be gone.
2525 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
2526 * u32SessionVersion <= u32ReqVersion!
2527 */
2528 /** @todo Somehow validate the client and negotiate a secure cookie... */
2529 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
2530 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
2531 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
2532 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2533 pReq->u.Out.pSession = pSession;
2534 pReq->u.Out.cFunctions = 0;
2535 pReq->Hdr.rc = VINF_SUCCESS;
2536 return 0;
2537 }
2538
2539 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2540 {
2541 /* validate */
2542 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2543 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2544
2545 /* execute */
2546 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2547 if (RT_FAILURE(pReq->Hdr.rc))
2548 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2549 return 0;
2550 }
2551
2552 default:
2553 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2554 break;
2555 }
2556 return VERR_GENERAL_FAILURE;
2557}
2558
2559
2560/**
2561 * I/O Control worker.
2562 *
2563 * @returns IPRT status code.
2564 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2565 *
2566 * @param uIOCtl Function number.
2567 * @param pDevExt Device extension.
2568 * @param pSession Session data.
2569 * @param pReqHdr The request header.
2570 * @param cbReq The size of the request buffer.
2571 */
2572int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr, size_t cbReq)
2573{
2574 int rc;
2575 VBOXDRV_IOCTL_ENTRY(pSession, uIOCtl, pReqHdr);
2576
2577 /*
2578 * Validate the request.
2579 */
2580 if (RT_UNLIKELY(cbReq < sizeof(*pReqHdr)))
2581 {
2582 OSDBGPRINT(("vboxdrv: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
2583 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2584 return VERR_INVALID_PARAMETER;
2585 }
2586 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
2587 || pReqHdr->cbIn < sizeof(*pReqHdr)
2588 || pReqHdr->cbIn > cbReq
2589 || pReqHdr->cbOut < sizeof(*pReqHdr)
2590 || pReqHdr->cbOut > cbReq))
2591 {
2592 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
2593 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
2594 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2595 return VERR_INVALID_PARAMETER;
2596 }
2597 if (RT_UNLIKELY(!RT_VALID_PTR(pSession)))
2598 {
2599 OSDBGPRINT(("vboxdrv: Invalid pSession value %p (ioctl=%p)\n", pSession, (void *)uIOCtl));
2600 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2601 return VERR_INVALID_PARAMETER;
2602 }
2603 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
2604 {
2605 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
2606 {
2607 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
2608 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2609 return VERR_INVALID_PARAMETER;
2610 }
2611 }
2612 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
2613 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
2614 {
2615 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
2616 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2617 return VERR_INVALID_PARAMETER;
2618 }
2619
2620 /*
2621 * Hand it to an inner function to avoid lots of unnecessary return tracepoints.
2622 */
2623 if (pSession->fUnrestricted)
2624 rc = supdrvIOCtlInnerUnrestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2625 else
2626 rc = supdrvIOCtlInnerRestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2627
2628 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, pReqHdr->rc, rc);
2629 return rc;
2630}
2631
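/*
 * Illustrative sketch only (disabled): how a ring-3 client could fill in the
 * SUP_IOCTL_COOKIE request so that it passes the header validation above. The
 * helper name and the zero-initialization are assumptions made for the
 * example; the real user-mode code (SUPLib) may differ in detail.
 */
#if 0
static void supdrvExampleInitCookieReq(SUPCOOKIE *pReq)
{
    RT_ZERO(*pReq);
    pReq->Hdr.u32Cookie        = SUPCOOKIE_INITIAL_COOKIE;   /* required for SUP_IOCTL_COOKIE */
    pReq->Hdr.u32SessionCookie = 0;                           /* no session established yet */
    pReq->Hdr.cbIn             = SUP_IOCTL_COOKIE_SIZE_IN;
    pReq->Hdr.cbOut            = SUP_IOCTL_COOKIE_SIZE_OUT;
    pReq->Hdr.fFlags           = SUPREQHDR_FLAGS_MAGIC;       /* satisfies the magic mask check */
    pReq->Hdr.rc               = VERR_INTERNAL_ERROR;
    RTStrCopy(pReq->u.In.szMagic, sizeof(pReq->u.In.szMagic), SUPCOOKIE_MAGIC);
    pReq->u.In.u32ReqVersion   = SUPDRV_IOC_VERSION;
    pReq->u.In.u32MinVersion   = SUPDRV_IOC_VERSION & 0xffff0000;
}
#endif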
2632
2633/**
2634 * Inter-Driver Communication (IDC) worker.
2635 *
2636 * @returns VBox status code.
2637 * @retval VINF_SUCCESS on success.
2638 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2639 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
2640 *
2641 * @param uReq The request (function) code.
2642 * @param pDevExt Device extension.
2643 * @param pSession Session data.
2644 * @param pReqHdr The request header.
2645 */
2646int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
2647{
2648 /*
2649 * The OS specific code has already validated the pSession
2650 * pointer, and that the request size is greater than or
2651 * equal to the size of the header.
2652 *
2653 * So, just check that pSession is a kernel context session.
2654 */
2655 if (RT_UNLIKELY( pSession
2656 && pSession->R0Process != NIL_RTR0PROCESS))
2657 return VERR_INVALID_PARAMETER;
2658
2659/*
2660 * Validation macro.
2661 */
2662#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
2663 do { \
2664 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
2665 { \
2666 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
2667 (long)pReqHdr->cb, (long)(cbExpect))); \
2668 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
2669 } \
2670 } while (0)
2671
2672 switch (uReq)
2673 {
2674 case SUPDRV_IDC_REQ_CONNECT:
2675 {
2676 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
2677 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
2678
2679 /*
2680 * Validate the cookie and other input.
2681 */
2682 if (pReq->Hdr.pSession != NULL)
2683 {
2684 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Hdr.pSession=%p expected NULL!\n", pReq->Hdr.pSession));
2685 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2686 }
2687 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
2688 {
2689 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
2690 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
2691 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2692 }
2693 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
2694 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
2695 {
2696 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uReqVersion=%#x don't match!\n",
2697 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
2698 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2699 }
2700 if (pSession != NULL)
2701 {
2702 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pSession));
2703 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2704 }
2705
2706 /*
2707 * Match the version.
2708 * The current logic is very simple, match the major interface version.
2709 */
2710 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
2711 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
2712 {
2713 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2714 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
2715 pReq->u.Out.pSession = NULL;
2716 pReq->u.Out.uSessionVersion = 0xffffffff;
2717 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2718 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2719 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2720 return VINF_SUCCESS;
2721 }
2722
2723 pReq->u.Out.pSession = NULL;
2724 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
2725 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2726 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2727
2728 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, true /*fUnrestricted*/, &pSession);
2729 if (RT_FAILURE(pReq->Hdr.rc))
2730 {
2731 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
2732 return VINF_SUCCESS;
2733 }
2734
2735 pReq->u.Out.pSession = pSession;
2736 pReq->Hdr.pSession = pSession;
2737
2738 return VINF_SUCCESS;
2739 }
2740
2741 case SUPDRV_IDC_REQ_DISCONNECT:
2742 {
2743 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
2744
2745 supdrvSessionRelease(pSession);
2746 return pReqHdr->rc = VINF_SUCCESS;
2747 }
2748
2749 case SUPDRV_IDC_REQ_GET_SYMBOL:
2750 {
2751 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
2752 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
2753
2754 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
2755 return VINF_SUCCESS;
2756 }
2757
2758 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
2759 {
2760 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
2761 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
2762
2763 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
2764 return VINF_SUCCESS;
2765 }
2766
2767 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
2768 {
2769 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
2770 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
2771
2772 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
2773 return VINF_SUCCESS;
2774 }
2775
2776 default:
2777 Log(("Unknown IDC %#lx\n", (long)uReq));
2778 break;
2779 }
2780
2781#undef REQ_CHECK_IDC_SIZE
2782 return VERR_NOT_SUPPORTED;
2783}
2784
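/*
 * Illustrative sketch only (disabled): how an IDC client (typically another
 * kernel module) could fill in the connect request handled above. The helper
 * name is an assumption made for the example; in-tree clients use dedicated
 * IDC wrapper code instead.
 */
#if 0
static void supdrvExampleInitIdcConnect(SUPDRVIDCREQCONNECT *pReq)
{
    RT_ZERO(*pReq);
    pReq->Hdr.cb              = sizeof(*pReq);
    pReq->Hdr.rc              = VERR_INTERNAL_ERROR;
    pReq->Hdr.pSession        = NULL;                               /* must be NULL on connect */
    pReq->u.In.u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE;
    pReq->u.In.uReqVersion    = SUPDRV_IDC_VERSION;
    pReq->u.In.uMinVersion    = SUPDRV_IDC_VERSION & UINT32_C(0xffff0000);
}
#endif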
2785
2786/**
2787 * Register an object for reference counting.
2788 * The object is registered with one reference in the specified session.
2789 *
2790 * @returns Unique identifier on success (pointer).
2791 * All future references must use this identifier.
2792 * @returns NULL on failure.
2793 * @param pSession The caller's session.
2794 * @param enmType The object type.
2795 * @param pfnDestructor The destructor function which will be called when the reference count reaches 0.
2796 * @param pvUser1 The first user argument.
2797 * @param pvUser2 The second user argument.
2798 */
2799SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
2800{
2801 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2802 PSUPDRVOBJ pObj;
2803 PSUPDRVUSAGE pUsage;
2804
2805 /*
2806 * Validate the input.
2807 */
2808 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
2809 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
2810 AssertPtrReturn(pfnDestructor, NULL);
2811
2812 /*
2813 * Allocate and initialize the object.
2814 */
2815 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
2816 if (!pObj)
2817 return NULL;
2818 pObj->u32Magic = SUPDRVOBJ_MAGIC;
2819 pObj->enmType = enmType;
2820 pObj->pNext = NULL;
2821 pObj->cUsage = 1;
2822 pObj->pfnDestructor = pfnDestructor;
2823 pObj->pvUser1 = pvUser1;
2824 pObj->pvUser2 = pvUser2;
2825 pObj->CreatorUid = pSession->Uid;
2826 pObj->CreatorGid = pSession->Gid;
2827 pObj->CreatorProcess= pSession->Process;
2828 supdrvOSObjInitCreator(pObj, pSession);
2829
2830 /*
2831 * Allocate the usage record.
2832 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
2833 */
2834 RTSpinlockAcquire(pDevExt->Spinlock);
2835
2836 pUsage = pDevExt->pUsageFree;
2837 if (pUsage)
2838 pDevExt->pUsageFree = pUsage->pNext;
2839 else
2840 {
2841 RTSpinlockRelease(pDevExt->Spinlock);
2842 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
2843 if (!pUsage)
2844 {
2845 RTMemFree(pObj);
2846 return NULL;
2847 }
2848 RTSpinlockAcquire(pDevExt->Spinlock);
2849 }
2850
2851 /*
2852 * Insert the object and create the session usage record.
2853 */
2854 /* The object. */
2855 pObj->pNext = pDevExt->pObjs;
2856 pDevExt->pObjs = pObj;
2857
2858 /* The session record. */
2859 pUsage->cUsage = 1;
2860 pUsage->pObj = pObj;
2861 pUsage->pNext = pSession->pUsage;
2862 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
2863 pSession->pUsage = pUsage;
2864
2865 RTSpinlockRelease(pDevExt->Spinlock);
2866
2867 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser2=%p)\n", pObj, pvUser1, pvUser2));
2868 return pObj;
2869}
2870
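/*
 * Illustrative sketch only (disabled): registering an object with a
 * destructor and dropping the initial reference again. The destructor matches
 * the PFNSUPDRVDESTRUCTOR callback shape used by SUPR0ObjRegister; the helper
 * names and the use of SUPDRVOBJTYPE_VM as the type are assumptions made for
 * the example.
 */
#if 0
static DECLCALLBACK(void) supdrvExampleObjDtor(void *pvObj, void *pvUser1, void *pvUser2)
{
    NOREF(pvObj); NOREF(pvUser2);
    RTMemFree(pvUser1);                 /* free the payload passed as pvUser1 */
}

static int supdrvExampleObjUsage(PSUPDRVSESSION pSession)
{
    void *pvPayload = RTMemAllocZ(64);
    void *pvObj     = pvPayload
                    ? SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, supdrvExampleObjDtor, pvPayload, NULL)
                    : NULL;
    if (!pvObj)
    {
        RTMemFree(pvPayload);
        return VERR_NO_MEMORY;
    }

    /* Other sessions would reference it via SUPR0ObjAddRef(pvObj, pOtherSession). */

    /* Drop the initial reference; the destructor runs when the count reaches zero. */
    return SUPR0ObjRelease(pvObj, pSession);
}
#endif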
2871
2872/**
2873 * Increment the reference counter for the object associating the reference
2874 * with the specified session.
2875 *
2876 * @returns IPRT status code.
2877 * @param pvObj The identifier returned by SUPR0ObjRegister().
2878 * @param pSession The session which is referencing the object.
2879 *
2880 * @remarks The caller should not own any spinlocks and must carefully protect
2881 * itself against potential race with the destructor so freed memory
2882 * isn't accessed here.
2883 */
2884SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
2885{
2886 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
2887}
2888
2889
2890/**
2891 * Increment the reference counter for the object associating the reference
2892 * with the specified session.
2893 *
2894 * @returns IPRT status code.
2895 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
2896 * couldn't be allocated. (If you see this you're not doing the right
2897 * thing and it won't ever work reliably.)
2898 *
2899 * @param pvObj The identifier returned by SUPR0ObjRegister().
2900 * @param pSession The session which is referencing the object.
2901 * @param fNoBlocking Set if it's not OK to block. Never try to make the
2902 * first reference to an object in a session with this
2903 * argument set.
2904 *
2905 * @remarks The caller should not own any spinlocks and must carefully protect
2906 * itself against potential race with the destructor so freed memory
2907 * isn't accessed here.
2908 */
2909SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
2910{
2911 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2912 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2913 int rc = VINF_SUCCESS;
2914 PSUPDRVUSAGE pUsagePre;
2915 PSUPDRVUSAGE pUsage;
2916
2917 /*
2918 * Validate the input.
2919 * Be ready for the destruction race (someone might be stuck in the
2920 * destructor waiting on a lock we own).
2921 */
2922 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2923 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
2924 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
2925 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
2926 VERR_INVALID_PARAMETER);
2927
2928 RTSpinlockAcquire(pDevExt->Spinlock);
2929
2930 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2931 {
2932 RTSpinlockRelease(pDevExt->Spinlock);
2933
2934 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2935 return VERR_WRONG_ORDER;
2936 }
2937
2938 /*
2939 * Preallocate the usage record if we can.
2940 */
2941 pUsagePre = pDevExt->pUsageFree;
2942 if (pUsagePre)
2943 pDevExt->pUsageFree = pUsagePre->pNext;
2944 else if (!fNoBlocking)
2945 {
2946 RTSpinlockRelease(pDevExt->Spinlock);
2947 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2948 if (!pUsagePre)
2949 return VERR_NO_MEMORY;
2950
2951 RTSpinlockAcquire(pDevExt->Spinlock);
2952 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2953 {
2954 RTSpinlockRelease(pDevExt->Spinlock);
2955
2956 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2957 return VERR_WRONG_ORDER;
2958 }
2959 }
2960
2961 /*
2962 * Reference the object.
2963 */
2964 pObj->cUsage++;
2965
2966 /*
2967 * Look for the session record.
2968 */
2969 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
2970 {
2971 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
2972 if (pUsage->pObj == pObj)
2973 break;
2974 }
2975 if (pUsage)
2976 pUsage->cUsage++;
2977 else if (pUsagePre)
2978 {
2979 /* create a new session record. */
2980 pUsagePre->cUsage = 1;
2981 pUsagePre->pObj = pObj;
2982 pUsagePre->pNext = pSession->pUsage;
2983 pSession->pUsage = pUsagePre;
2984 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
2985
2986 pUsagePre = NULL;
2987 }
2988 else
2989 {
2990 pObj->cUsage--;
2991 rc = VERR_TRY_AGAIN;
2992 }
2993
2994 /*
2995 * Put any unused usage record into the free list.
2996 */
2997 if (pUsagePre)
2998 {
2999 pUsagePre->pNext = pDevExt->pUsageFree;
3000 pDevExt->pUsageFree = pUsagePre;
3001 }
3002
3003 RTSpinlockRelease(pDevExt->Spinlock);
3004
3005 return rc;
3006}
3007
3008
3009/**
3010 * Decrement / destroy a reference counter record for an object.
3011 *
3012 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
3013 *
3014 * @returns IPRT status code.
3015 * @retval VINF_SUCCESS if not destroyed.
3016 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
3017 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
3018 * strict builds.
3019 *
3020 * @param pvObj The identifier returned by SUPR0ObjRegister().
3021 * @param pSession The session which is referencing the object.
3022 */
3023SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
3024{
3025 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3026 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3027 int rc = VERR_INVALID_PARAMETER;
3028 PSUPDRVUSAGE pUsage;
3029 PSUPDRVUSAGE pUsagePrev;
3030
3031 /*
3032 * Validate the input.
3033 */
3034 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3035 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3036 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3037 VERR_INVALID_PARAMETER);
3038
3039 /*
3040 * Acquire the spinlock and look for the usage record.
3041 */
3042 RTSpinlockAcquire(pDevExt->Spinlock);
3043
3044 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
3045 pUsage;
3046 pUsagePrev = pUsage, pUsage = pUsage->pNext)
3047 {
3048 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3049 if (pUsage->pObj == pObj)
3050 {
3051 rc = VINF_SUCCESS;
3052 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
3053 if (pUsage->cUsage > 1)
3054 {
3055 pObj->cUsage--;
3056 pUsage->cUsage--;
3057 }
3058 else
3059 {
3060 /*
3061 * Free the session record.
3062 */
3063 if (pUsagePrev)
3064 pUsagePrev->pNext = pUsage->pNext;
3065 else
3066 pSession->pUsage = pUsage->pNext;
3067 pUsage->pNext = pDevExt->pUsageFree;
3068 pDevExt->pUsageFree = pUsage;
3069
3070 /* What about the object? */
3071 if (pObj->cUsage > 1)
3072 pObj->cUsage--;
3073 else
3074 {
3075 /*
3076 * Object is to be destroyed, unlink it.
3077 */
3078 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
3079 rc = VINF_OBJECT_DESTROYED;
3080 if (pDevExt->pObjs == pObj)
3081 pDevExt->pObjs = pObj->pNext;
3082 else
3083 {
3084 PSUPDRVOBJ pObjPrev;
3085 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
3086 if (pObjPrev->pNext == pObj)
3087 {
3088 pObjPrev->pNext = pObj->pNext;
3089 break;
3090 }
3091 Assert(pObjPrev);
3092 }
3093 }
3094 }
3095 break;
3096 }
3097 }
3098
3099 RTSpinlockRelease(pDevExt->Spinlock);
3100
3101 /*
3102 * Call the destructor and free the object if required.
3103 */
3104 if (rc == VINF_OBJECT_DESTROYED)
3105 {
3106 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
3107 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
3108 if (pObj->pfnDestructor)
3109 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
3110 RTMemFree(pObj);
3111 }
3112
3113 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
3114 return rc;
3115}
3116
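/*
 * Editor's note: an illustrative sketch, not part of the driver. It shows a second,
 * hypothetical session (pOtherSession) taking and dropping a reference on an object
 * created elsewhere; error handling is abbreviated.
 *
 * @code
 *  int rc = SUPR0ObjAddRef(pvObj, pOtherSession);       // may block to allocate a usage record
 *  if (RT_SUCCESS(rc))
 *  {
 *      // ... use the object ...
 *      rc = SUPR0ObjRelease(pvObj, pOtherSession);
 *      if (rc == VINF_OBJECT_DESTROYED)
 *      {
 *          // this was the last reference; the destructor has already been called.
 *      }
 *  }
 * @endcode
 */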
3117
3118/**
3119 * Verifies that the current process can access the specified object.
3120 *
3121 * @returns The following IPRT status code:
3122 * @retval VINF_SUCCESS if access was granted.
3123 * @retval VERR_PERMISSION_DENIED if denied access.
3124 * @retval VERR_INVALID_PARAMETER if invalid parameter.
3125 *
3126 * @param pvObj The identifier returned by SUPR0ObjRegister().
3127 * @param pSession The session which wishes to access the object.
3128 * @param pszObjName Object string name. This is optional and depends on the object type.
3129 *
3130 * @remark The caller is responsible for making sure the object isn't removed while
3131 * we're inside this function. If uncertain about this, just call AddRef before calling us.
3132 */
3133SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
3134{
3135 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3136 int rc;
3137
3138 /*
3139 * Validate the input.
3140 */
3141 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3142 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3143 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3144 VERR_INVALID_PARAMETER);
3145
3146 /*
3147 * Check access. (returns true if a decision has been made.)
3148 */
3149 rc = VERR_INTERNAL_ERROR;
3150 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
3151 return rc;
3152
3153 /*
3154 * Default policy is to allow the user to access his own
3155 * stuff but nothing else.
3156 */
3157 if (pObj->CreatorUid == pSession->Uid)
3158 return VINF_SUCCESS;
3159 return VERR_PERMISSION_DENIED;
3160}
3161
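/*
 * Editor's note: an illustrative sketch, not part of the driver. A typical caller
 * takes a reference first and then asks the driver whether the requesting session
 * may use the object; pszNetworkName is a hypothetical object name.
 *
 * @code
 *  int rc = SUPR0ObjAddRef(pvObj, pSession);
 *  if (RT_SUCCESS(rc))
 *  {
 *      rc = SUPR0ObjVerifyAccess(pvObj, pSession, pszNetworkName);
 *      if (RT_FAILURE(rc))                      // e.g. VERR_PERMISSION_DENIED
 *          SUPR0ObjRelease(pvObj, pSession);    // not allowed, drop the reference again
 *  }
 * @endcode
 */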
3162
3163/**
3164 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionVM member.
3165 *
3166 * @returns The associated VM pointer.
3167 * @param pSession The session of the current thread.
3168 */
3169SUPR0DECL(PVM) SUPR0GetSessionVM(PSUPDRVSESSION pSession)
3170{
3171 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3172 return pSession->pSessionVM;
3173}
3174
3175
3176/**
3177 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionGVM member.
3178 *
3179 * @returns The associated GVM pointer.
3180 * @param pSession The session of the current thread.
3181 */
3182SUPR0DECL(PGVM) SUPR0GetSessionGVM(PSUPDRVSESSION pSession)
3183{
3184 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3185 return pSession->pSessionGVM;
3186}
3187
3188
3189/**
3190 * API for the VMMR0 module to work the SUPDRVSESSION::pSessionVM member.
3191 *
3192 * This will fail if there is already a VM associated with the session and pVM
3193 * isn't NULL.
3194 *
3195 * @retval VINF_SUCCESS
3196 * @retval VERR_ALREADY_EXISTS if there already is a VM associated with the
3197 * session.
3198 * @retval VERR_INVALID_PARAMETER if only one of the parameters is NULL or if
3199 * the session is invalid.
3200 *
3201 * @param pSession The session of the current thread.
3202 * @param pGVM The GVM to associate with the session. Pass NULL to
3203 * disassociate.
3204 * @param pVM The VM to associate with the session. Pass NULL to
3205 * disassociate.
3206 */
3207SUPR0DECL(int) SUPR0SetSessionVM(PSUPDRVSESSION pSession, PGVM pGVM, PVM pVM)
3208{
3209 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3210 AssertReturn((pGVM != NULL) == (pVM != NULL), VERR_INVALID_PARAMETER);
3211
3212 RTSpinlockAcquire(pSession->pDevExt->Spinlock);
3213 if (pGVM)
3214 {
3215 if (!pSession->pSessionGVM)
3216 {
3217 pSession->pSessionGVM = pGVM;
3218 pSession->pSessionVM = pVM;
3219 pSession->pFastIoCtrlVM = NULL;
3220 }
3221 else
3222 {
3223 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3224 SUPR0Printf("SUPR0SetSessionVM: Unable to associate GVM/VM %p/%p with session %p as it has %p/%p already!\n",
3225 pGVM, pVM, pSession, pSession->pSessionGVM, pSession->pSessionVM);
3226 return VERR_ALREADY_EXISTS;
3227 }
3228 }
3229 else
3230 {
3231 pSession->pSessionGVM = NULL;
3232 pSession->pSessionVM = NULL;
3233 pSession->pFastIoCtrlVM = NULL;
3234 }
3235 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3236 return VINF_SUCCESS;
3237}
3238
3239
3240/** @copydoc RTLogGetDefaultInstanceEx
3241 * @remarks To allow overriding RTLogGetDefaultInstanceEx locally. */
3242SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3243{
3244 return RTLogGetDefaultInstanceEx(fFlagsAndGroup);
3245}
3246
3247
3248/** @copydoc RTLogRelGetDefaultInstanceEx
3249 * @remarks To allow overriding RTLogRelGetDefaultInstanceEx locally. */
3250SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogRelInstanceEx(uint32_t fFlagsAndGroup)
3251{
3252 return RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
3253}
3254
3255
3256/**
3257 * Lock pages.
3258 *
3259 * @returns IPRT status code.
3260 * @param pSession Session to which the locked memory should be associated.
3261 * @param pvR3 Start of the memory range to lock.
3262 * This must be page aligned.
3263 * @param cPages Number of pages to lock.
3264 * @param paPages Where to put the physical addresses of locked memory.
3265 */
3266SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
3267{
3268 int rc;
3269 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3270 const size_t cb = (size_t)cPages << PAGE_SHIFT;
3271 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
3272
3273 /*
3274 * Verify input.
3275 */
3276 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3277 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
3278 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
3279 || !pvR3)
3280 {
3281 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
3282 return VERR_INVALID_PARAMETER;
3283 }
3284
3285 /*
3286 * Let IPRT do the job.
3287 */
3288 Mem.eType = MEMREF_TYPE_LOCKED;
3289 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3290 if (RT_SUCCESS(rc))
3291 {
3292 uint32_t iPage = cPages;
3293 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
3294 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
3295
3296 while (iPage-- > 0)
3297 {
3298 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3299 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
3300 {
3301 AssertMsgFailed(("iPage=%d\n", iPage));
3302 rc = VERR_INTERNAL_ERROR;
3303 break;
3304 }
3305 }
3306 if (RT_SUCCESS(rc))
3307 rc = supdrvMemAdd(&Mem, pSession);
3308 if (RT_FAILURE(rc))
3309 {
3310 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
3311 AssertRC(rc2);
3312 }
3313 }
3314
3315 return rc;
3316}
3317
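/*
 * Editor's note: an illustrative sketch, not part of the driver. It locks a
 * hypothetical, page aligned ring-3 buffer (pvUserBuf) and retrieves the physical
 * address of each page; error handling is abbreviated.
 *
 * @code
 *  RTHCPHYS aPhys[16];                                   // one entry per page
 *  uint32_t const cPages = RT_ELEMENTS(aPhys);
 *  int rc = SUPR0LockMem(pSession, pvUserBuf, cPages, &aPhys[0]);
 *  if (RT_SUCCESS(rc))
 *  {
 *      // ... access / DMA the pages via aPhys[] while they stay locked ...
 *      SUPR0UnlockMem(pSession, pvUserBuf);
 *  }
 * @endcode
 */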
3318
3319/**
3320 * Unlocks the memory pointed to by pv.
3321 *
3322 * @returns IPRT status code.
3323 * @param pSession Session to which the memory was locked.
3324 * @param pvR3 Memory to unlock.
3325 */
3326SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3327{
3328 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3329 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3330 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
3331}
3332
3333
3334/**
3335 * Allocates a chunk of page aligned memory with contiguous and fixed physical
3336 * backing.
3337 *
3338 * @returns IPRT status code.
3339 * @param pSession Session data.
3340 * @param cPages Number of pages to allocate.
3341 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
3342 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
3343 * @param pHCPhys Where to put the physical address of allocated memory.
3344 */
3345SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
3346{
3347 int rc;
3348 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3349 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
3350
3351 /*
3352 * Validate input.
3353 */
3354 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3355 if (!ppvR3 || !ppvR0 || !pHCPhys)
3356 {
3357 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
3358 pSession, ppvR0, ppvR3, pHCPhys));
3359 return VERR_INVALID_PARAMETER;
3360
3361 }
3362 if (cPages < 1 || cPages >= 256)
3363 {
3364 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3365 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3366 }
3367
3368 /*
3369 * Let IPRT do the job.
3370 */
3371 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
3372 if (RT_SUCCESS(rc))
3373 {
3374 int rc2;
3375 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3376 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3377 if (RT_SUCCESS(rc))
3378 {
3379 Mem.eType = MEMREF_TYPE_CONT;
3380 rc = supdrvMemAdd(&Mem, pSession);
3381 if (!rc)
3382 {
3383 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3384 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3385 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
3386 return VINF_SUCCESS;
3387 }
3388
3389 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3390 AssertRC(rc2);
3391 }
3392 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3393 AssertRC(rc2);
3394 }
3395
3396 return rc;
3397}
3398
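/*
 * Editor's note: an illustrative sketch, not part of the driver. It allocates a
 * small physically contiguous buffer with both mappings and frees it again; error
 * handling is abbreviated.
 *
 * @code
 *  RTR0PTR  pvR0;
 *  RTR3PTR  pvR3;
 *  RTHCPHYS HCPhys;
 *  int rc = SUPR0ContAlloc(pSession, 4 /* pages */, &pvR0, &pvR3, &HCPhys);
 *  if (RT_SUCCESS(rc))
 *  {
 *      // ... hand HCPhys to a device, use pvR0 in ring-0 and pvR3 in ring-3 ...
 *      SUPR0ContFree(pSession, (RTHCUINTPTR)pvR0);       // either address identifies the allocation
 *  }
 * @endcode
 */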
3399
3400/**
3401 * Frees memory allocated using SUPR0ContAlloc().
3402 *
3403 * @returns IPRT status code.
3404 * @param pSession The session to which the memory was allocated.
3405 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3406 */
3407SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3408{
3409 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3410 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3411 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
3412}
3413
3414
3415/**
3416 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
3417 *
3418 * The memory isn't zeroed.
3419 *
3420 * @returns IPRT status code.
3421 * @param pSession Session data.
3422 * @param cPages Number of pages to allocate.
3423 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
3424 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
3425 * @param paPages Where to put the physical addresses of allocated memory.
3426 */
3427SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
3428{
3429 unsigned iPage;
3430 int rc;
3431 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3432 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
3433
3434 /*
3435 * Validate input.
3436 */
3437 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3438 if (!ppvR3 || !ppvR0 || !paPages)
3439 {
3440 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
3441 pSession, ppvR3, ppvR0, paPages));
3442 return VERR_INVALID_PARAMETER;
3443
3444 }
3445 if (cPages < 1 || cPages >= 256)
3446 {
3447 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3448 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3449 }
3450
3451 /*
3452 * Let IPRT do the work.
3453 */
3454 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
3455 if (RT_SUCCESS(rc))
3456 {
3457 int rc2;
3458 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3459 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3460 if (RT_SUCCESS(rc))
3461 {
3462 Mem.eType = MEMREF_TYPE_LOW;
3463 rc = supdrvMemAdd(&Mem, pSession);
3464 if (!rc)
3465 {
3466 for (iPage = 0; iPage < cPages; iPage++)
3467 {
3468 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3469 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
3470 }
3471 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3472 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3473 return VINF_SUCCESS;
3474 }
3475
3476 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3477 AssertRC(rc2);
3478 }
3479
3480 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3481 AssertRC(rc2);
3482 }
3483
3484 return rc;
3485}
3486
3487
3488/**
3489 * Frees memory allocated using SUPR0LowAlloc().
3490 *
3491 * @returns IPRT status code.
3492 * @param pSession The session to which the memory was allocated.
3493 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3494 */
3495SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3496{
3497 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3498 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3499 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
3500}
3501
3502
3503
3504/**
3505 * Allocates a chunk of memory with both R0 and R3 mappings.
3506 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
3507 *
3508 * @returns IPRT status code.
3509 * @param pSession The session to associate the allocation with.
3510 * @param cb Number of bytes to allocate.
3511 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3512 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3513 */
3514SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
3515{
3516 int rc;
3517 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3518 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
3519
3520 /*
3521 * Validate input.
3522 */
3523 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3524 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
3525 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3526 if (cb < 1 || cb >= _4M)
3527 {
3528 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
3529 return VERR_INVALID_PARAMETER;
3530 }
3531
3532 /*
3533 * Let IPRT do the work.
3534 */
3535 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
3536 if (RT_SUCCESS(rc))
3537 {
3538 int rc2;
3539 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3540 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3541 if (RT_SUCCESS(rc))
3542 {
3543 Mem.eType = MEMREF_TYPE_MEM;
3544 rc = supdrvMemAdd(&Mem, pSession);
3545 if (!rc)
3546 {
3547 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3548 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3549 return VINF_SUCCESS;
3550 }
3551
3552 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3553 AssertRC(rc2);
3554 }
3555
3556 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3557 AssertRC(rc2);
3558 }
3559
3560 return rc;
3561}
3562
3563
3564/**
3565 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
3566 *
3567 * @returns IPRT status code.
3568 * @param pSession The session to which the memory was allocated.
3569 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3570 * @param paPages Where to store the physical addresses.
3571 */
3572SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
3573{
3574 PSUPDRVBUNDLE pBundle;
3575 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
3576
3577 /*
3578 * Validate input.
3579 */
3580 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3581 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
3582 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
3583
3584 /*
3585 * Search for the address.
3586 */
3587 RTSpinlockAcquire(pSession->Spinlock);
3588 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3589 {
3590 if (pBundle->cUsed > 0)
3591 {
3592 unsigned i;
3593 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3594 {
3595 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
3596 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3597 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3598 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3599 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
3600 )
3601 )
3602 {
3603 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
3604 size_t iPage;
3605 for (iPage = 0; iPage < cPages; iPage++)
3606 {
3607 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
3608 paPages[iPage].uReserved = 0;
3609 }
3610 RTSpinlockRelease(pSession->Spinlock);
3611 return VINF_SUCCESS;
3612 }
3613 }
3614 }
3615 }
3616 RTSpinlockRelease(pSession->Spinlock);
3617 Log(("Failed to find %p!!!\n", (void *)uPtr));
3618 return VERR_INVALID_PARAMETER;
3619}
3620
3621
3622/**
3623 * Free memory allocated by SUPR0MemAlloc().
3624 *
3625 * @returns IPRT status code.
3626 * @param pSession The session owning the allocation.
3627 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3628 */
3629SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3630{
3631 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3632 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3633 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
3634}
3635
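/*
 * Editor's note: an illustrative sketch, not part of the driver, combining
 * SUPR0MemAlloc, SUPR0MemGetPhys and SUPR0MemFree; error handling is abbreviated.
 *
 * @code
 *  RTR0PTR pvR0;
 *  RTR3PTR pvR3;
 *  int rc = SUPR0MemAlloc(pSession, 2 * PAGE_SIZE, &pvR0, &pvR3);
 *  if (RT_SUCCESS(rc))
 *  {
 *      SUPPAGE aPages[2];
 *      rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
 *      // ... on success aPages[i].Phys holds the physical address of page i ...
 *      SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
 *  }
 * @endcode
 */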
3636
3637/**
3638 * Allocates a chunk of memory with a kernel and/or a user mode mapping.
3639 *
3640 * The memory is fixed and it's possible to query the physical addresses using
3641 * SUPR0MemGetPhys().
3642 *
3643 * @returns IPRT status code.
3644 * @param pSession The session to associate the allocation with.
3645 * @param cPages The number of pages to allocate.
3646 * @param fFlags Flags, reserved for the future. Must be zero.
3647 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3648 * NULL if no ring-3 mapping.
3649 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3650 * NULL if no ring-0 mapping.
3651 * @param paPages Where to store the addresses of the pages. Optional.
3652 */
3653SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
3654{
3655 int rc;
3656 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3657 LogFlow(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
3658
3659 /*
3660 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3661 */
3662 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3663 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
3664 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3665 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
3666 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3667 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
3668 {
3669 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than %uMB (VBOX_MAX_ALLOC_PAGE_COUNT pages).\n", cPages, VBOX_MAX_ALLOC_PAGE_COUNT * (_1M / _4K)));
3670 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3671 }
3672
3673 /*
3674 * Let IPRT do the work.
3675 */
3676 if (ppvR0)
3677 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
3678 else
3679 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
3680 if (RT_SUCCESS(rc))
3681 {
3682 int rc2;
3683 if (ppvR3)
3684 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3685 else
3686 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
3687 if (RT_SUCCESS(rc))
3688 {
3689 Mem.eType = MEMREF_TYPE_PAGE;
3690 rc = supdrvMemAdd(&Mem, pSession);
3691 if (!rc)
3692 {
3693 if (ppvR3)
3694 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3695 if (ppvR0)
3696 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3697 if (paPages)
3698 {
3699 uint32_t iPage = cPages;
3700 while (iPage-- > 0)
3701 {
3702 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
3703 Assert(paPages[iPage] != NIL_RTHCPHYS);
3704 }
3705 }
3706 return VINF_SUCCESS;
3707 }
3708
3709 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3710 AssertRC(rc2);
3711 }
3712
3713 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3714 AssertRC(rc2);
3715 }
3716 return rc;
3717}
3718
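/*
 * Editor's note: an illustrative sketch, not part of the driver. It allocates pages
 * with both a ring-0 and a ring-3 mapping and collects the physical addresses; error
 * handling is abbreviated.
 *
 * @code
 *  RTR3PTR  pvR3;
 *  RTR0PTR  pvR0;
 *  RTHCPHYS aPhys[8];
 *  int rc = SUPR0PageAllocEx(pSession, RT_ELEMENTS(aPhys), 0 /* fFlags */, &pvR3, &pvR0, &aPhys[0]);
 *  if (RT_SUCCESS(rc))
 *  {
 *      // ... share the pages with ring-3 via pvR3, access them in ring-0 via pvR0 ...
 *      SUPR0PageFree(pSession, pvR3);                    // freeing is keyed on the ring-3 address
 *  }
 * @endcode
 */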
3719
3720/**
3721 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
3722 * space.
3723 *
3724 * @returns IPRT status code.
3725 * @param pSession The session to associate the allocation with.
3726 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3727 * @param offSub Where to start mapping. Must be page aligned.
3728 * @param cbSub How much to map. Must be page aligned.
3729 * @param fFlags Flags, MBZ.
3730 * @param ppvR0 Where to return the address of the ring-0 mapping on
3731 * success.
3732 */
3733SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
3734 uint32_t fFlags, PRTR0PTR ppvR0)
3735{
3736 int rc;
3737 PSUPDRVBUNDLE pBundle;
3738 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
3739 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
3740
3741 /*
3742 * Validate input.
3743 */
3744 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3745 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3746 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3747 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3748 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3749 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3750
3751 /*
3752 * Find the memory object.
3753 */
3754 RTSpinlockAcquire(pSession->Spinlock);
3755 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3756 {
3757 if (pBundle->cUsed > 0)
3758 {
3759 unsigned i;
3760 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3761 {
3762 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3763 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3764 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3765 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
3766 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
3767 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3768 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
3769 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
3770 {
3771 hMemObj = pBundle->aMem[i].MemObj;
3772 break;
3773 }
3774 }
3775 }
3776 }
3777 RTSpinlockRelease(pSession->Spinlock);
3778
3779 rc = VERR_INVALID_PARAMETER;
3780 if (hMemObj != NIL_RTR0MEMOBJ)
3781 {
3782 /*
3783 * Do some further input validations before calling IPRT.
3784 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
3785 */
3786 size_t cbMemObj = RTR0MemObjSize(hMemObj);
3787 if ( offSub < cbMemObj
3788 && cbSub <= cbMemObj
3789 && offSub + cbSub <= cbMemObj)
3790 {
3791 RTR0MEMOBJ hMapObj;
3792 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
3793 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
3794 if (RT_SUCCESS(rc))
3795 *ppvR0 = RTR0MemObjAddress(hMapObj);
3796 }
3797 else
3798 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3799
3800 }
3801 return rc;
3802}
3803
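/*
 * Editor's note: an illustrative sketch, not part of the driver. It maps the second
 * page of an existing SUPR0PageAllocEx allocation into kernel space; pvR3 is assumed
 * to be the ring-3 address returned by that allocation and error handling is abbreviated.
 *
 * @code
 *  RTR0PTR pvR0Sub;
 *  int rc = SUPR0PageMapKernel(pSession, pvR3, PAGE_SIZE /* offSub */, PAGE_SIZE /* cbSub */,
 *                              0 /* fFlags */, &pvR0Sub);
 *  if (RT_SUCCESS(rc))
 *  {
 *      // ... pvR0Sub now addresses that single page in ring-0; the mapping is torn
 *      //     down together with the underlying allocation ...
 *  }
 * @endcode
 */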
3804
3805/**
3806 * Changes the page level protection of one or more pages previously allocated
3807 * by SUPR0PageAllocEx.
3808 *
3809 * @returns IPRT status code.
3810 * @param pSession The session to associate the allocation with.
3811 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3812 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
3813 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
3814 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
3815 * @param offSub Where to start changing. Must be page aligned.
3816 * @param cbSub How much to change. Must be page aligned.
3817 * @param fProt The new page level protection, see RTMEM_PROT_*.
3818 */
3819SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
3820{
3821 int rc;
3822 PSUPDRVBUNDLE pBundle;
3823 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
3824 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
3825 LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt-%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
3826
3827 /*
3828 * Validate input.
3829 */
3830 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3831 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
3832 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3833 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3834 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3835
3836 /*
3837 * Find the memory object.
3838 */
3839 RTSpinlockAcquire(pSession->Spinlock);
3840 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3841 {
3842 if (pBundle->cUsed > 0)
3843 {
3844 unsigned i;
3845 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3846 {
3847 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3848 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3849 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3850 || pvR3 == NIL_RTR3PTR)
3851 && ( pvR0 == NIL_RTR0PTR
3852 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
3853 && ( pvR3 == NIL_RTR3PTR
3854 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
3855 {
3856 if (pvR0 != NIL_RTR0PTR)
3857 hMemObjR0 = pBundle->aMem[i].MemObj;
3858 if (pvR3 != NIL_RTR3PTR)
3859 hMemObjR3 = pBundle->aMem[i].MapObjR3;
3860 break;
3861 }
3862 }
3863 }
3864 }
3865 RTSpinlockRelease(pSession->Spinlock);
3866
3867 rc = VERR_INVALID_PARAMETER;
3868 if ( hMemObjR0 != NIL_RTR0MEMOBJ
3869 || hMemObjR3 != NIL_RTR0MEMOBJ)
3870 {
3871 /*
3872 * Do some further input validations before calling IPRT.
3873 */
3874 size_t cbMemObj = hMemObjR0 != NIL_RTR0PTR ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
3875 if ( offSub < cbMemObj
3876 && cbSub <= cbMemObj
3877 && offSub + cbSub <= cbMemObj)
3878 {
3879 rc = VINF_SUCCESS;
3880 if (hMemObjR3 != NIL_RTR0PTR)
3881 rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
3882 if (hMemObjR0 != NIL_RTR0PTR && RT_SUCCESS(rc))
3883 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
3884 }
3885 else
3886 SUPR0Printf("SUPR0PageProtect: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3887
3888 }
3889 return rc;
3890
3891}
3892
3893
3894/**
3895 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
3896 *
3897 * @returns IPRT status code.
3898 * @param pSession The session owning the allocation.
3899 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
3900 * SUPR0PageAllocEx().
3901 */
3902SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3903{
3904 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3905 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3906 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
3907}
3908
3909
3910/**
3911 * Reports a bad context, currently that means EFLAGS.AC is 0 instead of 1.
3912 *
3913 * @param pDevExt The device extension.
3914 * @param pszFile The source file where the caller detected the bad
3915 * context.
3916 * @param uLine The line number in @a pszFile.
3917 * @param pszExtra Optional additional message to give further hints.
3918 */
3919void VBOXCALL supdrvBadContext(PSUPDRVDEVEXT pDevExt, const char *pszFile, uint32_t uLine, const char *pszExtra)
3920{
3921 uint32_t cCalls;
3922
3923 /*
3924 * Shorten the filename before displaying the message.
3925 */
3926 for (;;)
3927 {
3928 const char *pszTmp = strchr(pszFile, '/');
3929 if (!pszTmp)
3930 pszTmp = strchr(pszFile, '\\');
3931 if (!pszTmp)
3932 break;
3933 pszFile = pszTmp + 1;
3934 }
3935 if (RT_VALID_PTR(pszExtra) && *pszExtra)
3936 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s: %s\n", uLine, pszFile, pszExtra);
3937 else
3938 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s!\n", uLine, pszFile);
3939
3940 /*
3941 * Record the incident so that we stand a chance of blocking I/O controls
3942 * before panicking the system.
3943 */
3944 cCalls = ASMAtomicIncU32(&pDevExt->cBadContextCalls);
3945 if (cCalls > UINT32_MAX - _1K)
3946 ASMAtomicWriteU32(&pDevExt->cBadContextCalls, UINT32_MAX - _1K);
3947}
3948
3949
3950/**
3951 * Reports a bad context, currently that means EFLAGS.AC is 0 instead of 1.
3952 *
3953 * @param pSession The session of the caller.
3954 * @param pszFile The source file where the caller detected the bad
3955 * context.
3956 * @param uLine The line number in @a pszFile.
3957 * @param pszExtra Optional additional message to give further hints.
3958 */
3959SUPR0DECL(void) SUPR0BadContext(PSUPDRVSESSION pSession, const char *pszFile, uint32_t uLine, const char *pszExtra)
3960{
3961 PSUPDRVDEVEXT pDevExt;
3962
3963 AssertReturnVoid(SUP_IS_SESSION_VALID(pSession));
3964 pDevExt = pSession->pDevExt;
3965
3966 supdrvBadContext(pDevExt, pszFile, uLine, pszExtra);
3967}
3968
3969
3970/**
3971 * Gets the paging mode of the current CPU.
3972 *
3973 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
3974 */
3975SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
3976{
3977 SUPPAGINGMODE enmMode;
3978
3979 RTR0UINTREG cr0 = ASMGetCR0();
3980 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3981 enmMode = SUPPAGINGMODE_INVALID;
3982 else
3983 {
3984 RTR0UINTREG cr4 = ASMGetCR4();
3985 uint32_t fNXEPlusLMA = 0;
3986 if (cr4 & X86_CR4_PAE)
3987 {
3988 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
3989 if (fExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
3990 {
3991 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
3992 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
3993 fNXEPlusLMA |= RT_BIT(0);
3994 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
3995 fNXEPlusLMA |= RT_BIT(1);
3996 }
3997 }
3998
3999 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
4000 {
4001 case 0:
4002 enmMode = SUPPAGINGMODE_32_BIT;
4003 break;
4004
4005 case X86_CR4_PGE:
4006 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
4007 break;
4008
4009 case X86_CR4_PAE:
4010 enmMode = SUPPAGINGMODE_PAE;
4011 break;
4012
4013 case X86_CR4_PAE | RT_BIT(0):
4014 enmMode = SUPPAGINGMODE_PAE_NX;
4015 break;
4016
4017 case X86_CR4_PAE | X86_CR4_PGE:
4018 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4019 break;
4020
4021 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4022 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4023 break;
4024
4025 case RT_BIT(1) | X86_CR4_PAE:
4026 enmMode = SUPPAGINGMODE_AMD64;
4027 break;
4028
4029 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
4030 enmMode = SUPPAGINGMODE_AMD64_NX;
4031 break;
4032
4033 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
4034 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
4035 break;
4036
4037 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4038 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4039 break;
4040
4041 default:
4042 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4043 enmMode = SUPPAGINGMODE_INVALID;
4044 break;
4045 }
4046 }
4047 return enmMode;
4048}
4049
4050
4051/**
4052 * Change CR4 and take care of the kernel CR4 shadow if applicable.
4053 *
4054 * CR4 shadow handling is required for Linux >= 4.0. Calling this function
4055 * instead of ASMSetCR4() is only necessary for semi-permanent CR4 changes
4056 * for code with interrupts enabled.
4057 *
4058 * @returns the old CR4 value.
4059 *
4060 * @param fOrMask bits to be set in CR4.
4061 * @param fAndMask bits to be cleared in CR4.
4062 *
4063 * @remarks Must be called with preemption/interrupts disabled.
4064 */
4065SUPR0DECL(RTCCUINTREG) SUPR0ChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
4066{
4067#ifdef RT_OS_LINUX
4068 return supdrvOSChangeCR4(fOrMask, fAndMask);
4069#else
4070 RTCCUINTREG uOld = ASMGetCR4();
4071 RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
4072 if (uNew != uOld)
4073 ASMSetCR4(uNew);
4074 return uOld;
4075#endif
4076}
4077
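/*
 * Editor's note: an illustrative sketch, not part of the driver, of the intended
 * set-and-restore pattern. X86_CR4_VMXE is just an example bit; preemption is
 * disabled around the change as required by the remarks above.
 *
 * @code
 *  RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
 *  RTThreadPreemptDisable(&PreemptState);
 *  RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE /* fOrMask */, ~(RTCCUINTREG)0 /* fAndMask */);
 *  // ... do the work that requires CR4.VMXE ...
 *  if (!(uOldCr4 & X86_CR4_VMXE))                       // only clear it if it wasn't set before
 *      SUPR0ChangeCR4(0, ~(RTCCUINTREG)X86_CR4_VMXE);
 *  RTThreadPreemptRestore(&PreemptState);
 * @endcode
 */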
4078
4079/**
4080 * Enables or disables hardware virtualization extensions using native OS APIs.
4081 *
4082 * @returns VBox status code.
4083 * @retval VINF_SUCCESS on success.
4084 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4085 *
4086 * @param fEnable Whether to enable or disable.
4087 */
4088SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4089{
4090#ifdef RT_OS_DARWIN
4091 return supdrvOSEnableVTx(fEnable);
4092#else
4093 RT_NOREF1(fEnable);
4094 return VERR_NOT_SUPPORTED;
4095#endif
4096}
4097
4098
4099/**
4100 * Suspends hardware virtualization extensions using the native OS API.
4101 *
4102 * This is called prior to entering raw-mode context.
4103 *
4104 * @returns @c true if suspended, @c false if not.
4105 */
4106SUPR0DECL(bool) SUPR0SuspendVTxOnCpu(void)
4107{
4108#ifdef RT_OS_DARWIN
4109 return supdrvOSSuspendVTxOnCpu();
4110#else
4111 return false;
4112#endif
4113}
4114
4115
4116/**
4117 * Resumes hardware virtualization extensions using the native OS API.
4118 *
4119 * This is called after returning from raw-mode context.
4120 *
4121 * @param fSuspended The return value of SUPR0SuspendVTxOnCpu.
4122 */
4123SUPR0DECL(void) SUPR0ResumeVTxOnCpu(bool fSuspended)
4124{
4125#ifdef RT_OS_DARWIN
4126 supdrvOSResumeVTxOnCpu(fSuspended);
4127#else
4128 RT_NOREF1(fSuspended);
4129 Assert(!fSuspended);
4130#endif
4131}
4132
4133
4134SUPR0DECL(int) SUPR0GetCurrentGdtRw(RTHCUINTPTR *pGdtRw)
4135{
4136#ifdef RT_OS_LINUX
4137 return supdrvOSGetCurrentGdtRw(pGdtRw);
4138#else
4139 NOREF(pGdtRw);
4140 return VERR_NOT_IMPLEMENTED;
4141#endif
4142}
4143
4144
4145/**
4146 * Gets AMD-V and VT-x support for the calling CPU.
4147 *
4148 * @returns VBox status code.
4149 * @param pfCaps Where to store whether VT-x (SUPVTCAPS_VT_X) or AMD-V
4150 * (SUPVTCAPS_AMD_V) is supported.
4151 */
4152SUPR0DECL(int) SUPR0GetVTSupport(uint32_t *pfCaps)
4153{
4154 Assert(pfCaps);
4155 *pfCaps = 0;
4156
4157 /* Check if the CPU even supports CPUID (extremely ancient CPUs). */
4158 if (ASMHasCpuId())
4159 {
4160 /* Check the range of standard CPUID leafs. */
4161 uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
4162 ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
4163 if (ASMIsValidStdRange(uMaxLeaf))
4164 {
4165 /* Query the standard CPUID leaf. */
4166 uint32_t fFeatEcx, fFeatEdx, uDummy;
4167 ASMCpuId(1, &uDummy, &uDummy, &fFeatEcx, &fFeatEdx);
4168
4169 /* Check if the vendor is Intel (or compatible). */
4170 if ( ASMIsIntelCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4171 || ASMIsViaCentaurCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4172 || ASMIsShanghaiCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4173 {
4174 /* Check VT-x support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4175 if ( (fFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
4176 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4177 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4178 {
4179 *pfCaps = SUPVTCAPS_VT_X;
4180 return VINF_SUCCESS;
4181 }
4182 return VERR_VMX_NO_VMX;
4183 }
4184
4185 /* Check if the vendor is AMD (or compatible). */
4186 if ( ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4187 || ASMIsHygonCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4188 {
4189 uint32_t fExtFeatEcx, uExtMaxId;
4190 ASMCpuId(0x80000000, &uExtMaxId, &uDummy, &uDummy, &uDummy);
4191 ASMCpuId(0x80000001, &uDummy, &uDummy, &fExtFeatEcx, &uDummy);
4192
4193 /* Check AMD-V support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4194 if ( ASMIsValidExtRange(uExtMaxId)
4195 && uExtMaxId >= 0x8000000a
4196 && (fExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
4197 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4198 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4199 {
4200 *pfCaps = SUPVTCAPS_AMD_V;
4201 return VINF_SUCCESS;
4202 }
4203 return VERR_SVM_NO_SVM;
4204 }
4205 }
4206 }
4207 return VERR_UNSUPPORTED_CPU;
4208}
4209
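/*
 * Editor's note: an illustrative sketch, not part of the driver, of branching on the
 * reported virtualization flavour.
 *
 * @code
 *  uint32_t fCaps = 0;
 *  int rc = SUPR0GetVTSupport(&fCaps);
 *  if (RT_SUCCESS(rc))
 *  {
 *      if (fCaps & SUPVTCAPS_VT_X)
 *          SUPR0Printf("VT-x capable CPU\n");
 *      else if (fCaps & SUPVTCAPS_AMD_V)
 *          SUPR0Printf("AMD-V capable CPU\n");
 *  }
 *  else
 *      SUPR0Printf("No VT-x/AMD-V: rc=%d\n", rc);       // e.g. VERR_VMX_NO_VMX, VERR_SVM_NO_SVM, VERR_UNSUPPORTED_CPU
 * @endcode
 */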
4210
4211/**
4212 * Checks if Intel VT-x feature is usable on this CPU.
4213 *
4214 * @returns VBox status code.
4215 * @param pfIsSmxModeAmbiguous Where to return whether the SMX mode causes
4216 * ambiguity that makes us unsure whether we
4217 * really can use VT-x or not.
4218 *
4219 * @remarks Must be called with preemption disabled.
4220 * The caller is also expected to check that the CPU is an Intel (or
4221 * VIA/Shanghai) CPU -and- that it supports VT-x. Otherwise, this
4222 * function might throw a \#GP fault as it tries to read/write MSRs
4223 * that may not be present!
4224 */
4225SUPR0DECL(int) SUPR0GetVmxUsability(bool *pfIsSmxModeAmbiguous)
4226{
4227 uint64_t fFeatMsr;
4228 bool fMaybeSmxMode;
4229 bool fMsrLocked;
4230 bool fSmxVmxAllowed;
4231 bool fVmxAllowed;
4232 bool fIsSmxModeAmbiguous;
4233 int rc;
4234
4235 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4236
4237 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4238 fMaybeSmxMode = RT_BOOL(ASMGetCR4() & X86_CR4_SMXE);
4239 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4240 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4241 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4242 fIsSmxModeAmbiguous = false;
4243 rc = VERR_INTERNAL_ERROR_5;
4244
4245 /* Check if the LOCK bit is set but excludes the required VMXON bit. */
4246 if (fMsrLocked)
4247 {
4248 if (fVmxAllowed && fSmxVmxAllowed)
4249 rc = VINF_SUCCESS;
4250 else if (!fVmxAllowed && !fSmxVmxAllowed)
4251 rc = VERR_VMX_MSR_ALL_VMX_DISABLED;
4252 else if (!fMaybeSmxMode)
4253 {
4254 if (fVmxAllowed)
4255 rc = VINF_SUCCESS;
4256 else
4257 rc = VERR_VMX_MSR_VMX_DISABLED;
4258 }
4259 else
4260 {
4261 /*
4262 * CR4.SMXE is set but this doesn't mean the CPU is necessarily in SMX mode. We shall assume
4263 * that it is -not- and that it is a stupid BIOS/OS setting CR4.SMXE for no good reason.
4264 * See @bugref{6873}.
4265 */
4266 Assert(fMaybeSmxMode == true);
4267 fIsSmxModeAmbiguous = true;
4268 rc = VINF_SUCCESS;
4269 }
4270 }
4271 else
4272 {
4273 /*
4274 * MSR is not yet locked; we can change it ourselves here. Once the lock bit is set,
4275 * this MSR can no longer be modified.
4276 *
4277 * Set both the VMX and SMX_VMX bits (if supported) as we can't determine SMX mode
4278 * accurately. See @bugref{6873}.
4279 *
4280 * We need to check for SMX hardware support here, before writing the MSR as
4281 * otherwise we will #GP fault on CPUs that do not support it. Callers do not check
4282 * for it.
4283 */
4284 uint32_t fFeaturesECX, uDummy;
4285#ifdef VBOX_STRICT
4286 /* Callers should have verified these at some point. */
4287 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4288 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4289 Assert(ASMIsValidStdRange(uMaxId));
4290 Assert( ASMIsIntelCpuEx( uVendorEBX, uVendorECX, uVendorEDX)
4291 || ASMIsViaCentaurCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4292 || ASMIsShanghaiCpuEx( uVendorEBX, uVendorECX, uVendorEDX));
4293#endif
4294 ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
4295 bool fSmxVmxHwSupport = false;
4296 if ( (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
4297 && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
4298 fSmxVmxHwSupport = true;
4299
4300 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
4301 | MSR_IA32_FEATURE_CONTROL_VMXON;
4302 if (fSmxVmxHwSupport)
4303 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;
4304
4305 /*
4306 * Commit.
4307 */
4308 ASMWrMsr(MSR_IA32_FEATURE_CONTROL, fFeatMsr);
4309
4310 /*
4311 * Verify.
4312 */
4313 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4314 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4315 if (fMsrLocked)
4316 {
4317 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4318 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4319 if ( fVmxAllowed
4320 && ( !fSmxVmxHwSupport
4321 || fSmxVmxAllowed))
4322 rc = VINF_SUCCESS;
4323 else
4324 rc = !fSmxVmxHwSupport ? VERR_VMX_MSR_VMX_ENABLE_FAILED : VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED;
4325 }
4326 else
4327 rc = VERR_VMX_MSR_LOCKING_FAILED;
4328 }
4329
4330 if (pfIsSmxModeAmbiguous)
4331 *pfIsSmxModeAmbiguous = fIsSmxModeAmbiguous;
4332
4333 return rc;
4334}
4335
4336
4337/**
4338 * Checks if AMD-V SVM feature is usable on this CPU.
4339 *
4340 * @returns VBox status code.
4341 * @param fInitSvm If usable, try to initialize SVM on this CPU.
4342 *
4343 * @remarks Must be called with preemption disabled.
4344 */
4345SUPR0DECL(int) SUPR0GetSvmUsability(bool fInitSvm)
4346{
4347 int rc;
4348 uint64_t fVmCr;
4349 uint64_t fEfer;
4350
4351 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4352 fVmCr = ASMRdMsr(MSR_K8_VM_CR);
4353 if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
4354 {
4355 rc = VINF_SUCCESS;
4356 if (fInitSvm)
4357 {
4358 /* Turn on SVM in the EFER MSR. */
4359 fEfer = ASMRdMsr(MSR_K6_EFER);
4360 if (fEfer & MSR_K6_EFER_SVME)
4361 rc = VERR_SVM_IN_USE;
4362 else
4363 {
4364 ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
4365
4366 /* Paranoia. */
4367 fEfer = ASMRdMsr(MSR_K6_EFER);
4368 if (fEfer & MSR_K6_EFER_SVME)
4369 {
4370 /* Restore previous value. */
4371 ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
4372 }
4373 else
4374 rc = VERR_SVM_ILLEGAL_EFER_MSR;
4375 }
4376 }
4377 }
4378 else
4379 rc = VERR_SVM_DISABLED;
4380 return rc;
4381}
4382
4383
4384/**
4385 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4386 *
4387 * @returns VBox status code.
4388 * @retval VERR_VMX_NO_VMX
4389 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4390 * @retval VERR_VMX_MSR_VMX_DISABLED
4391 * @retval VERR_VMX_MSR_LOCKING_FAILED
4392 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4393 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4394 * @retval VERR_SVM_NO_SVM
4395 * @retval VERR_SVM_DISABLED
4396 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4397 * (centaur)/Shanghai CPU.
4398 *
4399 * @param pfCaps Where to store the capabilities.
4400 */
4401int VBOXCALL supdrvQueryVTCapsInternal(uint32_t *pfCaps)
4402{
4403 int rc = VERR_UNSUPPORTED_CPU;
4404 bool fIsSmxModeAmbiguous = false;
4405 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4406
4407 /*
4408 * Input validation.
4409 */
4410 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4411 *pfCaps = 0;
4412
4413 /* We may modify MSRs and re-read them, disable preemption so we make sure we don't migrate CPUs. */
4414 RTThreadPreemptDisable(&PreemptState);
4415
4416 /* Check if VT-x/AMD-V is supported. */
4417 rc = SUPR0GetVTSupport(pfCaps);
4418 if (RT_SUCCESS(rc))
4419 {
4420 /* Check if VT-x is supported. */
4421 if (*pfCaps & SUPVTCAPS_VT_X)
4422 {
4423 /* Check if VT-x is usable. */
4424 rc = SUPR0GetVmxUsability(&fIsSmxModeAmbiguous);
4425 if (RT_SUCCESS(rc))
4426 {
4427 /* Query some basic VT-x capabilities (mainly required by our GUI). */
4428 VMXCTLSMSR vtCaps;
4429 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4430 if (vtCaps.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4431 {
4432 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4433 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_EPT)
4434 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4435 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
4436 *pfCaps |= SUPVTCAPS_VTX_UNRESTRICTED_GUEST;
4437 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4438 *pfCaps |= SUPVTCAPS_VTX_VMCS_SHADOWING;
4439 }
4440 }
4441 }
4442 /* Check if AMD-V is supported. */
4443 else if (*pfCaps & SUPVTCAPS_AMD_V)
4444 {
4445 /* Check if SVM is usable. */
4446 rc = SUPR0GetSvmUsability(false /* fInitSvm */);
4447 if (RT_SUCCESS(rc))
4448 {
4449 /* Query some basic AMD-V capabilities (mainly required by our GUI). */
4450 uint32_t uDummy, fSvmFeatures;
4451 ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
4452 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
4453 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4454 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD)
4455 *pfCaps |= SUPVTCAPS_AMDV_VIRT_VMSAVE_VMLOAD;
4456 }
4457 }
4458 }
4459
4460 /* Restore preemption. */
4461 RTThreadPreemptRestore(&PreemptState);
4462
4463 /* After restoring preemption, if we may be in SMX mode, print a warning as it's difficult to debug such problems. */
4464 if (fIsSmxModeAmbiguous)
4465 SUPR0Printf(("WARNING! CR4 hints SMX mode but your CPU is too secretive. Proceeding anyway... We wish you good luck!\n"));
4466
4467 return rc;
4468}
4469
4470
4471/**
4472 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4473 *
4474 * @returns VBox status code.
4475 * @retval VERR_VMX_NO_VMX
4476 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4477 * @retval VERR_VMX_MSR_VMX_DISABLED
4478 * @retval VERR_VMX_MSR_LOCKING_FAILED
4479 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4480 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4481 * @retval VERR_SVM_NO_SVM
4482 * @retval VERR_SVM_DISABLED
4483 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4484 * (centaur)/Shanghai CPU.
4485 *
4486 * @param pSession The session handle.
4487 * @param pfCaps Where to store the capabilities.
4488 */
4489SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
4490{
4491 /*
4492 * Input validation.
4493 */
4494 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4495 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4496
4497 /*
4498 * Call common worker.
4499 */
4500 return supdrvQueryVTCapsInternal(pfCaps);
4501}
4502
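/*
 * Editor's note: an illustrative sketch, not part of the driver, of a ring-0 client
 * querying the capability flags for GUI-style feature checks.
 *
 * @code
 *  uint32_t fCaps = 0;
 *  int rc = SUPR0QueryVTCaps(pSession, &fCaps);
 *  if (RT_SUCCESS(rc) && (fCaps & SUPVTCAPS_NESTED_PAGING))
 *  {
 *      // hardware virtualization with nested paging (EPT / NPT) is usable here
 *  }
 * @endcode
 */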
4503
4504/**
4505 * Queries the CPU microcode revision.
4506 *
4507 * @returns VBox status code.
4508 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4509 * readable microcode rev.
4510 *
4511 * @param puRevision Where to store the microcode revision.
4512 */
4513static int VBOXCALL supdrvQueryUcodeRev(uint32_t *puRevision)
4514{
4515 int rc = VERR_UNSUPPORTED_CPU;
4516 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4517
4518 /*
4519 * Input validation.
4520 */
4521 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4522
4523 *puRevision = 0;
4524
4525 /* Disable preemption so we make sure we don't migrate CPUs, just in case. */
4526 /* NB: We assume that there aren't mismatched microcode revs in the system. */
4527 RTThreadPreemptDisable(&PreemptState);
4528
4529 if (ASMHasCpuId())
4530 {
4531 uint32_t uDummy, uTFMSEAX;
4532 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4533
4534 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4535 ASMCpuId(1, &uTFMSEAX, &uDummy, &uDummy, &uDummy);
4536
4537 if (ASMIsValidStdRange(uMaxId))
4538 {
4539 uint64_t uRevMsr;
4540 if (ASMIsIntelCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4541 {
4542 /* Architectural MSR available on Pentium Pro and later. */
4543 if (ASMGetCpuFamily(uTFMSEAX) >= 6)
4544 {
4545 /* Revision is in the high dword. */
4546 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
4547 *puRevision = RT_HIDWORD(uRevMsr);
4548 rc = VINF_SUCCESS;
4549 }
4550 }
4551 else if ( ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4552 || ASMIsHygonCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4553 {
4554 /* Not well documented, but at least all AMD64 CPUs support this. */
4555 if (ASMGetCpuFamily(uTFMSEAX) >= 15)
4556 {
4557 /* Revision is in the low dword. */
4558 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID); /* Same MSR as Intel. */
4559 *puRevision = RT_LODWORD(uRevMsr);
4560 rc = VINF_SUCCESS;
4561 }
4562 }
4563 }
4564 }
4565
4566 RTThreadPreemptRestore(&PreemptState);
4567
4568 return rc;
4569}
4570
4571/**
4572 * Queries the CPU microcode revision.
4573 *
4574 * @returns VBox status code.
4575 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4576 * readable microcode rev.
4577 *
4578 * @param pSession The session handle.
4579 * @param puRevision Where to store the microcode revision.
4580 */
4581SUPR0DECL(int) SUPR0QueryUcodeRev(PSUPDRVSESSION pSession, uint32_t *puRevision)
4582{
4583 /*
4584 * Input validation.
4585 */
4586 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4587 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4588
4589 /*
4590 * Call common worker.
4591 */
4592 return supdrvQueryUcodeRev(puRevision);
4593}
4594
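/*
 * Editor's note: an illustrative sketch, not part of the driver, of querying the
 * microcode revision of the calling CPU.
 *
 * @code
 *  uint32_t uMicrocodeRev = 0;
 *  if (RT_SUCCESS(SUPR0QueryUcodeRev(pSession, &uMicrocodeRev)))
 *      SUPR0Printf("Microcode revision: %#x\n", uMicrocodeRev);
 * @endcode
 */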
4595
4596/**
4597 * Gets hardware-virtualization MSRs of the calling CPU.
4598 *
4599 * @returns VBox status code.
4600 * @param pMsrs Where to store the hardware-virtualization MSRs.
4601 * @param fCaps Hardware virtualization capabilities (SUPVTCAPS_XXX). Pass 0
4602 * to explicitly check for the presence of VT-x/AMD-V before
4603 * querying MSRs.
4604 * @param fForce Force querying of MSRs from the hardware.
4605 */
4606SUPR0DECL(int) SUPR0GetHwvirtMsrs(PSUPHWVIRTMSRS pMsrs, uint32_t fCaps, bool fForce)
4607{
4608 NOREF(fForce);
4609
4610 int rc;
4611 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4612
4613 /*
4614 * Input validation.
4615 */
4616 AssertPtrReturn(pMsrs, VERR_INVALID_POINTER);
4617
4618 /*
4619 * Disable preemption so we make sure we don't migrate CPUs and because
4620 * we access global data.
4621 */
4622 RTThreadPreemptDisable(&PreemptState);
4623
4624 /*
4625 * Query the MSRs from the hardware.
4626 */
4627 /** @todo Cache MSR values so future accesses can avoid querying the hardware as
4628 * it may be expensive (esp. in nested virtualization scenarios). Do this
4629 * with proper locking and race safety. */
4630 SUPHWVIRTMSRS Msrs;
4631 RT_ZERO(Msrs);
4632
4633 /* If the caller claims VT-x/AMD-V is supported, don't need to recheck it. */
4634 if (!(fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V)))
4635 rc = SUPR0GetVTSupport(&fCaps);
4636 else
4637 rc = VINF_SUCCESS;
4638 if (RT_SUCCESS(rc))
4639 {
4640 if (fCaps & SUPVTCAPS_VT_X)
4641 {
4642 Msrs.u.vmx.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4643 Msrs.u.vmx.u64Basic = ASMRdMsr(MSR_IA32_VMX_BASIC);
4644 Msrs.u.vmx.u64PinCtls = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
4645 Msrs.u.vmx.u64ProcCtls = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4646 Msrs.u.vmx.u64ExitCtls = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
4647 Msrs.u.vmx.u64EntryCtls = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
4648 Msrs.u.vmx.u64Misc = ASMRdMsr(MSR_IA32_VMX_MISC);
4649 Msrs.u.vmx.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
4650 Msrs.u.vmx.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
4651 Msrs.u.vmx.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
4652 Msrs.u.vmx.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
4653 Msrs.u.vmx.u64VmcsEnum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
4654
4655 if (RT_BF_GET(Msrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
4656 {
4657 Msrs.u.vmx.u64TruePinCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
4658 Msrs.u.vmx.u64TrueProcCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
4659 Msrs.u.vmx.u64TrueEntryCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
4660 Msrs.u.vmx.u64TrueExitCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
4661 }
4662
4663 uint32_t const fProcCtlsAllowed1 = RT_HI_U32(Msrs.u.vmx.u64ProcCtls);
4664 if (fProcCtlsAllowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4665 {
4666 Msrs.u.vmx.u64ProcCtls2 = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4667
4668 uint32_t const fProcCtls2Allowed1 = RT_HI_U32(Msrs.u.vmx.u64ProcCtls2);
4669 if (fProcCtls2Allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
4670 Msrs.u.vmx.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
4671
4672 if (fProcCtls2Allowed1 & VMX_PROC_CTLS2_VMFUNC)
4673 Msrs.u.vmx.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
4674 }
4675 }
4676 else if (fCaps & SUPVTCAPS_AMD_V)
4677 {
4678 Msrs.u.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
4679 Msrs.u.svm.u64MsrSmmAddr = ASMRdMsr(MSR_K7_SMM_ADDR);
4680 Msrs.u.svm.u64MsrSmmMask = ASMRdMsr(MSR_K7_SMM_MASK);
4681 }
4682 else
4683 {
4684 RTThreadPreemptRestore(&PreemptState);
4685 AssertMsgFailedReturn(("SUPR0GetVTSupport returns success but neither VT-x nor AMD-V reported!\n"),
4686 VERR_INTERNAL_ERROR_2);
4687 }
4688
4689 /*
4690 * Copy the MSRs out.
4691 */
4692 memcpy(pMsrs, &Msrs, sizeof(*pMsrs));
4693 }
4694
4695 RTThreadPreemptRestore(&PreemptState);
4696
4697 return rc;
4698}
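
/*
 * Usage sketch (placeholder function name; a minimal caller-side sequence,
 * not a definitive implementation).  Querying the capabilities first via
 * SUPR0GetVTSupport() mirrors what the code above does when fCaps is zero:
 *
 * @code
 *  static int myReadHwvirtMsrs(SUPHWVIRTMSRS *pMyMsrs)
 *  {
 *      uint32_t fCaps = 0;
 *      int rc = SUPR0GetVTSupport(&fCaps);                 // VT-x or AMD-V?
 *      if (RT_SUCCESS(rc))
 *          rc = SUPR0GetHwvirtMsrs(pMyMsrs, fCaps, false); // fForce is currently ignored (see NOREF above)
 *      return rc;
 *  }
 * @endcode
 */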
4699
4700
4701/**
4702 * Register a component factory with the support driver.
4703 *
4704 * This is currently restricted to kernel sessions only.
4705 *
4706 * @returns VBox status code.
4707 * @retval VINF_SUCCESS on success.
4708 * @retval VERR_NO_MEMORY if we're out of memory.
4709 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
4710 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4711 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4712 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4713 *
4714 * @param pSession The SUPDRV session (must be a ring-0 session).
4715 * @param pFactory Pointer to the component factory registration structure.
4716 *
4717 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
4718 */
4719SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4720{
4721 PSUPDRVFACTORYREG pNewReg;
4722 const char *psz;
4723 int rc;
4724
4725 /*
4726 * Validate parameters.
4727 */
4728 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4729 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4730 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4731 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
4732 psz = RTStrEnd(pFactory->szName, sizeof(pFactory->szName));
4733 AssertReturn(psz, VERR_INVALID_PARAMETER);
4734
4735 /*
4736 * Allocate and initialize a new registration structure.
4737 */
4738 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
4739 if (pNewReg)
4740 {
4741 pNewReg->pNext = NULL;
4742 pNewReg->pFactory = pFactory;
4743 pNewReg->pSession = pSession;
4744 pNewReg->cchName = psz - &pFactory->szName[0];
4745
4746 /*
4747 * Add it to the tail of the list after checking for prior registration.
4748 */
4749 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4750 if (RT_SUCCESS(rc))
4751 {
4752 PSUPDRVFACTORYREG pPrev = NULL;
4753 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4754 while (pCur && pCur->pFactory != pFactory)
4755 {
4756 pPrev = pCur;
4757 pCur = pCur->pNext;
4758 }
4759 if (!pCur)
4760 {
4761 if (pPrev)
4762 pPrev->pNext = pNewReg;
4763 else
4764 pSession->pDevExt->pComponentFactoryHead = pNewReg;
4765 rc = VINF_SUCCESS;
4766 }
4767 else
4768 rc = VERR_ALREADY_EXISTS;
4769
4770 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4771 }
4772
4773 if (RT_FAILURE(rc))
4774 RTMemFree(pNewReg);
4775 }
4776 else
4777 rc = VERR_NO_MEMORY;
4778 return rc;
4779}
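
/*
 * Minimal registration sketch with placeholder names (the exact SUPDRVFACTORY
 * callback typedef and calling convention should be taken from the SUP
 * headers; only the szName and pfnQueryFactoryInterface members used above are
 * assumed here).  Registration requires a kernel (ring-0) session and should
 * be paired with SUPR0ComponentDeregisterFactory() on unload.
 *
 * @code
 *  static struct MYCOMPONENTIF { uint32_t u32Magic; } g_MyInterface;   // hypothetical interface structure
 *  static SUPDRVFACTORY g_MyFactory;
 *
 *  static void *myQueryFactoryIf(PCSUPDRVFACTORY pFactory, PSUPDRVSESSION pSession,
 *                                const char *pszInterfaceUuid)
 *  {
 *      RT_NOREF(pFactory, pSession);
 *      if (!strcmp(pszInterfaceUuid, "placeholder-interface-uuid"))    // hypothetical UUID string
 *          return &g_MyInterface;
 *      return NULL;                                                    // unknown interface
 *  }
 *
 *  static int myRegisterFactory(PSUPDRVSESSION pSession)               // must be a kernel session
 *  {
 *      RTStrCopy(g_MyFactory.szName, sizeof(g_MyFactory.szName), "MyComponent");
 *      g_MyFactory.pfnQueryFactoryInterface = myQueryFactoryIf;
 *      return SUPR0ComponentRegisterFactory(pSession, &g_MyFactory);
 *  }
 * @endcode
 */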
4780
4781
4782/**
4783 * Deregister a component factory.
4784 *
4785 * @returns VBox status code.
4786 * @retval VINF_SUCCESS on success.
4787 * @retval VERR_NOT_FOUND if the factory wasn't registered.
4788 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4789 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4790 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4791 *
4792 * @param pSession The SUPDRV session (must be a ring-0 session).
4793 * @param pFactory Pointer to the component factory registration structure
4794 *                      previously passed to SUPR0ComponentRegisterFactory().
4795 *
4796 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
4797 */
4798SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4799{
4800 int rc;
4801
4802 /*
4803 * Validate parameters.
4804 */
4805 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4806 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4807 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4808
4809 /*
4810 * Take the lock and look for the registration record.
4811 */
4812 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4813 if (RT_SUCCESS(rc))
4814 {
4815 PSUPDRVFACTORYREG pPrev = NULL;
4816 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4817 while (pCur && pCur->pFactory != pFactory)
4818 {
4819 pPrev = pCur;
4820 pCur = pCur->pNext;
4821 }
4822 if (pCur)
4823 {
4824 if (!pPrev)
4825 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
4826 else
4827 pPrev->pNext = pCur->pNext;
4828
4829 pCur->pNext = NULL;
4830 pCur->pFactory = NULL;
4831 pCur->pSession = NULL;
4832 rc = VINF_SUCCESS;
4833 }
4834 else
4835 rc = VERR_NOT_FOUND;
4836
4837 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4838
4839 RTMemFree(pCur);
4840 }
4841 return rc;
4842}
4843
4844
4845/**
4846 * Queries a component factory.
4847 *
4848 * @returns VBox status code.
4849 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4850 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4851 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
4852 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
4853 *
4854 * @param pSession The SUPDRV session.
4855 * @param pszName The name of the component factory.
4856 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
4857 * @param ppvFactoryIf Where to store the factory interface.
4858 */
4859SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
4860{
4861 const char *pszEnd;
4862 size_t cchName;
4863 int rc;
4864
4865 /*
4866 * Validate parameters.
4867 */
4868 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4869
4870 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
4871 pszEnd = RTStrEnd(pszName, RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
4872 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4873 cchName = pszEnd - pszName;
4874
4875 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
4876 pszEnd = RTStrEnd(pszInterfaceUuid, RTUUID_STR_LENGTH);
4877 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4878
4879 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
4880 *ppvFactoryIf = NULL;
4881
4882 /*
4883 * Take the lock and try all factories by this name.
4884 */
4885 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4886 if (RT_SUCCESS(rc))
4887 {
4888 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4889 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
4890 while (pCur)
4891 {
4892 if ( pCur->cchName == cchName
4893 && !memcmp(pCur->pFactory->szName, pszName, cchName))
4894 {
4895 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
4896 if (pvFactory)
4897 {
4898 *ppvFactoryIf = pvFactory;
4899 rc = VINF_SUCCESS;
4900 break;
4901 }
4902 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
4903 }
4904
4905 /* next */
4906 pCur = pCur->pNext;
4907 }
4908
4909 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4910 }
4911 return rc;
4912}
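
/*
 * Lookup sketch matching the registration sketch further up (the component
 * name and interface UUID strings are placeholders; in real code the UUID
 * identifies the interface version agreed upon between the two components):
 *
 * @code
 *  void *pvIf = NULL;
 *  int rc = SUPR0ComponentQueryFactory(pSession, "MyComponent", "placeholder-interface-uuid", &pvIf);
 *  if (RT_SUCCESS(rc))
 *  {
 *      // pvIf is whatever the factory's pfnQueryFactoryInterface callback
 *      // returned; cast it to the agreed interface structure.
 *  }
 * @endcode
 */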
4913
4914
4915/**
4916 * Adds a memory object to the session.
4917 *
4918 * @returns IPRT status code.
4919 * @param pMem Memory tracking structure containing the
4920 * information to track.
4921 * @param pSession The session.
4922 */
4923static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
4924{
4925 PSUPDRVBUNDLE pBundle;
4926
4927 /*
4928 * Find free entry and record the allocation.
4929 */
4930 RTSpinlockAcquire(pSession->Spinlock);
4931 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4932 {
4933 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
4934 {
4935 unsigned i;
4936 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
4937 {
4938 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
4939 {
4940 pBundle->cUsed++;
4941 pBundle->aMem[i] = *pMem;
4942 RTSpinlockRelease(pSession->Spinlock);
4943 return VINF_SUCCESS;
4944 }
4945 }
4946 AssertFailed(); /* !!this can't be happening!!! */
4947 }
4948 }
4949 RTSpinlockRelease(pSession->Spinlock);
4950
4951 /*
4952 * Need to allocate a new bundle.
4953 * Insert into the last entry in the bundle.
4954 */
4955 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
4956 if (!pBundle)
4957 return VERR_NO_MEMORY;
4958
4959 /* take last entry. */
4960 pBundle->cUsed++;
4961 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
4962
4963 /* insert into list. */
4964 RTSpinlockAcquire(pSession->Spinlock);
4965 pBundle->pNext = pSession->Bundle.pNext;
4966 pSession->Bundle.pNext = pBundle;
4967 RTSpinlockRelease(pSession->Spinlock);
4968
4969 return VINF_SUCCESS;
4970}
4971
4972
4973/**
4974 * Releases a memory object referenced by pointer and type.
4975 *
4976 * @returns IPRT status code.
4977 * @param pSession Session data.
4978 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
4979 * @param eType Memory type.
4980 */
4981static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
4982{
4983 PSUPDRVBUNDLE pBundle;
4984
4985 /*
4986 * Validate input.
4987 */
4988 if (!uPtr)
4989 {
4990 Log(("Illegal address %p\n", (void *)uPtr));
4991 return VERR_INVALID_PARAMETER;
4992 }
4993
4994 /*
4995 * Search for the address.
4996 */
4997 RTSpinlockAcquire(pSession->Spinlock);
4998 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4999 {
5000 if (pBundle->cUsed > 0)
5001 {
5002 unsigned i;
5003 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
5004 {
5005 if ( pBundle->aMem[i].eType == eType
5006 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
5007 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
5008 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
5009 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
5010 )
5011 {
5012 /* Make a copy of it and release it outside the spinlock. */
5013 SUPDRVMEMREF Mem = pBundle->aMem[i];
5014 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
5015 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
5016 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
5017 RTSpinlockRelease(pSession->Spinlock);
5018
5019 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
5020 {
5021 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
5022 AssertRC(rc); /** @todo figure out how to handle this. */
5023 }
5024 if (Mem.MemObj != NIL_RTR0MEMOBJ)
5025 {
5026 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
5027 AssertRC(rc); /** @todo figure out how to handle this. */
5028 }
5029 return VINF_SUCCESS;
5030 }
5031 }
5032 }
5033 }
5034 RTSpinlockRelease(pSession->Spinlock);
5035 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
5036 return VERR_INVALID_PARAMETER;
5037}
5038
5039
5040/**
5041 * Opens an image. If this is the first time the image is opened, the caller must
5042 * upload the bits using the supdrvIOCtl_LdrLoad() / SUP_IOCTL_LDR_LOAD function.
5043 *
5044 * This is the 1st step of the loading.
5045 *
5046 * @returns IPRT status code.
5047 * @param pDevExt Device globals.
5048 * @param pSession Session data.
5049 * @param pReq The open request.
5050 */
5051static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
5052{
5053 int rc;
5054 PSUPDRVLDRIMAGE pImage;
5055 void *pv;
5056 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
5057 SUPDRV_CHECK_SMAP_SETUP();
5058 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5059 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithEverything=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithEverything));
5060
5061 /*
5062 * Check if we got an instance of the image already.
5063 */
5064 supdrvLdrLock(pDevExt);
5065 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5066 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5067 {
5068 if ( pImage->szName[cchName] == '\0'
5069 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
5070 {
5071 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
5072 {
5073                /** @todo check cbImageBits and cbImageWithEverything here; if they differ,
5074                 *        that indicates that the images are different. */
5075 pImage->cUsage++;
5076 pReq->u.Out.pvImageBase = pImage->pvImage;
5077 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
5078 pReq->u.Out.fNativeLoader = pImage->fNative;
5079 supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5080 supdrvLdrUnlock(pDevExt);
5081 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5082 return VINF_SUCCESS;
5083 }
5084 supdrvLdrUnlock(pDevExt);
5085 Log(("supdrvIOCtl_LdrOpen: Too many existing references to '%s'!\n", pReq->u.In.szName));
5086 return VERR_TOO_MANY_REFERENCES;
5087 }
5088 }
5089 /* (not found - add it!) */
5090
5091 /* If the loader interface is locked down, make userland fail early */
5092 if (pDevExt->fLdrLockedDown)
5093 {
5094 supdrvLdrUnlock(pDevExt);
5095 Log(("supdrvIOCtl_LdrOpen: Not adding '%s' to image list, loader interface is locked down!\n", pReq->u.In.szName));
5096 return VERR_PERMISSION_DENIED;
5097 }
5098
5099 /*
5100 * Allocate memory.
5101 */
5102 Assert(cchName < sizeof(pImage->szName));
5103 pv = RTMemAlloc(sizeof(SUPDRVLDRIMAGE));
5104 if (!pv)
5105 {
5106 supdrvLdrUnlock(pDevExt);
5107 Log(("supdrvIOCtl_LdrOpen: RTMemAlloc() failed\n"));
5108 return /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_2;
5109 }
5110 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5111
5112 /*
5113 * Setup and link in the LDR stuff.
5114 */
5115 pImage = (PSUPDRVLDRIMAGE)pv;
5116 pImage->pvImage = NULL;
5117#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5118 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
5119#else
5120 pImage->pvImageAlloc = NULL;
5121#endif
5122 pImage->cbImageWithEverything = pReq->u.In.cbImageWithEverything;
5123 pImage->cbImageBits = pReq->u.In.cbImageBits;
5124 pImage->cSymbols = 0;
5125 pImage->paSymbols = NULL;
5126 pImage->pachStrTab = NULL;
5127 pImage->cbStrTab = 0;
5128 pImage->cSegments = 0;
5129 pImage->paSegments = NULL;
5130 pImage->pfnModuleInit = NULL;
5131 pImage->pfnModuleTerm = NULL;
5132 pImage->pfnServiceReqHandler = NULL;
5133 pImage->uState = SUP_IOCTL_LDR_OPEN;
5134 pImage->cUsage = 1;
5135 pImage->pDevExt = pDevExt;
5136 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC;
5137 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
5138
5139 /*
5140     * Try loading it using the native loader; if that isn't supported, fall back
5141     * on the older method.
5142 */
5143 pImage->fNative = true;
5144 rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
5145 if (rc == VERR_NOT_SUPPORTED)
5146 {
5147#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5148 rc = RTR0MemObjAllocPage(&pImage->hMemObjImage, pImage->cbImageBits, true /*fExecutable*/);
5149 if (RT_SUCCESS(rc))
5150 {
5151 pImage->pvImage = RTR0MemObjAddress(pImage->hMemObjImage);
5152 pImage->fNative = false;
5153 }
5154#else
5155 pImage->pvImageAlloc = RTMemExecAlloc(pImage->cbImageBits + 31);
5156 pImage->pvImage = RT_ALIGN_P(pImage->pvImageAlloc, 32);
5157 pImage->fNative = false;
5158 rc = pImage->pvImageAlloc ? VINF_SUCCESS : VERR_NO_EXEC_MEMORY;
5159#endif
5160 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5161 }
5162 if (RT_FAILURE(rc))
5163 {
5164 supdrvLdrUnlock(pDevExt);
5165 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
5166 RTMemFree(pImage);
5167 Log(("supdrvIOCtl_LdrOpen(%s): failed - %Rrc\n", pReq->u.In.szName, rc));
5168 return rc;
5169 }
5170 Assert(VALID_PTR(pImage->pvImage) || RT_FAILURE(rc));
5171
5172 /*
5173 * Link it.
5174 */
5175 pImage->pNext = pDevExt->pLdrImages;
5176 pDevExt->pLdrImages = pImage;
5177
5178 supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5179
5180 pReq->u.Out.pvImageBase = pImage->pvImage;
5181 pReq->u.Out.fNeedsLoading = true;
5182 pReq->u.Out.fNativeLoader = pImage->fNative;
5183 supdrvOSLdrNotifyOpened(pDevExt, pImage, pReq->u.In.szFilename);
5184
5185 supdrvLdrUnlock(pDevExt);
5186 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5187 return VINF_SUCCESS;
5188}
5189
5190
5191/**
5192 * Formats a load error message.
5193 *
5194 * @returns @a rc
5195 * @param rc Return code.
5196 * @param pReq The request.
5197 * @param pszFormat The error message format string.
5198 * @param ... Argument to the format string.
5199 */
5200int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
5201{
5202 va_list va;
5203 va_start(va, pszFormat);
5204 pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
5205 RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
5206 va_end(va);
5207 Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
5208 return rc;
5209}
5210
5211
5212/**
5213 * Worker that validates a pointer to an image entrypoint.
5214 *
5215 * Calls supdrvLdrLoadError on error.
5216 *
5217 * @returns IPRT status code.
5218 * @param pDevExt The device globals.
5219 * @param pImage The loader image.
5220 * @param pv The pointer into the image.
5221 * @param fMayBeNull Whether it may be NULL.
5222 * @param   pbImageBits     The image bits prepared by ring-3.
5223 * @param   pszSymbol       The entrypoint name or log name. If the symbol is
5224 *                          capitalized it signifies a specific symbol, otherwise it
5225 *                          is only used for logging.
5226 * @param pReq The request for passing to supdrvLdrLoadError.
5227 *
5228 * @note Will leave the loader lock on failure!
5229 */
5230static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv, bool fMayBeNull,
5231 const uint8_t *pbImageBits, const char *pszSymbol, PSUPLDRLOAD pReq)
5232{
5233 if (!fMayBeNull || pv)
5234 {
5235 uint32_t iSeg;
5236
5237 /* Must be within the image bits: */
5238 uintptr_t const uRva = (uintptr_t)pv - (uintptr_t)pImage->pvImage;
5239 if (uRva >= pImage->cbImageBits)
5240 {
5241 supdrvLdrUnlock(pDevExt);
5242 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5243 "Invalid entry point address %p given for %s: RVA %#zx, image size %#zx",
5244 pv, pszSymbol, uRva, pImage->cbImageBits);
5245 }
5246
5247 /* Must be in an executable segment: */
5248 for (iSeg = 0; iSeg < pImage->cSegments; iSeg++)
5249 if (uRva - pImage->paSegments[iSeg].off < (uintptr_t)pImage->paSegments[iSeg].cb)
5250 {
5251 if (pImage->paSegments[iSeg].fProt & SUPLDR_PROT_EXEC)
5252 break;
5253 supdrvLdrUnlock(pDevExt);
5254 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5255 "Bad entry point %p given for %s: not executable (seg #%u: %#RX32 LB %#RX32 prot %#x)",
5256 pv, pszSymbol, iSeg, pImage->paSegments[iSeg].off, pImage->paSegments[iSeg].cb,
5257 pImage->paSegments[iSeg].fProt);
5258 }
5259 if (iSeg >= pImage->cSegments)
5260 {
5261 supdrvLdrUnlock(pDevExt);
5262 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5263 "Bad entry point %p given for %s: no matching segment found (RVA %#zx)!",
5264 pv, pszSymbol, uRva);
5265 }
5266
5267 if (pImage->fNative)
5268 {
5269 /** @todo pass pReq along to the native code. */
5270 int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits, pszSymbol);
5271 if (RT_FAILURE(rc))
5272 {
5273 supdrvLdrUnlock(pDevExt);
5274 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5275 "Bad entry point address %p for %s: rc=%Rrc\n", pv, pszSymbol, rc);
5276 }
5277 }
5278 }
5279 return VINF_SUCCESS;
5280}
5281
5282
5283/**
5284 * Loads the image bits.
5285 *
5286 * This is the 2nd step of the loading.
5287 *
5288 * @returns IPRT status code.
5289 * @param pDevExt Device globals.
5290 * @param pSession Session data.
5291 * @param pReq The request.
5292 */
5293static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
5294{
5295 PSUPDRVLDRUSAGE pUsage;
5296 PSUPDRVLDRIMAGE pImage;
5297 int rc;
5298 SUPDRV_CHECK_SMAP_SETUP();
5299 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithEverything=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithEverything));
5300 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5301
5302 /*
5303 * Find the ldr image.
5304 */
5305 supdrvLdrLock(pDevExt);
5306 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5307
5308 pUsage = pSession->pLdrUsage;
5309 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5310 pUsage = pUsage->pNext;
5311 if (!pUsage)
5312 {
5313 supdrvLdrUnlock(pDevExt);
5314 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image not found");
5315 }
5316 pImage = pUsage->pImage;
5317
5318 /*
5319 * Validate input.
5320 */
5321 if ( pImage->cbImageWithEverything != pReq->u.In.cbImageWithEverything
5322 || pImage->cbImageBits != pReq->u.In.cbImageBits)
5323 {
5324 supdrvLdrUnlock(pDevExt);
5325 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %u(prep) != %u(load) or %u != %u",
5326 pImage->cbImageWithEverything, pReq->u.In.cbImageWithEverything, pImage->cbImageBits, pReq->u.In.cbImageBits);
5327 }
5328
5329 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
5330 {
5331 unsigned uState = pImage->uState;
5332 supdrvLdrUnlock(pDevExt);
5333 if (uState != SUP_IOCTL_LDR_LOAD)
5334 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
5335 pReq->u.Out.uErrorMagic = 0;
5336 return VERR_ALREADY_LOADED;
5337 }
5338
5339 /* If the loader interface is locked down, don't load new images */
5340 if (pDevExt->fLdrLockedDown)
5341 {
5342 supdrvLdrUnlock(pDevExt);
5343 return supdrvLdrLoadError(VERR_PERMISSION_DENIED, pReq, "Loader is locked down");
5344 }
5345
5346 /*
5347 * Copy the segments before we start using supdrvLdrValidatePointer for entrypoint validation.
5348 */
5349 pImage->cSegments = pReq->u.In.cSegments;
5350 {
5351 size_t cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
5352 pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSegments], cbSegments);
5353 if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
5354 pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
5355 else
5356 {
5357 supdrvLdrUnlock(pDevExt);
5358 return supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
5359 }
5360 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5361 }
5362
5363 /*
5364 * Validate entrypoints.
5365 */
5366 switch (pReq->u.In.eEPType)
5367 {
5368 case SUPLDRLOADEP_NOTHING:
5369 break;
5370
5371 case SUPLDRLOADEP_VMMR0:
5372 if (pReq->u.In.EP.VMMR0.pvVMMR0 != pImage->pvImage)
5373 {
5374 supdrvLdrUnlock(pDevExt);
5375 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid pvVMMR0 pointer: %p, expected %p", pReq->u.In.EP.VMMR0.pvVMMR0, pImage->pvImage);
5376 }
5377 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, pReq->u.In.abImage, "VMMR0EntryFast", pReq);
5378 if (RT_FAILURE(rc))
5379 return rc;
5380 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, pReq->u.In.abImage, "VMMR0EntryEx", pReq);
5381 if (RT_FAILURE(rc))
5382 return rc;
5383 break;
5384
5385 case SUPLDRLOADEP_SERVICE:
5386 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, pReq->u.In.abImage, "pfnServiceReq", pReq);
5387 if (RT_FAILURE(rc))
5388 return rc;
5389 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
5390 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
5391 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
5392 {
5393 supdrvLdrUnlock(pDevExt);
5394 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "apvReserved={%p,%p,%p} MBZ!",
5395 pReq->u.In.EP.Service.apvReserved[0], pReq->u.In.EP.Service.apvReserved[1],
5396 pReq->u.In.EP.Service.apvReserved[2]);
5397 }
5398 break;
5399
5400 default:
5401 supdrvLdrUnlock(pDevExt);
5402 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid eEPType=%d", pReq->u.In.eEPType);
5403 }
5404
5405 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, pReq->u.In.abImage, "ModuleInit", pReq);
5406 if (RT_FAILURE(rc))
5407 return rc;
5408 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, pReq->u.In.abImage, "ModuleTerm", pReq);
5409 if (RT_FAILURE(rc))
5410 return rc;
5411 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5412
5413 /*
5414 * Allocate and copy the tables if non-native.
5415 * (No need to do try/except as this is a buffered request.)
5416 */
5417 if (!pImage->fNative)
5418 {
5419 pImage->cbStrTab = pReq->u.In.cbStrTab;
5420 if (pImage->cbStrTab)
5421 {
5422 pImage->pachStrTab = (char *)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offStrTab], pImage->cbStrTab);
5423 if (!pImage->pachStrTab)
5424 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for string table: %#x", pImage->cbStrTab);
5425 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5426 }
5427
5428 pImage->cSymbols = pReq->u.In.cSymbols;
5429 if (RT_SUCCESS(rc) && pImage->cSymbols)
5430 {
5431 size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
5432 pImage->paSymbols = (PSUPLDRSYM)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSymbols], cbSymbols);
5433 if (!pImage->paSymbols)
5434 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
5435 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5436 }
5437 }
5438
5439 /*
5440 * Copy the bits and apply permissions / complete native loading.
5441 */
5442 if (RT_SUCCESS(rc))
5443 {
5444 pImage->uState = SUP_IOCTL_LDR_LOAD;
5445 pImage->pfnModuleInit = (PFNR0MODULEINIT)(uintptr_t)pReq->u.In.pfnModuleInit;
5446 pImage->pfnModuleTerm = (PFNR0MODULETERM)(uintptr_t)pReq->u.In.pfnModuleTerm;
5447
5448 if (pImage->fNative)
5449 rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.abImage, pReq);
5450 else
5451 {
5452#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5453 uint32_t i;
5454 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5455
5456 for (i = 0; i < pImage->cSegments; i++)
5457 {
5458 rc = RTR0MemObjProtect(pImage->hMemObjImage, pImage->paSegments[i].off, pImage->paSegments[i].cb,
5459 pImage->paSegments[i].fProt);
5460 if (RT_SUCCESS(rc))
5461 continue;
5462 if (rc == VERR_NOT_SUPPORTED)
5463 rc = VINF_SUCCESS;
5464 else
5465 rc = supdrvLdrLoadError(rc, pReq, "RTR0MemObjProtect failed on seg#%u %#RX32 LB %#RX32 fProt=%#x",
5466 i, pImage->paSegments[i].off, pImage->paSegments[i].cb, pImage->paSegments[i].fProt);
5467 break;
5468 }
5469#else
5470 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5471#endif
5472 Log(("vboxdrv: Loaded '%s' at %p\n", pImage->szName, pImage->pvImage));
5473 }
5474 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5475 }
5476
5477 /*
5478 * Update any entry points.
5479 */
5480 if (RT_SUCCESS(rc))
5481 {
5482 switch (pReq->u.In.eEPType)
5483 {
5484 default:
5485 case SUPLDRLOADEP_NOTHING:
5486 rc = VINF_SUCCESS;
5487 break;
5488 case SUPLDRLOADEP_VMMR0:
5489 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0,
5490 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
5491 break;
5492 case SUPLDRLOADEP_SERVICE:
5493 pImage->pfnServiceReqHandler = (PFNSUPR0SERVICEREQHANDLER)(uintptr_t)pReq->u.In.EP.Service.pfnServiceReq;
5494 rc = VINF_SUCCESS;
5495 break;
5496 }
5497 }
5498
5499 /*
5500 * On success call the module initialization.
5501 */
5502 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
5503 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
5504 {
5505 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
5506 pDevExt->pLdrInitImage = pImage;
5507 pDevExt->hLdrInitThread = RTThreadNativeSelf();
5508 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5509 rc = pImage->pfnModuleInit(pImage);
5510 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5511 pDevExt->pLdrInitImage = NULL;
5512 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
5513 if (RT_FAILURE(rc))
5514 {
5515 if (pDevExt->pvVMMR0 == pImage->pvImage)
5516 supdrvLdrUnsetVMMR0EPs(pDevExt);
5517 supdrvLdrLoadError(rc, pReq, "ModuleInit failed: %Rrc", rc);
5518 }
5519 }
5520 if (RT_SUCCESS(rc))
5521 {
5522 SUPR0Printf("vboxdrv: %RKv %s\n", pImage->pvImage, pImage->szName);
5523 pReq->u.Out.uErrorMagic = 0;
5524 pReq->u.Out.szError[0] = '\0';
5525 }
5526 else
5527 {
5528 /* Inform the tracing component in case ModuleInit registered TPs. */
5529 supdrvTracerModuleUnloading(pDevExt, pImage);
5530
5531 pImage->uState = SUP_IOCTL_LDR_OPEN;
5532 pImage->pfnModuleInit = NULL;
5533 pImage->pfnModuleTerm = NULL;
5534        pImage->pfnServiceReqHandler = NULL;
5535 pImage->cbStrTab = 0;
5536 RTMemFree(pImage->pachStrTab);
5537 pImage->pachStrTab = NULL;
5538 RTMemFree(pImage->paSymbols);
5539 pImage->paSymbols = NULL;
5540 pImage->cSymbols = 0;
5541 }
5542
5543 supdrvLdrUnlock(pDevExt);
5544 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5545 return rc;
5546}
5547
5548
5549/**
5550 * Frees a previously loaded (prep'ed) image.
5551 *
5552 * @returns IPRT status code.
5553 * @param pDevExt Device globals.
5554 * @param pSession Session data.
5555 * @param pReq The request.
5556 */
5557static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
5558{
5559 int rc;
5560 PSUPDRVLDRUSAGE pUsagePrev;
5561 PSUPDRVLDRUSAGE pUsage;
5562 PSUPDRVLDRIMAGE pImage;
5563 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
5564
5565 /*
5566 * Find the ldr image.
5567 */
5568 supdrvLdrLock(pDevExt);
5569 pUsagePrev = NULL;
5570 pUsage = pSession->pLdrUsage;
5571 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5572 {
5573 pUsagePrev = pUsage;
5574 pUsage = pUsage->pNext;
5575 }
5576 if (!pUsage)
5577 {
5578 supdrvLdrUnlock(pDevExt);
5579 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
5580 return VERR_INVALID_HANDLE;
5581 }
5582 if (pUsage->cRing3Usage == 0)
5583 {
5584 supdrvLdrUnlock(pDevExt);
5585 Log(("SUP_IOCTL_LDR_FREE: No ring-3 reference to the image!\n"));
5586 return VERR_CALLER_NO_REFERENCE;
5587 }
5588
5589 /*
5590 * Check if we can remove anything.
5591 */
5592 rc = VINF_SUCCESS;
5593 pImage = pUsage->pImage;
5594 if (pImage->cUsage <= 1 || pUsage->cRing3Usage + pUsage->cRing0Usage <= 1)
5595 {
5596 /*
5597 * Check if there are any objects with destructors in the image, if
5598 * so leave it for the session cleanup routine so we get a chance to
5599 * clean things up in the right order and not leave them all dangling.
5600 */
5601 RTSpinlockAcquire(pDevExt->Spinlock);
5602 if (pImage->cUsage <= 1)
5603 {
5604 PSUPDRVOBJ pObj;
5605 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
5606 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5607 {
5608 rc = VERR_DANGLING_OBJECTS;
5609 break;
5610 }
5611 }
5612 else
5613 {
5614 PSUPDRVUSAGE pGenUsage;
5615 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
5616 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5617 {
5618 rc = VERR_DANGLING_OBJECTS;
5619 break;
5620 }
5621 }
5622 RTSpinlockRelease(pDevExt->Spinlock);
5623 if (rc == VINF_SUCCESS)
5624 {
5625 /* unlink it */
5626 if (pUsagePrev)
5627 pUsagePrev->pNext = pUsage->pNext;
5628 else
5629 pSession->pLdrUsage = pUsage->pNext;
5630
5631 /* free it */
5632 pUsage->pImage = NULL;
5633 pUsage->pNext = NULL;
5634 RTMemFree(pUsage);
5635
5636 /*
5637 * Dereference the image.
5638 */
5639 if (pImage->cUsage <= 1)
5640 supdrvLdrFree(pDevExt, pImage);
5641 else
5642 pImage->cUsage--;
5643 }
5644 else
5645 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
5646 }
5647 else
5648 {
5649 /*
5650 * Dereference both image and usage.
5651 */
5652 pImage->cUsage--;
5653 pUsage->cRing3Usage--;
5654 }
5655
5656 supdrvLdrUnlock(pDevExt);
5657 return rc;
5658}
5659
5660
5661/**
5662 * Lock down the image loader interface.
5663 *
5664 * @returns IPRT status code.
5665 * @param pDevExt Device globals.
5666 */
5667static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt)
5668{
5669 LogFlow(("supdrvIOCtl_LdrLockDown:\n"));
5670
5671 supdrvLdrLock(pDevExt);
5672 if (!pDevExt->fLdrLockedDown)
5673 {
5674 pDevExt->fLdrLockedDown = true;
5675 Log(("supdrvIOCtl_LdrLockDown: Image loader interface locked down\n"));
5676 }
5677 supdrvLdrUnlock(pDevExt);
5678
5679 return VINF_SUCCESS;
5680}
5681
5682
5683/**
5684 * Queries the address of a symbol in an open image.
5685 *
5686 * @returns IPRT status code.
5687 * @param pDevExt Device globals.
5688 * @param pSession Session data.
5689 * @param pReq The request buffer.
5690 */
5691static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
5692{
5693 PSUPDRVLDRIMAGE pImage;
5694 PSUPDRVLDRUSAGE pUsage;
5695 uint32_t i;
5696 PSUPLDRSYM paSyms;
5697 const char *pchStrings;
5698 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
5699 void *pvSymbol = NULL;
5700 int rc = VERR_SYMBOL_NOT_FOUND;
5701 Log3(("supdrvIOCtl_LdrQuerySymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
5702
5703 /*
5704 * Find the ldr image.
5705 */
5706 supdrvLdrLock(pDevExt);
5707 pUsage = pSession->pLdrUsage;
5708 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5709 pUsage = pUsage->pNext;
5710 if (!pUsage)
5711 {
5712 supdrvLdrUnlock(pDevExt);
5713 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
5714 return VERR_INVALID_HANDLE;
5715 }
5716 pImage = pUsage->pImage;
5717 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
5718 {
5719 unsigned uState = pImage->uState;
5720 supdrvLdrUnlock(pDevExt);
5721 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
5722 return VERR_ALREADY_LOADED;
5723 }
5724
5725 /*
5726 * Search the image exports / symbol strings.
5727 *
5728     * Note! The int32_t is for native loading on Solaris where the data
5729 * and text segments are in very different places.
5730 */
5731 if (pImage->fNative)
5732 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pReq->u.In.szSymbol, cbSymbol - 1, &pvSymbol);
5733 else
5734 {
5735 pchStrings = pImage->pachStrTab;
5736 paSyms = pImage->paSymbols;
5737 for (i = 0; i < pImage->cSymbols; i++)
5738 {
5739 if ( paSyms[i].offName + cbSymbol <= pImage->cbStrTab
5740 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
5741 {
5742 pvSymbol = (uint8_t *)pImage->pvImage + (int32_t)paSyms[i].offSymbol;
5743 rc = VINF_SUCCESS;
5744 break;
5745 }
5746 }
5747 }
5748 supdrvLdrUnlock(pDevExt);
5749 pReq->u.Out.pvSymbol = pvSymbol;
5750 return rc;
5751}
5752
5753
5754/**
5755 * Gets the address of a symbol in an open image or the support driver.
5756 *
5757 * @returns VINF_SUCCESS on success.
5758 * @returns VBox error status on failure (e.g. VERR_SYMBOL_NOT_FOUND, VERR_MODULE_NOT_FOUND or VERR_WRONG_ORDER).
5759 * @param pDevExt Device globals.
5760 * @param pSession Session data.
5761 * @param pReq The request buffer.
5762 */
5763static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
5764{
5765 int rc = VINF_SUCCESS;
5766 const char *pszSymbol = pReq->u.In.pszSymbol;
5767 const char *pszModule = pReq->u.In.pszModule;
5768 size_t cbSymbol;
5769 char const *pszEnd;
5770 uint32_t i;
5771
5772 /*
5773 * Input validation.
5774 */
5775 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
5776 pszEnd = RTStrEnd(pszSymbol, 512);
5777 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5778 cbSymbol = pszEnd - pszSymbol + 1;
5779
5780 if (pszModule)
5781 {
5782 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
5783 pszEnd = RTStrEnd(pszModule, 64);
5784 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5785 }
5786 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
5787
5788
5789 if ( !pszModule
5790 || !strcmp(pszModule, "SupDrv"))
5791 {
5792 /*
5793 * Search the support driver export table.
5794 */
5795 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
5796 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
5797 {
5798 pReq->u.Out.pfnSymbol = (PFNRT)(uintptr_t)g_aFunctions[i].pfn;
5799 break;
5800 }
5801 }
5802 else
5803 {
5804 /*
5805 * Find the loader image.
5806 */
5807 PSUPDRVLDRIMAGE pImage;
5808
5809 supdrvLdrLock(pDevExt);
5810
5811 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5812 if (!strcmp(pImage->szName, pszModule))
5813 break;
5814 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
5815 {
5816 /*
5817 * Search the image exports / symbol strings.
5818 */
5819 if (pImage->fNative)
5820 {
5821 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pszSymbol, cbSymbol - 1, (void **)&pReq->u.Out.pfnSymbol);
5822 if (RT_SUCCESS(rc))
5823 rc = supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5824 }
5825 else
5826 {
5827 const char *pchStrings = pImage->pachStrTab;
5828 PCSUPLDRSYM paSyms = pImage->paSymbols;
5829 rc = VERR_SYMBOL_NOT_FOUND;
5830 for (i = 0; i < pImage->cSymbols; i++)
5831 {
5832 if ( paSyms[i].offName + cbSymbol <= pImage->cbStrTab
5833 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
5834 {
5835 /*
5836 * Found it! Calc the symbol address and add a reference to the module.
5837 */
5838 pReq->u.Out.pfnSymbol = (PFNRT)((uintptr_t)pImage->pvImage + (int32_t)paSyms[i].offSymbol);
5839 rc = supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5840 break;
5841 }
5842 }
5843 }
5844 }
5845 else
5846 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
5847
5848 supdrvLdrUnlock(pDevExt);
5849 }
5850 return rc;
5851}
5852
5853
5854/**
5855 * Looks up a symbol in g_aFunctions (also handling the g_SUPGlobalInfoPage export).
5856 *
5857 * @returns VINF_SUCCESS on success, VERR_SYMBOL_NOT_FOUND on failure.
5858 * @param pszSymbol The symbol to look up.
5859 * @param puValue Where to return the value.
5860 */
5861int VBOXCALL supdrvLdrGetExportedSymbol(const char *pszSymbol, uintptr_t *puValue)
5862{
5863 uint32_t i;
5864 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
5865 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
5866 {
5867 *puValue = (uintptr_t)g_aFunctions[i].pfn;
5868 return VINF_SUCCESS;
5869 }
5870
5871 if (!strcmp(pszSymbol, "g_SUPGlobalInfoPage"))
5872 {
5873 *puValue = (uintptr_t)g_pSUPGlobalInfoPage;
5874 return VINF_SUCCESS;
5875 }
5876
5877 return VERR_SYMBOL_NOT_FOUND;
5878}
5879
5880
5881/**
5882 * Updates the VMMR0 entry point pointers.
5883 *
5884 * @returns IPRT status code.
5885 * @param pDevExt Device globals.
5886 * @param pvVMMR0 VMMR0 image handle.
5887 * @param pvVMMR0EntryFast VMMR0EntryFast address.
5888 * @param pvVMMR0EntryEx VMMR0EntryEx address.
5889 * @remark Caller must own the loader mutex.
5890 */
5891static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
5892{
5893 int rc = VINF_SUCCESS;
5894 LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryFast=%p\n", pvVMMR0, pvVMMR0EntryFast));
5895
5896
5897 /*
5898 * Check if not yet set.
5899 */
5900 if (!pDevExt->pvVMMR0)
5901 {
5902 pDevExt->pvVMMR0 = pvVMMR0;
5903 *(void **)&pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
5904 *(void **)&pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
5905 ASMCompilerBarrier(); /* the above isn't nice, so be careful... */
5906 }
5907 else
5908 {
5909 /*
5910 * Return failure or success depending on whether the values match or not.
5911 */
5912 if ( pDevExt->pvVMMR0 != pvVMMR0
5913 || (uintptr_t)pDevExt->pfnVMMR0EntryFast != (uintptr_t)pvVMMR0EntryFast
5914 || (uintptr_t)pDevExt->pfnVMMR0EntryEx != (uintptr_t)pvVMMR0EntryEx)
5915 {
5916 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
5917 rc = VERR_INVALID_PARAMETER;
5918 }
5919 }
5920 return rc;
5921}
5922
5923
5924/**
5925 * Unsets the VMMR0 entry point installed by supdrvLdrSetR0EP.
5926 *
5927 * @param pDevExt Device globals.
5928 */
5929static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
5930{
5931 pDevExt->pvVMMR0 = NULL;
5932 pDevExt->pfnVMMR0EntryFast = NULL;
5933 pDevExt->pfnVMMR0EntryEx = NULL;
5934}
5935
5936
5937/**
5938 * Adds a usage reference in the specified session of an image.
5939 *
5940 * Called while owning the loader semaphore.
5941 *
5942 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
5943 * @param pSession Session in question.
5944 * @param pImage Image which the session is using.
5945 * @param fRing3Usage Set if it's ring-3 usage, clear if ring-0.
5946 */
5947static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage)
5948{
5949 PSUPDRVLDRUSAGE pUsage;
5950 LogFlow(("supdrvLdrAddUsage: pImage=%p %d\n", pImage, fRing3Usage));
5951
5952 /*
5953 * Referenced it already?
5954 */
5955 pUsage = pSession->pLdrUsage;
5956 while (pUsage)
5957 {
5958 if (pUsage->pImage == pImage)
5959 {
5960 if (fRing3Usage)
5961 pUsage->cRing3Usage++;
5962 else
5963 pUsage->cRing0Usage++;
5964 return VINF_SUCCESS;
5965 }
5966 pUsage = pUsage->pNext;
5967 }
5968
5969 /*
5970 * Allocate new usage record.
5971 */
5972 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
5973 AssertReturn(pUsage, /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_5);
5974 pUsage->cRing3Usage = fRing3Usage ? 1 : 0;
5975 pUsage->cRing0Usage = fRing3Usage ? 0 : 1;
5976 pUsage->pImage = pImage;
5977 pUsage->pNext = pSession->pLdrUsage;
5978 pSession->pLdrUsage = pUsage;
5979 return VINF_SUCCESS;
5980}
5981
5982
5983/**
5984 * Frees a load image.
5985 *
5986 * @param pDevExt Pointer to device extension.
5987 * @param pImage Pointer to the image we're going to free.
5988 * This image must exist!
5989 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
5990 */
5991static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
5992{
5993 PSUPDRVLDRIMAGE pImagePrev;
5994 LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));
5995
5996 /*
5997 * Warn if we're releasing images while the image loader interface is
5998 * locked down -- we won't be able to reload them!
5999 */
6000 if (pDevExt->fLdrLockedDown)
6001 Log(("supdrvLdrFree: Warning: unloading '%s' image, while loader interface is locked down!\n", pImage->szName));
6002
6003    /* find it - argh, we should have used a doubly linked list. */
6004 Assert(pDevExt->pLdrImages);
6005 pImagePrev = NULL;
6006 if (pDevExt->pLdrImages != pImage)
6007 {
6008 pImagePrev = pDevExt->pLdrImages;
6009 while (pImagePrev->pNext != pImage)
6010 pImagePrev = pImagePrev->pNext;
6011 Assert(pImagePrev->pNext == pImage);
6012 }
6013
6014 /* unlink */
6015 if (pImagePrev)
6016 pImagePrev->pNext = pImage->pNext;
6017 else
6018 pDevExt->pLdrImages = pImage->pNext;
6019
6020    /* If this is VMMR0.r0, unset its entry point pointers. */
6021 if (pDevExt->pvVMMR0 == pImage->pvImage)
6022 supdrvLdrUnsetVMMR0EPs(pDevExt);
6023
6024 /* check for objects with destructors in this image. (Shouldn't happen.) */
6025 if (pDevExt->pObjs)
6026 {
6027 unsigned cObjs = 0;
6028 PSUPDRVOBJ pObj;
6029 RTSpinlockAcquire(pDevExt->Spinlock);
6030 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
6031 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
6032 {
6033 pObj->pfnDestructor = NULL;
6034 cObjs++;
6035 }
6036 RTSpinlockRelease(pDevExt->Spinlock);
6037 if (cObjs)
6038 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
6039 }
6040
6041 /* call termination function if fully loaded. */
6042 if ( pImage->pfnModuleTerm
6043 && pImage->uState == SUP_IOCTL_LDR_LOAD)
6044 {
6045        LogFlow(("supdrvLdrFree: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
6046 pDevExt->hLdrTermThread = RTThreadNativeSelf();
6047 pImage->pfnModuleTerm(pImage);
6048 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
6049 }
6050
6051 /* Inform the tracing component. */
6052 supdrvTracerModuleUnloading(pDevExt, pImage);
6053
6054 /* Do native unload if appropriate, then inform the native code about the
6055 unloading (mainly for non-native loading case). */
6056 if (pImage->fNative)
6057 supdrvOSLdrUnload(pDevExt, pImage);
6058 supdrvOSLdrNotifyUnloaded(pDevExt, pImage);
6059
6060 /* free the image */
6061 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
6062 pImage->cUsage = 0;
6063 pImage->pDevExt = NULL;
6064 pImage->pNext = NULL;
6065 pImage->uState = SUP_IOCTL_LDR_FREE;
6066#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
6067 RTR0MemObjFree(pImage->hMemObjImage, true /*fMappings*/);
6068 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
6069#else
6070 RTMemExecFree(pImage->pvImageAlloc, pImage->cbImageBits + 31);
6071 pImage->pvImageAlloc = NULL;
6072#endif
6073 pImage->pvImage = NULL;
6074 RTMemFree(pImage->pachStrTab);
6075 pImage->pachStrTab = NULL;
6076 RTMemFree(pImage->paSymbols);
6077 pImage->paSymbols = NULL;
6078 RTMemFree(pImage->paSegments);
6079 pImage->paSegments = NULL;
6080 RTMemFree(pImage);
6081}
6082
6083
6084/**
6085 * Acquires the loader lock.
6086 *
6087 * @returns IPRT status code.
6088 * @param pDevExt The device extension.
6089 * @note Not recursive on all platforms yet.
6090 */
6091DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt)
6092{
6093#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6094 int rc = RTSemMutexRequest(pDevExt->mtxLdr, RT_INDEFINITE_WAIT);
6095#else
6096 int rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
6097#endif
6098 AssertRC(rc);
6099 return rc;
6100}
6101
6102
6103/**
6104 * Releases the loader lock.
6105 *
6106 * @returns IPRT status code.
6107 * @param pDevExt The device extension.
6108 */
6109DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt)
6110{
6111#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6112 return RTSemMutexRelease(pDevExt->mtxLdr);
6113#else
6114 return RTSemFastMutexRelease(pDevExt->mtxLdr);
6115#endif
6116}
6117
6118
6119/**
6120 * Acquires the global loader lock.
6121 *
6122 * This can be useful when accessing structures that are being modified by the
6123 * ModuleInit and ModuleTerm callbacks. Use SUPR0LdrUnlock() to unlock.
6124 *
6125 * @returns VBox status code.
6126 * @param pSession The session doing the locking.
6127 *
6128 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6129 */
6130SUPR0DECL(int) SUPR0LdrLock(PSUPDRVSESSION pSession)
6131{
6132 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6133 return supdrvLdrLock(pSession->pDevExt);
6134}
6135
6136
6137/**
6138 * Releases the global loader lock.
6139 *
6140 * Must correspond to a SUPR0LdrLock call!
6141 *
6142 * @returns VBox status code.
6143 * @param pSession The session doing the locking.
6144 *
6145 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6146 */
6147SUPR0DECL(int) SUPR0LdrUnlock(PSUPDRVSESSION pSession)
6148{
6149 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6150 return supdrvLdrUnlock(pSession->pDevExt);
6151}
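
/*
 * Typical lock/unlock pairing sketch (assumes a valid session and a context
 * outside of ModuleInit/ModuleTerm, where this API must not be used):
 *
 * @code
 *  int rc = SUPR0LdrLock(pSession);
 *  if (RT_SUCCESS(rc))
 *  {
 *      // ... inspect structures that ModuleInit/ModuleTerm may otherwise modify ...
 *      SUPR0LdrUnlock(pSession);
 *  }
 * @endcode
 */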
6152
6153
6154/**
6155 * For checking lock ownership in Assert() statements during ModuleInit and
6156 * ModuleTerm.
6157 *
6158 * @returns Whether we own the loader lock or not.
6159 * @param hMod The module in question.
6160 * @param fWantToHear For hosts where it is difficult to know who owns the
6161 * lock, this will be returned instead.
6162 */
6163SUPR0DECL(bool) SUPR0LdrIsLockOwnerByMod(void *hMod, bool fWantToHear)
6164{
6165 PSUPDRVDEVEXT pDevExt;
6166 RTNATIVETHREAD hOwner;
6167
6168 PSUPDRVLDRIMAGE pImage = (PSUPDRVLDRIMAGE)hMod;
6169 AssertPtrReturn(pImage, fWantToHear);
6170 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, fWantToHear);
6171
6172 pDevExt = pImage->pDevExt;
6173 AssertPtrReturn(pDevExt, fWantToHear);
6174
6175 /*
6176 * Expecting this to be called at init/term time only, so this will be sufficient.
6177 */
6178 hOwner = pDevExt->hLdrInitThread;
6179 if (hOwner == NIL_RTNATIVETHREAD)
6180 hOwner = pDevExt->hLdrTermThread;
6181 if (hOwner != NIL_RTNATIVETHREAD)
6182 return hOwner == RTThreadNativeSelf();
6183
6184 /*
6185 * Neither of the two semaphore variants currently offers very good
6186 * introspection, so we wing it for now. This API is VBOX_STRICT only.
6187 */
6188#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6189 return RTSemMutexIsOwned(pDevExt->mtxLdr) && fWantToHear;
6190#else
6191 return fWantToHear;
6192#endif
6193}
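
/*
 * Assertion sketch for a module init callback.  The FNR0MODULEINIT signature
 * shown here is an assumption (take the exact typedef from the SUP headers);
 * hMod is the image handle the loader passes to the callback:
 *
 * @code
 *  static DECLCALLBACK(int) myModuleInit(void *hMod)
 *  {
 *      Assert(SUPR0LdrIsLockOwnerByMod(hMod, true)); // fWantToHear=true on hosts lacking lock introspection
 *      return VINF_SUCCESS;                          // module specific initialization would go here
 *  }
 * @endcode
 */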
6194
6195
6196/**
6197 * Locates and retains the given module for ring-0 usage.
6198 *
6199 * @returns VBox status code.
6200 * @param pSession The session to associate the module reference with.
6201 * @param pszName The module name (no path).
6202 * @param phMod Where to return the module handle. The module is
6203 * referenced and a call to SUPR0LdrModRelease() is
6204 * necessary when done with it.
6205 */
6206SUPR0DECL(int) SUPR0LdrModByName(PSUPDRVSESSION pSession, const char *pszName, void **phMod)
6207{
6208 int rc;
6209 size_t cchName;
6210 PSUPDRVDEVEXT pDevExt;
6211
6212 /*
6213 * Validate input.
6214 */
6215 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
6216 *phMod = NULL;
6217 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6218 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
6219 cchName = strlen(pszName);
6220 AssertReturn(cchName > 0, VERR_EMPTY_STRING);
6221 AssertReturn(cchName < RT_SIZEOFMEMB(SUPDRVLDRIMAGE, szName), VERR_MODULE_NOT_FOUND);
6222
6223 /*
6224 * Do the lookup.
6225 */
6226 pDevExt = pSession->pDevExt;
6227 rc = supdrvLdrLock(pDevExt);
6228 if (RT_SUCCESS(rc))
6229 {
6230 PSUPDRVLDRIMAGE pImage;
6231 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
6232 {
6233 if ( pImage->szName[cchName] == '\0'
6234 && !memcmp(pImage->szName, pszName, cchName))
6235 {
6236 /*
6237                 * Check the state and make sure we don't overflow the reference counter before returning it.
6238 */
6239 uint32_t uState = pImage->uState;
6240 if (uState == SUP_IOCTL_LDR_LOAD)
6241 {
6242 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
6243 {
6244 pImage->cUsage++;
6245 supdrvLdrAddUsage(pSession, pImage, false /*fRing3Usage*/);
6246 *phMod = pImage;
6247 supdrvLdrUnlock(pDevExt);
6248 return VINF_SUCCESS;
6249 }
6250 supdrvLdrUnlock(pDevExt);
6251 Log(("SUPR0LdrModByName: Too many existing references to '%s'!\n", pszName));
6252 return VERR_TOO_MANY_REFERENCES;
6253 }
6254 supdrvLdrUnlock(pDevExt);
6255 Log(("SUPR0LdrModByName: Module '%s' is not in the loaded state (%d)!\n", pszName, uState));
6256 return VERR_INVALID_STATE;
6257 }
6258 }
6259 supdrvLdrUnlock(pDevExt);
6260 Log(("SUPR0LdrModByName: Module '%s' not found!\n", pszName));
6261 rc = VERR_MODULE_NOT_FOUND;
6262 }
6263 return rc;
6264}
6265
6266
6267/**
6268 * Retains a ring-0 module reference.
6269 *
6270 * Release reference when done by calling SUPR0LdrModRelease().
6271 *
6272 * @returns VBox status code.
6273 * @param pSession The session to reference the module in. A usage
6274 * record is added if needed.
6275 * @param hMod The handle to the module to retain.
6276 */
6277SUPR0DECL(int) SUPR0LdrModRetain(PSUPDRVSESSION pSession, void *hMod)
6278{
6279 PSUPDRVDEVEXT pDevExt;
6280 PSUPDRVLDRIMAGE pImage;
6281 int rc;
6282
6283 /* Validate input a little. */
6284 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6285 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6286 pImage = (PSUPDRVLDRIMAGE)hMod;
6287 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6288
6289 /* Reference the module: */
6290 pDevExt = pSession->pDevExt;
6291 rc = supdrvLdrLock(pDevExt);
6292 if (RT_SUCCESS(rc))
6293 {
6294 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6295 {
6296 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
6297 {
6298 rc = supdrvLdrAddUsage(pSession, pImage, false /*fRing3Usage*/);
6299 if (RT_SUCCESS(rc))
6300 {
6301 pImage->cUsage++;
6302 rc = VINF_SUCCESS;
6303 }
6304 }
6305 else
6306 AssertFailedStmt(rc = VERR_TOO_MANY_REFERENCES);
6307 }
6308 else
6309 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6310 supdrvLdrUnlock(pDevExt);
6311 }
6312 return rc;
6313}
6314
6315
6316/**
6317 * Releases a ring-0 module reference retained by SUPR0LdrModByName() or
6318 * SUPR0LdrModRetain().
6319 *
6320 * @returns VBox status code.
6321 * @param pSession The session that the module was retained in.
6322 * @param hMod The module handle. NULL is silently ignored.
6323 */
6324SUPR0DECL(int) SUPR0LdrModRelease(PSUPDRVSESSION pSession, void *hMod)
6325{
6326 PSUPDRVDEVEXT pDevExt;
6327 PSUPDRVLDRIMAGE pImage;
6328 int rc;
6329
6330 /*
6331 * Validate input.
6332 */
6333 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6334 if (!hMod)
6335 return VINF_SUCCESS;
6336 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6337 pImage = (PSUPDRVLDRIMAGE)hMod;
6338 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6339
6340 /*
6341 * Take the loader lock and revalidate the module:
6342 */
6343 pDevExt = pSession->pDevExt;
6344 rc = supdrvLdrLock(pDevExt);
6345 if (RT_SUCCESS(rc))
6346 {
6347 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6348 {
6349 /*
6350 * Find the usage record for the module:
6351 */
6352 PSUPDRVLDRUSAGE pPrevUsage = NULL;
6353 PSUPDRVLDRUSAGE pUsage;
6354
6355 rc = VERR_MODULE_NOT_FOUND;
6356 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6357 {
6358 if (pUsage->pImage == pImage)
6359 {
6360 /*
6361 * Drop a ring-0 reference:
6362 */
6363 Assert(pImage->cUsage >= pUsage->cRing0Usage + pUsage->cRing3Usage);
6364 if (pUsage->cRing0Usage > 0)
6365 {
6366 if (pImage->cUsage > 1)
6367 {
6368 pImage->cUsage -= 1;
6369 pUsage->cRing0Usage -= 1;
6370 rc = VINF_SUCCESS;
6371 }
6372 else
6373 {
6374 supdrvLdrFree(pDevExt, pImage);
6375
6376 if (pPrevUsage)
6377 pPrevUsage->pNext = pUsage->pNext;
6378 else
6379 pSession->pLdrUsage = pUsage->pNext;
6380 pUsage->pNext = NULL;
6381 pUsage->pImage = NULL;
6382 pUsage->cRing0Usage = 0;
6383 pUsage->cRing3Usage = 0;
6384 RTMemFree(pUsage);
6385
6386 rc = VINF_OBJECT_DESTROYED;
6387 }
6388 }
6389 else
6390 AssertFailedStmt(rc = VERR_CALLER_NO_REFERENCE);
6391 break;
6392 }
6393 pPrevUsage = pUsage;
6394 }
6395 }
6396 else
6397 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6398 supdrvLdrUnlock(pDevExt);
6399 }
6400 return rc;
6402}
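
/*
 * Retain/release sketch (function name is a placeholder; "VMMR0.r0" is only
 * used here as an example of a loaded image name):
 *
 * @code
 *  static int myUseModule(PSUPDRVSESSION pSession)
 *  {
 *      void *hMod = NULL;
 *      int rc = SUPR0LdrModByName(pSession, "VMMR0.r0", &hMod);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... use the module while the reference is held ...
 *          SUPR0LdrModRelease(pSession, hMod); // may return VINF_OBJECT_DESTROYED for the last reference
 *      }
 *      return rc;
 *  }
 * @endcode
 */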
6403
6404
6405/**
6406 * Implements the service call request.
6407 *
6408 * @returns VBox status code.
6409 * @param pDevExt The device extension.
6410 * @param pSession The calling session.
6411 * @param pReq The request packet, valid.
6412 */
6413static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
6414{
6415#if !defined(RT_OS_WINDOWS) || defined(RT_ARCH_AMD64) || defined(DEBUG)
6416 int rc;
6417
6418 /*
6419 * Find the module first in the module referenced by the calling session.
6420 */
6421 rc = supdrvLdrLock(pDevExt);
6422 if (RT_SUCCESS(rc))
6423 {
6424 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
6425 PSUPDRVLDRUSAGE pUsage;
6426
6427 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6428 if ( pUsage->pImage->pfnServiceReqHandler
6429 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
6430 {
6431 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
6432 break;
6433 }
6434 supdrvLdrUnlock(pDevExt);
6435
6436 if (pfnServiceReqHandler)
6437 {
6438 /*
6439 * Call it.
6440 */
6441 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
6442 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
6443 else
6444 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
6445 }
6446 else
6447 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
6448 }
6449
6450 /* log it */
6451 if ( RT_FAILURE(rc)
6452 && rc != VERR_INTERRUPTED
6453 && rc != VERR_TIMEOUT)
6454 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6455 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6456 else
6457 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6458 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6459 return rc;
6460#else /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6461 RT_NOREF3(pDevExt, pSession, pReq);
6462 return VERR_NOT_IMPLEMENTED;
6463#endif /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6464}
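
/*
 * Illustration only, not part of the driver: a minimal sketch of the request
 * handler a ring-0 service module might export.  The signature is inferred
 * from the two call sites in supdrvIOCtl_CallServiceModule() above; the
 * operation numbers are hypothetical and exist only for this example.
 */
#if 0 /* example only, not compiled */
static DECLCALLBACK(int) exampleServiceReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
                                                  uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
{
    RT_NOREF1(pSession);
    switch (uOperation)
    {
        case 0: /* hypothetical "ping" operation: no request packet, no argument. */
            return !pReqHdr && u64Arg == 0 ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
        case 1: /* hypothetical operation that requires a request packet. */
            return pReqHdr ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
        default:
            return VERR_NOT_SUPPORTED;
    }
}
#endif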
6465
6466
6467/**
6468 * Implements the logger settings request.
6469 *
6470 * @returns VBox status code.
6471 * @param pReq The request.
6472 */
6473static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq)
6474{
6475 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
6476 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
6477 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
6478 PRTLOGGER pLogger = NULL;
6479 int rc;
6480
6481 /*
6482 * Some further validation.
6483 */
6484 switch (pReq->u.In.fWhat)
6485 {
6486 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6487 case SUPLOGGERSETTINGS_WHAT_CREATE:
6488 break;
6489
6490 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6491 if (*pszGroup || *pszFlags || *pszDest)
6492 return VERR_INVALID_PARAMETER;
6493 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
6494 return VERR_ACCESS_DENIED;
6495 break;
6496
6497 default:
6498 return VERR_INTERNAL_ERROR;
6499 }
6500
6501 /*
6502 * Get the logger.
6503 */
6504 switch (pReq->u.In.fWhich)
6505 {
6506 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6507 pLogger = RTLogGetDefaultInstance();
6508 break;
6509
6510 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6511 pLogger = RTLogRelGetDefaultInstance();
6512 break;
6513
6514 default:
6515 return VERR_INTERNAL_ERROR;
6516 }
6517
6518 /*
6519 * Do the job.
6520 */
6521 switch (pReq->u.In.fWhat)
6522 {
6523 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6524 if (pLogger)
6525 {
6526 rc = RTLogFlags(pLogger, pszFlags);
6527 if (RT_SUCCESS(rc))
6528 rc = RTLogGroupSettings(pLogger, pszGroup);
6529 NOREF(pszDest);
6530 }
6531 else
6532 rc = VERR_NOT_FOUND;
6533 break;
6534
6535 case SUPLOGGERSETTINGS_WHAT_CREATE:
6536 {
6537 if (pLogger)
6538 rc = VERR_ALREADY_EXISTS;
6539 else
6540 {
6541 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
6542
6543 rc = RTLogCreate(&pLogger,
6544 0 /* fFlags */,
6545 pszGroup,
6546 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
6547 ? "VBOX_LOG"
6548 : "VBOX_RELEASE_LOG",
6549 RT_ELEMENTS(s_apszGroups),
6550 s_apszGroups,
6551 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
6552 NULL);
6553 if (RT_SUCCESS(rc))
6554 {
6555 rc = RTLogFlags(pLogger, pszFlags);
6556 NOREF(pszDest);
6557 if (RT_SUCCESS(rc))
6558 {
6559 switch (pReq->u.In.fWhich)
6560 {
6561 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6562 pLogger = RTLogSetDefaultInstance(pLogger);
6563 break;
6564 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6565 pLogger = RTLogRelSetDefaultInstance(pLogger);
6566 break;
6567 }
6568 }
6569 RTLogDestroy(pLogger);
6570 }
6571 }
6572 break;
6573 }
6574
6575 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6576 switch (pReq->u.In.fWhich)
6577 {
6578 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6579 pLogger = RTLogSetDefaultInstance(NULL);
6580 break;
6581 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6582 pLogger = RTLogRelSetDefaultInstance(NULL);
6583 break;
6584 }
6585 rc = RTLogDestroy(pLogger);
6586 break;
6587
6588 default:
6589 {
6590 rc = VERR_INTERNAL_ERROR;
6591 break;
6592 }
6593 }
6594
6595 return rc;
6596}
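
/*
 * Illustration only, not part of the driver: a minimal sketch of how a caller
 * might pack the string table of a SUPLOGGERSETTINGS request to create the
 * debug logger.  Only fields referenced by supdrvIOCtl_LoggerSettings() above
 * are used; the ioctl header setup and buffer sizing are omitted, and the
 * group/flag strings are merely examples.
 */
#if 0 /* example only, not compiled */
static void examplePackLoggerCreateRequest(PSUPLOGGERSETTINGS pReq)
{
    static const char s_szGroups[] = "all";      /* example group settings */
    static const char s_szFlags[]  = "enabled";  /* example logger flags */
    static const char s_szDest[]   = "";         /* destination is not consumed by the code above */

    pReq->u.In.fWhich = SUPLOGGERSETTINGS_WHICH_DEBUG;
    pReq->u.In.fWhat  = SUPLOGGERSETTINGS_WHAT_CREATE;

    /* The three strings are stored back to back in szStrings and addressed by offset. */
    pReq->u.In.offGroups      = 0;
    pReq->u.In.offFlags       = sizeof(s_szGroups);
    pReq->u.In.offDestination = sizeof(s_szGroups) + sizeof(s_szFlags);
    memcpy(&pReq->u.In.szStrings[pReq->u.In.offGroups],      s_szGroups, sizeof(s_szGroups));
    memcpy(&pReq->u.In.szStrings[pReq->u.In.offFlags],       s_szFlags,  sizeof(s_szFlags));
    memcpy(&pReq->u.In.szStrings[pReq->u.In.offDestination], s_szDest,   sizeof(s_szDest));
}
#endif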
6597
6598
6599/**
6600 * Implements the MSR prober operations.
6601 *
6602 * @returns VBox status code.
6603 * @param pDevExt The device extension.
6604 * @param pReq The request.
6605 */
6606static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq)
6607{
6608#ifdef SUPDRV_WITH_MSR_PROBER
6609 RTCPUID const idCpu = pReq->u.In.idCpu == UINT32_MAX ? NIL_RTCPUID : pReq->u.In.idCpu;
6610 int rc;
6611
6612 switch (pReq->u.In.enmOp)
6613 {
6614 case SUPMSRPROBEROP_READ:
6615 {
6616 uint64_t uValue;
6617 rc = supdrvOSMsrProberRead(pReq->u.In.uMsr, idCpu, &uValue);
6618 if (RT_SUCCESS(rc))
6619 {
6620 pReq->u.Out.uResults.Read.uValue = uValue;
6621 pReq->u.Out.uResults.Read.fGp = false;
6622 }
6623 else if (rc == VERR_ACCESS_DENIED)
6624 {
6625 pReq->u.Out.uResults.Read.uValue = 0;
6626 pReq->u.Out.uResults.Read.fGp = true;
6627 rc = VINF_SUCCESS;
6628 }
6629 break;
6630 }
6631
6632 case SUPMSRPROBEROP_WRITE:
6633 rc = supdrvOSMsrProberWrite(pReq->u.In.uMsr, idCpu, pReq->u.In.uArgs.Write.uToWrite);
6634 if (RT_SUCCESS(rc))
6635 pReq->u.Out.uResults.Write.fGp = false;
6636 else if (rc == VERR_ACCESS_DENIED)
6637 {
6638 pReq->u.Out.uResults.Write.fGp = true;
6639 rc = VINF_SUCCESS;
6640 }
6641 break;
6642
6643 case SUPMSRPROBEROP_MODIFY:
6644 case SUPMSRPROBEROP_MODIFY_FASTER:
6645 rc = supdrvOSMsrProberModify(idCpu, pReq);
6646 break;
6647
6648 default:
6649 return VERR_INVALID_FUNCTION;
6650 }
6651 RT_NOREF1(pDevExt);
6652 return rc;
6653#else
6654 RT_NOREF2(pDevExt, pReq);
6655 return VERR_NOT_IMPLEMENTED;
6656#endif
6657}
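
/*
 * Illustration only, not part of the driver: a minimal sketch of how a caller
 * might fill in a SUPMSRPROBER read request and inspect the result.  Only the
 * fields used by supdrvIOCtl_MsrProber() above appear here; the MSR number is
 * just an example and the actual ioctl round trip is omitted.
 */
#if 0 /* example only, not compiled */
static void exampleFillMsrReadProbe(PSUPMSRPROBER pReq)
{
    pReq->u.In.enmOp = SUPMSRPROBEROP_READ;
    pReq->u.In.uMsr  = 0x10;        /* example: IA32_TIME_STAMP_COUNTER */
    pReq->u.In.idCpu = UINT32_MAX;  /* UINT32_MAX is translated to NIL_RTCPUID (any CPU) above */
}

static void exampleCheckMsrReadResult(PSUPMSRPROBER pReq)
{
    if (pReq->u.Out.uResults.Read.fGp)
        Log(("example: RDMSR raised #GP, the MSR is inaccessible\n"));
    else
        Log(("example: MSR value %#RX64\n", pReq->u.Out.uResults.Read.uValue));
}
#endif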
6658
6659
6660/**
6661 * Resumes the built-in keyboard on MacBook Air and MacBook Pro hosts.
6662 * If there is no built-in keyboard device, success is returned anyway.
6663 *
6664 * @returns 0 on Mac OS X hosts, VERR_NOT_IMPLEMENTED on all other platforms.
6665 */
6666static int supdrvIOCtl_ResumeSuspendedKbds(void)
6667{
6668#if defined(RT_OS_DARWIN)
6669 return supdrvDarwinResumeSuspendedKbds();
6670#else
6671 return VERR_NOT_IMPLEMENTED;
6672#endif
6673}
6674