VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp@78351

Last change on this file since 78351 was 77727, checked in by vboxsync, 6 years ago

IPRT,HostDrivers: Fixed some warnings.

1/* $Id: SUPDrv.cpp 77727 2019-03-15 14:14:18Z vboxsync $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_SUP_DRV
32#define SUPDRV_AGNOSTIC
33#include "SUPDrvInternal.h"
34#ifndef PAGE_SHIFT
35# include <iprt/param.h>
36#endif
37#include <iprt/asm.h>
38#include <iprt/asm-amd64-x86.h>
39#include <iprt/asm-math.h>
40#include <iprt/cpuset.h>
41#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
42# include <iprt/dbg.h>
43#endif
44#include <iprt/handletable.h>
45#include <iprt/mem.h>
46#include <iprt/mp.h>
47#include <iprt/power.h>
48#include <iprt/process.h>
49#include <iprt/semaphore.h>
50#include <iprt/spinlock.h>
51#include <iprt/thread.h>
52#include <iprt/uuid.h>
53#include <iprt/net.h>
54#include <iprt/crc.h>
55#include <iprt/string.h>
56#include <iprt/timer.h>
57#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
58# include <iprt/rand.h>
59# include <iprt/path.h>
60#endif
61#include <iprt/uint128.h>
62#include <iprt/x86.h>
63
64#include <VBox/param.h>
65#include <VBox/log.h>
66#include <VBox/err.h>
67#include <VBox/vmm/hm_vmx.h>
68
69#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
70# include "dtrace/SUPDrv.h"
71#else
72# define VBOXDRV_SESSION_CREATE(pvSession, fUser) do { } while (0)
73# define VBOXDRV_SESSION_CLOSE(pvSession) do { } while (0)
74# define VBOXDRV_IOCTL_ENTRY(pvSession, uIOCtl, pvReqHdr) do { } while (0)
75# define VBOXDRV_IOCTL_RETURN(pvSession, uIOCtl, pvReqHdr, rcRet, rcReq) do { } while (0)
76#endif
77
78/*
79 * Logging assignments:
80 * Log - useful stuff, like failures.
81 * LogFlow - program flow, except the really noisy bits.
82 * Log2 - Cleanup.
83 * Log3 - Loader flow noise.
84 * Log4 - Call VMMR0 flow noise.
85 * Log5 - Native yet-to-be-defined noise.
86 * Log6 - Native ioctl flow noise.
87 *
88 * Logging requires BUILD_TYPE=debug and possibly changes to the logger
89 * instantiation in log-vbox.c(pp).
90 */
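/*
 * Editor's illustration (not part of the original file) of how the assignments
 * above map to calls; supdrvSomething is a hypothetical caller:
 *     Log(("supdrvSomething: allocation failed, rc=%Rrc\n", rc));      - failures
 *     LogFlow(("supdrvSomething: pSession=%p\n", pSession));           - program flow
 *     Log2(("supdrvSomething: freeing image list\n"));                 - cleanup
 *     Log4(("supdrvSomething: calling into VMMR0\n"));                 - VMMR0 call flow noise
 */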
91
92
93/*********************************************************************************************************************************
94* Defined Constants And Macros *
95*********************************************************************************************************************************/
96/** @def VBOX_SVN_REV
97 * The makefile should define this if it can. */
98#ifndef VBOX_SVN_REV
99# define VBOX_SVN_REV 0
100#endif
101
102/** @def SUPDRV_CHECK_SMAP_SETUP
103 * SMAP check setup. */
104/** @def SUPDRV_CHECK_SMAP_CHECK
105 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
106 * will be logged and @a a_BadExpr is executed. */
107#if defined(RT_OS_DARWIN) || defined(RT_OS_LINUX)
108# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
109# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) \
110 do { \
111 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
112 { \
113 RTCCUINTREG fEfl = ASMGetFlags(); \
114 if (RT_LIKELY(fEfl & X86_EFL_AC)) \
115 { /* likely */ } \
116 else \
117 { \
118 supdrvBadContext(a_pDevExt, "SUPDrv.cpp", __LINE__, "EFLAGS.AC is 0!"); \
119 a_BadExpr; \
120 } \
121 } \
122 } while (0)
123#else
124# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
125# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) NOREF(fKernelFeatures)
126#endif
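/*
 * Editor's usage sketch (not part of the original source): a ring-0 entry point
 * pairs the two macros above, re-checking after calls that may clear EFLAGS.AC.
 * supdrvExampleWorker and supdrvDoSomething are hypothetical names.
 *
 *     static int supdrvExampleWorker(PSUPDRVDEVEXT pDevExt)
 *     {
 *         SUPDRV_CHECK_SMAP_SETUP();
 *         SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
 *         int rc = supdrvDoSomething(pDevExt);
 *         SUPDRV_CHECK_SMAP_CHECK(pDevExt, rc = VERR_INVALID_CONTEXT);
 *         return rc;
 *     }
 */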
127
128
129/*********************************************************************************************************************************
130* Internal Functions *
131*********************************************************************************************************************************/
132static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
133static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
134static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
135static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
136static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
137static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
138static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
139static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt);
140static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
141static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
142static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
143static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
144static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
145static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
146DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt);
147DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt);
148static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
149static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq);
150static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq);
151static int supdrvIOCtl_ResumeSuspendedKbds(void);
152
153
154/*********************************************************************************************************************************
155* Global Variables *
156*********************************************************************************************************************************/
157/**
158 * Array of the R0 SUP API.
159 *
160 * While making changes to these exports, make sure to update the IOC
161 * minor version (SUPDRV_IOC_VERSION).
162 *
163 * @remarks This array is processed by SUPR0-def-pe.sed and SUPR0-def-lx.sed to
164 * produce definition files from which import libraries are generated.
165 * Take care when commenting things and especially with \#ifdef'ing.
166 */
167static SUPFUNC g_aFunctions[] =
168{
169/* SED: START */
170 /* name function */
171 /* Entries with absolute addresses determined at runtime, fixup
172 code makes ugly ASSUMPTIONS about the order here: */
173 { "SUPR0AbsIs64bit", (void *)0 },
174 { "SUPR0Abs64bitKernelCS", (void *)0 },
175 { "SUPR0Abs64bitKernelSS", (void *)0 },
176 { "SUPR0Abs64bitKernelDS", (void *)0 },
177 { "SUPR0AbsKernelCS", (void *)0 },
178 { "SUPR0AbsKernelSS", (void *)0 },
179 { "SUPR0AbsKernelDS", (void *)0 },
180 { "SUPR0AbsKernelES", (void *)0 },
181 { "SUPR0AbsKernelFS", (void *)0 },
182 { "SUPR0AbsKernelGS", (void *)0 },
183 /* Normal function pointers: */
184 { "g_pSUPGlobalInfoPage", (void *)&g_pSUPGlobalInfoPage }, /* SED: DATA */
185 { "SUPGetGIP", (void *)(uintptr_t)SUPGetGIP },
186 { "SUPReadTscWithDelta", (void *)(uintptr_t)SUPReadTscWithDelta },
187 { "SUPGetTscDeltaSlow", (void *)(uintptr_t)SUPGetTscDeltaSlow },
188 { "SUPGetCpuHzFromGipForAsyncMode", (void *)(uintptr_t)SUPGetCpuHzFromGipForAsyncMode },
189 { "SUPIsTscFreqCompatible", (void *)(uintptr_t)SUPIsTscFreqCompatible },
190 { "SUPIsTscFreqCompatibleEx", (void *)(uintptr_t)SUPIsTscFreqCompatibleEx },
191 { "SUPR0BadContext", (void *)(uintptr_t)SUPR0BadContext },
192 { "SUPR0ComponentDeregisterFactory", (void *)(uintptr_t)SUPR0ComponentDeregisterFactory },
193 { "SUPR0ComponentQueryFactory", (void *)(uintptr_t)SUPR0ComponentQueryFactory },
194 { "SUPR0ComponentRegisterFactory", (void *)(uintptr_t)SUPR0ComponentRegisterFactory },
195 { "SUPR0ContAlloc", (void *)(uintptr_t)SUPR0ContAlloc },
196 { "SUPR0ContFree", (void *)(uintptr_t)SUPR0ContFree },
197 { "SUPR0ChangeCR4", (void *)(uintptr_t)SUPR0ChangeCR4 },
198 { "SUPR0EnableVTx", (void *)(uintptr_t)SUPR0EnableVTx },
199 { "SUPR0SuspendVTxOnCpu", (void *)(uintptr_t)SUPR0SuspendVTxOnCpu },
200 { "SUPR0ResumeVTxOnCpu", (void *)(uintptr_t)SUPR0ResumeVTxOnCpu },
201 { "SUPR0GetCurrentGdtRw", (void *)(uintptr_t)SUPR0GetCurrentGdtRw },
202 { "SUPR0GetKernelFeatures", (void *)(uintptr_t)SUPR0GetKernelFeatures },
203 { "SUPR0GetHwvirtMsrs", (void *)(uintptr_t)SUPR0GetHwvirtMsrs },
204 { "SUPR0GetPagingMode", (void *)(uintptr_t)SUPR0GetPagingMode },
205 { "SUPR0GetSvmUsability", (void *)(uintptr_t)SUPR0GetSvmUsability },
206 { "SUPR0GetVTSupport", (void *)(uintptr_t)SUPR0GetVTSupport },
207 { "SUPR0GetVmxUsability", (void *)(uintptr_t)SUPR0GetVmxUsability },
208 { "SUPR0GetRawModeUsability", (void *)(uintptr_t)SUPR0GetRawModeUsability },
209 { "SUPR0LockMem", (void *)(uintptr_t)SUPR0LockMem },
210 { "SUPR0LowAlloc", (void *)(uintptr_t)SUPR0LowAlloc },
211 { "SUPR0LowFree", (void *)(uintptr_t)SUPR0LowFree },
212 { "SUPR0MemAlloc", (void *)(uintptr_t)SUPR0MemAlloc },
213 { "SUPR0MemFree", (void *)(uintptr_t)SUPR0MemFree },
214 { "SUPR0MemGetPhys", (void *)(uintptr_t)SUPR0MemGetPhys },
215 { "SUPR0ObjAddRef", (void *)(uintptr_t)SUPR0ObjAddRef },
216 { "SUPR0ObjAddRefEx", (void *)(uintptr_t)SUPR0ObjAddRefEx },
217 { "SUPR0ObjRegister", (void *)(uintptr_t)SUPR0ObjRegister },
218 { "SUPR0ObjRelease", (void *)(uintptr_t)SUPR0ObjRelease },
219 { "SUPR0ObjVerifyAccess", (void *)(uintptr_t)SUPR0ObjVerifyAccess },
220 { "SUPR0PageAllocEx", (void *)(uintptr_t)SUPR0PageAllocEx },
221 { "SUPR0PageFree", (void *)(uintptr_t)SUPR0PageFree },
222 { "SUPR0Printf", (void *)(uintptr_t)SUPR0Printf },
223 { "SUPR0GetSessionGVM", (void *)(uintptr_t)SUPR0GetSessionGVM },
224 { "SUPR0GetSessionVM", (void *)(uintptr_t)SUPR0GetSessionVM },
225 { "SUPR0SetSessionVM", (void *)(uintptr_t)SUPR0SetSessionVM },
226 { "SUPR0TscDeltaMeasureBySetIndex", (void *)(uintptr_t)SUPR0TscDeltaMeasureBySetIndex },
227 { "SUPR0TracerDeregisterDrv", (void *)(uintptr_t)SUPR0TracerDeregisterDrv },
228 { "SUPR0TracerDeregisterImpl", (void *)(uintptr_t)SUPR0TracerDeregisterImpl },
229 { "SUPR0TracerFireProbe", (void *)(uintptr_t)SUPR0TracerFireProbe },
230 { "SUPR0TracerRegisterDrv", (void *)(uintptr_t)SUPR0TracerRegisterDrv },
231 { "SUPR0TracerRegisterImpl", (void *)(uintptr_t)SUPR0TracerRegisterImpl },
232 { "SUPR0TracerRegisterModule", (void *)(uintptr_t)SUPR0TracerRegisterModule },
233 { "SUPR0TracerUmodProbeFire", (void *)(uintptr_t)SUPR0TracerUmodProbeFire },
234 { "SUPR0UnlockMem", (void *)(uintptr_t)SUPR0UnlockMem },
235#ifdef RT_OS_WINDOWS
236 { "SUPR0IoCtlSetupForHandle", (void *)(uintptr_t)SUPR0IoCtlSetupForHandle }, /* only-windows */
237 { "SUPR0IoCtlPerform", (void *)(uintptr_t)SUPR0IoCtlPerform }, /* only-windows */
238 { "SUPR0IoCtlCleanup", (void *)(uintptr_t)SUPR0IoCtlCleanup }, /* only-windows */
239#endif
240 { "SUPSemEventClose", (void *)(uintptr_t)SUPSemEventClose },
241 { "SUPSemEventCreate", (void *)(uintptr_t)SUPSemEventCreate },
242 { "SUPSemEventGetResolution", (void *)(uintptr_t)SUPSemEventGetResolution },
243 { "SUPSemEventMultiClose", (void *)(uintptr_t)SUPSemEventMultiClose },
244 { "SUPSemEventMultiCreate", (void *)(uintptr_t)SUPSemEventMultiCreate },
245 { "SUPSemEventMultiGetResolution", (void *)(uintptr_t)SUPSemEventMultiGetResolution },
246 { "SUPSemEventMultiReset", (void *)(uintptr_t)SUPSemEventMultiReset },
247 { "SUPSemEventMultiSignal", (void *)(uintptr_t)SUPSemEventMultiSignal },
248 { "SUPSemEventMultiWait", (void *)(uintptr_t)SUPSemEventMultiWait },
249 { "SUPSemEventMultiWaitNoResume", (void *)(uintptr_t)SUPSemEventMultiWaitNoResume },
250 { "SUPSemEventMultiWaitNsAbsIntr", (void *)(uintptr_t)SUPSemEventMultiWaitNsAbsIntr },
251 { "SUPSemEventMultiWaitNsRelIntr", (void *)(uintptr_t)SUPSemEventMultiWaitNsRelIntr },
252 { "SUPSemEventSignal", (void *)(uintptr_t)SUPSemEventSignal },
253 { "SUPSemEventWait", (void *)(uintptr_t)SUPSemEventWait },
254 { "SUPSemEventWaitNoResume", (void *)(uintptr_t)SUPSemEventWaitNoResume },
255 { "SUPSemEventWaitNsAbsIntr", (void *)(uintptr_t)SUPSemEventWaitNsAbsIntr },
256 { "SUPSemEventWaitNsRelIntr", (void *)(uintptr_t)SUPSemEventWaitNsRelIntr },
257
258 { "RTAssertAreQuiet", (void *)(uintptr_t)RTAssertAreQuiet },
259 { "RTAssertMayPanic", (void *)(uintptr_t)RTAssertMayPanic },
260 { "RTAssertMsg1", (void *)(uintptr_t)RTAssertMsg1 },
261 { "RTAssertMsg2AddV", (void *)(uintptr_t)RTAssertMsg2AddV },
262 { "RTAssertMsg2V", (void *)(uintptr_t)RTAssertMsg2V },
263 { "RTAssertSetMayPanic", (void *)(uintptr_t)RTAssertSetMayPanic },
264 { "RTAssertSetQuiet", (void *)(uintptr_t)RTAssertSetQuiet },
265 { "RTCrc32", (void *)(uintptr_t)RTCrc32 },
266 { "RTCrc32Finish", (void *)(uintptr_t)RTCrc32Finish },
267 { "RTCrc32Process", (void *)(uintptr_t)RTCrc32Process },
268 { "RTCrc32Start", (void *)(uintptr_t)RTCrc32Start },
269 { "RTErrConvertFromErrno", (void *)(uintptr_t)RTErrConvertFromErrno },
270 { "RTErrConvertToErrno", (void *)(uintptr_t)RTErrConvertToErrno },
271 { "RTHandleTableAllocWithCtx", (void *)(uintptr_t)RTHandleTableAllocWithCtx },
272 { "RTHandleTableCreate", (void *)(uintptr_t)RTHandleTableCreate },
273 { "RTHandleTableCreateEx", (void *)(uintptr_t)RTHandleTableCreateEx },
274 { "RTHandleTableDestroy", (void *)(uintptr_t)RTHandleTableDestroy },
275 { "RTHandleTableFreeWithCtx", (void *)(uintptr_t)RTHandleTableFreeWithCtx },
276 { "RTHandleTableLookupWithCtx", (void *)(uintptr_t)RTHandleTableLookupWithCtx },
277 { "RTLogDefaultInstance", (void *)(uintptr_t)RTLogDefaultInstance },
278 { "RTLogDefaultInstanceEx", (void *)(uintptr_t)RTLogDefaultInstanceEx },
279 { "RTLogGetDefaultInstance", (void *)(uintptr_t)RTLogGetDefaultInstance },
280 { "RTLogGetDefaultInstanceEx", (void *)(uintptr_t)RTLogGetDefaultInstanceEx },
281 { "SUPR0GetDefaultLogInstanceEx", (void *)(uintptr_t)SUPR0GetDefaultLogInstanceEx },
282 { "RTLogLoggerExV", (void *)(uintptr_t)RTLogLoggerExV },
283 { "RTLogPrintfV", (void *)(uintptr_t)RTLogPrintfV },
284 { "RTLogRelGetDefaultInstance", (void *)(uintptr_t)RTLogRelGetDefaultInstance },
285 { "RTLogRelGetDefaultInstanceEx", (void *)(uintptr_t)RTLogRelGetDefaultInstanceEx },
286 { "SUPR0GetDefaultLogRelInstanceEx", (void *)(uintptr_t)SUPR0GetDefaultLogRelInstanceEx },
287 { "RTLogSetDefaultInstanceThread", (void *)(uintptr_t)RTLogSetDefaultInstanceThread },
288 { "RTMemAllocExTag", (void *)(uintptr_t)RTMemAllocExTag },
289 { "RTMemAllocTag", (void *)(uintptr_t)RTMemAllocTag },
290 { "RTMemAllocVarTag", (void *)(uintptr_t)RTMemAllocVarTag },
291 { "RTMemAllocZTag", (void *)(uintptr_t)RTMemAllocZTag },
292 { "RTMemAllocZVarTag", (void *)(uintptr_t)RTMemAllocZVarTag },
293 { "RTMemDupExTag", (void *)(uintptr_t)RTMemDupExTag },
294 { "RTMemDupTag", (void *)(uintptr_t)RTMemDupTag },
295 { "RTMemFree", (void *)(uintptr_t)RTMemFree },
296 { "RTMemFreeEx", (void *)(uintptr_t)RTMemFreeEx },
297 { "RTMemReallocTag", (void *)(uintptr_t)RTMemReallocTag },
298 { "RTMpCpuId", (void *)(uintptr_t)RTMpCpuId },
299 { "RTMpCpuIdFromSetIndex", (void *)(uintptr_t)RTMpCpuIdFromSetIndex },
300 { "RTMpCpuIdToSetIndex", (void *)(uintptr_t)RTMpCpuIdToSetIndex },
301 { "RTMpCurSetIndex", (void *)(uintptr_t)RTMpCurSetIndex },
302 { "RTMpCurSetIndexAndId", (void *)(uintptr_t)RTMpCurSetIndexAndId },
303 { "RTMpGetArraySize", (void *)(uintptr_t)RTMpGetArraySize },
304 { "RTMpGetCount", (void *)(uintptr_t)RTMpGetCount },
305 { "RTMpGetMaxCpuId", (void *)(uintptr_t)RTMpGetMaxCpuId },
306 { "RTMpGetOnlineCount", (void *)(uintptr_t)RTMpGetOnlineCount },
307 { "RTMpGetOnlineSet", (void *)(uintptr_t)RTMpGetOnlineSet },
308 { "RTMpGetSet", (void *)(uintptr_t)RTMpGetSet },
309 { "RTMpIsCpuOnline", (void *)(uintptr_t)RTMpIsCpuOnline },
310 { "RTMpIsCpuPossible", (void *)(uintptr_t)RTMpIsCpuPossible },
311 { "RTMpIsCpuWorkPending", (void *)(uintptr_t)RTMpIsCpuWorkPending },
312 { "RTMpNotificationDeregister", (void *)(uintptr_t)RTMpNotificationDeregister },
313 { "RTMpNotificationRegister", (void *)(uintptr_t)RTMpNotificationRegister },
314 { "RTMpOnAll", (void *)(uintptr_t)RTMpOnAll },
315 { "RTMpOnOthers", (void *)(uintptr_t)RTMpOnOthers },
316 { "RTMpOnSpecific", (void *)(uintptr_t)RTMpOnSpecific },
317 { "RTMpPokeCpu", (void *)(uintptr_t)RTMpPokeCpu },
318 { "RTNetIPv4AddDataChecksum", (void *)(uintptr_t)RTNetIPv4AddDataChecksum },
319 { "RTNetIPv4AddTCPChecksum", (void *)(uintptr_t)RTNetIPv4AddTCPChecksum },
320 { "RTNetIPv4AddUDPChecksum", (void *)(uintptr_t)RTNetIPv4AddUDPChecksum },
321 { "RTNetIPv4FinalizeChecksum", (void *)(uintptr_t)RTNetIPv4FinalizeChecksum },
322 { "RTNetIPv4HdrChecksum", (void *)(uintptr_t)RTNetIPv4HdrChecksum },
323 { "RTNetIPv4IsDHCPValid", (void *)(uintptr_t)RTNetIPv4IsDHCPValid },
324 { "RTNetIPv4IsHdrValid", (void *)(uintptr_t)RTNetIPv4IsHdrValid },
325 { "RTNetIPv4IsTCPSizeValid", (void *)(uintptr_t)RTNetIPv4IsTCPSizeValid },
326 { "RTNetIPv4IsTCPValid", (void *)(uintptr_t)RTNetIPv4IsTCPValid },
327 { "RTNetIPv4IsUDPSizeValid", (void *)(uintptr_t)RTNetIPv4IsUDPSizeValid },
328 { "RTNetIPv4IsUDPValid", (void *)(uintptr_t)RTNetIPv4IsUDPValid },
329 { "RTNetIPv4PseudoChecksum", (void *)(uintptr_t)RTNetIPv4PseudoChecksum },
330 { "RTNetIPv4PseudoChecksumBits", (void *)(uintptr_t)RTNetIPv4PseudoChecksumBits },
331 { "RTNetIPv4TCPChecksum", (void *)(uintptr_t)RTNetIPv4TCPChecksum },
332 { "RTNetIPv4UDPChecksum", (void *)(uintptr_t)RTNetIPv4UDPChecksum },
333 { "RTNetIPv6PseudoChecksum", (void *)(uintptr_t)RTNetIPv6PseudoChecksum },
334 { "RTNetIPv6PseudoChecksumBits", (void *)(uintptr_t)RTNetIPv6PseudoChecksumBits },
335 { "RTNetIPv6PseudoChecksumEx", (void *)(uintptr_t)RTNetIPv6PseudoChecksumEx },
336 { "RTNetTCPChecksum", (void *)(uintptr_t)RTNetTCPChecksum },
337 { "RTNetUDPChecksum", (void *)(uintptr_t)RTNetUDPChecksum },
338 { "RTPowerNotificationDeregister", (void *)(uintptr_t)RTPowerNotificationDeregister },
339 { "RTPowerNotificationRegister", (void *)(uintptr_t)RTPowerNotificationRegister },
340 { "RTProcSelf", (void *)(uintptr_t)RTProcSelf },
341 { "RTR0AssertPanicSystem", (void *)(uintptr_t)RTR0AssertPanicSystem },
342#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
343 { "RTR0DbgKrnlInfoOpen", (void *)(uintptr_t)RTR0DbgKrnlInfoOpen }, /* only-darwin, only-solaris, only-windows */
344 { "RTR0DbgKrnlInfoQueryMember", (void *)(uintptr_t)RTR0DbgKrnlInfoQueryMember }, /* only-darwin, only-solaris, only-windows */
345# if defined(RT_OS_SOLARIS)
346 { "RTR0DbgKrnlInfoQuerySize", (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySize }, /* only-solaris */
347# endif
348 { "RTR0DbgKrnlInfoQuerySymbol", (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySymbol }, /* only-darwin, only-solaris, only-windows */
349 { "RTR0DbgKrnlInfoRelease", (void *)(uintptr_t)RTR0DbgKrnlInfoRelease }, /* only-darwin, only-solaris, only-windows */
350 { "RTR0DbgKrnlInfoRetain", (void *)(uintptr_t)RTR0DbgKrnlInfoRetain }, /* only-darwin, only-solaris, only-windows */
351#endif
352 { "RTR0MemAreKrnlAndUsrDifferent", (void *)(uintptr_t)RTR0MemAreKrnlAndUsrDifferent },
353 { "RTR0MemKernelIsValidAddr", (void *)(uintptr_t)RTR0MemKernelIsValidAddr },
354 { "RTR0MemKernelCopyFrom", (void *)(uintptr_t)RTR0MemKernelCopyFrom },
355 { "RTR0MemKernelCopyTo", (void *)(uintptr_t)RTR0MemKernelCopyTo },
356 { "RTR0MemObjAddress", (void *)(uintptr_t)RTR0MemObjAddress },
357 { "RTR0MemObjAddressR3", (void *)(uintptr_t)RTR0MemObjAddressR3 },
358 { "RTR0MemObjAllocContTag", (void *)(uintptr_t)RTR0MemObjAllocContTag },
359 { "RTR0MemObjAllocLowTag", (void *)(uintptr_t)RTR0MemObjAllocLowTag },
360 { "RTR0MemObjAllocPageTag", (void *)(uintptr_t)RTR0MemObjAllocPageTag },
361 { "RTR0MemObjAllocPhysExTag", (void *)(uintptr_t)RTR0MemObjAllocPhysExTag },
362 { "RTR0MemObjAllocPhysNCTag", (void *)(uintptr_t)RTR0MemObjAllocPhysNCTag },
363 { "RTR0MemObjAllocPhysTag", (void *)(uintptr_t)RTR0MemObjAllocPhysTag },
364 { "RTR0MemObjEnterPhysTag", (void *)(uintptr_t)RTR0MemObjEnterPhysTag },
365 { "RTR0MemObjFree", (void *)(uintptr_t)RTR0MemObjFree },
366 { "RTR0MemObjGetPagePhysAddr", (void *)(uintptr_t)RTR0MemObjGetPagePhysAddr },
367 { "RTR0MemObjIsMapping", (void *)(uintptr_t)RTR0MemObjIsMapping },
368 { "RTR0MemObjLockUserTag", (void *)(uintptr_t)RTR0MemObjLockUserTag },
369 { "RTR0MemObjMapKernelExTag", (void *)(uintptr_t)RTR0MemObjMapKernelExTag },
370 { "RTR0MemObjMapKernelTag", (void *)(uintptr_t)RTR0MemObjMapKernelTag },
371 { "RTR0MemObjMapUserTag", (void *)(uintptr_t)RTR0MemObjMapUserTag },
372 { "RTR0MemObjProtect", (void *)(uintptr_t)RTR0MemObjProtect },
373 { "RTR0MemObjSize", (void *)(uintptr_t)RTR0MemObjSize },
374 { "RTR0MemUserCopyFrom", (void *)(uintptr_t)RTR0MemUserCopyFrom },
375 { "RTR0MemUserCopyTo", (void *)(uintptr_t)RTR0MemUserCopyTo },
376 { "RTR0MemUserIsValidAddr", (void *)(uintptr_t)RTR0MemUserIsValidAddr },
377 { "RTR0ProcHandleSelf", (void *)(uintptr_t)RTR0ProcHandleSelf },
378 { "RTSemEventCreate", (void *)(uintptr_t)RTSemEventCreate },
379 { "RTSemEventDestroy", (void *)(uintptr_t)RTSemEventDestroy },
380 { "RTSemEventGetResolution", (void *)(uintptr_t)RTSemEventGetResolution },
381 { "RTSemEventMultiCreate", (void *)(uintptr_t)RTSemEventMultiCreate },
382 { "RTSemEventMultiDestroy", (void *)(uintptr_t)RTSemEventMultiDestroy },
383 { "RTSemEventMultiGetResolution", (void *)(uintptr_t)RTSemEventMultiGetResolution },
384 { "RTSemEventMultiReset", (void *)(uintptr_t)RTSemEventMultiReset },
385 { "RTSemEventMultiSignal", (void *)(uintptr_t)RTSemEventMultiSignal },
386 { "RTSemEventMultiWait", (void *)(uintptr_t)RTSemEventMultiWait },
387 { "RTSemEventMultiWaitEx", (void *)(uintptr_t)RTSemEventMultiWaitEx },
388 { "RTSemEventMultiWaitExDebug", (void *)(uintptr_t)RTSemEventMultiWaitExDebug },
389 { "RTSemEventMultiWaitNoResume", (void *)(uintptr_t)RTSemEventMultiWaitNoResume },
390 { "RTSemEventSignal", (void *)(uintptr_t)RTSemEventSignal },
391 { "RTSemEventWait", (void *)(uintptr_t)RTSemEventWait },
392 { "RTSemEventWaitEx", (void *)(uintptr_t)RTSemEventWaitEx },
393 { "RTSemEventWaitExDebug", (void *)(uintptr_t)RTSemEventWaitExDebug },
394 { "RTSemEventWaitNoResume", (void *)(uintptr_t)RTSemEventWaitNoResume },
395 { "RTSemFastMutexCreate", (void *)(uintptr_t)RTSemFastMutexCreate },
396 { "RTSemFastMutexDestroy", (void *)(uintptr_t)RTSemFastMutexDestroy },
397 { "RTSemFastMutexRelease", (void *)(uintptr_t)RTSemFastMutexRelease },
398 { "RTSemFastMutexRequest", (void *)(uintptr_t)RTSemFastMutexRequest },
399 { "RTSemMutexCreate", (void *)(uintptr_t)RTSemMutexCreate },
400 { "RTSemMutexDestroy", (void *)(uintptr_t)RTSemMutexDestroy },
401 { "RTSemMutexRelease", (void *)(uintptr_t)RTSemMutexRelease },
402 { "RTSemMutexRequest", (void *)(uintptr_t)RTSemMutexRequest },
403 { "RTSemMutexRequestDebug", (void *)(uintptr_t)RTSemMutexRequestDebug },
404 { "RTSemMutexRequestNoResume", (void *)(uintptr_t)RTSemMutexRequestNoResume },
405 { "RTSemMutexRequestNoResumeDebug", (void *)(uintptr_t)RTSemMutexRequestNoResumeDebug },
406 { "RTSpinlockAcquire", (void *)(uintptr_t)RTSpinlockAcquire },
407 { "RTSpinlockCreate", (void *)(uintptr_t)RTSpinlockCreate },
408 { "RTSpinlockDestroy", (void *)(uintptr_t)RTSpinlockDestroy },
409 { "RTSpinlockRelease", (void *)(uintptr_t)RTSpinlockRelease },
410 { "RTStrCopy", (void *)(uintptr_t)RTStrCopy },
411 { "RTStrDupTag", (void *)(uintptr_t)RTStrDupTag },
412 { "RTStrFormat", (void *)(uintptr_t)RTStrFormat },
413 { "RTStrFormatNumber", (void *)(uintptr_t)RTStrFormatNumber },
414 { "RTStrFormatTypeDeregister", (void *)(uintptr_t)RTStrFormatTypeDeregister },
415 { "RTStrFormatTypeRegister", (void *)(uintptr_t)RTStrFormatTypeRegister },
416 { "RTStrFormatTypeSetUser", (void *)(uintptr_t)RTStrFormatTypeSetUser },
417 { "RTStrFormatV", (void *)(uintptr_t)RTStrFormatV },
418 { "RTStrFree", (void *)(uintptr_t)RTStrFree },
419 { "RTStrNCmp", (void *)(uintptr_t)RTStrNCmp },
420 { "RTStrPrintf", (void *)(uintptr_t)RTStrPrintf },
421 { "RTStrPrintfEx", (void *)(uintptr_t)RTStrPrintfEx },
422 { "RTStrPrintfExV", (void *)(uintptr_t)RTStrPrintfExV },
423 { "RTStrPrintfV", (void *)(uintptr_t)RTStrPrintfV },
424 { "RTThreadCreate", (void *)(uintptr_t)RTThreadCreate },
425 { "RTThreadCtxHookIsEnabled", (void *)(uintptr_t)RTThreadCtxHookIsEnabled },
426 { "RTThreadCtxHookCreate", (void *)(uintptr_t)RTThreadCtxHookCreate },
427 { "RTThreadCtxHookDestroy", (void *)(uintptr_t)RTThreadCtxHookDestroy },
428 { "RTThreadCtxHookDisable", (void *)(uintptr_t)RTThreadCtxHookDisable },
429 { "RTThreadCtxHookEnable", (void *)(uintptr_t)RTThreadCtxHookEnable },
430 { "RTThreadGetName", (void *)(uintptr_t)RTThreadGetName },
431 { "RTThreadGetNative", (void *)(uintptr_t)RTThreadGetNative },
432 { "RTThreadGetType", (void *)(uintptr_t)RTThreadGetType },
433 { "RTThreadIsInInterrupt", (void *)(uintptr_t)RTThreadIsInInterrupt },
434 { "RTThreadNativeSelf", (void *)(uintptr_t)RTThreadNativeSelf },
435 { "RTThreadPreemptDisable", (void *)(uintptr_t)RTThreadPreemptDisable },
436 { "RTThreadPreemptIsEnabled", (void *)(uintptr_t)RTThreadPreemptIsEnabled },
437 { "RTThreadPreemptIsPending", (void *)(uintptr_t)RTThreadPreemptIsPending },
438 { "RTThreadPreemptIsPendingTrusty", (void *)(uintptr_t)RTThreadPreemptIsPendingTrusty },
439 { "RTThreadPreemptIsPossible", (void *)(uintptr_t)RTThreadPreemptIsPossible },
440 { "RTThreadPreemptRestore", (void *)(uintptr_t)RTThreadPreemptRestore },
441 { "RTThreadSelf", (void *)(uintptr_t)RTThreadSelf },
442 { "RTThreadSelfName", (void *)(uintptr_t)RTThreadSelfName },
443 { "RTThreadSleep", (void *)(uintptr_t)RTThreadSleep },
444 { "RTThreadUserReset", (void *)(uintptr_t)RTThreadUserReset },
445 { "RTThreadUserSignal", (void *)(uintptr_t)RTThreadUserSignal },
446 { "RTThreadUserWait", (void *)(uintptr_t)RTThreadUserWait },
447 { "RTThreadUserWaitNoResume", (void *)(uintptr_t)RTThreadUserWaitNoResume },
448 { "RTThreadWait", (void *)(uintptr_t)RTThreadWait },
449 { "RTThreadWaitNoResume", (void *)(uintptr_t)RTThreadWaitNoResume },
450 { "RTThreadYield", (void *)(uintptr_t)RTThreadYield },
451 { "RTTimeMilliTS", (void *)(uintptr_t)RTTimeMilliTS },
452 { "RTTimeNanoTS", (void *)(uintptr_t)RTTimeNanoTS },
453 { "RTTimeNow", (void *)(uintptr_t)RTTimeNow },
454 { "RTTimerCanDoHighResolution", (void *)(uintptr_t)RTTimerCanDoHighResolution },
455 { "RTTimerChangeInterval", (void *)(uintptr_t)RTTimerChangeInterval },
456 { "RTTimerCreate", (void *)(uintptr_t)RTTimerCreate },
457 { "RTTimerCreateEx", (void *)(uintptr_t)RTTimerCreateEx },
458 { "RTTimerDestroy", (void *)(uintptr_t)RTTimerDestroy },
459 { "RTTimerGetSystemGranularity", (void *)(uintptr_t)RTTimerGetSystemGranularity },
460 { "RTTimerReleaseSystemGranularity", (void *)(uintptr_t)RTTimerReleaseSystemGranularity },
461 { "RTTimerRequestSystemGranularity", (void *)(uintptr_t)RTTimerRequestSystemGranularity },
462 { "RTTimerStart", (void *)(uintptr_t)RTTimerStart },
463 { "RTTimerStop", (void *)(uintptr_t)RTTimerStop },
464 { "RTTimeSystemMilliTS", (void *)(uintptr_t)RTTimeSystemMilliTS },
465 { "RTTimeSystemNanoTS", (void *)(uintptr_t)RTTimeSystemNanoTS },
466 { "RTUuidCompare", (void *)(uintptr_t)RTUuidCompare },
467 { "RTUuidCompareStr", (void *)(uintptr_t)RTUuidCompareStr },
468 { "RTUuidFromStr", (void *)(uintptr_t)RTUuidFromStr },
469/* SED: END */
470};
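/*
 * Editor's note (illustrative, not part of the original source): resolving one
 * of these exports for a loaded ring-0 image boils down to a linear scan of the
 * table, roughly as sketched below; the SUPFUNC member names szName and pfn are
 * assumed here.
 *
 *     static void *supdrvExampleLookupExport(const char *pszSymbol)
 *     {
 *         for (size_t i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
 *             if (!strcmp(g_aFunctions[i].szName, pszSymbol))
 *                 return (void *)(uintptr_t)g_aFunctions[i].pfn;
 *         return NULL;
 *     }
 */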
471
472#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
473/**
474 * Drag in the rest of IPRT since we share it with the
475 * rest of the kernel modules on darwin.
476 */
477PFNRT g_apfnVBoxDrvIPRTDeps[] =
478{
479 /* VBoxNetAdp */
480 (PFNRT)RTRandBytes,
481 /* VBoxUSB */
482 (PFNRT)RTPathStripFilename,
483#if !defined(RT_OS_FREEBSD)
484 (PFNRT)RTHandleTableAlloc,
485 (PFNRT)RTStrPurgeEncoding,
486#endif
487 NULL
488};
489#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
490
491/** Hardware-virtualization MSRs. */
492static SUPHWVIRTMSRS g_HwvirtMsrs;
493/** Whether the hardware-virtualization MSRs are cached. */
494static bool g_fHwvirtMsrsCached;
495
496
497/**
498 * Initializes the device extension structure.
499 *
500 * @returns IPRT status code.
501 * @param pDevExt The device extension to initialize.
502 * @param cbSession The size of the session structure. The size of
503 * SUPDRVSESSION may be smaller when SUPDRV_AGNOSTIC is
504 * defined because we're skipping the OS specific members
505 * then.
506 */
507int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt, size_t cbSession)
508{
509 int rc;
510
511#ifdef SUPDRV_WITH_RELEASE_LOGGER
512 /*
513 * Create the release log.
514 */
515 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
516 PRTLOGGER pRelLogger;
517 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
518 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
519 if (RT_SUCCESS(rc))
520 RTLogRelSetDefaultInstance(pRelLogger);
521 /** @todo Add native hook for getting logger config parameters and setting
522 * them. On linux we should use the module parameter stuff... */
523#endif
524
525#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
526 /*
527 * Require SSE2 to be present.
528 */
529 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
530 {
531 SUPR0Printf("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1));
532 return VERR_UNSUPPORTED_CPU;
533 }
534#endif
535
536 /*
537 * Initialize it.
538 */
539 memset(pDevExt, 0, sizeof(*pDevExt)); /* Does not wipe OS specific tail section of the structure. */
540 pDevExt->Spinlock = NIL_RTSPINLOCK;
541 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
542 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
543#ifdef SUPDRV_USE_MUTEX_FOR_LDR
544 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
545#else
546 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
547#endif
548#ifdef SUPDRV_USE_MUTEX_FOR_GIP
549 pDevExt->mtxGip = NIL_RTSEMMUTEX;
550 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
551#else
552 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
553 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
554#endif
555
556 rc = RTSpinlockCreate(&pDevExt->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvDevExt");
557 if (RT_SUCCESS(rc))
558 rc = RTSpinlockCreate(&pDevExt->hGipSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvGip");
559 if (RT_SUCCESS(rc))
560 rc = RTSpinlockCreate(&pDevExt->hSessionHashTabSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvSession");
561
562 if (RT_SUCCESS(rc))
563#ifdef SUPDRV_USE_MUTEX_FOR_LDR
564 rc = RTSemMutexCreate(&pDevExt->mtxLdr);
565#else
566 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
567#endif
568 if (RT_SUCCESS(rc))
569#ifdef SUPDRV_USE_MUTEX_FOR_GIP
570 rc = RTSemMutexCreate(&pDevExt->mtxTscDelta);
571#else
572 rc = RTSemFastMutexCreate(&pDevExt->mtxTscDelta);
573#endif
574 if (RT_SUCCESS(rc))
575 {
576 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
577 if (RT_SUCCESS(rc))
578 {
579#ifdef SUPDRV_USE_MUTEX_FOR_GIP
580 rc = RTSemMutexCreate(&pDevExt->mtxGip);
581#else
582 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
583#endif
584 if (RT_SUCCESS(rc))
585 {
586 rc = supdrvGipCreate(pDevExt);
587 if (RT_SUCCESS(rc))
588 {
589 rc = supdrvTracerInit(pDevExt);
590 if (RT_SUCCESS(rc))
591 {
592 pDevExt->pLdrInitImage = NULL;
593 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
594 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
595 pDevExt->cbSession = (uint32_t)cbSession;
596
597 /*
598 * Fixup the absolute symbols.
599 *
600 * Because of the table indexing assumptions we'll have a little #ifdef orgy
601 * here rather than distributing this to OS specific files. At least for now.
602 */
603#ifdef RT_OS_DARWIN
604# if ARCH_BITS == 32
605 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
606 {
607 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
608 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
609 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
610 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
611 }
612 else
613 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
614 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
615 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
616 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
617 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
618 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
619 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
620# else /* 64-bit darwin: */
621 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
622 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
623 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
624 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
625 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
626 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
627 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
628 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
629 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
630 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
631
632# endif
633#else /* !RT_OS_DARWIN */
634# if ARCH_BITS == 64
635 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
636 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
637 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
638 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
639# else
640 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
641# endif
642 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
643 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
644 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
645 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
646 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
647 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
648#endif /* !RT_OS_DARWIN */
649 return VINF_SUCCESS;
650 }
651
652 supdrvGipDestroy(pDevExt);
653 }
654
655#ifdef SUPDRV_USE_MUTEX_FOR_GIP
656 RTSemMutexDestroy(pDevExt->mtxGip);
657 pDevExt->mtxGip = NIL_RTSEMMUTEX;
658#else
659 RTSemFastMutexDestroy(pDevExt->mtxGip);
660 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
661#endif
662 }
663 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
664 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
665 }
666 }
667
668#ifdef SUPDRV_USE_MUTEX_FOR_GIP
669 RTSemMutexDestroy(pDevExt->mtxTscDelta);
670 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
671#else
672 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
673 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
674#endif
675#ifdef SUPDRV_USE_MUTEX_FOR_LDR
676 RTSemMutexDestroy(pDevExt->mtxLdr);
677 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
678#else
679 RTSemFastMutexDestroy(pDevExt->mtxLdr);
680 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
681#endif
682 RTSpinlockDestroy(pDevExt->Spinlock);
683 pDevExt->Spinlock = NIL_RTSPINLOCK;
684 RTSpinlockDestroy(pDevExt->hGipSpinlock);
685 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
686 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
687 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
688
689#ifdef SUPDRV_WITH_RELEASE_LOGGER
690 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
691 RTLogDestroy(RTLogSetDefaultInstance(NULL));
692#endif
693
694 return rc;
695}
696
697
698/**
699 * Delete the device extension (e.g. cleanup members).
700 *
701 * @param pDevExt The device extension to delete.
702 */
703void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
704{
705 PSUPDRVOBJ pObj;
706 PSUPDRVUSAGE pUsage;
707
708 /*
709 * Kill mutexes and spinlocks.
710 */
711#ifdef SUPDRV_USE_MUTEX_FOR_GIP
712 RTSemMutexDestroy(pDevExt->mtxGip);
713 pDevExt->mtxGip = NIL_RTSEMMUTEX;
714 RTSemMutexDestroy(pDevExt->mtxTscDelta);
715 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
716#else
717 RTSemFastMutexDestroy(pDevExt->mtxGip);
718 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
719 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
720 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
721#endif
722#ifdef SUPDRV_USE_MUTEX_FOR_LDR
723 RTSemMutexDestroy(pDevExt->mtxLdr);
724 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
725#else
726 RTSemFastMutexDestroy(pDevExt->mtxLdr);
727 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
728#endif
729 RTSpinlockDestroy(pDevExt->Spinlock);
730 pDevExt->Spinlock = NIL_RTSPINLOCK;
731 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
732 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
733 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
734 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
735
736 /*
737 * Free lists.
738 */
739 /* objects. */
740 pObj = pDevExt->pObjs;
741 Assert(!pObj); /* (can trigger on forced unloads) */
742 pDevExt->pObjs = NULL;
743 while (pObj)
744 {
745 void *pvFree = pObj;
746 pObj = pObj->pNext;
747 RTMemFree(pvFree);
748 }
749
750 /* usage records. */
751 pUsage = pDevExt->pUsageFree;
752 pDevExt->pUsageFree = NULL;
753 while (pUsage)
754 {
755 void *pvFree = pUsage;
756 pUsage = pUsage->pNext;
757 RTMemFree(pvFree);
758 }
759
760 /* kill the GIP. */
761 supdrvGipDestroy(pDevExt);
762 RTSpinlockDestroy(pDevExt->hGipSpinlock);
763 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
764
765 supdrvTracerTerm(pDevExt);
766
767#ifdef SUPDRV_WITH_RELEASE_LOGGER
768 /* destroy the loggers. */
769 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
770 RTLogDestroy(RTLogSetDefaultInstance(NULL));
771#endif
772}
773
774
775/**
776 * Create session.
777 *
778 * @returns IPRT status code.
779 * @param pDevExt Device extension.
780 * @param fUser Flag indicating whether this is a user or kernel
781 * session.
782 * @param fUnrestricted Unrestricted access (system) or restricted access
783 * (user)?
784 * @param ppSession Where to store the pointer to the session data.
785 */
786int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, bool fUnrestricted, PSUPDRVSESSION *ppSession)
787{
788 int rc;
789 PSUPDRVSESSION pSession;
790
791 if (!SUP_IS_DEVEXT_VALID(pDevExt))
792 return VERR_INVALID_PARAMETER;
793
794 /*
795 * Allocate memory for the session data.
796 */
797 pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(pDevExt->cbSession);
798 if (pSession)
799 {
800 /* Initialize session data. */
801 rc = RTSpinlockCreate(&pSession->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "SUPDrvSession");
802 if (!rc)
803 {
804 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
805 RTHANDLETABLE_FLAGS_LOCKED_IRQ_SAFE | RTHANDLETABLE_FLAGS_CONTEXT,
806 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
807 if (RT_SUCCESS(rc))
808 {
809 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
810 pSession->pDevExt = pDevExt;
811 pSession->u32Cookie = BIRD_INV;
812 pSession->fUnrestricted = fUnrestricted;
813 /*pSession->fInHashTable = false; */
814 pSession->cRefs = 1;
815 /*pSession->pCommonNextHash = NULL;
816 pSession->ppOsSessionPtr = NULL; */
817 if (fUser)
818 {
819 pSession->Process = RTProcSelf();
820 pSession->R0Process = RTR0ProcHandleSelf();
821 }
822 else
823 {
824 pSession->Process = NIL_RTPROCESS;
825 pSession->R0Process = NIL_RTR0PROCESS;
826 }
827 /*pSession->pLdrUsage = NULL;
828 pSession->pVM = NULL;
829 pSession->pUsage = NULL;
830 pSession->pGip = NULL;
831 pSession->fGipReferenced = false;
832 pSession->Bundle.cUsed = 0; */
833 pSession->Uid = NIL_RTUID;
834 pSession->Gid = NIL_RTGID;
835 /*pSession->uTracerData = 0;*/
836 pSession->hTracerCaller = NIL_RTNATIVETHREAD;
837 RTListInit(&pSession->TpProviders);
838 /*pSession->cTpProviders = 0;*/
839 /*pSession->cTpProbesFiring = 0;*/
840 RTListInit(&pSession->TpUmods);
841 /*RT_ZERO(pSession->apTpLookupTable);*/
842
843 VBOXDRV_SESSION_CREATE(pSession, fUser);
844 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
845 return VINF_SUCCESS;
846 }
847
848 RTSpinlockDestroy(pSession->Spinlock);
849 }
850 RTMemFree(pSession);
851 *ppSession = NULL;
852 Log(("Failed to create spinlock, rc=%d!\n", rc));
853 }
854 else
855 rc = VERR_NO_MEMORY;
856
857 return rc;
858}
859
860
861/**
862 * Cleans up the session in the context of the process to which it belongs; the
863 * caller will free the session and the session spinlock.
864 *
865 * This should normally occur when the session is closed or as the process
866 * exits. Careful reference counting in the OS specific code makes sure that
867 * there cannot be any races between process/handle cleanup callbacks and
868 * threads doing I/O control calls.
869 *
870 * @param pDevExt The device extension.
871 * @param pSession Session data.
872 */
873static void supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
874{
875 int rc;
876 PSUPDRVBUNDLE pBundle;
877 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
878
879 Assert(!pSession->fInHashTable);
880 Assert(!pSession->ppOsSessionPtr);
881 AssertLogRelMsg(pSession->R0Process == RTR0ProcHandleSelf() || pSession->R0Process == NIL_RTR0PROCESS,
882 ("R0Process=%p cur=%p; curpid=%u\n",
883 pSession->R0Process, RTR0ProcHandleSelf(), RTProcSelf()));
884
885 /*
886 * Remove logger instances related to this session.
887 */
888 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
889
890 /*
891 * Destroy the handle table.
892 */
893 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
894 AssertRC(rc);
895 pSession->hHandleTable = NIL_RTHANDLETABLE;
896
897 /*
898 * Release object references made in this session.
899 * In theory there should be no one racing us in this session.
900 */
901 Log2(("release objects - start\n"));
902 if (pSession->pUsage)
903 {
904 PSUPDRVUSAGE pUsage;
905 RTSpinlockAcquire(pDevExt->Spinlock);
906
907 while ((pUsage = pSession->pUsage) != NULL)
908 {
909 PSUPDRVOBJ pObj = pUsage->pObj;
910 pSession->pUsage = pUsage->pNext;
911
912 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
913 if (pUsage->cUsage < pObj->cUsage)
914 {
915 pObj->cUsage -= pUsage->cUsage;
916 RTSpinlockRelease(pDevExt->Spinlock);
917 }
918 else
919 {
920 /* Destroy the object and free the record. */
921 if (pDevExt->pObjs == pObj)
922 pDevExt->pObjs = pObj->pNext;
923 else
924 {
925 PSUPDRVOBJ pObjPrev;
926 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
927 if (pObjPrev->pNext == pObj)
928 {
929 pObjPrev->pNext = pObj->pNext;
930 break;
931 }
932 Assert(pObjPrev);
933 }
934 RTSpinlockRelease(pDevExt->Spinlock);
935
936 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
937 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
938 if (pObj->pfnDestructor)
939 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
940 RTMemFree(pObj);
941 }
942
943 /* free it and continue. */
944 RTMemFree(pUsage);
945
946 RTSpinlockAcquire(pDevExt->Spinlock);
947 }
948
949 RTSpinlockRelease(pDevExt->Spinlock);
950 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
951 }
952 Log2(("release objects - done\n"));
953
954 /*
955 * Make sure the associated VM pointers are NULL.
956 */
957 if (pSession->pSessionGVM || pSession->pSessionVM || pSession->pFastIoCtrlVM)
958 {
959 SUPR0Printf("supdrvCleanupSession: VM not disassociated! pSessionGVM=%p pSessionVM=%p pFastIoCtrlVM=%p\n",
960 pSession->pSessionGVM, pSession->pSessionVM, pSession->pFastIoCtrlVM);
961 pSession->pSessionGVM = NULL;
962 pSession->pSessionVM = NULL;
963 pSession->pFastIoCtrlVM = NULL;
964 }
965
966 /*
967 * Do tracer cleanups related to this session.
968 */
969 Log2(("release tracer stuff - start\n"));
970 supdrvTracerCleanupSession(pDevExt, pSession);
971 Log2(("release tracer stuff - end\n"));
972
973 /*
974 * Release memory allocated in the session.
975 *
976 * We do not serialize this as we assume that the application will
977 * not allocate memory while closing the file handle object.
978 */
979 Log2(("freeing memory:\n"));
980 pBundle = &pSession->Bundle;
981 while (pBundle)
982 {
983 PSUPDRVBUNDLE pToFree;
984 unsigned i;
985
986 /*
987 * Check and unlock all entries in the bundle.
988 */
989 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
990 {
991 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
992 {
993 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
994 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
995 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
996 {
997 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
998 AssertRC(rc); /** @todo figure out how to handle this. */
999 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
1000 }
1001 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
1002 AssertRC(rc); /** @todo figure out how to handle this. */
1003 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
1004 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
1005 }
1006 }
1007
1008 /*
1009 * Advance and free previous bundle.
1010 */
1011 pToFree = pBundle;
1012 pBundle = pBundle->pNext;
1013
1014 pToFree->pNext = NULL;
1015 pToFree->cUsed = 0;
1016 if (pToFree != &pSession->Bundle)
1017 RTMemFree(pToFree);
1018 }
1019 Log2(("freeing memory - done\n"));
1020
1021 /*
1022 * Deregister component factories.
1023 */
1024 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
1025 Log2(("deregistering component factories:\n"));
1026 if (pDevExt->pComponentFactoryHead)
1027 {
1028 PSUPDRVFACTORYREG pPrev = NULL;
1029 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
1030 while (pCur)
1031 {
1032 if (pCur->pSession == pSession)
1033 {
1034 /* unlink it */
1035 PSUPDRVFACTORYREG pNext = pCur->pNext;
1036 if (pPrev)
1037 pPrev->pNext = pNext;
1038 else
1039 pDevExt->pComponentFactoryHead = pNext;
1040
1041 /* free it */
1042 pCur->pNext = NULL;
1043 pCur->pSession = NULL;
1044 pCur->pFactory = NULL;
1045 RTMemFree(pCur);
1046
1047 /* next */
1048 pCur = pNext;
1049 }
1050 else
1051 {
1052 /* next */
1053 pPrev = pCur;
1054 pCur = pCur->pNext;
1055 }
1056 }
1057 }
1058 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
1059 Log2(("deregistering component factories - done\n"));
1060
1061 /*
1062 * Loaded images need to be dereferenced and possibly freed up.
1063 */
1064 supdrvLdrLock(pDevExt);
1065 Log2(("freeing images:\n"));
1066 if (pSession->pLdrUsage)
1067 {
1068 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
1069 pSession->pLdrUsage = NULL;
1070 while (pUsage)
1071 {
1072 void *pvFree = pUsage;
1073 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
1074 if (pImage->cUsage > pUsage->cUsage)
1075 pImage->cUsage -= pUsage->cUsage;
1076 else
1077 supdrvLdrFree(pDevExt, pImage);
1078 pUsage->pImage = NULL;
1079 pUsage = pUsage->pNext;
1080 RTMemFree(pvFree);
1081 }
1082 }
1083 supdrvLdrUnlock(pDevExt);
1084 Log2(("freeing images - done\n"));
1085
1086 /*
1087 * Unmap the GIP.
1088 */
1089 Log2(("unmapping GIP:\n"));
1090 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
1091 {
1092 SUPR0GipUnmap(pSession);
1093 pSession->fGipReferenced = 0;
1094 }
1095 Log2(("unmapping GIP - done\n"));
1096}
1097
1098
1099/**
1100 * Common code for freeing a session when the reference count reaches zero.
1101 *
1102 * @param pDevExt Device extension.
1103 * @param pSession Session data.
1104 * This data will be freed by this routine.
1105 */
1106static void supdrvDestroySession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1107{
1108 VBOXDRV_SESSION_CLOSE(pSession);
1109
1110 /*
1111 * Cleanup the session first.
1112 */
1113 supdrvCleanupSession(pDevExt, pSession);
1114 supdrvOSCleanupSession(pDevExt, pSession);
1115
1116 /*
1117 * Free the rest of the session stuff.
1118 */
1119 RTSpinlockDestroy(pSession->Spinlock);
1120 pSession->Spinlock = NIL_RTSPINLOCK;
1121 pSession->pDevExt = NULL;
1122 RTMemFree(pSession);
1123 LogFlow(("supdrvDestroySession: returns\n"));
1124}
1125
1126
1127/**
1128 * Inserts the session into the global hash table.
1129 *
1130 * @retval VINF_SUCCESS on success.
1131 * @retval VERR_WRONG_ORDER if the session was already inserted (asserted).
1132 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1133 * session (asserted).
1134 * @retval VERR_DUPLICATE if there is already a session for that pid.
1135 *
1136 * @param pDevExt The device extension.
1137 * @param pSession The session.
1138 * @param ppOsSessionPtr Pointer to the OS session pointer, if any is
1139 * available and used. This will set to point to the
1140 * session while under the protection of the session
1141 * hash table spinlock. It will also be kept in
1142 * PSUPDRVSESSION::ppOsSessionPtr for lookup and
1143 * cleanup use.
1144 * @param pvUser Argument for supdrvOSSessionHashTabInserted.
1145 */
1146int VBOXCALL supdrvSessionHashTabInsert(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVSESSION *ppOsSessionPtr,
1147 void *pvUser)
1148{
1149 PSUPDRVSESSION pCur;
1150 unsigned iHash;
1151
1152 /*
1153 * Validate input.
1154 */
1155 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1156 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1157
1158 /*
1159 * Calculate the hash table index and acquire the spinlock.
1160 */
1161 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1162
1163 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1164
1165 /*
1166 * If there is a collision, we need to carefully check whether we got a
1167 * duplicate. There can only be one open session per process.
1168 */
1169 pCur = pDevExt->apSessionHashTab[iHash];
1170 if (pCur)
1171 {
1172 while (pCur && pCur->Process != pSession->Process)
1173 pCur = pCur->pCommonNextHash;
1174
1175 if (pCur)
1176 {
1177 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1178 if (pCur == pSession)
1179 {
1180 Assert(pSession->fInHashTable);
1181 AssertFailed();
1182 return VERR_WRONG_ORDER;
1183 }
1184 Assert(!pSession->fInHashTable);
1185 if (pCur->R0Process == pSession->R0Process)
1186 return VERR_RESOURCE_IN_USE;
1187 return VERR_DUPLICATE;
1188 }
1189 }
1190 Assert(!pSession->fInHashTable);
1191 Assert(!pSession->ppOsSessionPtr);
1192
1193 /*
1194 * Insert it, doing a callout to the OS specific code in case it has
1195 * anything it wishes to do while we're holding the spinlock.
1196 */
1197 pSession->pCommonNextHash = pDevExt->apSessionHashTab[iHash];
1198 pDevExt->apSessionHashTab[iHash] = pSession;
1199 pSession->fInHashTable = true;
1200 ASMAtomicIncS32(&pDevExt->cSessions);
1201
1202 pSession->ppOsSessionPtr = ppOsSessionPtr;
1203 if (ppOsSessionPtr)
1204 ASMAtomicWritePtr(ppOsSessionPtr, pSession);
1205
1206 supdrvOSSessionHashTabInserted(pDevExt, pSession, pvUser);
1207
1208 /*
1209 * Retain a reference for the pointer in the session table.
1210 */
1211 ASMAtomicIncU32(&pSession->cRefs);
1212
1213 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1214 return VINF_SUCCESS;
1215}
1216
1217
1218/**
1219 * Removes the session from the global hash table.
1220 *
1221 * @retval VINF_SUCCESS on success.
1222 * @retval VERR_NOT_FOUND if the session was already removed (asserted).
1223 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1224 * session (asserted).
1225 *
1226 * @param pDevExt The device extension.
1227 * @param pSession The session. The caller is expected to have a reference
1228 * to this so it won't croak on us when we release the hash
1229 * table reference.
1230 * @param pvUser OS specific context value for the
1231 * supdrvOSSessionHashTabRemoved callback.
1232 */
1233int VBOXCALL supdrvSessionHashTabRemove(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, void *pvUser)
1234{
1235 PSUPDRVSESSION pCur;
1236 unsigned iHash;
1237 int32_t cRefs;
1238
1239 /*
1240 * Validate input.
1241 */
1242 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1243 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1244
1245 /*
1246 * Calculate the hash table index and acquire the spinlock.
1247 */
1248 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1249
1250 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1251
1252 /*
1253 * Unlink it.
1254 */
1255 pCur = pDevExt->apSessionHashTab[iHash];
1256 if (pCur == pSession)
1257 pDevExt->apSessionHashTab[iHash] = pSession->pCommonNextHash;
1258 else
1259 {
1260 PSUPDRVSESSION pPrev = pCur;
1261 while (pCur && pCur != pSession)
1262 {
1263 pPrev = pCur;
1264 pCur = pCur->pCommonNextHash;
1265 }
1266 if (pCur)
1267 pPrev->pCommonNextHash = pCur->pCommonNextHash;
1268 else
1269 {
1270 Assert(!pSession->fInHashTable);
1271 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1272 return VERR_NOT_FOUND;
1273 }
1274 }
1275
1276 pSession->pCommonNextHash = NULL;
1277 pSession->fInHashTable = false;
1278
1279 ASMAtomicDecS32(&pDevExt->cSessions);
1280
1281 /*
1282 * Clear OS specific session pointer if available and do the OS callback.
1283 */
1284 if (pSession->ppOsSessionPtr)
1285 {
1286 ASMAtomicCmpXchgPtr(pSession->ppOsSessionPtr, NULL, pSession);
1287 pSession->ppOsSessionPtr = NULL;
1288 }
1289
1290 supdrvOSSessionHashTabRemoved(pDevExt, pSession, pvUser);
1291
1292 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1293
1294 /*
1295 * Drop the reference the hash table had to the session. This shouldn't
1296 * be the last reference!
1297 */
1298 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1299 Assert(cRefs > 0 && cRefs < _1M);
1300 if (cRefs == 0)
1301 supdrvDestroySession(pDevExt, pSession);
1302
1303 return VINF_SUCCESS;
1304}
1305
1306
1307/**
1308 * Looks up the session for the current process in the global hash table or in
1309 * OS specific pointer.
1310 *
1311 * @returns Pointer to the session with a reference that the caller must
1312 * release. If no valid session was found, NULL is returned.
1313 *
1314 * @param pDevExt The device extension.
1315 * @param Process The process ID.
1316 * @param R0Process The ring-0 process handle.
1317 * @param ppOsSessionPtr The OS session pointer if available. If not NULL,
1318 * this is used instead of the hash table. For
1319 * additional safety it must then be equal to the
1320 * SUPDRVSESSION::ppOsSessionPtr member.
1321 * This can be NULL even if the OS has a session
1322 * pointer.
1323 */
1324PSUPDRVSESSION VBOXCALL supdrvSessionHashTabLookup(PSUPDRVDEVEXT pDevExt, RTPROCESS Process, RTR0PROCESS R0Process,
1325 PSUPDRVSESSION *ppOsSessionPtr)
1326{
1327 PSUPDRVSESSION pCur;
1328 unsigned iHash;
1329
1330 /*
1331 * Validate input.
1332 */
1333 AssertReturn(R0Process != NIL_RTR0PROCESS, NULL);
1334
1335 /*
1336 * Calculate the hash table index and acquire the spinlock.
1337 */
1338 iHash = SUPDRV_SESSION_HASH(Process);
1339
1340 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1341
1342 /*
1343 * If an OS session pointer is provided, always use it.
1344 */
1345 if (ppOsSessionPtr)
1346 {
1347 pCur = *ppOsSessionPtr;
1348 if ( pCur
1349 && ( pCur->ppOsSessionPtr != ppOsSessionPtr
1350 || pCur->Process != Process
1351 || pCur->R0Process != R0Process) )
1352 pCur = NULL;
1353 }
1354 else
1355 {
1356 /*
1357 * Otherwise, do the hash table lookup.
1358 */
1359 pCur = pDevExt->apSessionHashTab[iHash];
1360 while ( pCur
1361 && ( pCur->Process != Process
1362 || pCur->R0Process != R0Process) )
1363 pCur = pCur->pCommonNextHash;
1364 }
1365
1366 /*
1367 * Retain the session.
1368 */
1369 if (pCur)
1370 {
1371 uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
1372 NOREF(cRefs);
1373 Assert(cRefs > 1 && cRefs < _1M);
1374 }
1375
1376 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1377
1378 return pCur;
1379}
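
/*
 * Minimal usage sketch for the lookup above (illustrative only; it assumes the
 * caller already has a valid pDevExt): the returned session carries a reference
 * which must be dropped with supdrvSessionRelease() when the caller is done.
 *
 *      PSUPDRVSESSION pSession = supdrvSessionHashTabLookup(pDevExt, RTProcSelf(),
 *                                                           RTR0ProcHandleSelf(), NULL);
 *      if (pSession)
 *      {
 *          ... use the session ...
 *          supdrvSessionRelease(pSession);
 *      }
 */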
1380
1381
1382/**
1383 * Retain a session to make sure it doesn't go away while it is in use.
1384 *
1385 * @returns New reference count on success, UINT32_MAX on failure.
1386 * @param pSession Session data.
1387 */
1388uint32_t VBOXCALL supdrvSessionRetain(PSUPDRVSESSION pSession)
1389{
1390 uint32_t cRefs;
1391 AssertPtrReturn(pSession, UINT32_MAX);
1392 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1393
1394 cRefs = ASMAtomicIncU32(&pSession->cRefs);
1395 AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1396 return cRefs;
1397}
1398
1399
1400/**
1401 * Releases a given session.
1402 *
1403 * @returns New reference count on success (0 if closed), UINT32_MAX on failure.
1404 * @param pSession Session data.
1405 */
1406uint32_t VBOXCALL supdrvSessionRelease(PSUPDRVSESSION pSession)
1407{
1408 uint32_t cRefs;
1409 AssertPtrReturn(pSession, UINT32_MAX);
1410 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1411
1412 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1413 AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1414 if (cRefs == 0)
1415 supdrvDestroySession(pSession->pDevExt, pSession);
1416 return cRefs;
1417}
1418
1419
1420/**
1421 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1422 *
1423 * @returns IPRT status code, see SUPR0ObjAddRef.
1424 * @param hHandleTable The handle table handle. Ignored.
1425 * @param pvObj The object pointer.
1426 * @param pvCtx Context, the handle type. Ignored.
1427 * @param pvUser Session pointer.
1428 */
1429static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
1430{
1431 NOREF(pvCtx);
1432 NOREF(hHandleTable);
1433 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
1434}
1435
1436
1437/**
1438 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1439 *
1440 * @param hHandleTable The handle table handle. Ignored.
1441 * @param h The handle value. Ignored.
1442 * @param pvObj The object pointer.
1443 * @param pvCtx Context, the handle type. Ignored.
1444 * @param pvUser Session pointer.
1445 */
1446static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
1447{
1448 NOREF(pvCtx);
1449 NOREF(h);
1450 NOREF(hHandleTable);
1451 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
1452}
1453
1454
1455/**
1456 * Fast path I/O Control worker.
1457 *
1458 * @returns VBox status code that should be passed down to ring-3 unchanged.
1459 * @param uOperation SUP_VMMR0_DO_XXX (not the I/O control number!).
1460 * @param idCpu VMCPU id.
1461 * @param pDevExt Device extension.
1462 * @param pSession Session data.
1463 */
1464int VBOXCALL supdrvIOCtlFast(uintptr_t uOperation, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1465{
1466 /*
1467 * Validate input and check that the VM has a session.
1468 */
1469 if (RT_LIKELY(RT_VALID_PTR(pSession)))
1470 {
1471 PVM pVM = pSession->pSessionVM;
1472 PGVM pGVM = pSession->pSessionGVM;
1473 if (RT_LIKELY( pGVM != NULL
1474 && pVM != NULL
1475 && pVM == pSession->pFastIoCtrlVM))
1476 {
1477 if (RT_LIKELY(pDevExt->pfnVMMR0EntryFast))
1478 {
1479 /*
1480 * Make the call.
1481 */
1482 pDevExt->pfnVMMR0EntryFast(pGVM, pVM, idCpu, uOperation);
1483 return VINF_SUCCESS;
1484 }
1485
1486 SUPR0Printf("supdrvIOCtlFast: pfnVMMR0EntryFast is NULL\n");
1487 }
1488 else
1489 SUPR0Printf("supdrvIOCtlFast: Misconfig session: pGVM=%p pVM=%p pFastIoCtrlVM=%p\n",
1490 pGVM, pVM, pSession->pFastIoCtrlVM);
1491 }
1492 else
1493 SUPR0Printf("supdrvIOCtlFast: Bad session pointer %p\n", pSession);
1494 return VERR_INTERNAL_ERROR;
1495}
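
/*
 * Note on the fast path checks above: they only pass once ring-3 has both
 * associated a VM with the session (pSessionVM/pSessionGVM) and selected it for
 * fast I/O control via SUP_IOCTL_SET_VM_FOR_FAST (handled further down in
 * supdrvIOCtlInnerUnrestricted); until then every fast ioctl fails with
 * VERR_INTERNAL_ERROR.
 */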
1496
1497
1498/**
1499 * Helper for supdrvIOCtl used to validate module names passed to SUP_IOCTL_LDR_OPEN.
1500 *
1501 * Checks whether pszName contains any invalid characters. We would use strpbrk
1502 * here if it were included in the RedHat kABI whitelist, see
1503 * http://www.kerneldrivers.org/RHEL5.
1504 *
1505 * @returns true if fine, false if not.
1506 * @param pszName The module name to check.
1507 */
1508static bool supdrvIsLdrModuleNameValid(const char *pszName)
1509{
1510 int chCur;
1511 while ((chCur = *pszName++) != '\0')
1512 {
1513 static const char s_szInvalidChars[] = ";:()[]{}/\\|&*%#@!~`\"'";
1514 unsigned offInv = RT_ELEMENTS(s_szInvalidChars);
1515 while (offInv-- > 0)
1516 if (s_szInvalidChars[offInv] == chCur)
1517 return false;
1518 }
1519 return true;
1520}
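
/*
 * Examples (illustrative): supdrvIsLdrModuleNameValid("VBoxDDR0") returns true,
 * whereas names containing any character from s_szInvalidChars, such as
 * "VBoxDD;R0" or "..\\Evil", return false.
 */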
1521
1522
1523
1524/**
1525 * I/O Control inner worker (tracing reasons).
1526 *
1527 * @returns IPRT status code.
1528 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1529 *
1530 * @param uIOCtl Function number.
1531 * @param pDevExt Device extension.
1532 * @param pSession Session data.
1533 * @param pReqHdr The request header.
1534 */
1535static int supdrvIOCtlInnerUnrestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1536{
1537 /*
1538 * Validation macros
1539 */
1540#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1541 do { \
1542 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1543 { \
1544 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1545 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1546 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1547 } \
1548 } while (0)
1549
1550#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1551
1552#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1553 do { \
1554 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1555 { \
1556 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1557 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
1558 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1559 } \
1560 } while (0)
1561
1562#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1563 do { \
1564 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1565 { \
1566 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1567 (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1568 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1569 } \
1570 } while (0)
1571
1572#define REQ_CHECK_EXPR(Name, expr) \
1573 do { \
1574 if (RT_UNLIKELY(!(expr))) \
1575 { \
1576 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1577 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1578 } \
1579 } while (0)
1580
1581#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1582 do { \
1583 if (RT_UNLIKELY(!(expr))) \
1584 { \
1585 OSDBGPRINT( fmt ); \
1586 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1587 } \
1588 } while (0)
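
    /*
     * Expansion example (illustrative): REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE) becomes
     * REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_FREE, SUP_IOCTL_LDR_FREE_SIZE_IN, SUP_IOCTL_LDR_FREE_SIZE_OUT),
     * i.e. the request is failed with VERR_INVALID_PARAMETER unless pReqHdr->cbIn
     * and pReqHdr->cbOut match those exact sizes.
     */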
1589
1590 /*
1591 * The switch.
1592 */
1593 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1594 {
1595 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1596 {
1597 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1598 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1599 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1600 {
1601 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1602 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1603 return 0;
1604 }
1605
1606#if 0
1607 /*
1608 * Call out to the OS specific code and let it do permission checks on the
1609 * client process.
1610 */
1611 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1612 {
1613 pReq->u.Out.u32Cookie = 0xffffffff;
1614 pReq->u.Out.u32SessionCookie = 0xffffffff;
1615 pReq->u.Out.u32SessionVersion = 0xffffffff;
1616 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1617 pReq->u.Out.pSession = NULL;
1618 pReq->u.Out.cFunctions = 0;
1619 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1620 return 0;
1621 }
1622#endif
1623
1624 /*
1625 * Match the version.
1626 * The current logic is very simple, match the major interface version.
1627 */
1628 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1629 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1630 {
1631 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1632 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1633 pReq->u.Out.u32Cookie = 0xffffffff;
1634 pReq->u.Out.u32SessionCookie = 0xffffffff;
1635 pReq->u.Out.u32SessionVersion = 0xffffffff;
1636 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1637 pReq->u.Out.pSession = NULL;
1638 pReq->u.Out.cFunctions = 0;
1639 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1640 return 0;
1641 }
1642
1643 /*
1644 * Fill in return data and be gone.
1645 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1646 * u32SessionVersion <= u32ReqVersion!
1647 */
1648 /** @todo Somehow validate the client and negotiate a secure cookie... */
1649 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1650 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1651 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1652 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1653 pReq->u.Out.pSession = pSession;
1654 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1655 pReq->Hdr.rc = VINF_SUCCESS;
1656 return 0;
1657 }
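
        /*
         * Worked example of the version matching above (numbers are hypothetical):
         * with SUPDRV_IOC_VERSION = 0x00290001, a client passing
         * u32MinVersion = 0x00290000 is accepted (same major version 0x0029 and
         * 0x00290000 <= 0x00290001), while u32MinVersion = 0x00280003 or
         * 0x002a0000 is rejected with VERR_VERSION_MISMATCH.
         */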
1658
1659 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1660 {
1661 /* validate */
1662 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1663 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1664
1665 /* execute */
1666 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1667 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1668 pReq->Hdr.rc = VINF_SUCCESS;
1669 return 0;
1670 }
1671
1672 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1673 {
1674 /* validate */
1675 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1676 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1677 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1678 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1679 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1680
1681 /* execute */
1682 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1683 if (RT_FAILURE(pReq->Hdr.rc))
1684 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1685 return 0;
1686 }
1687
1688 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1689 {
1690 /* validate */
1691 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1692 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1693
1694 /* execute */
1695 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1696 return 0;
1697 }
1698
1699 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1700 {
1701 /* validate */
1702 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1703 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1704
1705 /* execute */
1706 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1707 if (RT_FAILURE(pReq->Hdr.rc))
1708 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1709 return 0;
1710 }
1711
1712 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1713 {
1714 /* validate */
1715 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1716 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1717
1718 /* execute */
1719 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1720 return 0;
1721 }
1722
1723 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1724 {
1725 /* validate */
1726 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1727 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1728 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithTabs > 0);
1729 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithTabs < 16*_1M);
1730 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1732 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithTabs);
1733 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1734 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
1735 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, supdrvIsLdrModuleNameValid(pReq->u.In.szName));
1736 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szFilename, sizeof(pReq->u.In.szFilename)));
1737
1738 /* execute */
1739 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1740 return 0;
1741 }
1742
1743 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1744 {
1745 /* validate */
1746 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1747 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= SUP_IOCTL_LDR_LOAD_SIZE_IN(32));
1748 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithTabs), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1749 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
1750 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1751 || ( pReq->u.In.offSymbols < pReq->u.In.cbImageWithTabs
1752 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithTabs),
1753 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithTabs=%#lx\n", (long)pReq->u.In.offSymbols,
1754 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithTabs));
1755 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1756 || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithTabs
1757 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithTabs
1758 && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithTabs),
1759 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithTabs=%#lx\n", (long)pReq->u.In.offStrTab,
1760 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithTabs));
1761
1762 if (pReq->u.In.cSymbols)
1763 {
1764 uint32_t i;
1765 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.abImage[pReq->u.In.offSymbols];
1766 for (i = 0; i < pReq->u.In.cSymbols; i++)
1767 {
1768 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithTabs,
1769 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithTabs));
1770 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1771 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithTabs));
1772 REQ_CHECK_EXPR_FMT(RTStrEnd((char const *)&pReq->u.In.abImage[pReq->u.In.offStrTab + paSyms[i].offName],
1773 pReq->u.In.cbStrTab - paSyms[i].offName),
1774 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithTabs));
1775 }
1776 }
1777
1778 /* execute */
1779 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1780 return 0;
1781 }
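
        /*
         * Layout implied by the checks above (illustrative):
         *
         *      abImage[0 .. cbImageBits)                                         image bits
         *      abImage[offSymbols .. offSymbols + cSymbols * sizeof(SUPLDRSYM))  symbol table
         *      abImage[offStrTab .. offStrTab + cbStrTab)                        string table
         *
         * All ranges must lie within cbImageWithTabs, and each SUPLDRSYM::offName
         * must point at a zero terminated string inside the string table.
         */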
1782
1783 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1784 {
1785 /* validate */
1786 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1787 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1788
1789 /* execute */
1790 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1791 return 0;
1792 }
1793
1794 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOCK_DOWN):
1795 {
1796 /* validate */
1797 REQ_CHECK_SIZES(SUP_IOCTL_LDR_LOCK_DOWN);
1798
1799 /* execute */
1800 pReqHdr->rc = supdrvIOCtl_LdrLockDown(pDevExt);
1801 return 0;
1802 }
1803
1804 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1805 {
1806 /* validate */
1807 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1808 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1809 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, RTStrEnd(pReq->u.In.szSymbol, sizeof(pReq->u.In.szSymbol)));
1810
1811 /* execute */
1812 pReq->Hdr.rc = supdrvIOCtl_LdrQuerySymbol(pDevExt, pSession, pReq);
1813 return 0;
1814 }
1815
1816 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_NO_SIZE()):
1817 {
1818 /* validate */
1819 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1820 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1821 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1822
1823 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1824 {
1825 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1826
1827 /* execute */
1828 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1829 {
1830 if (pReq->u.In.pVMR0 == NULL)
1831 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1832 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1833 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1834 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1835 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1836 else
1837 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1838 }
1839 else
1840 pReq->Hdr.rc = VERR_WRONG_ORDER;
1841 }
1842 else
1843 {
1844 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1845 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1846 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1847 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1848 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1849
1850 /* execute */
1851 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1852 {
1853 if (pReq->u.In.pVMR0 == NULL)
1854 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1855 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1856 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1857 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1858 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1859 else
1860 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1861 }
1862 else
1863 pReq->Hdr.rc = VERR_WRONG_ORDER;
1864 }
1865
1866 if ( RT_FAILURE(pReq->Hdr.rc)
1867 && pReq->Hdr.rc != VERR_INTERRUPTED
1868 && pReq->Hdr.rc != VERR_TIMEOUT)
1869 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1870 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1871 else
1872 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1873 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1874 return 0;
1875 }
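
        /*
         * Ring-3 sketch (illustrative): a parameterless VMMR0 call sizes the buffer
         * as SUP_IOCTL_CALL_VMMR0_SIZE(0) and only passes idCpu, uOperation and
         * u64Arg; a call with a request packet instead embeds a SUPVMMR0REQHDR
         * (u32Magic = SUPVMMR0REQHDR_MAGIC, cbReq = size of the whole packet) at
         * abReqPkt[0] and sizes the buffer as SUP_IOCTL_CALL_VMMR0_SIZE(cbReq).
         */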
1876
1877 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_BIG):
1878 {
1879 /* validate */
1880 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1881 PSUPVMMR0REQHDR pVMMReq;
1882 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1883 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1884
1885 pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1886 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR)),
1887 ("SUP_IOCTL_CALL_VMMR0_BIG: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR))));
1888 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0_BIG, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1889 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0_BIG, SUP_IOCTL_CALL_VMMR0_BIG_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_BIG_SIZE_OUT(pVMMReq->cbReq));
1890
1891 /* execute */
1892 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1893 {
1894 if (pReq->u.In.pVMR0 == NULL)
1895 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1896 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1897 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1898 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1899 else
1900 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1901 }
1902 else
1903 pReq->Hdr.rc = VERR_WRONG_ORDER;
1904
1905 if ( RT_FAILURE(pReq->Hdr.rc)
1906 && pReq->Hdr.rc != VERR_INTERRUPTED
1907 && pReq->Hdr.rc != VERR_TIMEOUT)
1908 Log(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1909 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1910 else
1911 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1912 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1913 return 0;
1914 }
1915
1916 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1917 {
1918 /* validate */
1919 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1920 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1921
1922 /* execute */
1923 pReq->Hdr.rc = VINF_SUCCESS;
1924 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1925 return 0;
1926 }
1927
1928 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1929 {
1930 /* validate */
1931 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1932 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1933 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1934
1935 /* execute */
1936 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1937 if (RT_FAILURE(pReq->Hdr.rc))
1938 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1939 return 0;
1940 }
1941
1942 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1943 {
1944 /* validate */
1945 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1946 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1947
1948 /* execute */
1949 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1950 return 0;
1951 }
1952
1953 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1954 {
1955 /* validate */
1956 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1957 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1958
1959 /* execute */
1960 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1961 if (RT_SUCCESS(pReq->Hdr.rc))
1962 pReq->u.Out.pGipR0 = pDevExt->pGip;
1963 return 0;
1964 }
1965
1966 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
1967 {
1968 /* validate */
1969 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
1970 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
1971
1972 /* execute */
1973 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
1974 return 0;
1975 }
1976
1977 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
1978 {
1979 /* validate */
1980 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
1981 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
1982 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
1983 || ( VALID_PTR(pReq->u.In.pVMR0)
1984 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
1985 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
1986
1987 /* execute */
1988 RTSpinlockAcquire(pDevExt->Spinlock);
1989 if (pSession->pSessionVM == pReq->u.In.pVMR0)
1990 {
1991 if (pSession->pFastIoCtrlVM == NULL)
1992 {
1993 pSession->pFastIoCtrlVM = pSession->pSessionVM;
1994 RTSpinlockRelease(pDevExt->Spinlock);
1995 pReq->Hdr.rc = VINF_SUCCESS;
1996 }
1997 else
1998 {
1999 RTSpinlockRelease(pDevExt->Spinlock);
2000 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pFastIoCtrlVM=%p! (pVMR0=%p)\n",
2001 pSession->pFastIoCtrlVM, pReq->u.In.pVMR0));
2002 pReq->Hdr.rc = VERR_ALREADY_EXISTS;
2003 }
2004 }
2005 else
2006 {
2007 RTSpinlockRelease(pDevExt->Spinlock);
2008 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pSessionVM=%p vs pVMR0=%p)\n",
2009 pSession->pSessionVM, pReq->u.In.pVMR0));
2010 pReq->Hdr.rc = pSession->pSessionVM ? VERR_ACCESS_DENIED : VERR_WRONG_ORDER;
2011 }
2012 return 0;
2013 }
2014
2015 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
2016 {
2017 /* validate */
2018 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
2019 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
2020 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
2021 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
2022 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
2023 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
2024 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
2025 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
2026 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
2027
2028 /* execute */
2029 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
2030 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
2031 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
2032 &pReq->u.Out.aPages[0]);
2033 if (RT_FAILURE(pReq->Hdr.rc))
2034 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2035 return 0;
2036 }
2037
2038 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
2039 {
2040 /* validate */
2041 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
2042 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
2043 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
2044 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
2045 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2046 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
2047
2048 /* execute */
2049 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
2050 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
2051 if (RT_FAILURE(pReq->Hdr.rc))
2052 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2053 return 0;
2054 }
2055
2056 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
2057 {
2058 /* validate */
2059 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
2060 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
2061 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
2062 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
2063 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
2064 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2065 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
2066
2067 /* execute */
2068 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
2069 return 0;
2070 }
2071
2072 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
2073 {
2074 /* validate */
2075 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
2076 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
2077
2078 /* execute */
2079 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
2080 return 0;
2081 }
2082
2083 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE_NO_SIZE()):
2084 {
2085 /* validate */
2086 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
2087 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2088 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2089
2090 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
2091 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
2092 else
2093 {
2094 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
2095 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
2096 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
2097 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
2098 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
2099 }
2100 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
2101
2102 /* execute */
2103 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
2104 return 0;
2105 }
2106
2107 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS_NO_SIZE()):
2108 {
2109 /* validate */
2110 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
2111 size_t cbStrTab;
2112 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
2113 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
2114 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
2115 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
2116 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
2117 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
2118 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
2119 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
2120 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
2121 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
2122 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
2123
2124 /* execute */
2125 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pReq);
2126 return 0;
2127 }
2128
2129 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP2):
2130 {
2131 /* validate */
2132 PSUPSEMOP2 pReq = (PSUPSEMOP2)pReqHdr;
2133 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP2, SUP_IOCTL_SEM_OP2_SIZE_IN, SUP_IOCTL_SEM_OP2_SIZE_OUT);
2134 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP2, pReq->u.In.uReserved == 0);
2135
2136 /* execute */
2137 switch (pReq->u.In.uType)
2138 {
2139 case SUP_SEM_TYPE_EVENT:
2140 {
2141 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2142 switch (pReq->u.In.uOp)
2143 {
2144 case SUPSEMOP2_WAIT_MS_REL:
2145 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.uArg.cRelMsTimeout);
2146 break;
2147 case SUPSEMOP2_WAIT_NS_ABS:
2148 pReq->Hdr.rc = SUPSemEventWaitNsAbsIntr(pSession, hEvent, pReq->u.In.uArg.uAbsNsTimeout);
2149 break;
2150 case SUPSEMOP2_WAIT_NS_REL:
2151 pReq->Hdr.rc = SUPSemEventWaitNsRelIntr(pSession, hEvent, pReq->u.In.uArg.cRelNsTimeout);
2152 break;
2153 case SUPSEMOP2_SIGNAL:
2154 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
2155 break;
2156 case SUPSEMOP2_CLOSE:
2157 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
2158 break;
2159 case SUPSEMOP2_RESET:
2160 default:
2161 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2162 break;
2163 }
2164 break;
2165 }
2166
2167 case SUP_SEM_TYPE_EVENT_MULTI:
2168 {
2169 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2170 switch (pReq->u.In.uOp)
2171 {
2172 case SUPSEMOP2_WAIT_MS_REL:
2173 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.uArg.cRelMsTimeout);
2174 break;
2175 case SUPSEMOP2_WAIT_NS_ABS:
2176 pReq->Hdr.rc = SUPSemEventMultiWaitNsAbsIntr(pSession, hEventMulti, pReq->u.In.uArg.uAbsNsTimeout);
2177 break;
2178 case SUPSEMOP2_WAIT_NS_REL:
2179 pReq->Hdr.rc = SUPSemEventMultiWaitNsRelIntr(pSession, hEventMulti, pReq->u.In.uArg.cRelNsTimeout);
2180 break;
2181 case SUPSEMOP2_SIGNAL:
2182 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
2183 break;
2184 case SUPSEMOP2_CLOSE:
2185 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
2186 break;
2187 case SUPSEMOP2_RESET:
2188 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
2189 break;
2190 default:
2191 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2192 break;
2193 }
2194 break;
2195 }
2196
2197 default:
2198 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2199 break;
2200 }
2201 return 0;
2202 }
2203
2204 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP3):
2205 {
2206 /* validate */
2207 PSUPSEMOP3 pReq = (PSUPSEMOP3)pReqHdr;
2208 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP3, SUP_IOCTL_SEM_OP3_SIZE_IN, SUP_IOCTL_SEM_OP3_SIZE_OUT);
2209 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, pReq->u.In.u32Reserved == 0 && pReq->u.In.u64Reserved == 0);
2210
2211 /* execute */
2212 switch (pReq->u.In.uType)
2213 {
2214 case SUP_SEM_TYPE_EVENT:
2215 {
2216 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2217 switch (pReq->u.In.uOp)
2218 {
2219 case SUPSEMOP3_CREATE:
2220 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2221 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
2222 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
2223 break;
2224 case SUPSEMOP3_GET_RESOLUTION:
2225 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2226 pReq->Hdr.rc = VINF_SUCCESS;
2227 pReq->Hdr.cbOut = sizeof(*pReq);
2228 pReq->u.Out.cNsResolution = SUPSemEventGetResolution(pSession);
2229 break;
2230 default:
2231 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2232 break;
2233 }
2234 break;
2235 }
2236
2237 case SUP_SEM_TYPE_EVENT_MULTI:
2238 {
2239 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2240 switch (pReq->u.In.uOp)
2241 {
2242 case SUPSEMOP3_CREATE:
2243 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2244 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
2245 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
2246 break;
2247 case SUPSEMOP3_GET_RESOLUTION:
2248 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2249 pReq->Hdr.rc = VINF_SUCCESS;
2250 pReq->u.Out.cNsResolution = SUPSemEventMultiGetResolution(pSession);
2251 break;
2252 default:
2253 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2254 break;
2255 }
2256 break;
2257 }
2258
2259 default:
2260 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2261 break;
2262 }
2263 return 0;
2264 }
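
        /*
         * Ring-0 sketch of the event semaphore API driven by SUP_IOCTL_SEM_OP2/OP3
         * (illustrative only; error handling omitted, the 100 is a relative
         * millisecond timeout):
         *
         *      SUPSEMEVENT hEvent = NIL_SUPSEMEVENT;
         *      int rc = SUPSemEventCreate(pSession, &hEvent);
         *      if (RT_SUCCESS(rc))
         *      {
         *          SUPSemEventSignal(pSession, hEvent);
         *          rc = SUPSemEventWaitNoResume(pSession, hEvent, 100);
         *          SUPSemEventClose(pSession, hEvent);
         *      }
         */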
2265
2266 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2267 {
2268 /* validate */
2269 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2270 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2271
2272 /* execute */
2273 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2274 if (RT_FAILURE(pReq->Hdr.rc))
2275 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2276 return 0;
2277 }
2278
2279 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_OPEN):
2280 {
2281 /* validate */
2282 PSUPTRACEROPEN pReq = (PSUPTRACEROPEN)pReqHdr;
2283 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_OPEN);
2284
2285 /* execute */
2286 pReq->Hdr.rc = supdrvIOCtl_TracerOpen(pDevExt, pSession, pReq->u.In.uCookie, pReq->u.In.uArg);
2287 return 0;
2288 }
2289
2290 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_CLOSE):
2291 {
2292 /* validate */
2293 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_CLOSE);
2294
2295 /* execute */
2296 pReqHdr->rc = supdrvIOCtl_TracerClose(pDevExt, pSession);
2297 return 0;
2298 }
2299
2300 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_IOCTL):
2301 {
2302 /* validate */
2303 PSUPTRACERIOCTL pReq = (PSUPTRACERIOCTL)pReqHdr;
2304 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_IOCTL);
2305
2306 /* execute */
2307 pReqHdr->rc = supdrvIOCtl_TracerIOCtl(pDevExt, pSession, pReq->u.In.uCmd, pReq->u.In.uArg, &pReq->u.Out.iRetVal);
2308 return 0;
2309 }
2310
2311 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_REG):
2312 {
2313 /* validate */
2314 PSUPTRACERUMODREG pReq = (PSUPTRACERUMODREG)pReqHdr;
2315 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_REG);
2316 if (!RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)))
2317 return VERR_INVALID_PARAMETER;
2318
2319 /* execute */
2320 pReqHdr->rc = supdrvIOCtl_TracerUmodRegister(pDevExt, pSession,
2321 pReq->u.In.R3PtrVtgHdr, pReq->u.In.uVtgHdrAddr,
2322 pReq->u.In.R3PtrStrTab, pReq->u.In.cbStrTab,
2323 pReq->u.In.szName, pReq->u.In.fFlags);
2324 return 0;
2325 }
2326
2327 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_DEREG):
2328 {
2329 /* validate */
2330 PSUPTRACERUMODDEREG pReq = (PSUPTRACERUMODDEREG)pReqHdr;
2331 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_DEREG);
2332
2333 /* execute */
2334 pReqHdr->rc = supdrvIOCtl_TracerUmodDeregister(pDevExt, pSession, pReq->u.In.pVtgHdr);
2335 return 0;
2336 }
2337
2338 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE):
2339 {
2340 /* validate */
2341 PSUPTRACERUMODFIREPROBE pReq = (PSUPTRACERUMODFIREPROBE)pReqHdr;
2342 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE);
2343
2344 supdrvIOCtl_TracerUmodProbeFire(pDevExt, pSession, &pReq->u.In);
2345 pReqHdr->rc = VINF_SUCCESS;
2346 return 0;
2347 }
2348
2349 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_MSR_PROBER):
2350 {
2351 /* validate */
2352 PSUPMSRPROBER pReq = (PSUPMSRPROBER)pReqHdr;
2353 REQ_CHECK_SIZES(SUP_IOCTL_MSR_PROBER);
2354 REQ_CHECK_EXPR(SUP_IOCTL_MSR_PROBER,
2355 pReq->u.In.enmOp > SUPMSRPROBEROP_INVALID && pReq->u.In.enmOp < SUPMSRPROBEROP_END);
2356
2357 pReqHdr->rc = supdrvIOCtl_MsrProber(pDevExt, pReq);
2358 return 0;
2359 }
2360
2361 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_RESUME_SUSPENDED_KBDS):
2362 {
2363 /* validate */
2364 REQ_CHECK_SIZES(SUP_IOCTL_RESUME_SUSPENDED_KBDS);
2365
2366 pReqHdr->rc = supdrvIOCtl_ResumeSuspendedKbds();
2367 return 0;
2368 }
2369
2370 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_DELTA_MEASURE):
2371 {
2372 /* validate */
2373 PSUPTSCDELTAMEASURE pReq = (PSUPTSCDELTAMEASURE)pReqHdr;
2374 REQ_CHECK_SIZES(SUP_IOCTL_TSC_DELTA_MEASURE);
2375
2376 pReqHdr->rc = supdrvIOCtl_TscDeltaMeasure(pDevExt, pSession, pReq);
2377 return 0;
2378 }
2379
2380 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_READ):
2381 {
2382 /* validate */
2383 PSUPTSCREAD pReq = (PSUPTSCREAD)pReqHdr;
2384 REQ_CHECK_SIZES(SUP_IOCTL_TSC_READ);
2385
2386 pReqHdr->rc = supdrvIOCtl_TscRead(pDevExt, pSession, pReq);
2387 return 0;
2388 }
2389
2390 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_SET_FLAGS):
2391 {
2392 /* validate */
2393 PSUPGIPSETFLAGS pReq = (PSUPGIPSETFLAGS)pReqHdr;
2394 REQ_CHECK_SIZES(SUP_IOCTL_GIP_SET_FLAGS);
2395
2396 pReqHdr->rc = supdrvIOCtl_GipSetFlags(pDevExt, pSession, pReq->u.In.fOrMask, pReq->u.In.fAndMask);
2397 return 0;
2398 }
2399
2400 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_UCODE_REV):
2401 {
2402 /* validate */
2403 PSUPUCODEREV pReq = (PSUPUCODEREV)pReqHdr;
2404 REQ_CHECK_SIZES(SUP_IOCTL_UCODE_REV);
2405
2406 /* execute */
2407 pReq->Hdr.rc = SUPR0QueryUcodeRev(pSession, &pReq->u.Out.MicrocodeRev);
2408 if (RT_FAILURE(pReq->Hdr.rc))
2409 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2410 return 0;
2411 }
2412
2413 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_HWVIRT_MSRS):
2414 {
2415 /* validate */
2416 PSUPGETHWVIRTMSRS pReq = (PSUPGETHWVIRTMSRS)pReqHdr;
2417 REQ_CHECK_SIZES(SUP_IOCTL_GET_HWVIRT_MSRS);
2418 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1 && !pReq->u.In.fReserved2,
2419 ("SUP_IOCTL_GET_HWVIRT_MSRS: fReserved0=%d fReserved1=%d fReserved2=%d\n", pReq->u.In.fReserved0,
2420 pReq->u.In.fReserved1, pReq->u.In.fReserved2));
2421
2422 /* execute */
2423 pReq->Hdr.rc = SUPR0GetHwvirtMsrs(&pReq->u.Out.HwvirtMsrs, 0 /* fCaps */, pReq->u.In.fForce);
2424 if (RT_FAILURE(pReq->Hdr.rc))
2425 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2426 return 0;
2427 }
2428
2429 default:
2430 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2431 break;
2432 }
2433 return VERR_GENERAL_FAILURE;
2434}
2435
2436
2437/**
2438 * I/O Control inner worker for the restricted operations.
2439 *
2440 * @returns IPRT status code.
2441 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2442 *
2443 * @param uIOCtl Function number.
2444 * @param pDevExt Device extension.
2445 * @param pSession Session data.
2446 * @param pReqHdr The request header.
2447 */
2448static int supdrvIOCtlInnerRestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
2449{
2450 /*
2451 * The switch.
2452 */
2453 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
2454 {
2455 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
2456 {
2457 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
2458 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
2459 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
2460 {
2461 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
2462 pReq->Hdr.rc = VERR_INVALID_MAGIC;
2463 return 0;
2464 }
2465
2466 /*
2467 * Match the version.
2468 * The current logic is very simple, match the major interface version.
2469 */
2470 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
2471 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
2472 {
2473 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2474 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
2475 pReq->u.Out.u32Cookie = 0xffffffff;
2476 pReq->u.Out.u32SessionCookie = 0xffffffff;
2477 pReq->u.Out.u32SessionVersion = 0xffffffff;
2478 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2479 pReq->u.Out.pSession = NULL;
2480 pReq->u.Out.cFunctions = 0;
2481 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2482 return 0;
2483 }
2484
2485 /*
2486 * Fill in return data and be gone.
2487 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
2488 * u32SessionVersion <= u32ReqVersion!
2489 */
2490 /** @todo Somehow validate the client and negotiate a secure cookie... */
2491 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
2492 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
2493 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
2494 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2495 pReq->u.Out.pSession = pSession;
2496 pReq->u.Out.cFunctions = 0;
2497 pReq->Hdr.rc = VINF_SUCCESS;
2498 return 0;
2499 }
2500
2501 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2502 {
2503 /* validate */
2504 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2505 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2506
2507 /* execute */
2508 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2509 if (RT_FAILURE(pReq->Hdr.rc))
2510 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2511 return 0;
2512 }
2513
2514 default:
2515 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2516 break;
2517 }
2518 return VERR_GENERAL_FAILURE;
2519}
2520
2521
2522/**
2523 * I/O Control worker.
2524 *
2525 * @returns IPRT status code.
2526 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2527 *
2528 * @param uIOCtl Function number.
2529 * @param pDevExt Device extension.
2530 * @param pSession Session data.
2531 * @param pReqHdr The request header.
2532 * @param cbReq The size of the request buffer.
2533 */
2534int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr, size_t cbReq)
2535{
2536 int rc;
2537 VBOXDRV_IOCTL_ENTRY(pSession, uIOCtl, pReqHdr);
2538
2539 /*
2540 * Validate the request.
2541 */
2542 if (RT_UNLIKELY(cbReq < sizeof(*pReqHdr)))
2543 {
2544 OSDBGPRINT(("vboxdrv: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
2545 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2546 return VERR_INVALID_PARAMETER;
2547 }
2548 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
2549 || pReqHdr->cbIn < sizeof(*pReqHdr)
2550 || pReqHdr->cbIn > cbReq
2551 || pReqHdr->cbOut < sizeof(*pReqHdr)
2552 || pReqHdr->cbOut > cbReq))
2553 {
2554 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
2555 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
2556 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2557 return VERR_INVALID_PARAMETER;
2558 }
2559 if (RT_UNLIKELY(!RT_VALID_PTR(pSession)))
2560 {
2561 OSDBGPRINT(("vboxdrv: Invalid pSession value %p (ioctl=%p)\n", pSession, (void *)uIOCtl));
2562 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2563 return VERR_INVALID_PARAMETER;
2564 }
2565 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
2566 {
2567 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
2568 {
2569 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
2570 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2571 return VERR_INVALID_PARAMETER;
2572 }
2573 }
2574 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
2575 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
2576 {
2577 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
2578 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2579 return VERR_INVALID_PARAMETER;
2580 }
2581
2582 /*
2583 * Hand it to an inner function to avoid lots of unnecessary return tracepoints.
2584 */
2585 if (pSession->fUnrestricted)
2586 rc = supdrvIOCtlInnerUnrestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2587 else
2588 rc = supdrvIOCtlInnerRestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2589
2590 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, pReqHdr->rc, rc);
2591 return rc;
2592}
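
/*
 * Request header sketch (illustrative): every buffer handed to supdrvIOCtl starts
 * with a SUPREQHDR where
 *      - (fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) == SUPREQHDR_FLAGS_MAGIC,
 *      - sizeof(SUPREQHDR) <= cbIn <= cbReq and sizeof(SUPREQHDR) <= cbOut <= cbReq,
 *      - u32Cookie and u32SessionCookie hold the values returned by SUP_IOCTL_COOKIE,
 *        except for the initial SUP_IOCTL_COOKIE request itself which uses
 *        SUPCOOKIE_INITIAL_COOKIE.
 * Anything else is rejected with VERR_INVALID_PARAMETER before reaching the
 * inner workers.
 */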
2593
2594
2595/**
2596 * Inter-Driver Communication (IDC) worker.
2597 *
2598 * @returns VBox status code.
2599 * @retval VINF_SUCCESS on success.
2600 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2601 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
2602 *
2603 * @param uReq The request (function) code.
2604 * @param pDevExt Device extension.
2605 * @param pSession Session data.
2606 * @param pReqHdr The request header.
2607 */
2608int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
2609{
2610 /*
2611 * The OS specific code has already validated the pSession
2612 * pointer, and that the request size is greater than or equal to the
2613 * size of the header.
2614 *
2615 * So, just check that pSession is a kernel context session.
2616 */
2617 if (RT_UNLIKELY( pSession
2618 && pSession->R0Process != NIL_RTR0PROCESS))
2619 return VERR_INVALID_PARAMETER;
2620
2621/*
2622 * Validation macro.
2623 */
2624#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
2625 do { \
2626 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
2627 { \
2628 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
2629 (long)pReqHdr->cb, (long)(cbExpect))); \
2630 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
2631 } \
2632 } while (0)
2633
2634 switch (uReq)
2635 {
2636 case SUPDRV_IDC_REQ_CONNECT:
2637 {
2638 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
2639 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
2640
2641 /*
2642 * Validate the cookie and other input.
2643 */
2644 if (pReq->Hdr.pSession != NULL)
2645 {
2646 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Hdr.pSession=%p expected NULL!\n", pReq->Hdr.pSession));
2647 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2648 }
2649 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
2650 {
2651 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
2652 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
2653 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2654 }
2655 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
2656 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
2657 {
2658 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
2659 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
2660 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2661 }
2662 if (pSession != NULL)
2663 {
2664 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pSession));
2665 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2666 }
2667
2668 /*
2669 * Match the version.
2670 * The current logic is very simple, match the major interface version.
2671 */
2672 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
2673 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
2674 {
2675 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2676 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
2677 pReq->u.Out.pSession = NULL;
2678 pReq->u.Out.uSessionVersion = 0xffffffff;
2679 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2680 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2681 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2682 return VINF_SUCCESS;
2683 }
2684
2685 pReq->u.Out.pSession = NULL;
2686 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
2687 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2688 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2689
2690 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, true /*fUnrestricted*/, &pSession);
2691 if (RT_FAILURE(pReq->Hdr.rc))
2692 {
2693 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
2694 return VINF_SUCCESS;
2695 }
2696
2697 pReq->u.Out.pSession = pSession;
2698 pReq->Hdr.pSession = pSession;
2699
2700 return VINF_SUCCESS;
2701 }
2702
2703 case SUPDRV_IDC_REQ_DISCONNECT:
2704 {
2705 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
2706
2707 supdrvSessionRelease(pSession);
2708 return pReqHdr->rc = VINF_SUCCESS;
2709 }
2710
2711 case SUPDRV_IDC_REQ_GET_SYMBOL:
2712 {
2713 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
2714 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
2715
2716 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
2717 return VINF_SUCCESS;
2718 }
2719
2720 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
2721 {
2722 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
2723 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
2724
2725 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
2726 return VINF_SUCCESS;
2727 }
2728
2729 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
2730 {
2731 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
2732 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
2733
2734 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
2735 return VINF_SUCCESS;
2736 }
2737
2738 default:
2739 Log(("Unknown IDC %#lx\n", (long)uReq));
2740 break;
2741 }
2742
2743#undef REQ_CHECK_IDC_SIZE
2744 return VERR_NOT_SUPPORTED;
2745}
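
/*
 * IDC usage sketch (illustrative; error handling omitted): a third-party driver
 * connects by submitting a SUPDRVIDCREQCONNECT with Hdr.pSession = NULL,
 * u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE and suitable
 * uMinVersion/uReqVersion values. On success the new kernel session is returned
 * in u.Out.pSession and Hdr.pSession, and it is torn down again with a
 * SUPDRV_IDC_REQ_DISCONNECT request.
 */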
2746
2747
2748/**
2749 * Register an object for reference counting.
2750 * The object is registered with one reference in the specified session.
2751 *
2752 * @returns Unique identifier on success (pointer).
2753 * All future references must use this identifier.
2754 * @returns NULL on failure.
2755 * @param pSession The caller's session.
2756 * @param enmType The object type.
2757 * @param pfnDestructor The destructor function which will be called when the reference count reaches 0.
2758 * @param pvUser1 The first user argument.
2759 * @param pvUser2 The second user argument.
2760 */
2761SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
2762{
2763 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2764 PSUPDRVOBJ pObj;
2765 PSUPDRVUSAGE pUsage;
2766
2767 /*
2768 * Validate the input.
2769 */
2770 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
2771 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
2772 AssertPtrReturn(pfnDestructor, NULL);
2773
2774 /*
2775 * Allocate and initialize the object.
2776 */
2777 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
2778 if (!pObj)
2779 return NULL;
2780 pObj->u32Magic = SUPDRVOBJ_MAGIC;
2781 pObj->enmType = enmType;
2782 pObj->pNext = NULL;
2783 pObj->cUsage = 1;
2784 pObj->pfnDestructor = pfnDestructor;
2785 pObj->pvUser1 = pvUser1;
2786 pObj->pvUser2 = pvUser2;
2787 pObj->CreatorUid = pSession->Uid;
2788 pObj->CreatorGid = pSession->Gid;
2789 pObj->CreatorProcess= pSession->Process;
2790 supdrvOSObjInitCreator(pObj, pSession);
2791
2792 /*
2793 * Allocate the usage record.
2794 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
2795 */
2796 RTSpinlockAcquire(pDevExt->Spinlock);
2797
2798 pUsage = pDevExt->pUsageFree;
2799 if (pUsage)
2800 pDevExt->pUsageFree = pUsage->pNext;
2801 else
2802 {
2803 RTSpinlockRelease(pDevExt->Spinlock);
2804 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
2805 if (!pUsage)
2806 {
2807 RTMemFree(pObj);
2808 return NULL;
2809 }
2810 RTSpinlockAcquire(pDevExt->Spinlock);
2811 }
2812
2813 /*
2814 * Insert the object and create the session usage record.
2815 */
2816 /* The object. */
2817 pObj->pNext = pDevExt->pObjs;
2818 pDevExt->pObjs = pObj;
2819
2820 /* The session record. */
2821 pUsage->cUsage = 1;
2822 pUsage->pObj = pObj;
2823 pUsage->pNext = pSession->pUsage;
2824 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
2825 pSession->pUsage = pUsage;
2826
2827 RTSpinlockRelease(pDevExt->Spinlock);
2828
2829 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
2830 return pObj;
2831}
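
/*
 * Usage sketch (illustrative; MyObjDestructor and pvPayload are made-up names,
 * and any object type between SUPDRVOBJTYPE_INVALID and SUPDRVOBJTYPE_END works):
 *
 *      static DECLCALLBACK(void) MyObjDestructor(void *pvObj, void *pvUser1, void *pvUser2)
 *      {
 *          NOREF(pvObj); NOREF(pvUser2);
 *          RTMemFree(pvUser1);
 *      }
 *      ...
 *      void *pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, MyObjDestructor,
 *                                     pvPayload, NULL);
 *
 * Other sessions add references with SUPR0ObjAddRef(pvObj, pOtherSession); the
 * release that drops the last reference returns VINF_OBJECT_DESTROYED and runs
 * the destructor (see SUPR0ObjRelease below).
 */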
2832
2833
2834/**
2835 * Increment the reference counter for the object, associating the reference
2836 * with the specified session.
2837 *
2838 * @returns IPRT status code.
2839 * @param pvObj The identifier returned by SUPR0ObjRegister().
2840 * @param pSession The session which is referencing the object.
2841 *
2842 * @remarks The caller should not own any spinlocks and must carefully protect
2843 * itself against potential race with the destructor so freed memory
2844 * isn't accessed here.
2845 */
2846SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
2847{
2848 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
2849}
2850
2851
2852/**
2853 * Increment the reference counter for the object, associating the reference
2854 * with the specified session.
2855 *
2856 * @returns IPRT status code.
2857 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
2858 * couldn't be allocated. (If you see this you're not doing the right
2859 * thing and it won't ever work reliably.)
2860 *
2861 * @param pvObj The identifier returned by SUPR0ObjRegister().
2862 * @param pSession The session which is referencing the object.
2863 * @param fNoBlocking Set if it's not OK to block. Never try to make the
2864 * first reference to an object in a session with this
2865 * argument set.
2866 *
2867 * @remarks The caller should not own any spinlocks and must carefully protect
2868 * itself against potential race with the destructor so freed memory
2869 * isn't accessed here.
2870 */
2871SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
2872{
2873 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2874 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2875 int rc = VINF_SUCCESS;
2876 PSUPDRVUSAGE pUsagePre;
2877 PSUPDRVUSAGE pUsage;
2878
2879 /*
2880 * Validate the input.
2881 * Be ready for the destruction race (someone might be stuck in the
2882 * destructor waiting on a lock we own).
2883 */
2884 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2885 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
2886 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
2887 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
2888 VERR_INVALID_PARAMETER);
2889
2890 RTSpinlockAcquire(pDevExt->Spinlock);
2891
2892 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2893 {
2894 RTSpinlockRelease(pDevExt->Spinlock);
2895
2896 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2897 return VERR_WRONG_ORDER;
2898 }
2899
2900 /*
2901 * Preallocate the usage record if we can.
2902 */
2903 pUsagePre = pDevExt->pUsageFree;
2904 if (pUsagePre)
2905 pDevExt->pUsageFree = pUsagePre->pNext;
2906 else if (!fNoBlocking)
2907 {
2908 RTSpinlockRelease(pDevExt->Spinlock);
2909 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2910 if (!pUsagePre)
2911 return VERR_NO_MEMORY;
2912
2913 RTSpinlockAcquire(pDevExt->Spinlock);
2914 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2915 {
2916 RTSpinlockRelease(pDevExt->Spinlock);
2917
2918 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2919 return VERR_WRONG_ORDER;
2920 }
2921 }
2922
2923 /*
2924 * Reference the object.
2925 */
2926 pObj->cUsage++;
2927
2928 /*
2929 * Look for the session record.
2930 */
2931 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
2932 {
2933 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
2934 if (pUsage->pObj == pObj)
2935 break;
2936 }
2937 if (pUsage)
2938 pUsage->cUsage++;
2939 else if (pUsagePre)
2940 {
2941 /* create a new session record. */
2942 pUsagePre->cUsage = 1;
2943 pUsagePre->pObj = pObj;
2944 pUsagePre->pNext = pSession->pUsage;
2945 pSession->pUsage = pUsagePre;
2946 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
2947
2948 pUsagePre = NULL;
2949 }
2950 else
2951 {
2952 pObj->cUsage--;
2953 rc = VERR_TRY_AGAIN;
2954 }
2955
2956 /*
2957 * Put any unused usage record into the free list.
2958 */
2959 if (pUsagePre)
2960 {
2961 pUsagePre->pNext = pDevExt->pUsageFree;
2962 pDevExt->pUsageFree = pUsagePre;
2963 }
2964
2965 RTSpinlockRelease(pDevExt->Spinlock);
2966
2967 return rc;
2968}
2969
2970
2971/**
2972 * Decrement / destroy a reference counter record for an object.
2973 *
2974 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
2975 *
2976 * @returns IPRT status code.
2977 * @retval VINF_SUCCESS if not destroyed.
2978 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
2979 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
2980 * string builds.
2981 *
2982 * @param pvObj The identifier returned by SUPR0ObjRegister().
2983 * @param pSession The session which is referencing the object.
2984 */
2985SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
2986{
2987 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2988 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2989 int rc = VERR_INVALID_PARAMETER;
2990 PSUPDRVUSAGE pUsage;
2991 PSUPDRVUSAGE pUsagePrev;
2992
2993 /*
2994 * Validate the input.
2995 */
2996 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2997 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2998 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2999 VERR_INVALID_PARAMETER);
3000
3001 /*
3002 * Acquire the spinlock and look for the usage record.
3003 */
3004 RTSpinlockAcquire(pDevExt->Spinlock);
3005
3006 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
3007 pUsage;
3008 pUsagePrev = pUsage, pUsage = pUsage->pNext)
3009 {
3010 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3011 if (pUsage->pObj == pObj)
3012 {
3013 rc = VINF_SUCCESS;
3014 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
3015 if (pUsage->cUsage > 1)
3016 {
3017 pObj->cUsage--;
3018 pUsage->cUsage--;
3019 }
3020 else
3021 {
3022 /*
3023 * Free the session record.
3024 */
3025 if (pUsagePrev)
3026 pUsagePrev->pNext = pUsage->pNext;
3027 else
3028 pSession->pUsage = pUsage->pNext;
3029 pUsage->pNext = pDevExt->pUsageFree;
3030 pDevExt->pUsageFree = pUsage;
3031
3032 /* What about the object? */
3033 if (pObj->cUsage > 1)
3034 pObj->cUsage--;
3035 else
3036 {
3037 /*
3038 * Object is to be destroyed, unlink it.
3039 */
3040 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
3041 rc = VINF_OBJECT_DESTROYED;
3042 if (pDevExt->pObjs == pObj)
3043 pDevExt->pObjs = pObj->pNext;
3044 else
3045 {
3046 PSUPDRVOBJ pObjPrev;
3047 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
3048 if (pObjPrev->pNext == pObj)
3049 {
3050 pObjPrev->pNext = pObj->pNext;
3051 break;
3052 }
3053 Assert(pObjPrev);
3054 }
3055 }
3056 }
3057 break;
3058 }
3059 }
3060
3061 RTSpinlockRelease(pDevExt->Spinlock);
3062
3063 /*
3064 * Call the destructor and free the object if required.
3065 */
3066 if (rc == VINF_OBJECT_DESTROYED)
3067 {
3068 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
3069 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
3070 if (pObj->pfnDestructor)
3071 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
3072 RTMemFree(pObj);
3073 }
3074
3075 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
3076 return rc;
3077}
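
/*
 * Usage sketch (illustrative only, not part of the driver): a ring-0 client
 * that has obtained a shared object (pvObj, as returned by SUPR0ObjRegister)
 * brackets its use with an add-ref/release pair so the destructor cannot run
 * underneath it.  pvObj and pSession are assumed to be supplied by the caller.
 *
 * @code
 *    int rc = SUPR0ObjAddRef(pvObj, pSession);
 *    if (RT_SUCCESS(rc))
 *    {
 *        // ... use the object ...
 *        rc = SUPR0ObjRelease(pvObj, pSession);
 *        Assert(rc == VINF_SUCCESS || rc == VINF_OBJECT_DESTROYED);
 *    }
 * @endcode
 */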
3078
3079
3080/**
3081 * Verifies that the current process can access the specified object.
3082 *
3083 * @returns The following IPRT status code:
3084 * @retval VINF_SUCCESS if access was granted.
3085 * @retval VERR_PERMISSION_DENIED if denied access.
3086 * @retval VERR_INVALID_PARAMETER if invalid parameter.
3087 *
3088 * @param pvObj The identifier returned by SUPR0ObjRegister().
3089 * @param pSession The session which wishes to access the object.
3090 * @param pszObjName Object string name. This is optional and depends on the object type.
3091 *
3092 * @remark The caller is responsible for making sure the object isn't removed while
3093 * we're inside this function. If uncertain about this, just call AddRef before calling us.
3094 */
3095SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
3096{
3097 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3098 int rc;
3099
3100 /*
3101 * Validate the input.
3102 */
3103 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3104 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3105 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3106 VERR_INVALID_PARAMETER);
3107
3108 /*
3109 * Check access. (returns true if a decision has been made.)
3110 */
3111 rc = VERR_INTERNAL_ERROR;
3112 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
3113 return rc;
3114
3115 /*
3116 * Default policy is to allow the creating user (same UID) to access
3117 * the object, but nobody else.
3118 */
3119 if (pObj->CreatorUid == pSession->Uid)
3120 return VINF_SUCCESS;
3121 return VERR_PERMISSION_DENIED;
3122}
3123
3124
3125/**
3126 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionVM member.
3127 *
3128 * @returns The associated VM pointer.
3129 * @param pSession The session of the current thread.
3130 */
3131SUPR0DECL(PVM) SUPR0GetSessionVM(PSUPDRVSESSION pSession)
3132{
3133 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3134 return pSession->pSessionVM;
3135}
3136
3137
3138/**
3139 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionGVM member.
3140 *
3141 * @returns The associated GVM pointer.
3142 * @param pSession The session of the current thread.
3143 */
3144SUPR0DECL(PGVM) SUPR0GetSessionGVM(PSUPDRVSESSION pSession)
3145{
3146 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3147 return pSession->pSessionGVM;
3148}
3149
3150
3151/**
3152 * API for the VMMR0 module to work the SUPDRVSESSION::pSessionVM member.
3153 *
3154 * This will fail if there is already a VM associated with the session and pVM
3155 * isn't NULL.
3156 *
3157 * @retval VINF_SUCCESS
3158 * @retval VERR_ALREADY_EXISTS if there already is a VM associated with the
3159 * session.
3160 * @retval VERR_INVALID_PARAMETER if only one of the parameters is NULL or if
3161 * the session is invalid.
3162 *
3163 * @param pSession The session of the current thread.
3164 * @param pGVM The GVM to associate with the session. Pass NULL to
3165 * disassociate.
3166 * @param pVM The VM to associate with the session. Pass NULL to
3167 * disassociate.
3168 */
3169SUPR0DECL(int) SUPR0SetSessionVM(PSUPDRVSESSION pSession, PGVM pGVM, PVM pVM)
3170{
3171 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3172 AssertReturn((pGVM != NULL) == (pVM != NULL), VERR_INVALID_PARAMETER);
3173
3174 RTSpinlockAcquire(pSession->pDevExt->Spinlock);
3175 if (pGVM)
3176 {
3177 if (!pSession->pSessionGVM)
3178 {
3179 pSession->pSessionGVM = pGVM;
3180 pSession->pSessionVM = pVM;
3181 pSession->pFastIoCtrlVM = NULL;
3182 }
3183 else
3184 {
3185 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3186 SUPR0Printf("SUPR0SetSessionVM: Unable to associate GVM/VM %p/%p with session %p as it has %p/%p already!\n",
3187 pGVM, pVM, pSession, pSession->pSessionGVM, pSession->pSessionVM);
3188 return VERR_ALREADY_EXISTS;
3189 }
3190 }
3191 else
3192 {
3193 pSession->pSessionGVM = NULL;
3194 pSession->pSessionVM = NULL;
3195 pSession->pFastIoCtrlVM = NULL;
3196 }
3197 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3198 return VINF_SUCCESS;
3199}
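
/*
 * Usage sketch (illustrative only): how VMMR0 is expected to pair the
 * association and dissociation calls over the lifetime of a VM.  pGVM and pVM
 * are assumed to be already initialized (G)VM structures owned by the caller.
 *
 * @code
 *    int rc = SUPR0SetSessionVM(pSession, pGVM, pVM);    // associate
 *    if (RT_SUCCESS(rc))
 *    {
 *        // ... VM runs ...
 *        SUPR0SetSessionVM(pSession, NULL, NULL);        // dissociate on teardown
 *    }
 * @endcode
 */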
3200
3201
3202/** @copydoc RTLogGetDefaultInstanceEx
3203 * @remarks To allow overriding RTLogGetDefaultInstanceEx locally. */
3204SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3205{
3206 return RTLogGetDefaultInstanceEx(fFlagsAndGroup);
3207}
3208
3209
3210/** @copydoc RTLogRelGetDefaultInstanceEx
3211 * @remarks To allow overriding RTLogRelGetDefaultInstanceEx locally. */
3212SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogRelInstanceEx(uint32_t fFlagsAndGroup)
3213{
3214 return RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
3215}
3216
3217
3218/**
3219 * Lock pages.
3220 *
3221 * @returns IPRT status code.
3222 * @param pSession Session to which the locked memory should be associated.
3223 * @param pvR3 Start of the memory range to lock.
3224 * This must be page aligned.
3225 * @param cPages Number of pages to lock.
3226 * @param paPages Where to put the physical addresses of locked memory.
3227 */
3228SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
3229{
3230 int rc;
3231 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3232 const size_t cb = (size_t)cPages << PAGE_SHIFT;
3233 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
3234
3235 /*
3236 * Verify input.
3237 */
3238 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3239 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
3240 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
3241 || !pvR3)
3242 {
3243 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
3244 return VERR_INVALID_PARAMETER;
3245 }
3246
3247 /*
3248 * Let IPRT do the job.
3249 */
3250 Mem.eType = MEMREF_TYPE_LOCKED;
3251 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3252 if (RT_SUCCESS(rc))
3253 {
3254 uint32_t iPage = cPages;
3255 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
3256 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
3257
3258 while (iPage-- > 0)
3259 {
3260 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3261 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
3262 {
3263 AssertMsgFailed(("iPage=%d\n", iPage));
3264 rc = VERR_INTERNAL_ERROR;
3265 break;
3266 }
3267 }
3268 if (RT_SUCCESS(rc))
3269 rc = supdrvMemAdd(&Mem, pSession);
3270 if (RT_FAILURE(rc))
3271 {
3272 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
3273 AssertRC(rc2);
3274 }
3275 }
3276
3277 return rc;
3278}
3279
3280
3281/**
3282 * Unlocks the memory pointed to by pvR3.
3283 *
3284 * @returns IPRT status code.
3285 * @param pSession Session to which the memory was locked.
3286 * @param pvR3 Memory to unlock.
3287 */
3288SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3289{
3290 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3291 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3292 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
3293}
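
/*
 * Usage sketch (illustrative only): locking a page aligned ring-3 buffer and
 * retrieving the physical addresses of its pages.  pvR3 and cPages are
 * hypothetical inputs from the caller; the temporary array exists only for
 * the example.
 *
 * @code
 *    PRTHCPHYS paPages = (PRTHCPHYS)RTMemAlloc(cPages * sizeof(RTHCPHYS));
 *    if (paPages)
 *    {
 *        int rc = SUPR0LockMem(pSession, pvR3, cPages, paPages);
 *        if (RT_SUCCESS(rc))
 *        {
 *            // ... use paPages[0..cPages-1] for DMA / address translation ...
 *            SUPR0UnlockMem(pSession, pvR3);
 *        }
 *        RTMemFree(paPages);
 *    }
 * @endcode
 */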
3294
3295
3296/**
3297 * Allocates a chunk of page aligned memory with contiguous and fixed physical
3298 * backing.
3299 *
3300 * @returns IPRT status code.
3301 * @param pSession Session data.
3302 * @param cPages Number of pages to allocate.
3303 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
3304 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
3305 * @param pHCPhys Where to put the physical address of allocated memory.
3306 */
3307SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
3308{
3309 int rc;
3310 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3311 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
3312
3313 /*
3314 * Validate input.
3315 */
3316 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3317 if (!ppvR3 || !ppvR0 || !pHCPhys)
3318 {
3319 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
3320 pSession, ppvR0, ppvR3, pHCPhys));
3321 return VERR_INVALID_PARAMETER;
3322
3323 }
3324 if (cPages < 1 || cPages >= 256)
3325 {
3326 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3327 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3328 }
3329
3330 /*
3331 * Let IPRT do the job.
3332 */
3333 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
3334 if (RT_SUCCESS(rc))
3335 {
3336 int rc2;
3337 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3338 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3339 if (RT_SUCCESS(rc))
3340 {
3341 Mem.eType = MEMREF_TYPE_CONT;
3342 rc = supdrvMemAdd(&Mem, pSession);
3343 if (!rc)
3344 {
3345 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3346 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3347 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
3348 return 0;
3349 }
3350
3351 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3352 AssertRC(rc2);
3353 }
3354 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3355 AssertRC(rc2);
3356 }
3357
3358 return rc;
3359}
3360
3361
3362/**
3363 * Frees memory allocated using SUPR0ContAlloc().
3364 *
3365 * @returns IPRT status code.
3366 * @param pSession The session to which the memory was allocated.
3367 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3368 */
3369SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3370{
3371 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3372 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3373 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
3374}
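
/*
 * Usage sketch (illustrative only): allocating a physically contiguous buffer
 * with both ring-0 and ring-3 mappings and freeing it again; either mapping
 * address can be passed to SUPR0ContFree.
 *
 * @code
 *    RTR0PTR  pvR0;
 *    RTR3PTR  pvR3;
 *    RTHCPHYS HCPhys;
 *    int rc = SUPR0ContAlloc(pSession, 16, &pvR0, &pvR3, &HCPhys);  // 16 pages
 *    if (RT_SUCCESS(rc))
 *    {
 *        // ... hand HCPhys to a device, use pvR0 in ring-0 and pvR3 in ring-3 ...
 *        SUPR0ContFree(pSession, (RTHCUINTPTR)pvR0);
 *    }
 * @endcode
 */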
3375
3376
3377/**
3378 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
3379 *
3380 * The memory isn't zeroed.
3381 *
3382 * @returns IPRT status code.
3383 * @param pSession Session data.
3384 * @param cPages Number of pages to allocate.
3385 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
3386 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
3387 * @param paPages Where to put the physical addresses of allocated memory.
3388 */
3389SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
3390{
3391 unsigned iPage;
3392 int rc;
3393 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3394 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
3395
3396 /*
3397 * Validate input.
3398 */
3399 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3400 if (!ppvR3 || !ppvR0 || !paPages)
3401 {
3402 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
3403 pSession, ppvR3, ppvR0, paPages));
3404 return VERR_INVALID_PARAMETER;
3405
3406 }
3407 if (cPages < 1 || cPages >= 256)
3408 {
3409 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3410 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3411 }
3412
3413 /*
3414 * Let IPRT do the work.
3415 */
3416 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
3417 if (RT_SUCCESS(rc))
3418 {
3419 int rc2;
3420 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3421 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3422 if (RT_SUCCESS(rc))
3423 {
3424 Mem.eType = MEMREF_TYPE_LOW;
3425 rc = supdrvMemAdd(&Mem, pSession);
3426 if (!rc)
3427 {
3428 for (iPage = 0; iPage < cPages; iPage++)
3429 {
3430 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3431 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
3432 }
3433 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3434 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3435 return 0;
3436 }
3437
3438 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3439 AssertRC(rc2);
3440 }
3441
3442 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3443 AssertRC(rc2);
3444 }
3445
3446 return rc;
3447}
3448
3449
3450/**
3451 * Frees memory allocated using SUPR0LowAlloc().
3452 *
3453 * @returns IPRT status code.
3454 * @param pSession The session to which the memory was allocated.
3455 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3456 */
3457SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3458{
3459 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3460 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3461 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
3462}
3463
3464
3465
3466/**
3467 * Allocates a chunk of memory with both R0 and R3 mappings.
3468 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
3469 *
3470 * @returns IPRT status code.
3471 * @param pSession The session to associate the allocation with.
3472 * @param cb Number of bytes to allocate.
3473 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3474 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3475 */
3476SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
3477{
3478 int rc;
3479 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3480 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
3481
3482 /*
3483 * Validate input.
3484 */
3485 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3486 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
3487 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3488 if (cb < 1 || cb >= _4M)
3489 {
3490 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
3491 return VERR_INVALID_PARAMETER;
3492 }
3493
3494 /*
3495 * Let IPRT do the work.
3496 */
3497 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
3498 if (RT_SUCCESS(rc))
3499 {
3500 int rc2;
3501 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3502 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3503 if (RT_SUCCESS(rc))
3504 {
3505 Mem.eType = MEMREF_TYPE_MEM;
3506 rc = supdrvMemAdd(&Mem, pSession);
3507 if (!rc)
3508 {
3509 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3510 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3511 return VINF_SUCCESS;
3512 }
3513
3514 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3515 AssertRC(rc2);
3516 }
3517
3518 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3519 AssertRC(rc2);
3520 }
3521
3522 return rc;
3523}
3524
3525
3526/**
3527 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
3528 *
3529 * @returns IPRT status code.
3530 * @param pSession The session to which the memory was allocated.
3531 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3532 * @param paPages Where to store the physical addresses.
3533 */
3534SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
3535{
3536 PSUPDRVBUNDLE pBundle;
3537 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
3538
3539 /*
3540 * Validate input.
3541 */
3542 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3543 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
3544 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
3545
3546 /*
3547 * Search for the address.
3548 */
3549 RTSpinlockAcquire(pSession->Spinlock);
3550 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3551 {
3552 if (pBundle->cUsed > 0)
3553 {
3554 unsigned i;
3555 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3556 {
3557 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
3558 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3559 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3560 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3561 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
3562 )
3563 )
3564 {
3565 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
3566 size_t iPage;
3567 for (iPage = 0; iPage < cPages; iPage++)
3568 {
3569 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
3570 paPages[iPage].uReserved = 0;
3571 }
3572 RTSpinlockRelease(pSession->Spinlock);
3573 return VINF_SUCCESS;
3574 }
3575 }
3576 }
3577 }
3578 RTSpinlockRelease(pSession->Spinlock);
3579 Log(("Failed to find %p!!!\n", (void *)uPtr));
3580 return VERR_INVALID_PARAMETER;
3581}
3582
3583
3584/**
3585 * Free memory allocated by SUPR0MemAlloc().
3586 *
3587 * @returns IPRT status code.
3588 * @param pSession The session owning the allocation.
3589 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3590 */
3591SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3592{
3593 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3594 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3595 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
3596}
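
/*
 * Usage sketch (illustrative only): allocating a dual-mapped chunk, querying
 * the physical addresses of its pages and freeing it again.  The SUPPAGE
 * array is sized to match the three pages allocated here.
 *
 * @code
 *    RTR0PTR pvR0;
 *    RTR3PTR pvR3;
 *    int rc = SUPR0MemAlloc(pSession, 3 * PAGE_SIZE, &pvR0, &pvR3);
 *    if (RT_SUCCESS(rc))
 *    {
 *        SUPPAGE aPages[3];
 *        rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
 *        // ... on success aPages[i].Phys holds the page addresses ...
 *        SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
 *    }
 * @endcode
 */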
3597
3598
3599/**
3600 * Allocates a chunk of memory with a kernel and/or a user mode mapping.
3601 *
3602 * The memory is fixed and it's possible to query the physical addresses using
3603 * SUPR0MemGetPhys().
3604 *
3605 * @returns IPRT status code.
3606 * @param pSession The session to associate the allocation with.
3607 * @param cPages The number of pages to allocate.
3608 * @param fFlags Flags, reserved for the future. Must be zero.
3609 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3610 * NULL if no ring-3 mapping.
3611 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3612 * NULL if no ring-0 mapping.
3613 * @param paPages Where to store the addresses of the pages. Optional.
3614 */
3615SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
3616{
3617 int rc;
3618 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3619 LogFlow(("SUPR0PageAllocEx: pSession=%p cPages=%d ppvR3=%p\n", pSession, cPages, ppvR3));
3620
3621 /*
3622 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3623 */
3624 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3625 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
3626 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3627 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
3628 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3629 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
3630 {
3631 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than %uMB (VBOX_MAX_ALLOC_PAGE_COUNT pages).\n", cPages, VBOX_MAX_ALLOC_PAGE_COUNT * (_1M / _4K)));
3632 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3633 }
3634
3635 /*
3636 * Let IPRT do the work.
3637 */
3638 if (ppvR0)
3639 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
3640 else
3641 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
3642 if (RT_SUCCESS(rc))
3643 {
3644 int rc2;
3645 if (ppvR3)
3646 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3647 else
3648 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
3649 if (RT_SUCCESS(rc))
3650 {
3651 Mem.eType = MEMREF_TYPE_PAGE;
3652 rc = supdrvMemAdd(&Mem, pSession);
3653 if (!rc)
3654 {
3655 if (ppvR3)
3656 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3657 if (ppvR0)
3658 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3659 if (paPages)
3660 {
3661 uint32_t iPage = cPages;
3662 while (iPage-- > 0)
3663 {
3664 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
3665 Assert(paPages[iPage] != NIL_RTHCPHYS);
3666 }
3667 }
3668 return VINF_SUCCESS;
3669 }
3670
3671 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3672 AssertRC(rc2);
3673 }
3674
3675 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3676 AssertRC(rc2);
3677 }
3678 return rc;
3679}
3680
3681
3682/**
3683 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
3684 * space.
3685 *
3686 * @returns IPRT status code.
3687 * @param pSession The session to associate the allocation with.
3688 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3689 * @param offSub Where to start mapping. Must be page aligned.
3690 * @param cbSub How much to map. Must be page aligned.
3691 * @param fFlags Flags, MBZ.
3692 * @param ppvR0 Where to return the address of the ring-0 mapping on
3693 * success.
3694 */
3695SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
3696 uint32_t fFlags, PRTR0PTR ppvR0)
3697{
3698 int rc;
3699 PSUPDRVBUNDLE pBundle;
3700 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
3701 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
3702
3703 /*
3704 * Validate input.
3705 */
3706 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3707 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3708 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3709 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3710 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3711 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3712
3713 /*
3714 * Find the memory object.
3715 */
3716 RTSpinlockAcquire(pSession->Spinlock);
3717 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3718 {
3719 if (pBundle->cUsed > 0)
3720 {
3721 unsigned i;
3722 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3723 {
3724 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3725 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3726 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3727 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
3728 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
3729 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3730 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
3731 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
3732 {
3733 hMemObj = pBundle->aMem[i].MemObj;
3734 break;
3735 }
3736 }
3737 }
3738 }
3739 RTSpinlockRelease(pSession->Spinlock);
3740
3741 rc = VERR_INVALID_PARAMETER;
3742 if (hMemObj != NIL_RTR0MEMOBJ)
3743 {
3744 /*
3745 * Do some further input validations before calling IPRT.
3746 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
3747 */
3748 size_t cbMemObj = RTR0MemObjSize(hMemObj);
3749 if ( offSub < cbMemObj
3750 && cbSub <= cbMemObj
3751 && offSub + cbSub <= cbMemObj)
3752 {
3753 RTR0MEMOBJ hMapObj;
3754 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
3755 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
3756 if (RT_SUCCESS(rc))
3757 *ppvR0 = RTR0MemObjAddress(hMapObj);
3758 }
3759 else
3760 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3761
3762 }
3763 return rc;
3764}
3765
3766
3767/**
3768 * Changes the page level protection of one or more pages previously allocated
3769 * by SUPR0PageAllocEx.
3770 *
3771 * @returns IPRT status code.
3772 * @param pSession The session to associate the allocation with.
3773 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3774 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
3775 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
3776 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
3777 * @param offSub Where to start changing. Must be page aligned.
3778 * @param cbSub How much to change. Must be page aligned.
3779 * @param fProt The new page level protection, see RTMEM_PROT_*.
3780 */
3781SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
3782{
3783 int rc;
3784 PSUPDRVBUNDLE pBundle;
3785 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
3786 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
3787 LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt=%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
3788
3789 /*
3790 * Validate input.
3791 */
3792 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3793 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
3794 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3795 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3796 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3797
3798 /*
3799 * Find the memory object.
3800 */
3801 RTSpinlockAcquire(pSession->Spinlock);
3802 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3803 {
3804 if (pBundle->cUsed > 0)
3805 {
3806 unsigned i;
3807 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3808 {
3809 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3810 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3811 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3812 || pvR3 == NIL_RTR3PTR)
3813 && ( pvR0 == NIL_RTR0PTR
3814 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
3815 && ( pvR3 == NIL_RTR3PTR
3816 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
3817 {
3818 if (pvR0 != NIL_RTR0PTR)
3819 hMemObjR0 = pBundle->aMem[i].MemObj;
3820 if (pvR3 != NIL_RTR3PTR)
3821 hMemObjR3 = pBundle->aMem[i].MapObjR3;
3822 break;
3823 }
3824 }
3825 }
3826 }
3827 RTSpinlockRelease(pSession->Spinlock);
3828
3829 rc = VERR_INVALID_PARAMETER;
3830 if ( hMemObjR0 != NIL_RTR0MEMOBJ
3831 || hMemObjR3 != NIL_RTR0MEMOBJ)
3832 {
3833 /*
3834 * Do some further input validations before calling IPRT.
3835 */
3836 size_t cbMemObj = hMemObjR0 != NIL_RTR0MEMOBJ ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
3837 if ( offSub < cbMemObj
3838 && cbSub <= cbMemObj
3839 && offSub + cbSub <= cbMemObj)
3840 {
3841 rc = VINF_SUCCESS;
3842 if (hMemObjR3 != NIL_RTR0MEMOBJ)
3843 rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
3844 if (hMemObjR0 != NIL_RTR0MEMOBJ && RT_SUCCESS(rc))
3845 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
3846 }
3847 else
3848 SUPR0Printf("SUPR0PageProtect: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3849
3850 }
3851 return rc;
3852
3853}
3854
3855
3856/**
3857 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
3858 *
3859 * @returns IPRT status code.
3860 * @param pSession The session owning the allocation.
3861 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
3862 * SUPR0PageAllocEx().
3863 */
3864SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3865{
3866 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3867 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3868 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
3869}
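
/*
 * Usage sketch (illustrative only): a typical allocate / protect / free
 * sequence with the SUPR0Page* APIs above.  The page count and the read-only
 * protection of the first page are arbitrary example values.
 *
 * @code
 *    RTR3PTR pvR3;
 *    RTR0PTR pvR0;
 *    int rc = SUPR0PageAllocEx(pSession, 4, 0, &pvR3, &pvR0, NULL);  // 4 pages, both mappings
 *    if (RT_SUCCESS(rc))
 *    {
 *        // Make the first page read-only in both the ring-3 and ring-0 mappings.
 *        rc = SUPR0PageProtect(pSession, pvR3, pvR0, 0, PAGE_SIZE, RTMEM_PROT_READ);
 *        // ...
 *        SUPR0PageFree(pSession, pvR3);  // also tears down the mappings
 *    }
 * @endcode
 */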
3870
3871
3872/**
3873 * Reports a bad context; currently that means EFLAGS.AC is 0 instead of 1.
3874 *
3875 * @param pDevExt The device extension.
3876 * @param pszFile The source file where the caller detected the bad
3877 * context.
3878 * @param uLine The line number in @a pszFile.
3879 * @param pszExtra Optional additional message to give further hints.
3880 */
3881void VBOXCALL supdrvBadContext(PSUPDRVDEVEXT pDevExt, const char *pszFile, uint32_t uLine, const char *pszExtra)
3882{
3883 uint32_t cCalls;
3884
3885 /*
3886 * Shorten the filename before displaying the message.
3887 */
3888 for (;;)
3889 {
3890 const char *pszTmp = strchr(pszFile, '/');
3891 if (!pszTmp)
3892 pszTmp = strchr(pszFile, '\\');
3893 if (!pszTmp)
3894 break;
3895 pszFile = pszTmp + 1;
3896 }
3897 if (RT_VALID_PTR(pszExtra) && *pszExtra)
3898 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s: %s\n", uLine, pszFile, pszExtra);
3899 else
3900 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s!\n", uLine, pszFile);
3901
3902 /*
3903 * Record the incident so that we stand a chance of blocking I/O controls
3904 * before panicking the system.
3905 */
3906 cCalls = ASMAtomicIncU32(&pDevExt->cBadContextCalls);
3907 if (cCalls > UINT32_MAX - _1K)
3908 ASMAtomicWriteU32(&pDevExt->cBadContextCalls, UINT32_MAX - _1K);
3909}
3910
3911
3912/**
3913 * Reports a bad context; currently that means EFLAGS.AC is 0 instead of 1.
3914 *
3915 * @param pSession The session of the caller.
3916 * @param pszFile The source file where the caller detected the bad
3917 * context.
3918 * @param uLine The line number in @a pszFile.
3919 * @param pszExtra Optional additional message to give further hints.
3920 */
3921SUPR0DECL(void) SUPR0BadContext(PSUPDRVSESSION pSession, const char *pszFile, uint32_t uLine, const char *pszExtra)
3922{
3923 PSUPDRVDEVEXT pDevExt;
3924
3925 AssertReturnVoid(SUP_IS_SESSION_VALID(pSession));
3926 pDevExt = pSession->pDevExt;
3927
3928 supdrvBadContext(pDevExt, pszFile, uLine, pszExtra);
3929}
3930
3931
3932/**
3933 * Gets the paging mode of the current CPU.
3934 *
3935 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
3936 */
3937SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
3938{
3939 SUPPAGINGMODE enmMode;
3940
3941 RTR0UINTREG cr0 = ASMGetCR0();
3942 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3943 enmMode = SUPPAGINGMODE_INVALID;
3944 else
3945 {
3946 RTR0UINTREG cr4 = ASMGetCR4();
3947 uint32_t fNXEPlusLMA = 0;
3948 if (cr4 & X86_CR4_PAE)
3949 {
3950 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
3951 if (fExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
3952 {
3953 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
3954 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
3955 fNXEPlusLMA |= RT_BIT(0);
3956 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
3957 fNXEPlusLMA |= RT_BIT(1);
3958 }
3959 }
3960
3961 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
3962 {
3963 case 0:
3964 enmMode = SUPPAGINGMODE_32_BIT;
3965 break;
3966
3967 case X86_CR4_PGE:
3968 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
3969 break;
3970
3971 case X86_CR4_PAE:
3972 enmMode = SUPPAGINGMODE_PAE;
3973 break;
3974
3975 case X86_CR4_PAE | RT_BIT(0):
3976 enmMode = SUPPAGINGMODE_PAE_NX;
3977 break;
3978
3979 case X86_CR4_PAE | X86_CR4_PGE:
3980 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
3981 break;
3982
3983 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
3984 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
3985 break;
3986
3987 case RT_BIT(1) | X86_CR4_PAE:
3988 enmMode = SUPPAGINGMODE_AMD64;
3989 break;
3990
3991 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
3992 enmMode = SUPPAGINGMODE_AMD64_NX;
3993 break;
3994
3995 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
3996 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
3997 break;
3998
3999 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4000 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4001 break;
4002
4003 default:
4004 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4005 enmMode = SUPPAGINGMODE_INVALID;
4006 break;
4007 }
4008 }
4009 return enmMode;
4010}
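
/*
 * Usage sketch (illustrative only): callers typically just reject the invalid
 * mode and otherwise pick a code path based on the result.
 *
 * @code
 *    SUPPAGINGMODE const enmMode = SUPR0GetPagingMode();
 *    if (enmMode == SUPPAGINGMODE_INVALID)
 *        return VERR_INTERNAL_ERROR;
 *    // ... select page table handling based on enmMode ...
 * @endcode
 */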
4011
4012
4013/**
4014 * Change CR4 and take care of the kernel CR4 shadow if applicable.
4015 *
4016 * CR4 shadow handling is required for Linux >= 4.0. Calling this function
4017 * instead of ASMSetCR4() is only necessary for semi-permanent CR4 changes
4018 * for code with interrupts enabled.
4019 *
4020 * @returns the old CR4 value.
4021 *
4022 * @param fOrMask bits to be set in CR4.
4023 * @param fAndMask Mask of CR4 bits to keep; bits not set in the mask are cleared.
4024 *
4025 * @remarks Must be called with preemption/interrupts disabled.
4026 */
4027SUPR0DECL(RTCCUINTREG) SUPR0ChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
4028{
4029#ifdef RT_OS_LINUX
4030 return supdrvOSChangeCR4(fOrMask, fAndMask);
4031#else
4032 RTCCUINTREG uOld = ASMGetCR4();
4033 RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
4034 if (uNew != uOld)
4035 ASMSetCR4(uNew);
4036 return uOld;
4037#endif
4038}
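
/*
 * Usage sketch (illustrative only): semi-permanently setting CR4.VMXE while
 * keeping any kernel CR4 shadow in sync.  Must run with preemption/interrupts
 * disabled as noted above; the actual VMXON sequence is omitted.
 *
 * @code
 *    RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, ~(RTCCUINTREG)0);
 *    // ... execute VMXON and related setup here ...
 *    if (!(uOldCr4 & X86_CR4_VMXE))
 *        SUPR0ChangeCR4(0, ~(RTCCUINTREG)X86_CR4_VMXE);  // clear it again if we set it
 * @endcode
 */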
4039
4040
4041/**
4042 * Enables or disables hardware virtualization extensions using native OS APIs.
4043 *
4044 * @returns VBox status code.
4045 * @retval VINF_SUCCESS on success.
4046 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4047 *
4048 * @param fEnable Whether to enable or disable.
4049 */
4050SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4051{
4052#ifdef RT_OS_DARWIN
4053 return supdrvOSEnableVTx(fEnable);
4054#else
4055 RT_NOREF1(fEnable);
4056 return VERR_NOT_SUPPORTED;
4057#endif
4058}
4059
4060
4061/**
4062 * Suspends hardware virtualization extensions using the native OS API.
4063 *
4064 * This is called prior to entering raw-mode context.
4065 *
4066 * @returns @c true if suspended, @c false if not.
4067 */
4068SUPR0DECL(bool) SUPR0SuspendVTxOnCpu(void)
4069{
4070#ifdef RT_OS_DARWIN
4071 return supdrvOSSuspendVTxOnCpu();
4072#else
4073 return false;
4074#endif
4075}
4076
4077
4078/**
4079 * Resumes hardware virtualization extensions using the native OS API.
4080 *
4081 * This is called after returning from raw-mode context.
4082 *
4083 * @param fSuspended The return value of SUPR0SuspendVTxOnCpu.
4084 */
4085SUPR0DECL(void) SUPR0ResumeVTxOnCpu(bool fSuspended)
4086{
4087#ifdef RT_OS_DARWIN
4088 supdrvOSResumeVTxOnCpu(fSuspended);
4089#else
4090 RT_NOREF1(fSuspended);
4091 Assert(!fSuspended);
4092#endif
4093}
4094
4095
4096SUPR0DECL(int) SUPR0GetCurrentGdtRw(RTHCUINTPTR *pGdtRw)
4097{
4098#ifdef RT_OS_LINUX
4099 return supdrvOSGetCurrentGdtRw(pGdtRw);
4100#else
4101 NOREF(pGdtRw);
4102 return VERR_NOT_IMPLEMENTED;
4103#endif
4104}
4105
4106
4107/**
4108 * Checks if raw-mode is usable on this system.
4109 *
4110 * The reasons why raw-mode isn't safe to use are host specific. For example on
4111 * Windows the Hyper-V root partition may perhaps not allow important bits in
4112 * CR4 to be changed, which would make it impossible to do a world switch.
4113 *
4114 * @returns VBox status code.
4115 */
4116SUPR0DECL(int) SUPR0GetRawModeUsability(void)
4117{
4118#ifdef RT_OS_WINDOWS
4119 return supdrvOSGetRawModeUsability();
4120#else
4121 return VINF_SUCCESS;
4122#endif
4123}
4124
4125
4126/**
4127 * Gets AMD-V and VT-x support for the calling CPU.
4128 *
4129 * @returns VBox status code.
4130 * @param pfCaps Where to store whether VT-x (SUPVTCAPS_VT_X) or AMD-V
4131 * (SUPVTCAPS_AMD_V) is supported.
4132 */
4133SUPR0DECL(int) SUPR0GetVTSupport(uint32_t *pfCaps)
4134{
4135 Assert(pfCaps);
4136 *pfCaps = 0;
4137
4138 /* Check if the CPU even supports CPUID (extremely ancient CPUs). */
4139 if (ASMHasCpuId())
4140 {
4141 /* Check the range of standard CPUID leafs. */
4142 uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
4143 ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
4144 if (ASMIsValidStdRange(uMaxLeaf))
4145 {
4146 /* Query the standard CPUID leaf. */
4147 uint32_t fFeatEcx, fFeatEdx, uDummy;
4148 ASMCpuId(1, &uDummy, &uDummy, &fFeatEcx, &fFeatEdx);
4149
4150 /* Check if the vendor is Intel (or compatible). */
4151 if ( ASMIsIntelCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4152 || ASMIsViaCentaurCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4153 || ASMIsShanghaiCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4154 {
4155 /* Check VT-x support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4156 if ( (fFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
4157 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4158 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4159 {
4160 *pfCaps = SUPVTCAPS_VT_X;
4161 return VINF_SUCCESS;
4162 }
4163 return VERR_VMX_NO_VMX;
4164 }
4165
4166 /* Check if the vendor is AMD (or compatible). */
4167 if (ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4168 {
4169 uint32_t fExtFeatEcx, uExtMaxId;
4170 ASMCpuId(0x80000000, &uExtMaxId, &uDummy, &uDummy, &uDummy);
4171 ASMCpuId(0x80000001, &uDummy, &uDummy, &fExtFeatEcx, &uDummy);
4172
4173 /* Check AMD-V support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4174 if ( ASMIsValidExtRange(uExtMaxId)
4175 && uExtMaxId >= 0x8000000a
4176 && (fExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
4177 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4178 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4179 {
4180 *pfCaps = SUPVTCAPS_AMD_V;
4181 return VINF_SUCCESS;
4182 }
4183 return VERR_SVM_NO_SVM;
4184 }
4185 }
4186 }
4187 return VERR_UNSUPPORTED_CPU;
4188}
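
/*
 * Usage sketch (illustrative only): interpreting the capability mask.  The
 * follow-up usability checks mentioned in the comments are the
 * SUPR0Get*Usability functions below and must be called with preemption
 * disabled.
 *
 * @code
 *    uint32_t fCaps = 0;
 *    int rc = SUPR0GetVTSupport(&fCaps);
 *    if (RT_SUCCESS(rc))
 *    {
 *        if (fCaps & SUPVTCAPS_VT_X)
 *        {
 *            // ... follow up with SUPR0GetVmxUsability() ...
 *        }
 *        else if (fCaps & SUPVTCAPS_AMD_V)
 *        {
 *            // ... follow up with SUPR0GetSvmUsability() ...
 *        }
 *    }
 * @endcode
 */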
4189
4190
4191/**
4192 * Checks if Intel VT-x feature is usable on this CPU.
4193 *
4194 * @returns VBox status code.
4195 * @param pfIsSmxModeAmbiguous Where to return whether the SMX mode causes
4196 * ambiguity that makes us unsure whether we
4197 * really can use VT-x or not.
4198 *
4199 * @remarks Must be called with preemption disabled.
4200 * The caller is also expected to check that the CPU is an Intel (or
4201 * VIA/Shanghai) CPU -and- that it supports VT-x. Otherwise, this
4202 * function might throw a \#GP fault as it tries to read/write MSRs
4203 * that may not be present!
4204 */
4205SUPR0DECL(int) SUPR0GetVmxUsability(bool *pfIsSmxModeAmbiguous)
4206{
4207 uint64_t fFeatMsr;
4208 bool fMaybeSmxMode;
4209 bool fMsrLocked;
4210 bool fSmxVmxAllowed;
4211 bool fVmxAllowed;
4212 bool fIsSmxModeAmbiguous;
4213 int rc;
4214
4215 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4216
4217 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4218 fMaybeSmxMode = RT_BOOL(ASMGetCR4() & X86_CR4_SMXE);
4219 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4220 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4221 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4222 fIsSmxModeAmbiguous = false;
4223 rc = VERR_INTERNAL_ERROR_5;
4224
4225 /* Check if the LOCK bit is set but excludes the required VMXON bit. */
4226 if (fMsrLocked)
4227 {
4228 if (fVmxAllowed && fSmxVmxAllowed)
4229 rc = VINF_SUCCESS;
4230 else if (!fVmxAllowed && !fSmxVmxAllowed)
4231 rc = VERR_VMX_MSR_ALL_VMX_DISABLED;
4232 else if (!fMaybeSmxMode)
4233 {
4234 if (fVmxAllowed)
4235 rc = VINF_SUCCESS;
4236 else
4237 rc = VERR_VMX_MSR_VMX_DISABLED;
4238 }
4239 else
4240 {
4241 /*
4242 * CR4.SMXE is set but this doesn't mean the CPU is necessarily in SMX mode. We shall assume
4243 * that it is -not- and that it is a stupid BIOS/OS setting CR4.SMXE for no good reason.
4244 * See @bugref{6873}.
4245 */
4246 Assert(fMaybeSmxMode == true);
4247 fIsSmxModeAmbiguous = true;
4248 rc = VINF_SUCCESS;
4249 }
4250 }
4251 else
4252 {
4253 /*
4254 * MSR is not yet locked; we can change it ourselves here. Once the lock bit is set,
4255 * this MSR can no longer be modified.
4256 *
4257 * Set both the VMX and SMX_VMX bits (if supported) as we can't determine SMX mode
4258 * accurately. See @bugref{6873}.
4259 *
4260 * We need to check for SMX hardware support here, before writing the MSR as
4261 * otherwise we will #GP fault on CPUs that do not support it. Callers do not check
4262 * for it.
4263 */
4264 uint32_t fFeaturesECX, uDummy;
4265#ifdef VBOX_STRICT
4266 /* Callers should have verified these at some point. */
4267 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4268 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4269 Assert(ASMIsValidStdRange(uMaxId));
4270 Assert( ASMIsIntelCpuEx( uVendorEBX, uVendorECX, uVendorEDX)
4271 || ASMIsViaCentaurCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4272 || ASMIsShanghaiCpuEx( uVendorEBX, uVendorECX, uVendorEDX));
4273#endif
4274 ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
4275 bool fSmxVmxHwSupport = false;
4276 if ( (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
4277 && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
4278 fSmxVmxHwSupport = true;
4279
4280 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
4281 | MSR_IA32_FEATURE_CONTROL_VMXON;
4282 if (fSmxVmxHwSupport)
4283 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;
4284
4285 /*
4286 * Commit.
4287 */
4288 ASMWrMsr(MSR_IA32_FEATURE_CONTROL, fFeatMsr);
4289
4290 /*
4291 * Verify.
4292 */
4293 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4294 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4295 if (fMsrLocked)
4296 {
4297 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4298 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4299 if ( fVmxAllowed
4300 && ( !fSmxVmxHwSupport
4301 || fSmxVmxAllowed))
4302 rc = VINF_SUCCESS;
4303 else
4304 rc = !fSmxVmxHwSupport ? VERR_VMX_MSR_VMX_ENABLE_FAILED : VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED;
4305 }
4306 else
4307 rc = VERR_VMX_MSR_LOCKING_FAILED;
4308 }
4309
4310 if (pfIsSmxModeAmbiguous)
4311 *pfIsSmxModeAmbiguous = fIsSmxModeAmbiguous;
4312
4313 return rc;
4314}
4315
4316
4317/**
4318 * Checks if AMD-V SVM feature is usable on this CPU.
4319 *
4320 * @returns VBox status code.
4321 * @param fInitSvm If usable, try to initialize SVM on this CPU.
4322 *
4323 * @remarks Must be called with preemption disabled.
4324 */
4325SUPR0DECL(int) SUPR0GetSvmUsability(bool fInitSvm)
4326{
4327 int rc;
4328 uint64_t fVmCr;
4329 uint64_t fEfer;
4330
4331 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4332 fVmCr = ASMRdMsr(MSR_K8_VM_CR);
4333 if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
4334 {
4335 rc = VINF_SUCCESS;
4336 if (fInitSvm)
4337 {
4338 /* Turn on SVM in the EFER MSR. */
4339 fEfer = ASMRdMsr(MSR_K6_EFER);
4340 if (fEfer & MSR_K6_EFER_SVME)
4341 rc = VERR_SVM_IN_USE;
4342 else
4343 {
4344 ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
4345
4346 /* Paranoia. */
4347 fEfer = ASMRdMsr(MSR_K6_EFER);
4348 if (fEfer & MSR_K6_EFER_SVME)
4349 {
4350 /* Restore previous value. */
4351 ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
4352 }
4353 else
4354 rc = VERR_SVM_ILLEGAL_EFER_MSR;
4355 }
4356 }
4357 }
4358 else
4359 rc = VERR_SVM_DISABLED;
4360 return rc;
4361}
4362
4363
4364/**
4365 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4366 *
4367 * @returns VBox status code.
4368 * @retval VERR_VMX_NO_VMX
4369 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4370 * @retval VERR_VMX_MSR_VMX_DISABLED
4371 * @retval VERR_VMX_MSR_LOCKING_FAILED
4372 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4373 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4374 * @retval VERR_SVM_NO_SVM
4375 * @retval VERR_SVM_DISABLED
4376 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4377 * (centaur)/Shanghai CPU.
4378 *
4379 * @param pfCaps Where to store the capabilities.
4380 */
4381int VBOXCALL supdrvQueryVTCapsInternal(uint32_t *pfCaps)
4382{
4383 int rc = VERR_UNSUPPORTED_CPU;
4384 bool fIsSmxModeAmbiguous = false;
4385 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4386
4387 /*
4388 * Input validation.
4389 */
4390 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4391 *pfCaps = 0;
4392
4393 /* We may modify MSRs and re-read them, disable preemption so we make sure we don't migrate CPUs. */
4394 RTThreadPreemptDisable(&PreemptState);
4395
4396 /* Check if VT-x/AMD-V is supported. */
4397 rc = SUPR0GetVTSupport(pfCaps);
4398 if (RT_SUCCESS(rc))
4399 {
4400 /* Check if VT-x is supported. */
4401 if (*pfCaps & SUPVTCAPS_VT_X)
4402 {
4403 /* Check if VT-x is usable. */
4404 rc = SUPR0GetVmxUsability(&fIsSmxModeAmbiguous);
4405 if (RT_SUCCESS(rc))
4406 {
4407 /* Query some basic VT-x capabilities (mainly required by our GUI). */
4408 VMXCTLSMSR vtCaps;
4409 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4410 if (vtCaps.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4411 {
4412 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4413 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_EPT)
4414 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4415 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
4416 *pfCaps |= SUPVTCAPS_VTX_UNRESTRICTED_GUEST;
4417 }
4418 }
4419 }
4420 /* Check if AMD-V is supported. */
4421 else if (*pfCaps & SUPVTCAPS_AMD_V)
4422 {
4423 /* Check if SVM is usable. */
4424 rc = SUPR0GetSvmUsability(false /* fInitSvm */);
4425 if (RT_SUCCESS(rc))
4426 {
4427 /* Query some basic AMD-V capabilities (mainly required by our GUI). */
4428 uint32_t uDummy, fSvmFeatures;
4429 ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
4430 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
4431 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4432 }
4433 }
4434 }
4435
4436 /* Restore preemption. */
4437 RTThreadPreemptRestore(&PreemptState);
4438
4439 /* After restoring preemption, if we may be in SMX mode, print a warning as it's difficult to debug such problems. */
4440 if (fIsSmxModeAmbiguous)
4441 SUPR0Printf(("WARNING! CR4 hints SMX mode but your CPU is too secretive. Proceeding anyway... We wish you good luck!\n"));
4442
4443 return rc;
4444}
4445
4446
4447/**
4448 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4449 *
4450 * @returns VBox status code.
4451 * @retval VERR_VMX_NO_VMX
4452 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4453 * @retval VERR_VMX_MSR_VMX_DISABLED
4454 * @retval VERR_VMX_MSR_LOCKING_FAILED
4455 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4456 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4457 * @retval VERR_SVM_NO_SVM
4458 * @retval VERR_SVM_DISABLED
4459 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4460 * (centaur)/Shanghai CPU.
4461 *
4462 * @param pSession The session handle.
4463 * @param pfCaps Where to store the capabilities.
4464 */
4465SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
4466{
4467 /*
4468 * Input validation.
4469 */
4470 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4471 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4472
4473 /*
4474 * Call common worker.
4475 */
4476 return supdrvQueryVTCapsInternal(pfCaps);
4477}
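
/*
 * Usage sketch (illustrative only): querying the combined capabilities on
 * behalf of a ring-3 client and deriving a couple of feature flags from them.
 *
 * @code
 *    uint32_t fCaps = 0;
 *    int rc = SUPR0QueryVTCaps(pSession, &fCaps);
 *    if (RT_SUCCESS(rc))
 *    {
 *        bool const fNestedPaging = RT_BOOL(fCaps & SUPVTCAPS_NESTED_PAGING);
 *        bool const fVtx          = RT_BOOL(fCaps & SUPVTCAPS_VT_X);
 *        // ... report fVtx / fNestedPaging to the caller ...
 *    }
 * @endcode
 */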
4478
4479
4480/**
4481 * Queries the CPU microcode revision.
4482 *
4483 * @returns VBox status code.
4484 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4485 * readable microcode rev.
4486 *
4487 * @param puRevision Where to store the microcode revision.
4488 */
4489static int VBOXCALL supdrvQueryUcodeRev(uint32_t *puRevision)
4490{
4491 int rc = VERR_UNSUPPORTED_CPU;
4492 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4493
4494 /*
4495 * Input validation.
4496 */
4497 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4498
4499 *puRevision = 0;
4500
4501 /* Disable preemption so we make sure we don't migrate CPUs, just in case. */
4502 /* NB: We assume that there aren't mismatched microcode revs in the system. */
4503 RTThreadPreemptDisable(&PreemptState);
4504
4505 if (ASMHasCpuId())
4506 {
4507 uint32_t uDummy, uTFMSEAX;
4508 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4509
4510 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4511 ASMCpuId(1, &uTFMSEAX, &uDummy, &uDummy, &uDummy);
4512
4513 if (ASMIsValidStdRange(uMaxId))
4514 {
4515 uint64_t uRevMsr;
4516 if (ASMIsIntelCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4517 {
4518 /* Architectural MSR available on Pentium Pro and later. */
4519 if (ASMGetCpuFamily(uTFMSEAX) >= 6)
4520 {
4521 /* Revision is in the high dword. */
4522 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
4523 *puRevision = RT_HIDWORD(uRevMsr);
4524 rc = VINF_SUCCESS;
4525 }
4526 }
4527 else if (ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4528 {
4529 /* Not well documented, but at least all AMD64 CPUs support this. */
4530 if (ASMGetCpuFamily(uTFMSEAX) >= 15)
4531 {
4532 /* Revision is in the low dword. */
4533 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID); /* Same MSR as Intel. */
4534 *puRevision = RT_LODWORD(uRevMsr);
4535 rc = VINF_SUCCESS;
4536 }
4537 }
4538 }
4539 }
4540
4541 RTThreadPreemptRestore(&PreemptState);
4542
4543 return rc;
4544}
4545
4546/**
4547 * Queries the CPU microcode revision.
4548 *
4549 * @returns VBox status code.
4550 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4551 * readable microcode rev.
4552 *
4553 * @param pSession The session handle.
4554 * @param puRevision Where to store the microcode revision.
4555 */
4556SUPR0DECL(int) SUPR0QueryUcodeRev(PSUPDRVSESSION pSession, uint32_t *puRevision)
4557{
4558 /*
4559 * Input validation.
4560 */
4561 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4562 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4563
4564 /*
4565 * Call common worker.
4566 */
4567 return supdrvQueryUcodeRev(puRevision);
4568}
4569
4570
4571/**
4572 * Gets hardware-virtualization MSRs of the calling CPU.
4573 *
4574 * @returns VBox status code.
4575 * @param pMsrs Where to store the hardware-virtualization MSRs.
4576 * @param fCaps Hardware virtualization capabilities (SUPVTCAPS_XXX). Pass 0
4577 * to explicitly check for the presence of VT-x/AMD-V before
4578 * querying MSRs.
4579 * @param fForce Force querying of MSRs from the hardware.
4580 */
4581SUPR0DECL(int) SUPR0GetHwvirtMsrs(PSUPHWVIRTMSRS pMsrs, uint32_t fCaps, bool fForce)
4582{
4583 int rc;
4584 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4585
4586 /*
4587 * Input validation.
4588 */
4589 AssertPtrReturn(pMsrs, VERR_INVALID_POINTER);
4590
4591 /*
4592 * Disable preemption so we make sure we don't migrate CPUs and because
4593 * we access global data.
4594 */
4595 RTThreadPreemptDisable(&PreemptState);
4596
4597 /*
4598 * Querying MSRs from hardware can be expensive (exponentially more so
4599 * in a nested-virtualization scenario if they happen to cause VM-exits).
4600 *
4601 * So, if the caller does not force re-querying of MSRs and we have them
4602 * already cached, simply copy the cached MSRs and we're done.
4603 */
4604 if ( !fForce
4605 && ASMAtomicReadBool(&g_fHwvirtMsrsCached))
4606 {
4607 memcpy(pMsrs, &g_HwvirtMsrs, sizeof(*pMsrs));
4608 RTThreadPreemptRestore(&PreemptState);
4609 return VINF_SUCCESS;
4610 }
4611
4612 /*
4613 * Query the MSRs from hardware, since it's either the first call since
4614 * driver load or the caller has forced re-querying of the MSRs.
4615 */
4616 RT_ZERO(*pMsrs);
4617
4618 /* If the caller claims VT-x/AMD-V is supported, don't need to recheck it. */
4619 if (!(fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V)))
4620 rc = SUPR0GetVTSupport(&fCaps);
4621 else
4622 rc = VINF_SUCCESS;
4623 if (RT_SUCCESS(rc))
4624 {
4625 if (fCaps & SUPVTCAPS_VT_X)
4626 {
4627 g_HwvirtMsrs.u.vmx.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4628 g_HwvirtMsrs.u.vmx.u64Basic = ASMRdMsr(MSR_IA32_VMX_BASIC);
4629 g_HwvirtMsrs.u.vmx.u64PinCtls = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
4630 g_HwvirtMsrs.u.vmx.u64ProcCtls = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4631 g_HwvirtMsrs.u.vmx.u64ExitCtls = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
4632 g_HwvirtMsrs.u.vmx.u64EntryCtls = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
4633 g_HwvirtMsrs.u.vmx.u64Misc = ASMRdMsr(MSR_IA32_VMX_MISC);
4634 g_HwvirtMsrs.u.vmx.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
4635 g_HwvirtMsrs.u.vmx.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
4636 g_HwvirtMsrs.u.vmx.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
4637 g_HwvirtMsrs.u.vmx.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
4638 g_HwvirtMsrs.u.vmx.u64VmcsEnum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
4639
4640 if (RT_BF_GET(g_HwvirtMsrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
4641 {
4642 g_HwvirtMsrs.u.vmx.u64TruePinCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
4643 g_HwvirtMsrs.u.vmx.u64TrueProcCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
4644 g_HwvirtMsrs.u.vmx.u64TrueEntryCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
4645 g_HwvirtMsrs.u.vmx.u64TrueExitCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
4646 }
4647
4648 uint32_t const fProcCtlsAllowed1 = RT_HI_U32(g_HwvirtMsrs.u.vmx.u64ProcCtls);
4649 if (fProcCtlsAllowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4650 {
4651 g_HwvirtMsrs.u.vmx.u64ProcCtls2 = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4652
4653 uint32_t const fProcCtls2Allowed1 = RT_HI_U32(g_HwvirtMsrs.u.vmx.u64ProcCtls2);
4654 if (fProcCtls2Allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
4655 g_HwvirtMsrs.u.vmx.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
4656
4657 if (fProcCtls2Allowed1 & VMX_PROC_CTLS2_VMFUNC)
4658 g_HwvirtMsrs.u.vmx.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
4659 }
4660 ASMAtomicWriteBool(&g_fHwvirtMsrsCached, true);
4661 }
4662 else if (fCaps & SUPVTCAPS_AMD_V)
4663 {
4664 g_HwvirtMsrs.u.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
4665 ASMAtomicWriteBool(&g_fHwvirtMsrsCached, true);
4666 }
4667 else
4668 {
4669 RTThreadPreemptRestore(&PreemptState);
4670 AssertMsgFailedReturn(("SUPR0GetVTSupport returns success but neither VT-x nor AMD-V reported!\n"),
4671 VERR_INTERNAL_ERROR_2);
4672 }
4673
4674 /*
4675 * We have successfully populated the cache, copy the MSRs to the caller.
4676 */
4677 memcpy(pMsrs, &g_HwvirtMsrs, sizeof(*pMsrs));
4678 }
4679
4680 RTThreadPreemptRestore(&PreemptState);
4681
4682 return rc;
4683}
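
/*
 * Usage sketch (illustrative only): fetching the (cached) hardware
 * virtualization MSRs after a successful capability query.  fCaps is assumed
 * to come from SUPR0GetVTSupport() or SUPR0QueryVTCaps().
 *
 * @code
 *    SUPHWVIRTMSRS Msrs;
 *    int rc = SUPR0GetHwvirtMsrs(&Msrs, fCaps, false);  // false = reuse cached values
 *    if (RT_SUCCESS(rc) && (fCaps & SUPVTCAPS_VT_X))
 *    {
 *        uint64_t const uVmxBasic = Msrs.u.vmx.u64Basic;
 *        // ... examine the VMX control MSRs ...
 *    }
 * @endcode
 */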
4684
4685
4686/**
4687 * Register a component factory with the support driver.
4688 *
4689 * This is currently restricted to kernel sessions only.
4690 *
4691 * @returns VBox status code.
4692 * @retval VINF_SUCCESS on success.
4693 * @retval VERR_NO_MEMORY if we're out of memory.
4694 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
4695 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4696 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4697 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4698 *
4699 * @param pSession The SUPDRV session (must be a ring-0 session).
4700 * @param pFactory Pointer to the component factory registration structure.
4701 *
4702 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
4703 */
4704SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4705{
4706 PSUPDRVFACTORYREG pNewReg;
4707 const char *psz;
4708 int rc;
4709
4710 /*
4711 * Validate parameters.
4712 */
4713 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4714 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4715 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4716 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
4717 psz = RTStrEnd(pFactory->szName, sizeof(pFactory->szName));
4718 AssertReturn(psz, VERR_INVALID_PARAMETER);
4719
4720 /*
4721 * Allocate and initialize a new registration structure.
4722 */
4723 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
4724 if (pNewReg)
4725 {
4726 pNewReg->pNext = NULL;
4727 pNewReg->pFactory = pFactory;
4728 pNewReg->pSession = pSession;
4729 pNewReg->cchName = psz - &pFactory->szName[0];
4730
4731 /*
4732 * Add it to the tail of the list after checking for prior registration.
4733 */
4734 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4735 if (RT_SUCCESS(rc))
4736 {
4737 PSUPDRVFACTORYREG pPrev = NULL;
4738 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4739 while (pCur && pCur->pFactory != pFactory)
4740 {
4741 pPrev = pCur;
4742 pCur = pCur->pNext;
4743 }
4744 if (!pCur)
4745 {
4746 if (pPrev)
4747 pPrev->pNext = pNewReg;
4748 else
4749 pSession->pDevExt->pComponentFactoryHead = pNewReg;
4750 rc = VINF_SUCCESS;
4751 }
4752 else
4753 rc = VERR_ALREADY_EXISTS;
4754
4755 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4756 }
4757
4758 if (RT_FAILURE(rc))
4759 RTMemFree(pNewReg);
4760 }
4761 else
4762 rc = VERR_NO_MEMORY;
4763 return rc;
4764}
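
/*
 * Illustrative usage sketch (not part of the driver): a ring-0 component with
 * a kernel session could register its factory roughly like this. The component
 * name and the query callback exampleQueryFactoryIf are hypothetical; the
 * callback must match the pfnQueryFactoryInterface shape used by
 * SUPR0ComponentQueryFactory below.
 *
 *      static SUPDRVFACTORY g_ExampleFactory;
 *      RTStrCopy(g_ExampleFactory.szName, sizeof(g_ExampleFactory.szName), "ExampleComponent");
 *      g_ExampleFactory.pfnQueryFactoryInterface = exampleQueryFactoryIf;
 *
 *      int rc = SUPR0ComponentRegisterFactory(pSession, &g_ExampleFactory);
 *      AssertRC(rc);
 */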
4765
4766
4767/**
4768 * Deregister a component factory.
4769 *
4770 * @returns VBox status code.
4771 * @retval VINF_SUCCESS on success.
4772 * @retval VERR_NOT_FOUND if the factory wasn't registered.
4773 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4774 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4775 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4776 *
4777 * @param pSession The SUPDRV session (must be a ring-0 session).
4778 * @param pFactory Pointer to the component factory registration structure
4779 * previously passed to SUPR0ComponentRegisterFactory().
4780 *
4781 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
4782 */
4783SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4784{
4785 int rc;
4786
4787 /*
4788 * Validate parameters.
4789 */
4790 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4791 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4792 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4793
4794 /*
4795 * Take the lock and look for the registration record.
4796 */
4797 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4798 if (RT_SUCCESS(rc))
4799 {
4800 PSUPDRVFACTORYREG pPrev = NULL;
4801 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4802 while (pCur && pCur->pFactory != pFactory)
4803 {
4804 pPrev = pCur;
4805 pCur = pCur->pNext;
4806 }
4807 if (pCur)
4808 {
4809 if (!pPrev)
4810 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
4811 else
4812 pPrev->pNext = pCur->pNext;
4813
4814 pCur->pNext = NULL;
4815 pCur->pFactory = NULL;
4816 pCur->pSession = NULL;
4817 rc = VINF_SUCCESS;
4818 }
4819 else
4820 rc = VERR_NOT_FOUND;
4821
4822 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4823
4824 RTMemFree(pCur);
4825 }
4826 return rc;
4827}
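
/*
 * Illustrative teardown sketch (hypothetical, continuing the registration
 * example above): the owner passes the very same factory structure again
 * when shutting down.
 *
 *      int rc = SUPR0ComponentDeregisterFactory(pSession, &g_ExampleFactory);
 *      AssertRC(rc);
 */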
4828
4829
4830/**
4831 * Queries a component factory.
4832 *
4833 * @returns VBox status code.
4834 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4835 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4836 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
4837 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
4838 *
4839 * @param pSession The SUPDRV session.
4840 * @param pszName The name of the component factory.
4841 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
4842 * @param ppvFactoryIf Where to store the factory interface.
4843 */
4844SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
4845{
4846 const char *pszEnd;
4847 size_t cchName;
4848 int rc;
4849
4850 /*
4851 * Validate parameters.
4852 */
4853 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4854
4855 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
4856 pszEnd = RTStrEnd(pszName, RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
4857 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4858 cchName = pszEnd - pszName;
4859
4860 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
4861 pszEnd = RTStrEnd(pszInterfaceUuid, RTUUID_STR_LENGTH);
4862 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4863
4864 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
4865 *ppvFactoryIf = NULL;
4866
4867 /*
4868 * Take the lock and try all factories by this name.
4869 */
4870 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4871 if (RT_SUCCESS(rc))
4872 {
4873 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4874 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
4875 while (pCur)
4876 {
4877 if ( pCur->cchName == cchName
4878 && !memcmp(pCur->pFactory->szName, pszName, cchName))
4879 {
4880 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
4881 if (pvFactory)
4882 {
4883 *ppvFactoryIf = pvFactory;
4884 rc = VINF_SUCCESS;
4885 break;
4886 }
4887 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
4888 }
4889
4890 /* next */
4891 pCur = pCur->pNext;
4892 }
4893
4894 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4895 }
4896 return rc;
4897}
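
/*
 * Illustrative consumer sketch (not part of the driver): a client looks up a
 * factory by name and interface UUID and casts the returned pointer to the
 * interface it asked for. The component name, UUID string and interface type
 * EXAMPLEFACTORYIF are hypothetical.
 *
 *      void *pvIf = NULL;
 *      int rc = SUPR0ComponentQueryFactory(pSession, "ExampleComponent",
 *                                          "12345678-1234-1234-1234-123456789abc", &pvIf);
 *      if (RT_SUCCESS(rc))
 *      {
 *          EXAMPLEFACTORYIF *pIf = (EXAMPLEFACTORYIF *)pvIf;
 *          // ... use the interface ...
 *      }
 */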
4898
4899
4900/**
4901 * Adds a memory object to the session.
4902 *
4903 * @returns IPRT status code.
4904 * @param pMem Memory tracking structure containing the
4905 * information to track.
4906 * @param pSession The session.
4907 */
4908static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
4909{
4910 PSUPDRVBUNDLE pBundle;
4911
4912 /*
4913 * Find free entry and record the allocation.
4914 */
4915 RTSpinlockAcquire(pSession->Spinlock);
4916 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4917 {
4918 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
4919 {
4920 unsigned i;
4921 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
4922 {
4923 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
4924 {
4925 pBundle->cUsed++;
4926 pBundle->aMem[i] = *pMem;
4927 RTSpinlockRelease(pSession->Spinlock);
4928 return VINF_SUCCESS;
4929 }
4930 }
4931 AssertFailed(); /* !!this can't be happening!!! */
4932 }
4933 }
4934 RTSpinlockRelease(pSession->Spinlock);
4935
4936 /*
4937 * Need to allocate a new bundle.
4938 * The record goes into the last entry of the new bundle.
4939 */
4940 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
4941 if (!pBundle)
4942 return VERR_NO_MEMORY;
4943
4944 /* take last entry. */
4945 pBundle->cUsed++;
4946 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
4947
4948 /* insert into list. */
4949 RTSpinlockAcquire(pSession->Spinlock);
4950 pBundle->pNext = pSession->Bundle.pNext;
4951 pSession->Bundle.pNext = pBundle;
4952 RTSpinlockRelease(pSession->Spinlock);
4953
4954 return VINF_SUCCESS;
4955}
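
/*
 * Illustrative caller sketch (hypothetical): an allocation path fills in a
 * SUPDRVMEMREF on the stack and hands it over for per-session tracking; the
 * hMemObj/hMapObjR3 handles and the enmType value stand in for whatever the
 * concrete allocation produced.
 *
 *      SUPDRVMEMREF Mem;
 *      Mem.MemObj   = hMemObj;      // the ring-0 memory object just allocated
 *      Mem.MapObjR3 = hMapObjR3;    // the ring-3 mapping, or NIL_RTR0MEMOBJ if none
 *      Mem.eType    = enmType;      // the matching MEMREF_TYPE_* value
 *      int rc = supdrvMemAdd(&Mem, pSession);
 *      if (RT_FAILURE(rc))
 *      {
 *          RTR0MemObjFree(hMapObjR3, false);
 *          RTR0MemObjFree(hMemObj, true);   // fFreeMappings = true
 *      }
 */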
4956
4957
4958/**
4959 * Releases a memory object referenced by pointer and type.
4960 *
4961 * @returns IPRT status code.
4962 * @param pSession Session data.
4963 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
4964 * @param eType Memory type.
4965 */
4966static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
4967{
4968 PSUPDRVBUNDLE pBundle;
4969
4970 /*
4971 * Validate input.
4972 */
4973 if (!uPtr)
4974 {
4975 Log(("Illegal address %p\n", (void *)uPtr));
4976 return VERR_INVALID_PARAMETER;
4977 }
4978
4979 /*
4980 * Search for the address.
4981 */
4982 RTSpinlockAcquire(pSession->Spinlock);
4983 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4984 {
4985 if (pBundle->cUsed > 0)
4986 {
4987 unsigned i;
4988 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
4989 {
4990 if ( pBundle->aMem[i].eType == eType
4991 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
4992 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
4993 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
4994 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
4995 )
4996 {
4997 /* Make a copy of it and release it outside the spinlock. */
4998 SUPDRVMEMREF Mem = pBundle->aMem[i];
4999 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
5000 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
5001 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
5002 RTSpinlockRelease(pSession->Spinlock);
5003
5004 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
5005 {
5006 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
5007 AssertRC(rc); /** @todo figure out how to handle this. */
5008 }
5009 if (Mem.MemObj != NIL_RTR0MEMOBJ)
5010 {
5011 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
5012 AssertRC(rc); /** @todo figure out how to handle this. */
5013 }
5014 return VINF_SUCCESS;
5015 }
5016 }
5017 }
5018 }
5019 RTSpinlockRelease(pSession->Spinlock);
5020 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
5021 return VERR_INVALID_PARAMETER;
5022}
5023
5024
5025/**
5026 * Opens an image. If it's the first time it's opened, the caller must upload
5027 * the image bits using the supdrvIOCtl_LdrLoad() / SUP_IOCTL_LDR_LOAD function.
5028 *
5029 * This is the 1st step of the loading.
5030 *
5031 * @returns IPRT status code.
5032 * @param pDevExt Device globals.
5033 * @param pSession Session data.
5034 * @param pReq The open request.
5035 */
5036static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
5037{
5038 int rc;
5039 PSUPDRVLDRIMAGE pImage;
5040 void *pv;
5041 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
5042 SUPDRV_CHECK_SMAP_SETUP();
5043 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5044 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithTabs=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithTabs));
5045
5046 /*
5047 * Check if we got an instance of the image already.
5048 */
5049 supdrvLdrLock(pDevExt);
5050 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5051 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5052 {
5053 if ( pImage->szName[cchName] == '\0'
5054 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
5055 {
5056 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
5057 {
5058 /** @todo check cbImageBits and cbImageWithTabs here; if they differ, that indicates that the images are different. */
5059 pImage->cUsage++;
5060 pReq->u.Out.pvImageBase = pImage->pvImage;
5061 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
5062 pReq->u.Out.fNativeLoader = pImage->fNative;
5063 supdrvLdrAddUsage(pSession, pImage);
5064 supdrvLdrUnlock(pDevExt);
5065 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5066 return VINF_SUCCESS;
5067 }
5068 supdrvLdrUnlock(pDevExt);
5069 Log(("supdrvIOCtl_LdrOpen: To many existing references to '%s'!\n", pReq->u.In.szName));
5070 return VERR_INTERNAL_ERROR_3; /** @todo add VERR_TOO_MANY_REFERENCES */
5071 }
5072 }
5073 /* (not found - add it!) */
5074
5075 /* If the loader interface is locked down, make userland fail early */
5076 if (pDevExt->fLdrLockedDown)
5077 {
5078 supdrvLdrUnlock(pDevExt);
5079 Log(("supdrvIOCtl_LdrOpen: Not adding '%s' to image list, loader interface is locked down!\n", pReq->u.In.szName));
5080 return VERR_PERMISSION_DENIED;
5081 }
5082
5083 /*
5084 * Allocate memory.
5085 */
5086 Assert(cchName < sizeof(pImage->szName));
5087 pv = RTMemAlloc(sizeof(SUPDRVLDRIMAGE));
5088 if (!pv)
5089 {
5090 supdrvLdrUnlock(pDevExt);
5091 Log(("supdrvIOCtl_LdrOpen: RTMemAlloc() failed\n"));
5092 return /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_2;
5093 }
5094 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5095
5096 /*
5097 * Setup and link in the LDR stuff.
5098 */
5099 pImage = (PSUPDRVLDRIMAGE)pv;
5100 pImage->pvImage = NULL;
5101 pImage->pvImageAlloc = NULL;
5102 pImage->cbImageWithTabs = pReq->u.In.cbImageWithTabs;
5103 pImage->cbImageBits = pReq->u.In.cbImageBits;
5104 pImage->cSymbols = 0;
5105 pImage->paSymbols = NULL;
5106 pImage->pachStrTab = NULL;
5107 pImage->cbStrTab = 0;
5108 pImage->pfnModuleInit = NULL;
5109 pImage->pfnModuleTerm = NULL;
5110 pImage->pfnServiceReqHandler = NULL;
5111 pImage->uState = SUP_IOCTL_LDR_OPEN;
5112 pImage->cUsage = 1;
5113 pImage->pDevExt = pDevExt;
5114 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
5115
5116 /*
5117 * Try to load it using the native loader; if that isn't supported, fall back
5118 * on the older method.
5119 */
5120 pImage->fNative = true;
5121 rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
5122 if (rc == VERR_NOT_SUPPORTED)
5123 {
5124 pImage->pvImageAlloc = RTMemExecAlloc(pImage->cbImageBits + 31);
5125 pImage->pvImage = RT_ALIGN_P(pImage->pvImageAlloc, 32);
5126 pImage->fNative = false;
5127 rc = pImage->pvImageAlloc ? VINF_SUCCESS : VERR_NO_EXEC_MEMORY;
5128 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5129 }
5130 if (RT_FAILURE(rc))
5131 {
5132 supdrvLdrUnlock(pDevExt);
5133 RTMemFree(pImage);
5134 Log(("supdrvIOCtl_LdrOpen(%s): failed - %Rrc\n", pReq->u.In.szName, rc));
5135 return rc;
5136 }
5137 Assert(VALID_PTR(pImage->pvImage) || RT_FAILURE(rc));
5138
5139 /*
5140 * Link it.
5141 */
5142 pImage->pNext = pDevExt->pLdrImages;
5143 pDevExt->pLdrImages = pImage;
5144
5145 supdrvLdrAddUsage(pSession, pImage);
5146
5147 pReq->u.Out.pvImageBase = pImage->pvImage;
5148 pReq->u.Out.fNeedsLoading = true;
5149 pReq->u.Out.fNativeLoader = pImage->fNative;
5150 supdrvOSLdrNotifyOpened(pDevExt, pImage, pReq->u.In.szFilename);
5151
5152 supdrvLdrUnlock(pDevExt);
5153 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5154 return VINF_SUCCESS;
5155}
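
/*
 * Ring-3 request sketch (illustrative; the real SUP library also fills in the
 * request header and issues the ioctl, which is omitted here, and the module
 * path and size variables stand in for values computed by the ring-3 loader):
 *
 *      SUPLDROPEN OpenReq;
 *      RT_ZERO(OpenReq);
 *      RTStrCopy(OpenReq.u.In.szName, sizeof(OpenReq.u.In.szName), "VMMR0.r0");
 *      RTStrCopy(OpenReq.u.In.szFilename, sizeof(OpenReq.u.In.szFilename), pszModulePath);
 *      OpenReq.u.In.cbImageWithTabs = cbImageWithTabs;
 *      OpenReq.u.In.cbImageBits     = cbImageBits;
 *      // ... issue SUP_IOCTL_LDR_OPEN ...
 *      if (OpenReq.u.Out.fNeedsLoading)
 *      {
 *          // ... follow up with SUP_IOCTL_LDR_LOAD (see supdrvIOCtl_LdrLoad below) ...
 *      }
 */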
5156
5157
5158/**
5159 * Worker that validates a pointer to an image entrypoint.
5160 *
5161 * @returns IPRT status code.
5162 * @param pDevExt The device globals.
5163 * @param pImage The loader image.
5164 * @param pv The pointer into the image.
5165 * @param fMayBeNull Whether it may be NULL.
5166 * @param fCheckNative Whether to check with the native loaders.
5167 * @param pszSymbol The entrypoint name or log name. If the symbol
5168 * is capitalized it signifies a specific symbol, otherwise it
5169 * is only used for logging.
5170 * @param pbImageBits The image bits prepared by ring-3.
5171 *
5172 * @remarks Will leave the lock on failure.
5173 */
5174static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv, bool fMayBeNull,
5175 bool fCheckNative, const uint8_t *pbImageBits, const char *pszSymbol)
5176{
5177 if (!fMayBeNull || pv)
5178 {
5179 if ((uintptr_t)pv - (uintptr_t)pImage->pvImage >= pImage->cbImageBits)
5180 {
5181 supdrvLdrUnlock(pDevExt);
5182 Log(("Out of range (%p LB %#x): %s=%p\n", pImage->pvImage, pImage->cbImageBits, pszSymbol, pv));
5183 return VERR_INVALID_PARAMETER;
5184 }
5185
5186 if (pImage->fNative && fCheckNative)
5187 {
5188 int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits, pszSymbol);
5189 if (RT_FAILURE(rc))
5190 {
5191 supdrvLdrUnlock(pDevExt);
5192 Log(("Bad entry point address: %s=%p (rc=%Rrc)\n", pszSymbol, pv, rc));
5193 return rc;
5194 }
5195 }
5196 }
5197 return VINF_SUCCESS;
5198}
5199
5200
5201/**
5202 * Formats a load error message.
5203 *
5204 * @returns @a rc
5205 * @param rc Return code.
5206 * @param pReq The request.
5207 * @param pszFormat The error message format string.
5208 * @param ... Arguments to the format string.
5209 */
5210int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
5211{
5212 va_list va;
5213 va_start(va, pszFormat);
5214 pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
5215 RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
5216 va_end(va);
5217 Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
5218 return rc;
5219}
5220
5221
5222/**
5223 * Loads the image bits.
5224 *
5225 * This is the 2nd step of the loading.
5226 *
5227 * @returns IPRT status code.
5228 * @param pDevExt Device globals.
5229 * @param pSession Session data.
5230 * @param pReq The request.
5231 */
5232static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
5233{
5234 PSUPDRVLDRUSAGE pUsage;
5235 PSUPDRVLDRIMAGE pImage;
5236 int rc;
5237 SUPDRV_CHECK_SMAP_SETUP();
5238 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithBits=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithTabs));
5239 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5240
5241 /*
5242 * Find the ldr image.
5243 */
5244 supdrvLdrLock(pDevExt);
5245 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5246
5247 pUsage = pSession->pLdrUsage;
5248 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5249 pUsage = pUsage->pNext;
5250 if (!pUsage)
5251 {
5252 supdrvLdrUnlock(pDevExt);
5253 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image not found");
5254 }
5255 pImage = pUsage->pImage;
5256
5257 /*
5258 * Validate input.
5259 */
5260 if ( pImage->cbImageWithTabs != pReq->u.In.cbImageWithTabs
5261 || pImage->cbImageBits != pReq->u.In.cbImageBits)
5262 {
5263 supdrvLdrUnlock(pDevExt);
5264 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %d(prep) != %d(load) or %d != %d",
5265 pImage->cbImageWithTabs, pReq->u.In.cbImageWithTabs, pImage->cbImageBits, pReq->u.In.cbImageBits);
5266 }
5267
5268 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
5269 {
5270 unsigned uState = pImage->uState;
5271 supdrvLdrUnlock(pDevExt);
5272 if (uState != SUP_IOCTL_LDR_LOAD)
5273 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
5274 pReq->u.Out.uErrorMagic = 0;
5275 return VERR_ALREADY_LOADED;
5276 }
5277
5278 /* If the loader interface is locked down, don't load new images */
5279 if (pDevExt->fLdrLockedDown)
5280 {
5281 supdrvLdrUnlock(pDevExt);
5282 return supdrvLdrLoadError(VERR_PERMISSION_DENIED, pReq, "Loader is locked down");
5283 }
5284
5285 switch (pReq->u.In.eEPType)
5286 {
5287 case SUPLDRLOADEP_NOTHING:
5288 break;
5289
5290 case SUPLDRLOADEP_VMMR0:
5291 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0, false, false, pReq->u.In.abImage, "pvVMMR0");
5292 if (RT_SUCCESS(rc))
5293 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, true, pReq->u.In.abImage, "VMMR0EntryFast");
5294 if (RT_SUCCESS(rc))
5295 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, true, pReq->u.In.abImage, "VMMR0EntryEx");
5296 if (RT_FAILURE(rc))
5297 return supdrvLdrLoadError(rc, pReq, "Invalid VMMR0 pointer");
5298 break;
5299
5300 case SUPLDRLOADEP_SERVICE:
5301 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, true, pReq->u.In.abImage, "pfnServiceReq");
5302 if (RT_FAILURE(rc))
5303 return supdrvLdrLoadError(rc, pReq, "Invalid pfnServiceReq pointer: %p", pReq->u.In.EP.Service.pfnServiceReq);
5304 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
5305 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
5306 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
5307 {
5308 supdrvLdrUnlock(pDevExt);
5309 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5310 "Out of range (%p LB %#x): apvReserved={%p,%p,%p} MBZ!",
5311 pImage->pvImage, pReq->u.In.cbImageWithTabs,
5312 pReq->u.In.EP.Service.apvReserved[0],
5313 pReq->u.In.EP.Service.apvReserved[1],
5314 pReq->u.In.EP.Service.apvReserved[2]);
5315 }
5316 break;
5317
5318 default:
5319 supdrvLdrUnlock(pDevExt);
5320 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid eEPType=%d", pReq->u.In.eEPType);
5321 }
5322
5323 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, true, pReq->u.In.abImage, "ModuleInit");
5324 if (RT_FAILURE(rc))
5325 return supdrvLdrLoadError(rc, pReq, "Invalid pfnModuleInit pointer: %p", pReq->u.In.pfnModuleInit);
5326 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, true, pReq->u.In.abImage, "ModuleTerm");
5327 if (RT_FAILURE(rc))
5328 return supdrvLdrLoadError(rc, pReq, "Invalid pfnModuleTerm pointer: %p", pReq->u.In.pfnModuleTerm);
5329 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5330
5331 /*
5332 * Allocate and copy the tables if non-native.
5333 * (No need to do try/except as this is a buffered request.)
5334 */
5335 if (!pImage->fNative)
5336 {
5337 pImage->cbStrTab = pReq->u.In.cbStrTab;
5338 if (pImage->cbStrTab)
5339 {
5340 pImage->pachStrTab = (char *)RTMemAlloc(pImage->cbStrTab);
5341 if (pImage->pachStrTab)
5342 memcpy(pImage->pachStrTab, &pReq->u.In.abImage[pReq->u.In.offStrTab], pImage->cbStrTab);
5343 else
5344 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for string table: %#x", pImage->cbStrTab);
5345 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5346 }
5347
5348 pImage->cSymbols = pReq->u.In.cSymbols;
5349 if (RT_SUCCESS(rc) && pImage->cSymbols)
5350 {
5351 size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
5352 pImage->paSymbols = (PSUPLDRSYM)RTMemAlloc(cbSymbols);
5353 if (pImage->paSymbols)
5354 memcpy(pImage->paSymbols, &pReq->u.In.abImage[pReq->u.In.offSymbols], cbSymbols);
5355 else
5356 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
5357 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5358 }
5359 }
5360
5361 /*
5362 * Copy the bits / complete native loading.
5363 */
5364 if (RT_SUCCESS(rc))
5365 {
5366 pImage->uState = SUP_IOCTL_LDR_LOAD;
5367 pImage->pfnModuleInit = (PFNR0MODULEINIT)(uintptr_t)pReq->u.In.pfnModuleInit;
5368 pImage->pfnModuleTerm = (PFNR0MODULETERM)(uintptr_t)pReq->u.In.pfnModuleTerm;
5369
5370 if (pImage->fNative)
5371 rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.abImage, pReq);
5372 else
5373 {
5374 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5375 Log(("vboxdrv: Loaded '%s' at %p\n", pImage->szName, pImage->pvImage));
5376 }
5377 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5378 }
5379
5380 /*
5381 * Update any entry points.
5382 */
5383 if (RT_SUCCESS(rc))
5384 {
5385 switch (pReq->u.In.eEPType)
5386 {
5387 default:
5388 case SUPLDRLOADEP_NOTHING:
5389 rc = VINF_SUCCESS;
5390 break;
5391 case SUPLDRLOADEP_VMMR0:
5392 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0,
5393 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
5394 break;
5395 case SUPLDRLOADEP_SERVICE:
5396 pImage->pfnServiceReqHandler = (PFNSUPR0SERVICEREQHANDLER)(uintptr_t)pReq->u.In.EP.Service.pfnServiceReq;
5397 rc = VINF_SUCCESS;
5398 break;
5399 }
5400 }
5401
5402 /*
5403 * On success call the module initialization.
5404 */
5405 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
5406 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
5407 {
5408 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
5409 pDevExt->pLdrInitImage = pImage;
5410 pDevExt->hLdrInitThread = RTThreadNativeSelf();
5411 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5412 rc = pImage->pfnModuleInit(pImage);
5413 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5414 pDevExt->pLdrInitImage = NULL;
5415 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
5416 if (RT_FAILURE(rc))
5417 {
5418 if (pDevExt->pvVMMR0 == pImage->pvImage)
5419 supdrvLdrUnsetVMMR0EPs(pDevExt);
5420 supdrvLdrLoadError(rc, pReq, "ModuleInit failed: %Rrc", rc);
5421 }
5422 }
5423 if (RT_SUCCESS(rc))
5424 {
5425 SUPR0Printf("vboxdrv: %RKv %s\n", pImage->pvImage, pImage->szName);
5426 pReq->u.Out.uErrorMagic = 0;
5427 pReq->u.Out.szError[0] = '\0';
5428 }
5429 else
5430 {
5431 /* Inform the tracing component in case ModuleInit registered TPs. */
5432 supdrvTracerModuleUnloading(pDevExt, pImage);
5433
5434 pImage->uState = SUP_IOCTL_LDR_OPEN;
5435 pImage->pfnModuleInit = NULL;
5436 pImage->pfnModuleTerm = NULL;
5437 pImage->pfnServiceReqHandler = NULL;
5438 pImage->cbStrTab = 0;
5439 RTMemFree(pImage->pachStrTab);
5440 pImage->pachStrTab = NULL;
5441 RTMemFree(pImage->paSymbols);
5442 pImage->paSymbols = NULL;
5443 pImage->cSymbols = 0;
5444 }
5445
5446 supdrvLdrUnlock(pDevExt);
5447 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5448 return rc;
5449}
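
/*
 * Entry point selection sketch (illustrative): a module exposing a service
 * request handler would have ring-3 fill the load request roughly like this
 * before issuing SUP_IOCTL_LDR_LOAD; the apvReserved entries must be
 * NIL_RTR0PTR as checked above, and pfnExampleServiceReq is a hypothetical
 * address inside the image being loaded.
 *
 *      LoadReq.u.In.eEPType                   = SUPLDRLOADEP_SERVICE;
 *      LoadReq.u.In.EP.Service.pfnServiceReq  = pfnExampleServiceReq;
 *      LoadReq.u.In.EP.Service.apvReserved[0] = NIL_RTR0PTR;
 *      LoadReq.u.In.EP.Service.apvReserved[1] = NIL_RTR0PTR;
 *      LoadReq.u.In.EP.Service.apvReserved[2] = NIL_RTR0PTR;
 */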
5450
5451
5452/**
5453 * Frees a previously loaded (prep'ed) image.
5454 *
5455 * @returns IPRT status code.
5456 * @param pDevExt Device globals.
5457 * @param pSession Session data.
5458 * @param pReq The request.
5459 */
5460static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
5461{
5462 int rc;
5463 PSUPDRVLDRUSAGE pUsagePrev;
5464 PSUPDRVLDRUSAGE pUsage;
5465 PSUPDRVLDRIMAGE pImage;
5466 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
5467
5468 /*
5469 * Find the ldr image.
5470 */
5471 supdrvLdrLock(pDevExt);
5472 pUsagePrev = NULL;
5473 pUsage = pSession->pLdrUsage;
5474 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5475 {
5476 pUsagePrev = pUsage;
5477 pUsage = pUsage->pNext;
5478 }
5479 if (!pUsage)
5480 {
5481 supdrvLdrUnlock(pDevExt);
5482 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
5483 return VERR_INVALID_HANDLE;
5484 }
5485
5486 /*
5487 * Check if we can remove anything.
5488 */
5489 rc = VINF_SUCCESS;
5490 pImage = pUsage->pImage;
5491 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
5492 {
5493 /*
5494 * Check if there are any objects with destructors in the image, if
5495 * so leave it for the session cleanup routine so we get a chance to
5496 * clean things up in the right order and not leave them all dangling.
5497 */
5498 RTSpinlockAcquire(pDevExt->Spinlock);
5499 if (pImage->cUsage <= 1)
5500 {
5501 PSUPDRVOBJ pObj;
5502 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
5503 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5504 {
5505 rc = VERR_DANGLING_OBJECTS;
5506 break;
5507 }
5508 }
5509 else
5510 {
5511 PSUPDRVUSAGE pGenUsage;
5512 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
5513 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5514 {
5515 rc = VERR_DANGLING_OBJECTS;
5516 break;
5517 }
5518 }
5519 RTSpinlockRelease(pDevExt->Spinlock);
5520 if (rc == VINF_SUCCESS)
5521 {
5522 /* unlink it */
5523 if (pUsagePrev)
5524 pUsagePrev->pNext = pUsage->pNext;
5525 else
5526 pSession->pLdrUsage = pUsage->pNext;
5527
5528 /* free it */
5529 pUsage->pImage = NULL;
5530 pUsage->pNext = NULL;
5531 RTMemFree(pUsage);
5532
5533 /*
5534 * Dereference the image.
5535 */
5536 if (pImage->cUsage <= 1)
5537 supdrvLdrFree(pDevExt, pImage);
5538 else
5539 pImage->cUsage--;
5540 }
5541 else
5542 {
5543 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
5544 rc = VINF_SUCCESS; /** @todo BRANCH-2.1: remove this after branching. */
5545 }
5546 }
5547 else
5548 {
5549 /*
5550 * Dereference both image and usage.
5551 */
5552 pImage->cUsage--;
5553 pUsage->cUsage--;
5554 }
5555
5556 supdrvLdrUnlock(pDevExt);
5557 return rc;
5558}
5559
5560
5561/**
5562 * Lock down the image loader interface.
5563 *
5564 * @returns IPRT status code.
5565 * @param pDevExt Device globals.
5566 */
5567static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt)
5568{
5569 LogFlow(("supdrvIOCtl_LdrLockDown:\n"));
5570
5571 supdrvLdrLock(pDevExt);
5572 if (!pDevExt->fLdrLockedDown)
5573 {
5574 pDevExt->fLdrLockedDown = true;
5575 Log(("supdrvIOCtl_LdrLockDown: Image loader interface locked down\n"));
5576 }
5577 supdrvLdrUnlock(pDevExt);
5578
5579 return VINF_SUCCESS;
5580}
5581
5582
5583/**
5584 * Queries the address of a symbol in an open image.
5585 *
5586 * @returns IPRT status code.
5587 * @param pDevExt Device globals.
5588 * @param pSession Session data.
5589 * @param pReq The request buffer.
5590 */
5591static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
5592{
5593 PSUPDRVLDRIMAGE pImage;
5594 PSUPDRVLDRUSAGE pUsage;
5595 uint32_t i;
5596 PSUPLDRSYM paSyms;
5597 const char *pchStrings;
5598 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
5599 void *pvSymbol = NULL;
5600 int rc = VERR_SYMBOL_NOT_FOUND;
5601 Log3(("supdrvIOCtl_LdrQuerySymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
5602
5603 /*
5604 * Find the ldr image.
5605 */
5606 supdrvLdrLock(pDevExt);
5607 pUsage = pSession->pLdrUsage;
5608 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5609 pUsage = pUsage->pNext;
5610 if (!pUsage)
5611 {
5612 supdrvLdrUnlock(pDevExt);
5613 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
5614 return VERR_INVALID_HANDLE;
5615 }
5616 pImage = pUsage->pImage;
5617 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
5618 {
5619 unsigned uState = pImage->uState;
5620 supdrvLdrUnlock(pDevExt);
5621 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
5622 return VERR_ALREADY_LOADED;
5623 }
5624
5625 /*
5626 * Search the image exports / symbol strings.
5627 *
5628 * Note! The int32_t is for native loading on solaris where the data
5629 * and text segments are in very different places.
5630 */
5631 if (pImage->fNative)
5632 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pReq->u.In.szSymbol, cbSymbol - 1, &pvSymbol);
5633 else
5634 {
5635 pchStrings = pImage->pachStrTab;
5636 paSyms = pImage->paSymbols;
5637 for (i = 0; i < pImage->cSymbols; i++)
5638 {
5639 if ( paSyms[i].offName + cbSymbol <= pImage->cbStrTab
5640 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
5641 {
5642 pvSymbol = (uint8_t *)pImage->pvImage + (int32_t)paSyms[i].offSymbol;
5643 rc = VINF_SUCCESS;
5644 break;
5645 }
5646 }
5647 }
5648 supdrvLdrUnlock(pDevExt);
5649 pReq->u.Out.pvSymbol = pvSymbol;
5650 return rc;
5651}
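
/*
 * Request sketch (illustrative): ring-3 passes the image base returned by
 * SUP_IOCTL_LDR_OPEN plus a symbol name and gets the ring-0 address back;
 * the "ModuleInit" name is just an example.
 *
 *      SUPLDRGETSYMBOL SymReq;
 *      SymReq.u.In.pvImageBase = OpenReq.u.Out.pvImageBase;
 *      RTStrCopy(SymReq.u.In.szSymbol, sizeof(SymReq.u.In.szSymbol), "ModuleInit");
 *      // ... issue SUP_IOCTL_LDR_GET_SYMBOL ...
 *      void *pvModuleInit = SymReq.u.Out.pvSymbol;
 */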
5652
5653
5654/**
5655 * Gets the address of a symbol in an open image or the support driver.
5656 *
5657 * @returns VINF_SUCCESS on success.
5658 * @returns appropriate VBox error status code on failure.
5659 * @param pDevExt Device globals.
5660 * @param pSession Session data.
5661 * @param pReq The request buffer.
5662 */
5663static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
5664{
5665 int rc = VINF_SUCCESS;
5666 const char *pszSymbol = pReq->u.In.pszSymbol;
5667 const char *pszModule = pReq->u.In.pszModule;
5668 size_t cbSymbol;
5669 char const *pszEnd;
5670 uint32_t i;
5671
5672 /*
5673 * Input validation.
5674 */
5675 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
5676 pszEnd = RTStrEnd(pszSymbol, 512);
5677 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5678 cbSymbol = pszEnd - pszSymbol + 1;
5679
5680 if (pszModule)
5681 {
5682 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
5683 pszEnd = RTStrEnd(pszModule, 64);
5684 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5685 }
5686 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
5687
5688
5689 if ( !pszModule
5690 || !strcmp(pszModule, "SupDrv"))
5691 {
5692 /*
5693 * Search the support driver export table.
5694 */
5695 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
5696 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
5697 {
5698 pReq->u.Out.pfnSymbol = (PFNRT)(uintptr_t)g_aFunctions[i].pfn;
5699 break;
5700 }
5701 }
5702 else
5703 {
5704 /*
5705 * Find the loader image.
5706 */
5707 PSUPDRVLDRIMAGE pImage;
5708
5709 supdrvLdrLock(pDevExt);
5710
5711 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5712 if (!strcmp(pImage->szName, pszModule))
5713 break;
5714 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
5715 {
5716 /*
5717 * Search the image exports / symbol strings.
5718 */
5719 if (pImage->fNative)
5720 {
5721 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pszSymbol, cbSymbol - 1, (void **)&pReq->u.Out.pfnSymbol);
5722 if (RT_SUCCESS(rc))
5723 rc = supdrvLdrAddUsage(pSession, pImage);
5724 }
5725 else
5726 {
5727 const char *pchStrings = pImage->pachStrTab;
5728 PCSUPLDRSYM paSyms = pImage->paSymbols;
5729 rc = VERR_SYMBOL_NOT_FOUND;
5730 for (i = 0; i < pImage->cSymbols; i++)
5731 {
5732 if ( paSyms[i].offName + cbSymbol <= pImage->cbStrTab
5733 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
5734 {
5735 /*
5736 * Found it! Calc the symbol address and add a reference to the module.
5737 */
5738 pReq->u.Out.pfnSymbol = (PFNRT)((uintptr_t)pImage->pvImage + (int32_t)paSyms[i].offSymbol);
5739 rc = supdrvLdrAddUsage(pSession, pImage);
5740 break;
5741 }
5742 }
5743 }
5744 }
5745 else
5746 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
5747
5748 supdrvLdrUnlock(pDevExt);
5749 }
5750 return rc;
5751}
5752
5753
5754/**
5755 * Looks up a symbol in g_aFunctions, also handling the special g_SUPGlobalInfoPage export.
5756 *
5757 * @returns VINF_SUCCESS on success, VERR_SYMBOL_NOT_FOUND on failure.
5758 * @param pszSymbol The symbol to look up.
5759 * @param puValue Where to return the value.
5760 */
5761int VBOXCALL supdrvLdrGetExportedSymbol(const char *pszSymbol, uintptr_t *puValue)
5762{
5763 uint32_t i;
5764 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
5765 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
5766 {
5767 *puValue = (uintptr_t)g_aFunctions[i].pfn;
5768 return VINF_SUCCESS;
5769 }
5770
5771 if (!strcmp(pszSymbol, "g_SUPGlobalInfoPage"))
5772 {
5773 *puValue = (uintptr_t)g_pSUPGlobalInfoPage;
5774 return VINF_SUCCESS;
5775 }
5776
5777 return VERR_SYMBOL_NOT_FOUND;
5778}
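
/*
 * Illustrative lookup: resolving the GIP pointer, which this function treats
 * as a special export alongside the g_aFunctions table.
 *
 *      uintptr_t uGip = 0;
 *      int rc = supdrvLdrGetExportedSymbol("g_SUPGlobalInfoPage", &uGip);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // uGip now holds the value of g_pSUPGlobalInfoPage.
 *      }
 */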
5779
5780
5781/**
5782 * Updates the VMMR0 entry point pointers.
5783 *
5784 * @returns IPRT status code.
5785 * @param pDevExt Device globals.
5786 * @param pvVMMR0 VMMR0 image handle.
5787 * @param pvVMMR0EntryFast VMMR0EntryFast address.
5788 * @param pvVMMR0EntryEx VMMR0EntryEx address.
5789 * @remark Caller must own the loader mutex.
5790 */
5791static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
5792{
5793 int rc = VINF_SUCCESS;
5794 LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryFast=%p\n", pvVMMR0, pvVMMR0EntryFast));
5795
5796
5797 /*
5798 * Check if not yet set.
5799 */
5800 if (!pDevExt->pvVMMR0)
5801 {
5802 pDevExt->pvVMMR0 = pvVMMR0;
5803 *(void **)&pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
5804 *(void **)&pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
5805 ASMCompilerBarrier(); /* the above isn't nice, so be careful... */
5806 }
5807 else
5808 {
5809 /*
5810 * Return failure or success depending on whether the values match or not.
5811 */
5812 if ( pDevExt->pvVMMR0 != pvVMMR0
5813 || (uintptr_t)pDevExt->pfnVMMR0EntryFast != (uintptr_t)pvVMMR0EntryFast
5814 || (uintptr_t)pDevExt->pfnVMMR0EntryEx != (uintptr_t)pvVMMR0EntryEx)
5815 {
5816 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
5817 rc = VERR_INVALID_PARAMETER;
5818 }
5819 }
5820 return rc;
5821}
5822
5823
5824/**
5825 * Unsets the VMMR0 entry points installed by supdrvLdrSetVMMR0EPs.
5826 *
5827 * @param pDevExt Device globals.
5828 */
5829static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
5830{
5831 pDevExt->pvVMMR0 = NULL;
5832 pDevExt->pfnVMMR0EntryFast = NULL;
5833 pDevExt->pfnVMMR0EntryEx = NULL;
5834}
5835
5836
5837/**
5838 * Adds a usage reference in the specified session of an image.
5839 *
5840 * Called while owning the loader semaphore.
5841 *
5842 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
5843 * @param pSession Session in question.
5844 * @param pImage Image which the session is using.
5845 */
5846static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
5847{
5848 PSUPDRVLDRUSAGE pUsage;
5849 LogFlow(("supdrvLdrAddUsage: pImage=%p\n", pImage));
5850
5851 /*
5852 * Referenced it already?
5853 */
5854 pUsage = pSession->pLdrUsage;
5855 while (pUsage)
5856 {
5857 if (pUsage->pImage == pImage)
5858 {
5859 pUsage->cUsage++;
5860 return VINF_SUCCESS;
5861 }
5862 pUsage = pUsage->pNext;
5863 }
5864
5865 /*
5866 * Allocate new usage record.
5867 */
5868 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
5869 AssertReturn(pUsage, /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_5);
5870 pUsage->cUsage = 1;
5871 pUsage->pImage = pImage;
5872 pUsage->pNext = pSession->pLdrUsage;
5873 pSession->pLdrUsage = pUsage;
5874 return VINF_SUCCESS;
5875}
5876
5877
5878/**
5879 * Frees a load image.
5880 *
5881 * @param pDevExt Pointer to device extension.
5882 * @param pImage Pointer to the image we're gonna free.
5883 * This image must exist!
5884 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
5885 */
5886static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
5887{
5888 PSUPDRVLDRIMAGE pImagePrev;
5889 LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));
5890
5891 /*
5892 * Warn if we're releasing images while the image loader interface is
5893 * locked down -- we won't be able to reload them!
5894 */
5895 if (pDevExt->fLdrLockedDown)
5896 Log(("supdrvLdrFree: Warning: unloading '%s' image, while loader interface is locked down!\n", pImage->szName));
5897
5898 /* find it - argh, should've used a doubly linked list. */
5899 Assert(pDevExt->pLdrImages);
5900 pImagePrev = NULL;
5901 if (pDevExt->pLdrImages != pImage)
5902 {
5903 pImagePrev = pDevExt->pLdrImages;
5904 while (pImagePrev->pNext != pImage)
5905 pImagePrev = pImagePrev->pNext;
5906 Assert(pImagePrev->pNext == pImage);
5907 }
5908
5909 /* unlink */
5910 if (pImagePrev)
5911 pImagePrev->pNext = pImage->pNext;
5912 else
5913 pDevExt->pLdrImages = pImage->pNext;
5914
5915 /* check if this is VMMR0.r0 and, if so, unset its entry point pointers. */
5916 if (pDevExt->pvVMMR0 == pImage->pvImage)
5917 supdrvLdrUnsetVMMR0EPs(pDevExt);
5918
5919 /* check for objects with destructors in this image. (Shouldn't happen.) */
5920 if (pDevExt->pObjs)
5921 {
5922 unsigned cObjs = 0;
5923 PSUPDRVOBJ pObj;
5924 RTSpinlockAcquire(pDevExt->Spinlock);
5925 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
5926 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5927 {
5928 pObj->pfnDestructor = NULL;
5929 cObjs++;
5930 }
5931 RTSpinlockRelease(pDevExt->Spinlock);
5932 if (cObjs)
5933 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
5934 }
5935
5936 /* call termination function if fully loaded. */
5937 if ( pImage->pfnModuleTerm
5938 && pImage->uState == SUP_IOCTL_LDR_LOAD)
5939 {
5940 LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
5941 pImage->pfnModuleTerm(pImage);
5942 }
5943
5944 /* Inform the tracing component. */
5945 supdrvTracerModuleUnloading(pDevExt, pImage);
5946
5947 /* Do native unload if appropriate, then inform the native code about the
5948 unloading (mainly for non-native loading case). */
5949 if (pImage->fNative)
5950 supdrvOSLdrUnload(pDevExt, pImage);
5951 supdrvOSLdrNotifyUnloaded(pDevExt, pImage);
5952
5953 /* free the image */
5954 pImage->cUsage = 0;
5955 pImage->pDevExt = NULL;
5956 pImage->pNext = NULL;
5957 pImage->uState = SUP_IOCTL_LDR_FREE;
5958 RTMemExecFree(pImage->pvImageAlloc, pImage->cbImageBits + 31);
5959 pImage->pvImageAlloc = NULL;
5960 RTMemFree(pImage->pachStrTab);
5961 pImage->pachStrTab = NULL;
5962 RTMemFree(pImage->paSymbols);
5963 pImage->paSymbols = NULL;
5964 RTMemFree(pImage);
5965}
5966
5967
5968/**
5969 * Acquires the loader lock.
5970 *
5971 * @returns IPRT status code.
5972 * @param pDevExt The device extension.
5973 */
5974DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt)
5975{
5976#ifdef SUPDRV_USE_MUTEX_FOR_LDR
5977 int rc = RTSemMutexRequest(pDevExt->mtxLdr, RT_INDEFINITE_WAIT);
5978#else
5979 int rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
5980#endif
5981 AssertRC(rc);
5982 return rc;
5983}
5984
5985
5986/**
5987 * Releases the loader lock.
5988 *
5989 * @returns IPRT status code.
5990 * @param pDevExt The device extension.
5991 */
5992DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt)
5993{
5994#ifdef SUPDRV_USE_MUTEX_FOR_LDR
5995 return RTSemMutexRelease(pDevExt->mtxLdr);
5996#else
5997 return RTSemFastMutexRelease(pDevExt->mtxLdr);
5998#endif
5999}
6000
6001
6002/**
6003 * Implements the service call request.
6004 *
6005 * @returns VBox status code.
6006 * @param pDevExt The device extension.
6007 * @param pSession The calling session.
6008 * @param pReq The request packet, valid.
6009 */
6010static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
6011{
6012#if !defined(RT_OS_WINDOWS) || defined(RT_ARCH_AMD64) || defined(DEBUG)
6013 int rc;
6014
6015 /*
6016 * Find the module, searching only the modules referenced by the calling session.
6017 */
6018 rc = supdrvLdrLock(pDevExt);
6019 if (RT_SUCCESS(rc))
6020 {
6021 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
6022 PSUPDRVLDRUSAGE pUsage;
6023
6024 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6025 if ( pUsage->pImage->pfnServiceReqHandler
6026 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
6027 {
6028 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
6029 break;
6030 }
6031 supdrvLdrUnlock(pDevExt);
6032
6033 if (pfnServiceReqHandler)
6034 {
6035 /*
6036 * Call it.
6037 */
6038 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
6039 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
6040 else
6041 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
6042 }
6043 else
6044 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
6045 }
6046
6047 /* log it */
6048 if ( RT_FAILURE(rc)
6049 && rc != VERR_INTERRUPTED
6050 && rc != VERR_TIMEOUT)
6051 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6052 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6053 else
6054 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6055 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6056 return rc;
6057#else /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6058 RT_NOREF3(pDevExt, pSession, pReq);
6059 return VERR_NOT_IMPLEMENTED;
6060#endif /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6061}
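
/*
 * Handler sketch (illustrative): a service request handler registered via
 * SUPLDRLOADEP_SERVICE receives the session, operation, 64-bit argument and
 * optional request packet exactly as dispatched above. The handler name and
 * operation number are hypothetical.
 *
 *      static DECLCALLBACK(int) exampleServiceReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
 *                                                        uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
 *      {
 *          RT_NOREF(pSession, u64Arg);
 *          switch (uOperation)
 *          {
 *              case 0:  // hypothetical "query version" operation
 *                  return pReqHdr ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
 *              default:
 *                  return VERR_NOT_SUPPORTED;
 *          }
 *      }
 */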
6062
6063
6064/**
6065 * Implements the logger settings request.
6066 *
6067 * @returns VBox status code.
6068 * @param pReq The request.
6069 */
6070static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq)
6071{
6072 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
6073 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
6074 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
6075 PRTLOGGER pLogger = NULL;
6076 int rc;
6077
6078 /*
6079 * Some further validation.
6080 */
6081 switch (pReq->u.In.fWhat)
6082 {
6083 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6084 case SUPLOGGERSETTINGS_WHAT_CREATE:
6085 break;
6086
6087 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6088 if (*pszGroup || *pszFlags || *pszDest)
6089 return VERR_INVALID_PARAMETER;
6090 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
6091 return VERR_ACCESS_DENIED;
6092 break;
6093
6094 default:
6095 return VERR_INTERNAL_ERROR;
6096 }
6097
6098 /*
6099 * Get the logger.
6100 */
6101 switch (pReq->u.In.fWhich)
6102 {
6103 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6104 pLogger = RTLogGetDefaultInstance();
6105 break;
6106
6107 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6108 pLogger = RTLogRelGetDefaultInstance();
6109 break;
6110
6111 default:
6112 return VERR_INTERNAL_ERROR;
6113 }
6114
6115 /*
6116 * Do the job.
6117 */
6118 switch (pReq->u.In.fWhat)
6119 {
6120 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6121 if (pLogger)
6122 {
6123 rc = RTLogFlags(pLogger, pszFlags);
6124 if (RT_SUCCESS(rc))
6125 rc = RTLogGroupSettings(pLogger, pszGroup);
6126 NOREF(pszDest);
6127 }
6128 else
6129 rc = VERR_NOT_FOUND;
6130 break;
6131
6132 case SUPLOGGERSETTINGS_WHAT_CREATE:
6133 {
6134 if (pLogger)
6135 rc = VERR_ALREADY_EXISTS;
6136 else
6137 {
6138 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
6139
6140 rc = RTLogCreate(&pLogger,
6141 0 /* fFlags */,
6142 pszGroup,
6143 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
6144 ? "VBOX_LOG"
6145 : "VBOX_RELEASE_LOG",
6146 RT_ELEMENTS(s_apszGroups),
6147 s_apszGroups,
6148 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
6149 NULL);
6150 if (RT_SUCCESS(rc))
6151 {
6152 rc = RTLogFlags(pLogger, pszFlags);
6153 NOREF(pszDest);
6154 if (RT_SUCCESS(rc))
6155 {
6156 switch (pReq->u.In.fWhich)
6157 {
6158 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6159 pLogger = RTLogSetDefaultInstance(pLogger);
6160 break;
6161 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6162 pLogger = RTLogRelSetDefaultInstance(pLogger);
6163 break;
6164 }
6165 }
6166 RTLogDestroy(pLogger);
6167 }
6168 }
6169 break;
6170 }
6171
6172 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6173 switch (pReq->u.In.fWhich)
6174 {
6175 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6176 pLogger = RTLogSetDefaultInstance(NULL);
6177 break;
6178 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6179 pLogger = RTLogRelSetDefaultInstance(NULL);
6180 break;
6181 }
6182 rc = RTLogDestroy(pLogger);
6183 break;
6184
6185 default:
6186 {
6187 rc = VERR_INTERNAL_ERROR;
6188 break;
6189 }
6190 }
6191
6192 return rc;
6193}
6194
6195
6196/**
6197 * Implements the MSR prober operations.
6198 *
6199 * @returns VBox status code.
6200 * @param pDevExt The device extension.
6201 * @param pReq The request.
6202 */
6203static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq)
6204{
6205#ifdef SUPDRV_WITH_MSR_PROBER
6206 RTCPUID const idCpu = pReq->u.In.idCpu == UINT32_MAX ? NIL_RTCPUID : pReq->u.In.idCpu;
6207 int rc;
6208
6209 switch (pReq->u.In.enmOp)
6210 {
6211 case SUPMSRPROBEROP_READ:
6212 {
6213 uint64_t uValue;
6214 rc = supdrvOSMsrProberRead(pReq->u.In.uMsr, idCpu, &uValue);
6215 if (RT_SUCCESS(rc))
6216 {
6217 pReq->u.Out.uResults.Read.uValue = uValue;
6218 pReq->u.Out.uResults.Read.fGp = false;
6219 }
6220 else if (rc == VERR_ACCESS_DENIED)
6221 {
6222 pReq->u.Out.uResults.Read.uValue = 0;
6223 pReq->u.Out.uResults.Read.fGp = true;
6224 rc = VINF_SUCCESS;
6225 }
6226 break;
6227 }
6228
6229 case SUPMSRPROBEROP_WRITE:
6230 rc = supdrvOSMsrProberWrite(pReq->u.In.uMsr, idCpu, pReq->u.In.uArgs.Write.uToWrite);
6231 if (RT_SUCCESS(rc))
6232 pReq->u.Out.uResults.Write.fGp = false;
6233 else if (rc == VERR_ACCESS_DENIED)
6234 {
6235 pReq->u.Out.uResults.Write.fGp = true;
6236 rc = VINF_SUCCESS;
6237 }
6238 break;
6239
6240 case SUPMSRPROBEROP_MODIFY:
6241 case SUPMSRPROBEROP_MODIFY_FASTER:
6242 rc = supdrvOSMsrProberModify(idCpu, pReq);
6243 break;
6244
6245 default:
6246 return VERR_INVALID_FUNCTION;
6247 }
6248 RT_NOREF1(pDevExt);
6249 return rc;
6250#else
6251 RT_NOREF2(pDevExt, pReq);
6252 return VERR_NOT_IMPLEMENTED;
6253#endif
6254}
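
/*
 * Request sketch (illustrative): reading an MSR via the prober; idCpu set to
 * UINT32_MAX is translated to NIL_RTCPUID above, i.e. no specific CPU is
 * requested.
 *
 *      Req.u.In.enmOp = SUPMSRPROBEROP_READ;
 *      Req.u.In.uMsr  = MSR_IA32_VMX_BASIC;
 *      Req.u.In.idCpu = UINT32_MAX;
 *      // ... issue the MSR prober ioctl ...
 *      if (!Req.u.Out.uResults.Read.fGp)
 *          uVmxBasic = Req.u.Out.uResults.Read.uValue;
 */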
6255
6256
6257/**
6258 * Resume built-in keyboard on MacBook Air and Pro hosts.
6259 * If there is no built-in keyboard device, return success anyway.
6260 *
6261 * @returns 0 on the Mac OS X platform, VERR_NOT_IMPLEMENTED on other platforms.
6262 */
6263static int supdrvIOCtl_ResumeSuspendedKbds(void)
6264{
6265#if defined(RT_OS_DARWIN)
6266 return supdrvDarwinResumeSuspendedKbds();
6267#else
6268 return VERR_NOT_IMPLEMENTED;
6269#endif
6270}
6271