VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp@ 86719

Last change on this file since 86719 was 86512, checked in by vboxsync, 4 years ago

SUP: Major support driver interface version bump. Trace VMMR0.r0 dependants. bugref:9841

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 258.0 KB
Line 
1/* $Id: SUPDrv.cpp 86512 2020-10-10 11:20:58Z vboxsync $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_SUP_DRV
32#define SUPDRV_AGNOSTIC
33#include "SUPDrvInternal.h"
34#ifndef PAGE_SHIFT
35# include <iprt/param.h>
36#endif
37#include <iprt/asm.h>
38#include <iprt/asm-amd64-x86.h>
39#include <iprt/asm-math.h>
40#include <iprt/cpuset.h>
41#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
42# include <iprt/dbg.h>
43#endif
44#include <iprt/handletable.h>
45#include <iprt/mem.h>
46#include <iprt/mp.h>
47#include <iprt/power.h>
48#include <iprt/process.h>
49#include <iprt/semaphore.h>
50#include <iprt/spinlock.h>
51#include <iprt/thread.h>
52#include <iprt/uuid.h>
53#include <iprt/net.h>
54#include <iprt/crc.h>
55#include <iprt/string.h>
56#include <iprt/timer.h>
57#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
58# include <iprt/rand.h>
59# include <iprt/path.h>
60#endif
61#include <iprt/uint128.h>
62#include <iprt/x86.h>
63
64#include <VBox/param.h>
65#include <VBox/log.h>
66#include <VBox/err.h>
67#include <VBox/vmm/hm_vmx.h>
68
#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
# include "dtrace/SUPDrv.h"
#else
/* No DTrace support on this platform: define the probe points used by this
   file as no-op statements so the call sites compile away cleanly. */
# define VBOXDRV_SESSION_CREATE(pvSession, fUser) do { } while (0)
# define VBOXDRV_SESSION_CLOSE(pvSession) do { } while (0)
# define VBOXDRV_IOCTL_ENTRY(pvSession, uIOCtl, pvReqHdr) do { } while (0)
# define VBOXDRV_IOCTL_RETURN(pvSession, uIOCtl, pvReqHdr, rcRet, rcReq) do { } while (0)
#endif
77
78/*
79 * Logging assignments:
80 * Log - useful stuff, like failures.
81 * LogFlow - program flow, except the really noisy bits.
82 * Log2 - Cleanup.
83 * Log3 - Loader flow noise.
84 * Log4 - Call VMMR0 flow noise.
85 * Log5 - Native yet-to-be-defined noise.
86 * Log6 - Native ioctl flow noise.
87 *
88 * Logging requires KBUILD_TYPE=debug and possibly changes to the logger
89 * instantiation in log-vbox.c(pp).
90 */
91
92
93/*********************************************************************************************************************************
94* Defined Constants And Macros *
95*********************************************************************************************************************************/
/** @def VBOX_SVN_REV
 * The makefile should define this if it can. */
#ifndef VBOX_SVN_REV
# define VBOX_SVN_REV 0
#endif

/** @def SUPDRV_CHECK_SMAP_SETUP
 * SMAP check setup.  Declares the local @c fKernelFeatures variable that
 * SUPDRV_CHECK_SMAP_CHECK consults. */
/** @def SUPDRV_CHECK_SMAP_CHECK
 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
 * will be logged and @a a_BadExpr is executed. */
#if (defined(RT_OS_DARWIN) || defined(RT_OS_LINUX)) && !defined(VBOX_WITHOUT_EFLAGS_AC_SET_IN_VBOXDRV)
# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) \
    do { \
        if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
        { \
            RTCCUINTREG fEfl = ASMGetFlags(); \
            if (RT_LIKELY(fEfl & X86_EFL_AC)) \
            { /* likely */ } \
            else \
            { \
                supdrvBadContext(a_pDevExt, "SUPDrv.cpp", __LINE__, "EFLAGS.AC is 0!"); \
                a_BadExpr; \
            } \
        } \
    } while (0)
#else
/* SMAP checking disabled: setup still declares fKernelFeatures (as zero) so
   the check macro can reference it without warnings. */
# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) NOREF(fKernelFeatures)
#endif
127
128
129/*********************************************************************************************************************************
130* Internal Functions *
131*********************************************************************************************************************************/
/* Session object handle table callbacks. */
static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
/* Per-session memory reference tracking. */
static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
/* Ring-0 image loader ioctl/IDC workers. */
static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt);
static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
/* VMMR0 entry point registration and image bookkeeping. */
static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage);
static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt);
DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt);
/* Misc ioctl workers. */
static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq);
static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq);
static int supdrvIOCtl_ResumeSuspendedKbds(void);
152
153
154/*********************************************************************************************************************************
155* Global Variables *
156*********************************************************************************************************************************/
/**
 * Array of the R0 SUP API.
 *
 * While making changes to these exports, make sure to update the IOC
 * minor version (SUPDRV_IOC_VERSION).
 *
 * The first ten entries are absolute values (not function pointers) that are
 * fixed up at runtime; the fixup code makes assumptions about their exact
 * order, so do not reorder them.
 *
 * @remarks This array is processed by SUPR0-def-pe.sed and SUPR0-def-lx.sed to
 *          produce definition files from which import libraries are generated.
 *          Take care when commenting things and especially with \#ifdef'ing.
 *          (No comments are added inside the SED region below for that
 *          reason.)
 */
static SUPFUNC g_aFunctions[] =
{
/* SED: START */
 /* name function */
 /* Entries with absolute addresses determined at runtime, fixup
 code makes ugly ASSUMPTIONS about the order here: */
 { "SUPR0AbsIs64bit", (void *)0 },
 { "SUPR0Abs64bitKernelCS", (void *)0 },
 { "SUPR0Abs64bitKernelSS", (void *)0 },
 { "SUPR0Abs64bitKernelDS", (void *)0 },
 { "SUPR0AbsKernelCS", (void *)0 },
 { "SUPR0AbsKernelSS", (void *)0 },
 { "SUPR0AbsKernelDS", (void *)0 },
 { "SUPR0AbsKernelES", (void *)0 },
 { "SUPR0AbsKernelFS", (void *)0 },
 { "SUPR0AbsKernelGS", (void *)0 },
 /* Normal function pointers: */
 { "g_pSUPGlobalInfoPage", (void *)&g_pSUPGlobalInfoPage }, /* SED: DATA */
 { "SUPGetGIP", (void *)(uintptr_t)SUPGetGIP },
 { "SUPReadTscWithDelta", (void *)(uintptr_t)SUPReadTscWithDelta },
 { "SUPGetTscDeltaSlow", (void *)(uintptr_t)SUPGetTscDeltaSlow },
 { "SUPGetCpuHzFromGipForAsyncMode", (void *)(uintptr_t)SUPGetCpuHzFromGipForAsyncMode },
 { "SUPIsTscFreqCompatible", (void *)(uintptr_t)SUPIsTscFreqCompatible },
 { "SUPIsTscFreqCompatibleEx", (void *)(uintptr_t)SUPIsTscFreqCompatibleEx },
 { "SUPR0BadContext", (void *)(uintptr_t)SUPR0BadContext },
 { "SUPR0ComponentDeregisterFactory", (void *)(uintptr_t)SUPR0ComponentDeregisterFactory },
 { "SUPR0ComponentQueryFactory", (void *)(uintptr_t)SUPR0ComponentQueryFactory },
 { "SUPR0ComponentRegisterFactory", (void *)(uintptr_t)SUPR0ComponentRegisterFactory },
 { "SUPR0ContAlloc", (void *)(uintptr_t)SUPR0ContAlloc },
 { "SUPR0ContFree", (void *)(uintptr_t)SUPR0ContFree },
 { "SUPR0ChangeCR4", (void *)(uintptr_t)SUPR0ChangeCR4 },
 { "SUPR0EnableVTx", (void *)(uintptr_t)SUPR0EnableVTx },
 { "SUPR0SuspendVTxOnCpu", (void *)(uintptr_t)SUPR0SuspendVTxOnCpu },
 { "SUPR0ResumeVTxOnCpu", (void *)(uintptr_t)SUPR0ResumeVTxOnCpu },
 { "SUPR0GetCurrentGdtRw", (void *)(uintptr_t)SUPR0GetCurrentGdtRw },
 { "SUPR0GetKernelFeatures", (void *)(uintptr_t)SUPR0GetKernelFeatures },
 { "SUPR0GetHwvirtMsrs", (void *)(uintptr_t)SUPR0GetHwvirtMsrs },
 { "SUPR0GetPagingMode", (void *)(uintptr_t)SUPR0GetPagingMode },
 { "SUPR0GetSvmUsability", (void *)(uintptr_t)SUPR0GetSvmUsability },
 { "SUPR0GetVTSupport", (void *)(uintptr_t)SUPR0GetVTSupport },
 { "SUPR0GetVmxUsability", (void *)(uintptr_t)SUPR0GetVmxUsability },
 { "SUPR0LdrIsLockOwnerByMod", (void *)(uintptr_t)SUPR0LdrIsLockOwnerByMod },
 { "SUPR0LdrLock", (void *)(uintptr_t)SUPR0LdrLock },
 { "SUPR0LdrUnlock", (void *)(uintptr_t)SUPR0LdrUnlock },
 { "SUPR0LdrModByName", (void *)(uintptr_t)SUPR0LdrModByName },
 { "SUPR0LdrModRelease", (void *)(uintptr_t)SUPR0LdrModRelease },
 { "SUPR0LdrModRetain", (void *)(uintptr_t)SUPR0LdrModRetain },
 { "SUPR0LockMem", (void *)(uintptr_t)SUPR0LockMem },
 { "SUPR0LowAlloc", (void *)(uintptr_t)SUPR0LowAlloc },
 { "SUPR0LowFree", (void *)(uintptr_t)SUPR0LowFree },
 { "SUPR0MemAlloc", (void *)(uintptr_t)SUPR0MemAlloc },
 { "SUPR0MemFree", (void *)(uintptr_t)SUPR0MemFree },
 { "SUPR0MemGetPhys", (void *)(uintptr_t)SUPR0MemGetPhys },
 { "SUPR0ObjAddRef", (void *)(uintptr_t)SUPR0ObjAddRef },
 { "SUPR0ObjAddRefEx", (void *)(uintptr_t)SUPR0ObjAddRefEx },
 { "SUPR0ObjRegister", (void *)(uintptr_t)SUPR0ObjRegister },
 { "SUPR0ObjRelease", (void *)(uintptr_t)SUPR0ObjRelease },
 { "SUPR0ObjVerifyAccess", (void *)(uintptr_t)SUPR0ObjVerifyAccess },
 { "SUPR0PageAllocEx", (void *)(uintptr_t)SUPR0PageAllocEx },
 { "SUPR0PageFree", (void *)(uintptr_t)SUPR0PageFree },
 { "SUPR0PageMapKernel", (void *)(uintptr_t)SUPR0PageMapKernel },
 { "SUPR0PageProtect", (void *)(uintptr_t)SUPR0PageProtect },
#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
 { "SUPR0HCPhysToVirt", (void *)(uintptr_t)SUPR0HCPhysToVirt }, /* only-linux, only solaris */
#endif
 { "SUPR0Printf", (void *)(uintptr_t)SUPR0Printf },
 { "SUPR0GetSessionGVM", (void *)(uintptr_t)SUPR0GetSessionGVM },
 { "SUPR0GetSessionVM", (void *)(uintptr_t)SUPR0GetSessionVM },
 { "SUPR0SetSessionVM", (void *)(uintptr_t)SUPR0SetSessionVM },
 { "SUPR0TscDeltaMeasureBySetIndex", (void *)(uintptr_t)SUPR0TscDeltaMeasureBySetIndex },
 { "SUPR0TracerDeregisterDrv", (void *)(uintptr_t)SUPR0TracerDeregisterDrv },
 { "SUPR0TracerDeregisterImpl", (void *)(uintptr_t)SUPR0TracerDeregisterImpl },
 { "SUPR0TracerFireProbe", (void *)(uintptr_t)SUPR0TracerFireProbe },
 { "SUPR0TracerRegisterDrv", (void *)(uintptr_t)SUPR0TracerRegisterDrv },
 { "SUPR0TracerRegisterImpl", (void *)(uintptr_t)SUPR0TracerRegisterImpl },
 { "SUPR0TracerRegisterModule", (void *)(uintptr_t)SUPR0TracerRegisterModule },
 { "SUPR0TracerUmodProbeFire", (void *)(uintptr_t)SUPR0TracerUmodProbeFire },
 { "SUPR0UnlockMem", (void *)(uintptr_t)SUPR0UnlockMem },
#ifdef RT_OS_WINDOWS
 { "SUPR0IoCtlSetupForHandle", (void *)(uintptr_t)SUPR0IoCtlSetupForHandle }, /* only-windows */
 { "SUPR0IoCtlPerform", (void *)(uintptr_t)SUPR0IoCtlPerform }, /* only-windows */
 { "SUPR0IoCtlCleanup", (void *)(uintptr_t)SUPR0IoCtlCleanup }, /* only-windows */
#endif
 { "SUPSemEventClose", (void *)(uintptr_t)SUPSemEventClose },
 { "SUPSemEventCreate", (void *)(uintptr_t)SUPSemEventCreate },
 { "SUPSemEventGetResolution", (void *)(uintptr_t)SUPSemEventGetResolution },
 { "SUPSemEventMultiClose", (void *)(uintptr_t)SUPSemEventMultiClose },
 { "SUPSemEventMultiCreate", (void *)(uintptr_t)SUPSemEventMultiCreate },
 { "SUPSemEventMultiGetResolution", (void *)(uintptr_t)SUPSemEventMultiGetResolution },
 { "SUPSemEventMultiReset", (void *)(uintptr_t)SUPSemEventMultiReset },
 { "SUPSemEventMultiSignal", (void *)(uintptr_t)SUPSemEventMultiSignal },
 { "SUPSemEventMultiWait", (void *)(uintptr_t)SUPSemEventMultiWait },
 { "SUPSemEventMultiWaitNoResume", (void *)(uintptr_t)SUPSemEventMultiWaitNoResume },
 { "SUPSemEventMultiWaitNsAbsIntr", (void *)(uintptr_t)SUPSemEventMultiWaitNsAbsIntr },
 { "SUPSemEventMultiWaitNsRelIntr", (void *)(uintptr_t)SUPSemEventMultiWaitNsRelIntr },
 { "SUPSemEventSignal", (void *)(uintptr_t)SUPSemEventSignal },
 { "SUPSemEventWait", (void *)(uintptr_t)SUPSemEventWait },
 { "SUPSemEventWaitNoResume", (void *)(uintptr_t)SUPSemEventWaitNoResume },
 { "SUPSemEventWaitNsAbsIntr", (void *)(uintptr_t)SUPSemEventWaitNsAbsIntr },
 { "SUPSemEventWaitNsRelIntr", (void *)(uintptr_t)SUPSemEventWaitNsRelIntr },

 { "RTAssertAreQuiet", (void *)(uintptr_t)RTAssertAreQuiet },
 { "RTAssertMayPanic", (void *)(uintptr_t)RTAssertMayPanic },
 { "RTAssertMsg1", (void *)(uintptr_t)RTAssertMsg1 },
 { "RTAssertMsg2AddV", (void *)(uintptr_t)RTAssertMsg2AddV },
 { "RTAssertMsg2V", (void *)(uintptr_t)RTAssertMsg2V },
 { "RTAssertSetMayPanic", (void *)(uintptr_t)RTAssertSetMayPanic },
 { "RTAssertSetQuiet", (void *)(uintptr_t)RTAssertSetQuiet },
 { "RTCrc32", (void *)(uintptr_t)RTCrc32 },
 { "RTCrc32Finish", (void *)(uintptr_t)RTCrc32Finish },
 { "RTCrc32Process", (void *)(uintptr_t)RTCrc32Process },
 { "RTCrc32Start", (void *)(uintptr_t)RTCrc32Start },
 { "RTErrConvertFromErrno", (void *)(uintptr_t)RTErrConvertFromErrno },
 { "RTErrConvertToErrno", (void *)(uintptr_t)RTErrConvertToErrno },
 { "RTHandleTableAllocWithCtx", (void *)(uintptr_t)RTHandleTableAllocWithCtx },
 { "RTHandleTableCreate", (void *)(uintptr_t)RTHandleTableCreate },
 { "RTHandleTableCreateEx", (void *)(uintptr_t)RTHandleTableCreateEx },
 { "RTHandleTableDestroy", (void *)(uintptr_t)RTHandleTableDestroy },
 { "RTHandleTableFreeWithCtx", (void *)(uintptr_t)RTHandleTableFreeWithCtx },
 { "RTHandleTableLookupWithCtx", (void *)(uintptr_t)RTHandleTableLookupWithCtx },
 { "RTLogDefaultInstance", (void *)(uintptr_t)RTLogDefaultInstance },
 { "RTLogDefaultInstanceEx", (void *)(uintptr_t)RTLogDefaultInstanceEx },
 { "RTLogGetDefaultInstance", (void *)(uintptr_t)RTLogGetDefaultInstance },
 { "RTLogGetDefaultInstanceEx", (void *)(uintptr_t)RTLogGetDefaultInstanceEx },
 { "SUPR0GetDefaultLogInstanceEx", (void *)(uintptr_t)SUPR0GetDefaultLogInstanceEx },
 { "RTLogLoggerExV", (void *)(uintptr_t)RTLogLoggerExV },
 { "RTLogPrintfV", (void *)(uintptr_t)RTLogPrintfV },
 { "RTLogRelGetDefaultInstance", (void *)(uintptr_t)RTLogRelGetDefaultInstance },
 { "RTLogRelGetDefaultInstanceEx", (void *)(uintptr_t)RTLogRelGetDefaultInstanceEx },
 { "SUPR0GetDefaultLogRelInstanceEx", (void *)(uintptr_t)SUPR0GetDefaultLogRelInstanceEx },
 { "RTLogSetDefaultInstanceThread", (void *)(uintptr_t)RTLogSetDefaultInstanceThread },
 { "RTMemAllocExTag", (void *)(uintptr_t)RTMemAllocExTag },
 { "RTMemAllocTag", (void *)(uintptr_t)RTMemAllocTag },
 { "RTMemAllocVarTag", (void *)(uintptr_t)RTMemAllocVarTag },
 { "RTMemAllocZTag", (void *)(uintptr_t)RTMemAllocZTag },
 { "RTMemAllocZVarTag", (void *)(uintptr_t)RTMemAllocZVarTag },
 { "RTMemDupExTag", (void *)(uintptr_t)RTMemDupExTag },
 { "RTMemDupTag", (void *)(uintptr_t)RTMemDupTag },
 { "RTMemFree", (void *)(uintptr_t)RTMemFree },
 { "RTMemFreeEx", (void *)(uintptr_t)RTMemFreeEx },
 { "RTMemReallocTag", (void *)(uintptr_t)RTMemReallocTag },
 { "RTMpCpuId", (void *)(uintptr_t)RTMpCpuId },
 { "RTMpCpuIdFromSetIndex", (void *)(uintptr_t)RTMpCpuIdFromSetIndex },
 { "RTMpCpuIdToSetIndex", (void *)(uintptr_t)RTMpCpuIdToSetIndex },
 { "RTMpCurSetIndex", (void *)(uintptr_t)RTMpCurSetIndex },
 { "RTMpCurSetIndexAndId", (void *)(uintptr_t)RTMpCurSetIndexAndId },
 { "RTMpGetArraySize", (void *)(uintptr_t)RTMpGetArraySize },
 { "RTMpGetCount", (void *)(uintptr_t)RTMpGetCount },
 { "RTMpGetMaxCpuId", (void *)(uintptr_t)RTMpGetMaxCpuId },
 { "RTMpGetOnlineCount", (void *)(uintptr_t)RTMpGetOnlineCount },
 { "RTMpGetOnlineSet", (void *)(uintptr_t)RTMpGetOnlineSet },
 { "RTMpGetSet", (void *)(uintptr_t)RTMpGetSet },
 { "RTMpIsCpuOnline", (void *)(uintptr_t)RTMpIsCpuOnline },
 { "RTMpIsCpuPossible", (void *)(uintptr_t)RTMpIsCpuPossible },
 { "RTMpIsCpuWorkPending", (void *)(uintptr_t)RTMpIsCpuWorkPending },
 { "RTMpNotificationDeregister", (void *)(uintptr_t)RTMpNotificationDeregister },
 { "RTMpNotificationRegister", (void *)(uintptr_t)RTMpNotificationRegister },
 { "RTMpOnAll", (void *)(uintptr_t)RTMpOnAll },
 { "RTMpOnOthers", (void *)(uintptr_t)RTMpOnOthers },
 { "RTMpOnSpecific", (void *)(uintptr_t)RTMpOnSpecific },
 { "RTMpPokeCpu", (void *)(uintptr_t)RTMpPokeCpu },
 { "RTNetIPv4AddDataChecksum", (void *)(uintptr_t)RTNetIPv4AddDataChecksum },
 { "RTNetIPv4AddTCPChecksum", (void *)(uintptr_t)RTNetIPv4AddTCPChecksum },
 { "RTNetIPv4AddUDPChecksum", (void *)(uintptr_t)RTNetIPv4AddUDPChecksum },
 { "RTNetIPv4FinalizeChecksum", (void *)(uintptr_t)RTNetIPv4FinalizeChecksum },
 { "RTNetIPv4HdrChecksum", (void *)(uintptr_t)RTNetIPv4HdrChecksum },
 { "RTNetIPv4IsDHCPValid", (void *)(uintptr_t)RTNetIPv4IsDHCPValid },
 { "RTNetIPv4IsHdrValid", (void *)(uintptr_t)RTNetIPv4IsHdrValid },
 { "RTNetIPv4IsTCPSizeValid", (void *)(uintptr_t)RTNetIPv4IsTCPSizeValid },
 { "RTNetIPv4IsTCPValid", (void *)(uintptr_t)RTNetIPv4IsTCPValid },
 { "RTNetIPv4IsUDPSizeValid", (void *)(uintptr_t)RTNetIPv4IsUDPSizeValid },
 { "RTNetIPv4IsUDPValid", (void *)(uintptr_t)RTNetIPv4IsUDPValid },
 { "RTNetIPv4PseudoChecksum", (void *)(uintptr_t)RTNetIPv4PseudoChecksum },
 { "RTNetIPv4PseudoChecksumBits", (void *)(uintptr_t)RTNetIPv4PseudoChecksumBits },
 { "RTNetIPv4TCPChecksum", (void *)(uintptr_t)RTNetIPv4TCPChecksum },
 { "RTNetIPv4UDPChecksum", (void *)(uintptr_t)RTNetIPv4UDPChecksum },
 { "RTNetIPv6PseudoChecksum", (void *)(uintptr_t)RTNetIPv6PseudoChecksum },
 { "RTNetIPv6PseudoChecksumBits", (void *)(uintptr_t)RTNetIPv6PseudoChecksumBits },
 { "RTNetIPv6PseudoChecksumEx", (void *)(uintptr_t)RTNetIPv6PseudoChecksumEx },
 { "RTNetTCPChecksum", (void *)(uintptr_t)RTNetTCPChecksum },
 { "RTNetUDPChecksum", (void *)(uintptr_t)RTNetUDPChecksum },
 { "RTPowerNotificationDeregister", (void *)(uintptr_t)RTPowerNotificationDeregister },
 { "RTPowerNotificationRegister", (void *)(uintptr_t)RTPowerNotificationRegister },
 { "RTProcSelf", (void *)(uintptr_t)RTProcSelf },
 { "RTR0AssertPanicSystem", (void *)(uintptr_t)RTR0AssertPanicSystem },
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
 { "RTR0DbgKrnlInfoOpen", (void *)(uintptr_t)RTR0DbgKrnlInfoOpen }, /* only-darwin, only-solaris, only-windows */
 { "RTR0DbgKrnlInfoQueryMember", (void *)(uintptr_t)RTR0DbgKrnlInfoQueryMember }, /* only-darwin, only-solaris, only-windows */
# if defined(RT_OS_SOLARIS)
 { "RTR0DbgKrnlInfoQuerySize", (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySize }, /* only-solaris */
# endif
 { "RTR0DbgKrnlInfoQuerySymbol", (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySymbol }, /* only-darwin, only-solaris, only-windows */
 { "RTR0DbgKrnlInfoRelease", (void *)(uintptr_t)RTR0DbgKrnlInfoRelease }, /* only-darwin, only-solaris, only-windows */
 { "RTR0DbgKrnlInfoRetain", (void *)(uintptr_t)RTR0DbgKrnlInfoRetain }, /* only-darwin, only-solaris, only-windows */
#endif
 { "RTR0MemAreKrnlAndUsrDifferent", (void *)(uintptr_t)RTR0MemAreKrnlAndUsrDifferent },
 { "RTR0MemKernelIsValidAddr", (void *)(uintptr_t)RTR0MemKernelIsValidAddr },
 { "RTR0MemKernelCopyFrom", (void *)(uintptr_t)RTR0MemKernelCopyFrom },
 { "RTR0MemKernelCopyTo", (void *)(uintptr_t)RTR0MemKernelCopyTo },
 { "RTR0MemObjAddress", (void *)(uintptr_t)RTR0MemObjAddress },
 { "RTR0MemObjAddressR3", (void *)(uintptr_t)RTR0MemObjAddressR3 },
 { "RTR0MemObjAllocContTag", (void *)(uintptr_t)RTR0MemObjAllocContTag },
 { "RTR0MemObjAllocLowTag", (void *)(uintptr_t)RTR0MemObjAllocLowTag },
 { "RTR0MemObjAllocPageTag", (void *)(uintptr_t)RTR0MemObjAllocPageTag },
 { "RTR0MemObjAllocPhysExTag", (void *)(uintptr_t)RTR0MemObjAllocPhysExTag },
 { "RTR0MemObjAllocPhysNCTag", (void *)(uintptr_t)RTR0MemObjAllocPhysNCTag },
 { "RTR0MemObjAllocPhysTag", (void *)(uintptr_t)RTR0MemObjAllocPhysTag },
 { "RTR0MemObjEnterPhysTag", (void *)(uintptr_t)RTR0MemObjEnterPhysTag },
 { "RTR0MemObjFree", (void *)(uintptr_t)RTR0MemObjFree },
 { "RTR0MemObjGetPagePhysAddr", (void *)(uintptr_t)RTR0MemObjGetPagePhysAddr },
 { "RTR0MemObjIsMapping", (void *)(uintptr_t)RTR0MemObjIsMapping },
 { "RTR0MemObjLockUserTag", (void *)(uintptr_t)RTR0MemObjLockUserTag },
 { "RTR0MemObjMapKernelExTag", (void *)(uintptr_t)RTR0MemObjMapKernelExTag },
 { "RTR0MemObjMapKernelTag", (void *)(uintptr_t)RTR0MemObjMapKernelTag },
 { "RTR0MemObjMapUserTag", (void *)(uintptr_t)RTR0MemObjMapUserTag },
 { "RTR0MemObjMapUserExTag", (void *)(uintptr_t)RTR0MemObjMapUserExTag },
 { "RTR0MemObjProtect", (void *)(uintptr_t)RTR0MemObjProtect },
 { "RTR0MemObjSize", (void *)(uintptr_t)RTR0MemObjSize },
 { "RTR0MemUserCopyFrom", (void *)(uintptr_t)RTR0MemUserCopyFrom },
 { "RTR0MemUserCopyTo", (void *)(uintptr_t)RTR0MemUserCopyTo },
 { "RTR0MemUserIsValidAddr", (void *)(uintptr_t)RTR0MemUserIsValidAddr },
 { "RTR0ProcHandleSelf", (void *)(uintptr_t)RTR0ProcHandleSelf },
 { "RTSemEventCreate", (void *)(uintptr_t)RTSemEventCreate },
 { "RTSemEventDestroy", (void *)(uintptr_t)RTSemEventDestroy },
 { "RTSemEventGetResolution", (void *)(uintptr_t)RTSemEventGetResolution },
 { "RTSemEventMultiCreate", (void *)(uintptr_t)RTSemEventMultiCreate },
 { "RTSemEventMultiDestroy", (void *)(uintptr_t)RTSemEventMultiDestroy },
 { "RTSemEventMultiGetResolution", (void *)(uintptr_t)RTSemEventMultiGetResolution },
 { "RTSemEventMultiReset", (void *)(uintptr_t)RTSemEventMultiReset },
 { "RTSemEventMultiSignal", (void *)(uintptr_t)RTSemEventMultiSignal },
 { "RTSemEventMultiWait", (void *)(uintptr_t)RTSemEventMultiWait },
 { "RTSemEventMultiWaitEx", (void *)(uintptr_t)RTSemEventMultiWaitEx },
 { "RTSemEventMultiWaitExDebug", (void *)(uintptr_t)RTSemEventMultiWaitExDebug },
 { "RTSemEventMultiWaitNoResume", (void *)(uintptr_t)RTSemEventMultiWaitNoResume },
 { "RTSemEventSignal", (void *)(uintptr_t)RTSemEventSignal },
 { "RTSemEventWait", (void *)(uintptr_t)RTSemEventWait },
 { "RTSemEventWaitEx", (void *)(uintptr_t)RTSemEventWaitEx },
 { "RTSemEventWaitExDebug", (void *)(uintptr_t)RTSemEventWaitExDebug },
 { "RTSemEventWaitNoResume", (void *)(uintptr_t)RTSemEventWaitNoResume },
 { "RTSemFastMutexCreate", (void *)(uintptr_t)RTSemFastMutexCreate },
 { "RTSemFastMutexDestroy", (void *)(uintptr_t)RTSemFastMutexDestroy },
 { "RTSemFastMutexRelease", (void *)(uintptr_t)RTSemFastMutexRelease },
 { "RTSemFastMutexRequest", (void *)(uintptr_t)RTSemFastMutexRequest },
 { "RTSemMutexCreate", (void *)(uintptr_t)RTSemMutexCreate },
 { "RTSemMutexDestroy", (void *)(uintptr_t)RTSemMutexDestroy },
 { "RTSemMutexRelease", (void *)(uintptr_t)RTSemMutexRelease },
 { "RTSemMutexRequest", (void *)(uintptr_t)RTSemMutexRequest },
 { "RTSemMutexRequestDebug", (void *)(uintptr_t)RTSemMutexRequestDebug },
 { "RTSemMutexRequestNoResume", (void *)(uintptr_t)RTSemMutexRequestNoResume },
 { "RTSemMutexRequestNoResumeDebug", (void *)(uintptr_t)RTSemMutexRequestNoResumeDebug },
 { "RTSpinlockAcquire", (void *)(uintptr_t)RTSpinlockAcquire },
 { "RTSpinlockCreate", (void *)(uintptr_t)RTSpinlockCreate },
 { "RTSpinlockDestroy", (void *)(uintptr_t)RTSpinlockDestroy },
 { "RTSpinlockRelease", (void *)(uintptr_t)RTSpinlockRelease },
 { "RTStrCopy", (void *)(uintptr_t)RTStrCopy },
 { "RTStrDupTag", (void *)(uintptr_t)RTStrDupTag },
 { "RTStrFormat", (void *)(uintptr_t)RTStrFormat },
 { "RTStrFormatNumber", (void *)(uintptr_t)RTStrFormatNumber },
 { "RTStrFormatTypeDeregister", (void *)(uintptr_t)RTStrFormatTypeDeregister },
 { "RTStrFormatTypeRegister", (void *)(uintptr_t)RTStrFormatTypeRegister },
 { "RTStrFormatTypeSetUser", (void *)(uintptr_t)RTStrFormatTypeSetUser },
 { "RTStrFormatV", (void *)(uintptr_t)RTStrFormatV },
 { "RTStrFree", (void *)(uintptr_t)RTStrFree },
 { "RTStrNCmp", (void *)(uintptr_t)RTStrNCmp },
 { "RTStrPrintf", (void *)(uintptr_t)RTStrPrintf },
 { "RTStrPrintfEx", (void *)(uintptr_t)RTStrPrintfEx },
 { "RTStrPrintfExV", (void *)(uintptr_t)RTStrPrintfExV },
 { "RTStrPrintfV", (void *)(uintptr_t)RTStrPrintfV },
 { "RTThreadCreate", (void *)(uintptr_t)RTThreadCreate },
 { "RTThreadCtxHookIsEnabled", (void *)(uintptr_t)RTThreadCtxHookIsEnabled },
 { "RTThreadCtxHookCreate", (void *)(uintptr_t)RTThreadCtxHookCreate },
 { "RTThreadCtxHookDestroy", (void *)(uintptr_t)RTThreadCtxHookDestroy },
 { "RTThreadCtxHookDisable", (void *)(uintptr_t)RTThreadCtxHookDisable },
 { "RTThreadCtxHookEnable", (void *)(uintptr_t)RTThreadCtxHookEnable },
 { "RTThreadGetName", (void *)(uintptr_t)RTThreadGetName },
 { "RTThreadGetNative", (void *)(uintptr_t)RTThreadGetNative },
 { "RTThreadGetType", (void *)(uintptr_t)RTThreadGetType },
 { "RTThreadIsInInterrupt", (void *)(uintptr_t)RTThreadIsInInterrupt },
 { "RTThreadNativeSelf", (void *)(uintptr_t)RTThreadNativeSelf },
 { "RTThreadPreemptDisable", (void *)(uintptr_t)RTThreadPreemptDisable },
 { "RTThreadPreemptIsEnabled", (void *)(uintptr_t)RTThreadPreemptIsEnabled },
 { "RTThreadPreemptIsPending", (void *)(uintptr_t)RTThreadPreemptIsPending },
 { "RTThreadPreemptIsPendingTrusty", (void *)(uintptr_t)RTThreadPreemptIsPendingTrusty },
 { "RTThreadPreemptIsPossible", (void *)(uintptr_t)RTThreadPreemptIsPossible },
 { "RTThreadPreemptRestore", (void *)(uintptr_t)RTThreadPreemptRestore },
 { "RTThreadSelf", (void *)(uintptr_t)RTThreadSelf },
 { "RTThreadSelfName", (void *)(uintptr_t)RTThreadSelfName },
 { "RTThreadSleep", (void *)(uintptr_t)RTThreadSleep },
 { "RTThreadUserReset", (void *)(uintptr_t)RTThreadUserReset },
 { "RTThreadUserSignal", (void *)(uintptr_t)RTThreadUserSignal },
 { "RTThreadUserWait", (void *)(uintptr_t)RTThreadUserWait },
 { "RTThreadUserWaitNoResume", (void *)(uintptr_t)RTThreadUserWaitNoResume },
 { "RTThreadWait", (void *)(uintptr_t)RTThreadWait },
 { "RTThreadWaitNoResume", (void *)(uintptr_t)RTThreadWaitNoResume },
 { "RTThreadYield", (void *)(uintptr_t)RTThreadYield },
 { "RTTimeNow", (void *)(uintptr_t)RTTimeNow },
 { "RTTimerCanDoHighResolution", (void *)(uintptr_t)RTTimerCanDoHighResolution },
 { "RTTimerChangeInterval", (void *)(uintptr_t)RTTimerChangeInterval },
 { "RTTimerCreate", (void *)(uintptr_t)RTTimerCreate },
 { "RTTimerCreateEx", (void *)(uintptr_t)RTTimerCreateEx },
 { "RTTimerDestroy", (void *)(uintptr_t)RTTimerDestroy },
 { "RTTimerGetSystemGranularity", (void *)(uintptr_t)RTTimerGetSystemGranularity },
 { "RTTimerReleaseSystemGranularity", (void *)(uintptr_t)RTTimerReleaseSystemGranularity },
 { "RTTimerRequestSystemGranularity", (void *)(uintptr_t)RTTimerRequestSystemGranularity },
 { "RTTimerStart", (void *)(uintptr_t)RTTimerStart },
 { "RTTimerStop", (void *)(uintptr_t)RTTimerStop },
 { "RTTimeSystemMilliTS", (void *)(uintptr_t)RTTimeSystemMilliTS },
 { "RTTimeSystemNanoTS", (void *)(uintptr_t)RTTimeSystemNanoTS },
 { "RTUuidCompare", (void *)(uintptr_t)RTUuidCompare },
 { "RTUuidCompareStr", (void *)(uintptr_t)RTUuidCompareStr },
 { "RTUuidFromStr", (void *)(uintptr_t)RTUuidFromStr },
/* SED: END */
};
480
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on darwin.
 *
 * Referencing these functions here forces the linker to keep them in the
 * driver image even though SUPDrv itself never calls them.
 */
struct CLANG11WERIDNESS { PFNRT pfn; } g_apfnVBoxDrvIPRTDeps[] = /* NOTE(review): tag name looks like a typo for "WEIRDNESS"; left as-is in case it is referenced elsewhere. */
{
 /* VBoxNetAdp */
 { (PFNRT)RTRandBytes },
 /* VBoxUSB */
 { (PFNRT)RTPathStripFilename },
#if !defined(RT_OS_FREEBSD)
 { (PFNRT)RTHandleTableAlloc },
 { (PFNRT)RTStrPurgeEncoding },
#endif
 { NULL }
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
499
500
501
/**
 * Initializes the device extension structure.
 *
 * Creates the spinlocks, mutexes, GIP and tracer state in a fixed order; on
 * any failure everything created so far is torn down again in reverse order
 * before the status code is returned.
 *
 * @returns IPRT status code.
 * @param   pDevExt     The device extension to initialize.
 * @param   cbSession   The size of the session structure.  The size of
 *                      SUPDRVSESSION may be smaller when SUPDRV_AGNOSTIC is
 *                      defined because we're skipping the OS specific members
 *                      then.
 */
int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt, size_t cbSession)
{
    int rc;

#ifdef SUPDRV_WITH_RELEASE_LOGGER
    /*
     * Create the release log.
     */
    static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
    PRTLOGGER pRelLogger;
    rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
                     "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
    if (RT_SUCCESS(rc))
        RTLogRelSetDefaultInstance(pRelLogger);
    /** @todo Add native hook for getting logger config parameters and setting
     *        them. On linux we should use the module parameter stuff... */
#endif

#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
    /*
     * Require SSE2 to be present.
     */
    if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
    {
        SUPR0Printf("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1));
        return VERR_UNSUPPORTED_CPU;
    }
#endif

    /*
     * Initialize it.
     *
     * All lock handles are set to NIL first so the error path below can
     * destroy them unconditionally.
     */
    memset(pDevExt, 0, sizeof(*pDevExt)); /* Does not wipe OS specific tail section of the structure. */
    pDevExt->Spinlock = NIL_RTSPINLOCK;
    pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
    pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
#ifdef SUPDRV_USE_MUTEX_FOR_LDR
    pDevExt->mtxLdr = NIL_RTSEMMUTEX;
#else
    pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
#endif
#ifdef SUPDRV_USE_MUTEX_FOR_GIP
    pDevExt->mtxGip = NIL_RTSEMMUTEX;
    pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
#else
    pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
    pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
#endif

    /* Spinlocks first, then the loader and TSC-delta mutexes. */
    rc = RTSpinlockCreate(&pDevExt->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvDevExt");
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->hGipSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvGip");
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->hSessionHashTabSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvSession");

    if (RT_SUCCESS(rc))
#ifdef SUPDRV_USE_MUTEX_FOR_LDR
        rc = RTSemMutexCreate(&pDevExt->mtxLdr);
#else
        rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
#endif
    if (RT_SUCCESS(rc))
#ifdef SUPDRV_USE_MUTEX_FOR_GIP
        rc = RTSemMutexCreate(&pDevExt->mtxTscDelta);
#else
        rc = RTSemFastMutexCreate(&pDevExt->mtxTscDelta);
#endif
    if (RT_SUCCESS(rc))
    {
        rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
        if (RT_SUCCESS(rc))
        {
#ifdef SUPDRV_USE_MUTEX_FOR_GIP
            rc = RTSemMutexCreate(&pDevExt->mtxGip);
#else
            rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
#endif
            if (RT_SUCCESS(rc))
            {
                rc = supdrvGipCreate(pDevExt);
                if (RT_SUCCESS(rc))
                {
                    rc = supdrvTracerInit(pDevExt);
                    if (RT_SUCCESS(rc))
                    {
                        /* Everything created; fill in the remaining plain members. */
                        pDevExt->pLdrInitImage  = NULL;
                        pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
                        pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
                        pDevExt->u32Cookie      = BIRD;  /** @todo make this random? */
                        pDevExt->cbSession      = (uint32_t)cbSession;

                        /*
                         * Fixup the absolute symbols.
                         *
                         * Because of the table indexing assumptions we'll have a little #ifdef orgy
                         * here rather than distributing this to OS specific files. At least for now.
                         */
#ifdef RT_OS_DARWIN
# if ARCH_BITS == 32
                        if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
                        {
                            g_aFunctions[0].pfn = (void *)1;                    /* SUPR0AbsIs64bit */
                            g_aFunctions[1].pfn = (void *)0x80;                 /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
                            g_aFunctions[2].pfn = (void *)0x88;                 /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
                            g_aFunctions[3].pfn = (void *)0x88;                 /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
                        }
                        else
                            g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
                        g_aFunctions[4].pfn = (void *)0x08;                     /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
                        g_aFunctions[5].pfn = (void *)0x10;                     /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
                        g_aFunctions[6].pfn = (void *)0x10;                     /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
                        g_aFunctions[7].pfn = (void *)0x10;                     /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
                        g_aFunctions[8].pfn = (void *)0x10;                     /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
                        g_aFunctions[9].pfn = (void *)0x48;                     /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
# else /* 64-bit darwin: */
                        g_aFunctions[0].pfn = (void *)1;                        /* SUPR0AbsIs64bit */
                        g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS();    /* SUPR0Abs64bitKernelCS */
                        g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS();    /* SUPR0Abs64bitKernelSS */
                        g_aFunctions[3].pfn = (void *)0;                        /* SUPR0Abs64bitKernelDS */
                        g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS();    /* SUPR0AbsKernelCS */
                        g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS();    /* SUPR0AbsKernelSS */
                        g_aFunctions[6].pfn = (void *)0;                        /* SUPR0AbsKernelDS */
                        g_aFunctions[7].pfn = (void *)0;                        /* SUPR0AbsKernelES */
                        g_aFunctions[8].pfn = (void *)0;                        /* SUPR0AbsKernelFS */
                        g_aFunctions[9].pfn = (void *)0;                        /* SUPR0AbsKernelGS */

# endif
#else  /* !RT_OS_DARWIN */
# if ARCH_BITS == 64
                        g_aFunctions[0].pfn = (void *)1;                        /* SUPR0AbsIs64bit */
                        g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS();    /* SUPR0Abs64bitKernelCS */
                        g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS();    /* SUPR0Abs64bitKernelSS */
                        g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS();    /* SUPR0Abs64bitKernelDS */
# else
                        g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
# endif
                        g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS();    /* SUPR0AbsKernelCS */
                        g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS();    /* SUPR0AbsKernelSS */
                        g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS();    /* SUPR0AbsKernelDS */
                        g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES();    /* SUPR0AbsKernelES */
                        g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS();    /* SUPR0AbsKernelFS */
                        g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS();    /* SUPR0AbsKernelGS */
#endif /* !RT_OS_DARWIN */
                        return VINF_SUCCESS;
                    }

                    supdrvGipDestroy(pDevExt);
                }

#ifdef SUPDRV_USE_MUTEX_FOR_GIP
                RTSemMutexDestroy(pDevExt->mtxGip);
                pDevExt->mtxGip = NIL_RTSEMMUTEX;
#else
                RTSemFastMutexDestroy(pDevExt->mtxGip);
                pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
#endif
            }
            RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
            pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
        }
    }

    /*
     * Failure: undo everything created above (NIL handles are harmless here).
     */
#ifdef SUPDRV_USE_MUTEX_FOR_GIP
    RTSemMutexDestroy(pDevExt->mtxTscDelta);
    pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
#else
    RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
    pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
#endif
#ifdef SUPDRV_USE_MUTEX_FOR_LDR
    RTSemMutexDestroy(pDevExt->mtxLdr);
    pDevExt->mtxLdr = NIL_RTSEMMUTEX;
#else
    RTSemFastMutexDestroy(pDevExt->mtxLdr);
    pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
#endif
    RTSpinlockDestroy(pDevExt->Spinlock);
    pDevExt->Spinlock = NIL_RTSPINLOCK;
    RTSpinlockDestroy(pDevExt->hGipSpinlock);
    pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
    RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
    pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;

#ifdef SUPDRV_WITH_RELEASE_LOGGER
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif

    return rc;
}
702
703
/**
 * Delete the device extension (e.g. cleanup members).
 *
 * Counterpart of supdrvInitDevExt.  Destroys locks, frees the object and
 * usage-record lists, tears down the GIP and the tracer, and finally the
 * loggers.
 *
 * @param   pDevExt     The device extension to delete.
 */
void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
{
    PSUPDRVOBJ pObj;
    PSUPDRVUSAGE pUsage;

    /*
     * Kill mutexes and spinlocks.
     */
#ifdef SUPDRV_USE_MUTEX_FOR_GIP
    RTSemMutexDestroy(pDevExt->mtxGip);
    pDevExt->mtxGip = NIL_RTSEMMUTEX;
    RTSemMutexDestroy(pDevExt->mtxTscDelta);
    pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
#else
    RTSemFastMutexDestroy(pDevExt->mtxGip);
    pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
    RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
    pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
#endif
#ifdef SUPDRV_USE_MUTEX_FOR_LDR
    RTSemMutexDestroy(pDevExt->mtxLdr);
    pDevExt->mtxLdr = NIL_RTSEMMUTEX;
#else
    RTSemFastMutexDestroy(pDevExt->mtxLdr);
    pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
#endif
    RTSpinlockDestroy(pDevExt->Spinlock);
    pDevExt->Spinlock = NIL_RTSPINLOCK;
    RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
    pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
    RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
    pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;

    /*
     * Free lists.
     */
    /* objects.  The list is normally empty here; the loop below only does
       work when objects were left behind (e.g. forced unloads). */
    pObj = pDevExt->pObjs;
    Assert(!pObj);                      /* (can trigger on forced unloads) */
    pDevExt->pObjs = NULL;
    while (pObj)
    {
        void *pvFree = pObj;
        pObj = pObj->pNext;
        RTMemFree(pvFree);
    }

    /* usage records. */
    pUsage = pDevExt->pUsageFree;
    pDevExt->pUsageFree = NULL;
    while (pUsage)
    {
        void *pvFree = pUsage;
        pUsage = pUsage->pNext;
        RTMemFree(pvFree);
    }

    /* kill the GIP. */
    supdrvGipDestroy(pDevExt);
    RTSpinlockDestroy(pDevExt->hGipSpinlock);
    pDevExt->hGipSpinlock = NIL_RTSPINLOCK;

    supdrvTracerTerm(pDevExt);

#ifdef SUPDRV_WITH_RELEASE_LOGGER
    /* destroy the loggers last so the teardown above can still log. */
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif
}
779
780
781/**
782 * Create session.
783 *
784 * @returns IPRT status code.
785 * @param pDevExt Device extension.
786 * @param fUser Flag indicating whether this is a user or kernel
787 * session.
788 * @param fUnrestricted Unrestricted access (system) or restricted access
789 * (user)?
790 * @param ppSession Where to store the pointer to the session data.
791 */
792int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, bool fUnrestricted, PSUPDRVSESSION *ppSession)
793{
794 int rc;
795 PSUPDRVSESSION pSession;
796
797 if (!SUP_IS_DEVEXT_VALID(pDevExt))
798 return VERR_INVALID_PARAMETER;
799
800 /*
801 * Allocate memory for the session data.
802 */
803 pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(pDevExt->cbSession);
804 if (pSession)
805 {
806 /* Initialize session data. */
807 rc = RTSpinlockCreate(&pSession->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "SUPDrvSession");
808 if (!rc)
809 {
810 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
811 RTHANDLETABLE_FLAGS_LOCKED_IRQ_SAFE | RTHANDLETABLE_FLAGS_CONTEXT,
812 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
813 if (RT_SUCCESS(rc))
814 {
815 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
816 pSession->pDevExt = pDevExt;
817 pSession->u32Cookie = BIRD_INV;
818 pSession->fUnrestricted = fUnrestricted;
819 /*pSession->fInHashTable = false; */
820 pSession->cRefs = 1;
821 /*pSession->pCommonNextHash = NULL;
822 pSession->ppOsSessionPtr = NULL; */
823 if (fUser)
824 {
825 pSession->Process = RTProcSelf();
826 pSession->R0Process = RTR0ProcHandleSelf();
827 }
828 else
829 {
830 pSession->Process = NIL_RTPROCESS;
831 pSession->R0Process = NIL_RTR0PROCESS;
832 }
833 /*pSession->pLdrUsage = NULL;
834 pSession->pVM = NULL;
835 pSession->pUsage = NULL;
836 pSession->pGip = NULL;
837 pSession->fGipReferenced = false;
838 pSession->Bundle.cUsed = 0; */
839 pSession->Uid = NIL_RTUID;
840 pSession->Gid = NIL_RTGID;
841 /*pSession->uTracerData = 0;*/
842 pSession->hTracerCaller = NIL_RTNATIVETHREAD;
843 RTListInit(&pSession->TpProviders);
844 /*pSession->cTpProviders = 0;*/
845 /*pSession->cTpProbesFiring = 0;*/
846 RTListInit(&pSession->TpUmods);
847 /*RT_ZERO(pSession->apTpLookupTable);*/
848
849 VBOXDRV_SESSION_CREATE(pSession, fUser);
850 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
851 return VINF_SUCCESS;
852 }
853
854 RTSpinlockDestroy(pSession->Spinlock);
855 }
856 RTMemFree(pSession);
857 *ppSession = NULL;
858 Log(("Failed to create spinlock, rc=%d!\n", rc));
859 }
860 else
861 rc = VERR_NO_MEMORY;
862
863 return rc;
864}
865
866
/**
 * Cleans up the session in the context of the process to which it belongs, the
 * caller will free the session and the session spinlock.
 *
 * This should normally occur when the session is closed or as the process
 * exits.  Careful reference counting in the OS specific code makes sure that
 * there cannot be any races between process/handle cleanup callbacks and
 * threads doing I/O control calls.
 *
 * Cleanup order: loggers, handle table, object references, VM associations,
 * tracer, memory bundles, component factories, loaded images, and finally
 * the GIP mapping.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    Session data.
 */
static void supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    int rc;
    PSUPDRVBUNDLE pBundle;
    LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));

    /* The session must already have been taken out of the hash table. */
    Assert(!pSession->fInHashTable);
    Assert(!pSession->ppOsSessionPtr);
    AssertLogRelMsg(pSession->R0Process == RTR0ProcHandleSelf() || pSession->R0Process == NIL_RTR0PROCESS,
                    ("R0Process=%p cur=%p; curpid=%u\n",
                     pSession->R0Process, RTR0ProcHandleSelf(), RTProcSelf()));

    /*
     * Remove logger instances related to this session.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);

    /*
     * Destroy the handle table.
     */
    rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
    AssertRC(rc);
    pSession->hHandleTable = NIL_RTHANDLETABLE;

    /*
     * Release object references made in this session.
     * In theory there should be no one racing us in this session.
     */
    Log2(("release objects - start\n"));
    if (pSession->pUsage)
    {
        PSUPDRVUSAGE pUsage;
        RTSpinlockAcquire(pDevExt->Spinlock);

        while ((pUsage = pSession->pUsage) != NULL)
        {
            PSUPDRVOBJ pObj = pUsage->pObj;
            pSession->pUsage = pUsage->pNext;

            AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
            if (pUsage->cUsage < pObj->cUsage)
            {
                /* Still referenced by other sessions: just subtract our count. */
                pObj->cUsage -= pUsage->cUsage;
                RTSpinlockRelease(pDevExt->Spinlock);
            }
            else
            {
                /* Destroy the object and free the record. */
                if (pDevExt->pObjs == pObj)
                    pDevExt->pObjs = pObj->pNext;
                else
                {
                    PSUPDRVOBJ pObjPrev;
                    for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
                        if (pObjPrev->pNext == pObj)
                        {
                            pObjPrev->pNext = pObj->pNext;
                            break;
                        }
                    Assert(pObjPrev);
                }
                /* The spinlock is dropped before invoking the destructor and
                   reacquired after freeing the usage record (see below). */
                RTSpinlockRelease(pDevExt->Spinlock);

                Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
                     pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
                if (pObj->pfnDestructor)
                    pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
                RTMemFree(pObj);
            }

            /* free it and continue. */
            RTMemFree(pUsage);

            RTSpinlockAcquire(pDevExt->Spinlock);
        }

        RTSpinlockRelease(pDevExt->Spinlock);
        AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during desturction!\n"));
    }
    Log2(("release objects - done\n"));

    /*
     * Make sure the associated VM pointers are NULL.
     */
    if (pSession->pSessionGVM || pSession->pSessionVM || pSession->pFastIoCtrlVM)
    {
        SUPR0Printf("supdrvCleanupSession: VM not disassociated! pSessionGVM=%p pSessionVM=%p pFastIoCtrlVM=%p\n",
                    pSession->pSessionGVM, pSession->pSessionVM, pSession->pFastIoCtrlVM);
        pSession->pSessionGVM   = NULL;
        pSession->pSessionVM    = NULL;
        pSession->pFastIoCtrlVM = NULL;
    }

    /*
     * Do tracer cleanups related to this session.
     */
    Log2(("release tracer stuff - start\n"));
    supdrvTracerCleanupSession(pDevExt, pSession);
    Log2(("release tracer stuff - end\n"));

    /*
     * Release memory allocated in the session.
     *
     * We do not serialize this as we assume that the application will
     * not allocate memory while closing the file handle object.
     */
    Log2(("freeing memory:\n"));
    pBundle = &pSession->Bundle;
    while (pBundle)
    {
        PSUPDRVBUNDLE pToFree;
        unsigned i;

        /*
         * Check and unlock all entries in the bundle.
         */
        for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
        {
            if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
            {
                Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
                      (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
                /* Free the ring-3 mapping first, then the backing memory object. */
                if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
                {
                    rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
                    AssertRC(rc); /** @todo figure out how to handle this. */
                    pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
                }
                rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
                AssertRC(rc); /** @todo figure out how to handle this. */
                pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
                pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
            }
        }

        /*
         * Advance and free previous bundle.
         * (The first bundle is embedded in the session and must not be freed.)
         */
        pToFree = pBundle;
        pBundle = pBundle->pNext;

        pToFree->pNext = NULL;
        pToFree->cUsed = 0;
        if (pToFree != &pSession->Bundle)
            RTMemFree(pToFree);
    }
    Log2(("freeing memory - done\n"));

    /*
     * Deregister component factories.
     */
    RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
    Log2(("deregistering component factories:\n"));
    if (pDevExt->pComponentFactoryHead)
    {
        PSUPDRVFACTORYREG pPrev = NULL;
        PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
        while (pCur)
        {
            if (pCur->pSession == pSession)
            {
                /* unlink it */
                PSUPDRVFACTORYREG pNext = pCur->pNext;
                if (pPrev)
                    pPrev->pNext = pNext;
                else
                    pDevExt->pComponentFactoryHead = pNext;

                /* free it */
                pCur->pNext = NULL;
                pCur->pSession = NULL;
                pCur->pFactory = NULL;
                RTMemFree(pCur);

                /* next */
                pCur = pNext;
            }
            else
            {
                /* next */
                pPrev = pCur;
                pCur = pCur->pNext;
            }
        }
    }
    RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
    Log2(("deregistering component factories - done\n"));

    /*
     * Loaded images needs to be dereferenced and possibly freed up.
     */
    supdrvLdrLock(pDevExt);
    Log2(("freeing images:\n"));
    if (pSession->pLdrUsage)
    {
        PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
        pSession->pLdrUsage = NULL;
        while (pUsage)
        {
            void *pvFree = pUsage;
            PSUPDRVLDRIMAGE pImage = pUsage->pImage;
            uint32_t cUsage = pUsage->cRing0Usage + pUsage->cRing3Usage;
            if (pImage->cUsage > cUsage)
                pImage->cUsage -= cUsage;
            else
                supdrvLdrFree(pDevExt, pImage); /* last reference: unload the image */
            pUsage->pImage = NULL;
            pUsage = pUsage->pNext;
            RTMemFree(pvFree);
        }
    }
    supdrvLdrUnlock(pDevExt);
    Log2(("freeing images - done\n"));

    /*
     * Unmap the GIP.
     */
    Log2(("umapping GIP:\n"));
    if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
    {
        SUPR0GipUnmap(pSession);
        pSession->fGipReferenced = 0;
    }
    Log2(("umapping GIP - done\n"));
}
1104
1105
1106/**
1107 * Common code for freeing a session when the reference count reaches zero.
1108 *
1109 * @param pDevExt Device extension.
1110 * @param pSession Session data.
1111 * This data will be freed by this routine.
1112 */
1113static void supdrvDestroySession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1114{
1115 VBOXDRV_SESSION_CLOSE(pSession);
1116
1117 /*
1118 * Cleanup the session first.
1119 */
1120 supdrvCleanupSession(pDevExt, pSession);
1121 supdrvOSCleanupSession(pDevExt, pSession);
1122
1123 /*
1124 * Free the rest of the session stuff.
1125 */
1126 RTSpinlockDestroy(pSession->Spinlock);
1127 pSession->Spinlock = NIL_RTSPINLOCK;
1128 pSession->pDevExt = NULL;
1129 RTMemFree(pSession);
1130 LogFlow(("supdrvDestroySession: returns\n"));
1131}
1132
1133
/**
 * Inserts the session into the global hash table.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_WRONG_ORDER if the session was already inserted (asserted).
 * @retval  VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
 *          session (asserted).
 * @retval  VERR_RESOURCE_IN_USE if a different session for the same process
 *          and ring-0 process handle is already in the table.
 * @retval  VERR_DUPLICATE if there is already a session for that pid.
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   ppOsSessionPtr  Pointer to the OS session pointer, if any is
 *                          available and used.  This will set to point to the
 *                          session while under the protection of the session
 *                          hash table spinlock.  It will also be kept in
 *                          PSUPDRVSESSION::ppOsSessionPtr for lookup and
 *                          cleanup use.
 * @param   pvUser          Argument for supdrvOSSessionHashTabInserted.
 */
int VBOXCALL supdrvSessionHashTabInsert(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVSESSION *ppOsSessionPtr,
                                        void *pvUser)
{
    PSUPDRVSESSION pCur;
    unsigned iHash;

    /*
     * Validate input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);

    /*
     * Calculate the hash table index and acquire the spinlock.
     */
    iHash = SUPDRV_SESSION_HASH(pSession->Process);

    RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);

    /*
     * If there is a collision, we need to carefully check if we got a
     * duplicate.  There can only be one open session per process.
     */
    pCur = pDevExt->apSessionHashTab[iHash];
    if (pCur)
    {
        while (pCur && pCur->Process != pSession->Process)
            pCur = pCur->pCommonNextHash;

        if (pCur)
        {
            RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
            if (pCur == pSession)
            {
                Assert(pSession->fInHashTable);
                AssertFailed();
                return VERR_WRONG_ORDER;
            }
            Assert(!pSession->fInHashTable);
            if (pCur->R0Process == pSession->R0Process)
                return VERR_RESOURCE_IN_USE;
            return VERR_DUPLICATE;
        }
    }
    Assert(!pSession->fInHashTable);
    Assert(!pSession->ppOsSessionPtr);

    /*
     * Insert it, doing a callout to the OS specific code in case it has
     * anything it wishes to do while we're holding the spinlock.
     */
    pSession->pCommonNextHash = pDevExt->apSessionHashTab[iHash];
    pDevExt->apSessionHashTab[iHash] = pSession;
    pSession->fInHashTable = true;
    ASMAtomicIncS32(&pDevExt->cSessions);

    pSession->ppOsSessionPtr = ppOsSessionPtr;
    if (ppOsSessionPtr)
        ASMAtomicWritePtr(ppOsSessionPtr, pSession);

    supdrvOSSessionHashTabInserted(pDevExt, pSession, pvUser);

    /*
     * Retain a reference for the pointer in the session table.
     */
    ASMAtomicIncU32(&pSession->cRefs);

    RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
    return VINF_SUCCESS;
}
1223
1224
/**
 * Removes the session from the global hash table.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NOT_FOUND if the session was already removed (asserted).
 * @retval  VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
 *          session (asserted).
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  The caller is expected to have a reference
 *                      to this so it won't croak on us when we release the hash
 *                      table reference.
 * @param   pvUser      OS specific context value for the
 *                      supdrvOSSessionHashTabRemoved callback.
 */
int VBOXCALL supdrvSessionHashTabRemove(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, void *pvUser)
{
    PSUPDRVSESSION pCur;
    unsigned iHash;
    int32_t cRefs; /* signed so an underflow would show up as negative in the assertion below */

    /*
     * Validate input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);

    /*
     * Calculate the hash table index and acquire the spinlock.
     */
    iHash = SUPDRV_SESSION_HASH(pSession->Process);

    RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);

    /*
     * Unlink it.
     */
    pCur = pDevExt->apSessionHashTab[iHash];
    if (pCur == pSession)
        pDevExt->apSessionHashTab[iHash] = pSession->pCommonNextHash;
    else
    {
        PSUPDRVSESSION pPrev = pCur;
        while (pCur && pCur != pSession)
        {
            pPrev = pCur;
            pCur = pCur->pCommonNextHash;
        }
        if (pCur)
            pPrev->pCommonNextHash = pCur->pCommonNextHash;
        else
        {
            /* Not in the table; nothing to do. */
            Assert(!pSession->fInHashTable);
            RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
            return VERR_NOT_FOUND;
        }
    }

    pSession->pCommonNextHash = NULL;
    pSession->fInHashTable = false;

    ASMAtomicDecS32(&pDevExt->cSessions);

    /*
     * Clear OS specific session pointer if available and do the OS callback.
     */
    if (pSession->ppOsSessionPtr)
    {
        ASMAtomicCmpXchgPtr(pSession->ppOsSessionPtr, NULL, pSession);
        pSession->ppOsSessionPtr = NULL;
    }

    supdrvOSSessionHashTabRemoved(pDevExt, pSession, pvUser);

    RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);

    /*
     * Drop the reference the hash table had to the session.  This shouldn't
     * be the last reference!
     */
    cRefs = ASMAtomicDecU32(&pSession->cRefs);
    Assert(cRefs > 0 && cRefs < _1M);
    if (cRefs == 0)
        supdrvDestroySession(pDevExt, pSession);

    return VINF_SUCCESS;
}
1312
1313
/**
 * Looks up the session for the current process in the global hash table or in
 * OS specific pointer.
 *
 * @returns Pointer to the session with a reference that the caller must
 *          release.  If no valid session was found, NULL is returned.
 *
 * @param   pDevExt         The device extension.
 * @param   Process         The process ID.
 * @param   R0Process       The ring-0 process handle.
 * @param   ppOsSessionPtr  The OS session pointer if available.  If not NULL,
 *                          this is used instead of the hash table.  For
 *                          additional safety it must then be equal to the
 *                          SUPDRVSESSION::ppOsSessionPtr member.
 *                          This can be NULL even if the OS has a session
 *                          pointer.
 */
PSUPDRVSESSION VBOXCALL supdrvSessionHashTabLookup(PSUPDRVDEVEXT pDevExt, RTPROCESS Process, RTR0PROCESS R0Process,
                                                   PSUPDRVSESSION *ppOsSessionPtr)
{
    PSUPDRVSESSION pCur;
    unsigned iHash;

    /*
     * Validate input.
     */
    AssertReturn(R0Process != NIL_RTR0PROCESS, NULL);

    /*
     * Calculate the hash table index and acquire the spinlock.
     */
    iHash = SUPDRV_SESSION_HASH(Process);

    RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);

    /*
     * If an OS session pointer is provided, always use it.
     * The cross-checks against Process/R0Process guard against stale or
     * mismatched pointers.
     */
    if (ppOsSessionPtr)
    {
        pCur = *ppOsSessionPtr;
        if (   pCur
            && (   pCur->ppOsSessionPtr != ppOsSessionPtr
                || pCur->Process        != Process
                || pCur->R0Process      != R0Process) )
            pCur = NULL;
    }
    else
    {
        /*
         * Otherwise, do the hash table lookup.
         */
        pCur = pDevExt->apSessionHashTab[iHash];
        while (   pCur
               && (   pCur->Process   != Process
                   || pCur->R0Process != R0Process) )
            pCur = pCur->pCommonNextHash;
    }

    /*
     * Retain the session while still holding the spinlock.
     */
    if (pCur)
    {
        uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
        NOREF(cRefs);
        Assert(cRefs > 1 && cRefs < _1M);
    }

    RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);

    return pCur;
}
1387
1388
1389/**
1390 * Retain a session to make sure it doesn't go away while it is in use.
1391 *
1392 * @returns New reference count on success, UINT32_MAX on failure.
1393 * @param pSession Session data.
1394 */
1395uint32_t VBOXCALL supdrvSessionRetain(PSUPDRVSESSION pSession)
1396{
1397 uint32_t cRefs;
1398 AssertPtrReturn(pSession, UINT32_MAX);
1399 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1400
1401 cRefs = ASMAtomicIncU32(&pSession->cRefs);
1402 AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1403 return cRefs;
1404}
1405
1406
1407/**
1408 * Releases a given session.
1409 *
1410 * @returns New reference count on success (0 if closed), UINT32_MAX on failure.
1411 * @param pSession Session data.
1412 */
1413uint32_t VBOXCALL supdrvSessionRelease(PSUPDRVSESSION pSession)
1414{
1415 uint32_t cRefs;
1416 AssertPtrReturn(pSession, UINT32_MAX);
1417 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1418
1419 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1420 AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1421 if (cRefs == 0)
1422 supdrvDestroySession(pSession->pDevExt, pSession);
1423 return cRefs;
1424}
1425
1426
1427/**
1428 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1429 *
1430 * @returns IPRT status code, see SUPR0ObjAddRef.
1431 * @param hHandleTable The handle table handle. Ignored.
1432 * @param pvObj The object pointer.
1433 * @param pvCtx Context, the handle type. Ignored.
1434 * @param pvUser Session pointer.
1435 */
1436static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
1437{
1438 NOREF(pvCtx);
1439 NOREF(hHandleTable);
1440 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
1441}
1442
1443
1444/**
1445 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1446 *
1447 * @param hHandleTable The handle table handle. Ignored.
1448 * @param h The handle value. Ignored.
1449 * @param pvObj The object pointer.
1450 * @param pvCtx Context, the handle type. Ignored.
1451 * @param pvUser Session pointer.
1452 */
1453static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
1454{
1455 NOREF(pvCtx);
1456 NOREF(h);
1457 NOREF(hHandleTable);
1458 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
1459}
1460
1461
/**
 * Fast path I/O Control worker.
 *
 * @returns VBox status code that should be passed down to ring-3 unchanged.
 * @param   uOperation  SUP_VMMR0_DO_XXX (not the I/O control number!).
 * @param   idCpu       VMCPU id.
 * @param   pDevExt     Device extension.
 * @param   pSession    Session data.
 */
int VBOXCALL supdrvIOCtlFast(uintptr_t uOperation, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    /*
     * Validate input and check that the VM has a session.
     * Both VM pointers must be set and pVM must match the one registered
     * for fast I/O control (pFastIoCtrlVM).
     */
    if (RT_LIKELY(RT_VALID_PTR(pSession)))
    {
        PVM pVM = pSession->pSessionVM;
        PGVM pGVM = pSession->pSessionGVM;
        if (RT_LIKELY(   pGVM != NULL
                      && pVM != NULL
                      && pVM == pSession->pFastIoCtrlVM))
        {
            if (RT_LIKELY(pDevExt->pfnVMMR0EntryFast))
            {
                /*
                 * Make the call.
                 */
                pDevExt->pfnVMMR0EntryFast(pGVM, pVM, idCpu, uOperation);
                return VINF_SUCCESS;
            }

            SUPR0Printf("supdrvIOCtlFast: pfnVMMR0EntryFast is NULL\n");
        }
        else
            SUPR0Printf("supdrvIOCtlFast: Misconfig session: pGVM=%p pVM=%p pFastIoCtrlVM=%p\n",
                        pGVM, pVM, pSession->pFastIoCtrlVM);
    }
    else
        SUPR0Printf("supdrvIOCtlFast: Bad session pointer %p\n", pSession);
    return VERR_INTERNAL_ERROR;
}
1503
1504
/**
 * Helper for supdrvIOCtl used to validate module names passed to SUP_IOCTL_LDR_OPEN.
 *
 * Check if pszName contains any forbidden character.  We would use strpbrk
 * here if this function would be contained in the RedHat kABI white list, see
 * http://www.kerneldrivers.org/RHEL5.
 *
 * @returns true if fine, false if not.
 * @param   pszName     The module name to check.
 */
static bool supdrvIsLdrModuleNameValid(const char *pszName)
{
    static const char s_szInvalidChars[] = ";:()[]{}/\\|&*%#@!~`\"'";
    int ch;
    while ((ch = *pszName++) != '\0')
    {
        /* Scan the forbidden-character set for a match (no strpbrk, see above). */
        const char *pszInv = s_szInvalidChars;
        while (*pszInv != '\0')
            if (*pszInv++ == ch)
                return false;
    }
    return true;
}
1528
1529
1530
1531/**
1532 * I/O Control inner worker (tracing reasons).
1533 *
1534 * @returns IPRT status code.
1535 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1536 *
1537 * @param uIOCtl Function number.
1538 * @param pDevExt Device extention.
1539 * @param pSession Session data.
1540 * @param pReqHdr The request header.
1541 */
1542static int supdrvIOCtlInnerUnrestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1543{
1544 /*
1545 * Validation macros
1546 */
1547#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1548 do { \
1549 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1550 { \
1551 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1552 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1553 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1554 } \
1555 } while (0)
1556
1557#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1558
1559#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1560 do { \
1561 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1562 { \
1563 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1564 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
1565 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1566 } \
1567 } while (0)
1568
1569#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1570 do { \
1571 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1572 { \
1573 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1574 (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1575 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1576 } \
1577 } while (0)
1578
1579#define REQ_CHECK_EXPR(Name, expr) \
1580 do { \
1581 if (RT_UNLIKELY(!(expr))) \
1582 { \
1583 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1584 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1585 } \
1586 } while (0)
1587
1588#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1589 do { \
1590 if (RT_UNLIKELY(!(expr))) \
1591 { \
1592 OSDBGPRINT( fmt ); \
1593 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1594 } \
1595 } while (0)
1596
1597 /*
1598 * The switch.
1599 */
1600 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1601 {
1602 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1603 {
1604 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1605 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1606 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1607 {
1608 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1609 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1610 return 0;
1611 }
1612
1613#if 0
1614 /*
1615 * Call out to the OS specific code and let it do permission checks on the
1616 * client process.
1617 */
1618 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1619 {
1620 pReq->u.Out.u32Cookie = 0xffffffff;
1621 pReq->u.Out.u32SessionCookie = 0xffffffff;
1622 pReq->u.Out.u32SessionVersion = 0xffffffff;
1623 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1624 pReq->u.Out.pSession = NULL;
1625 pReq->u.Out.cFunctions = 0;
1626 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1627 return 0;
1628 }
1629#endif
1630
1631 /*
1632 * Match the version.
1633 * The current logic is very simple, match the major interface version.
1634 */
1635 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1636 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1637 {
1638 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1639 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1640 pReq->u.Out.u32Cookie = 0xffffffff;
1641 pReq->u.Out.u32SessionCookie = 0xffffffff;
1642 pReq->u.Out.u32SessionVersion = 0xffffffff;
1643 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1644 pReq->u.Out.pSession = NULL;
1645 pReq->u.Out.cFunctions = 0;
1646 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1647 return 0;
1648 }
1649
1650 /*
1651 * Fill in return data and be gone.
 1652 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1653 * u32SessionVersion <= u32ReqVersion!
1654 */
1655 /** @todo Somehow validate the client and negotiate a secure cookie... */
1656 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1657 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1658 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1659 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1660 pReq->u.Out.pSession = pSession;
1661 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1662 pReq->Hdr.rc = VINF_SUCCESS;
1663 return 0;
1664 }
1665
1666 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1667 {
1668 /* validate */
1669 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1670 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1671
1672 /* execute */
1673 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1674 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1675 pReq->Hdr.rc = VINF_SUCCESS;
1676 return 0;
1677 }
1678
1679 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1680 {
1681 /* validate */
1682 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1683 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1684 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1685 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1686 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1687
1688 /* execute */
1689 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1690 if (RT_FAILURE(pReq->Hdr.rc))
1691 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1692 return 0;
1693 }
1694
1695 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1696 {
1697 /* validate */
1698 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1699 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1700
1701 /* execute */
1702 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1703 return 0;
1704 }
1705
1706 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1707 {
1708 /* validate */
1709 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1710 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1711
1712 /* execute */
1713 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1714 if (RT_FAILURE(pReq->Hdr.rc))
1715 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1716 return 0;
1717 }
1718
1719 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1720 {
1721 /* validate */
1722 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1723 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1724
1725 /* execute */
1726 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1727 return 0;
1728 }
1729
1730 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1731 {
1732 /* validate */
1733 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1734 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1735 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything > 0);
1736 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything < 16*_1M);
1737 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1738 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithEverything);
1739 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1740 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
1741 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, supdrvIsLdrModuleNameValid(pReq->u.In.szName));
1742 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szFilename, sizeof(pReq->u.In.szFilename)));
1743
1744 /* execute */
1745 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1746 return 0;
1747 }
1748
1749 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1750 {
1751 /* validate */
1752 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1753 REQ_CHECK_EXPR(Name, pReq->Hdr.cbIn >= SUP_IOCTL_LDR_LOAD_SIZE_IN(32));
1754 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithEverything), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1755 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1756 || ( pReq->u.In.cSymbols <= 16384
1757 && pReq->u.In.offSymbols >= pReq->u.In.cbImageBits
1758 && pReq->u.In.offSymbols < pReq->u.In.cbImageWithEverything
1759 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithEverything),
1760 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSymbols,
1761 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithEverything));
1762 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1763 || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithEverything
1764 && pReq->u.In.offStrTab >= pReq->u.In.cbImageBits
1765 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything
1766 && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything),
1767 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offStrTab,
1768 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithEverything));
1769 REQ_CHECK_EXPR_FMT( pReq->u.In.cSegments >= 1
1770 && pReq->u.In.cSegments <= 128
1771 && pReq->u.In.cSegments <= (pReq->u.In.cbImageBits + PAGE_SIZE - 1) / PAGE_SIZE
1772 && pReq->u.In.offSegments >= pReq->u.In.cbImageBits
1773 && pReq->u.In.offSegments < pReq->u.In.cbImageWithEverything
1774 && pReq->u.In.offSegments + pReq->u.In.cSegments * sizeof(SUPLDRSEG) <= pReq->u.In.cbImageWithEverything,
1775 ("SUP_IOCTL_LDR_LOAD: offSegments=%#lx cSegments=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSegments,
1776 (long)pReq->u.In.cSegments, (long)pReq->u.In.cbImageWithEverything));
1777
1778 if (pReq->u.In.cSymbols)
1779 {
1780 uint32_t i;
1781 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.abImage[pReq->u.In.offSymbols];
1782 for (i = 0; i < pReq->u.In.cSymbols; i++)
1783 {
1784 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithEverything,
1785 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithEverything));
1786 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1787 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1788 REQ_CHECK_EXPR_FMT(RTStrEnd((char const *)&pReq->u.In.abImage[pReq->u.In.offStrTab + paSyms[i].offName],
1789 pReq->u.In.cbStrTab - paSyms[i].offName),
1790 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1791 }
1792 }
1793 {
1794 uint32_t i;
1795 uint32_t offPrevEnd = 0;
1796 PSUPLDRSEG paSegs = (PSUPLDRSEG)&pReq->u.In.abImage[pReq->u.In.offSegments];
1797 for (i = 0; i < pReq->u.In.cSegments; i++)
1798 {
1799 REQ_CHECK_EXPR_FMT(paSegs[i].off < pReq->u.In.cbImageBits && !(paSegs[i].off & PAGE_OFFSET_MASK),
1800 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)pReq->u.In.cbImageBits));
1801 REQ_CHECK_EXPR_FMT(paSegs[i].cb <= pReq->u.In.cbImageBits,
1802 ("SUP_IOCTL_LDR_LOAD: seg #%ld: cb %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].cb, (long)pReq->u.In.cbImageBits));
1803 REQ_CHECK_EXPR_FMT(paSegs[i].off + paSegs[i].cb <= pReq->u.In.cbImageBits,
1804 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx = %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb, (long)(paSegs[i].off + paSegs[i].cb), (long)pReq->u.In.cbImageBits));
1805 REQ_CHECK_EXPR_FMT(paSegs[i].fProt != 0,
1806 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb));
1807 REQ_CHECK_EXPR_FMT(paSegs[i].fUnused == 0, ("SUP_IOCTL_LDR_LOAD: seg #%ld: fUnused=1\n", (long)i));
1808 REQ_CHECK_EXPR_FMT(offPrevEnd == paSegs[i].off,
1809 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx offPrevEnd %#lx\n", (long)i, (long)paSegs[i].off, (long)offPrevEnd));
1810 offPrevEnd = paSegs[i].off + paSegs[i].cb;
1811 }
1812 REQ_CHECK_EXPR_FMT(offPrevEnd == pReq->u.In.cbImageBits,
1813 ("SUP_IOCTL_LDR_LOAD: offPrevEnd %#lx cbImageBits %#lx\n", (long)i, (long)offPrevEnd, (long)pReq->u.In.cbImageBits));
1814 }
1815 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fFlags & ~SUPLDRLOAD_F_VALID_MASK),
1816 ("SUP_IOCTL_LDR_LOAD: fFlags=%#x\n", (unsigned)pReq->u.In.fFlags));
1817
1818 /* execute */
1819 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1820 return 0;
1821 }
1822
1823 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1824 {
1825 /* validate */
1826 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1827 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1828
1829 /* execute */
1830 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1831 return 0;
1832 }
1833
1834 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOCK_DOWN):
1835 {
1836 /* validate */
1837 REQ_CHECK_SIZES(SUP_IOCTL_LDR_LOCK_DOWN);
1838
1839 /* execute */
1840 pReqHdr->rc = supdrvIOCtl_LdrLockDown(pDevExt);
1841 return 0;
1842 }
1843
1844 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1845 {
1846 /* validate */
1847 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1848 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1849 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, RTStrEnd(pReq->u.In.szSymbol, sizeof(pReq->u.In.szSymbol)));
1850
1851 /* execute */
1852 pReq->Hdr.rc = supdrvIOCtl_LdrQuerySymbol(pDevExt, pSession, pReq);
1853 return 0;
1854 }
1855
1856 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_NO_SIZE()):
1857 {
1858 /* validate */
1859 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1860 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1861 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1862
1863 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1864 {
1865 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1866
1867 /* execute */
1868 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1869 {
1870 if (pReq->u.In.pVMR0 == NULL)
1871 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1872 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1873 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1874 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1875 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1876 else
1877 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1878 }
1879 else
1880 pReq->Hdr.rc = VERR_WRONG_ORDER;
1881 }
1882 else
1883 {
1884 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1885 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1886 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1887 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1888 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1889
1890 /* execute */
1891 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1892 {
1893 if (pReq->u.In.pVMR0 == NULL)
1894 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1895 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1896 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1897 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1898 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1899 else
1900 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1901 }
1902 else
1903 pReq->Hdr.rc = VERR_WRONG_ORDER;
1904 }
1905
1906 if ( RT_FAILURE(pReq->Hdr.rc)
1907 && pReq->Hdr.rc != VERR_INTERRUPTED
1908 && pReq->Hdr.rc != VERR_TIMEOUT)
1909 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1910 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1911 else
1912 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1913 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1914 return 0;
1915 }
1916
1917 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_BIG):
1918 {
1919 /* validate */
1920 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1921 PSUPVMMR0REQHDR pVMMReq;
1922 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1923 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1924
1925 pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1926 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR)),
1927 ("SUP_IOCTL_CALL_VMMR0_BIG: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR))));
1928 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0_BIG, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1929 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0_BIG, SUP_IOCTL_CALL_VMMR0_BIG_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_BIG_SIZE_OUT(pVMMReq->cbReq));
1930
1931 /* execute */
1932 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1933 {
1934 if (pReq->u.In.pVMR0 == NULL)
1935 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1936 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1937 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1938 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1939 else
1940 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1941 }
1942 else
1943 pReq->Hdr.rc = VERR_WRONG_ORDER;
1944
1945 if ( RT_FAILURE(pReq->Hdr.rc)
1946 && pReq->Hdr.rc != VERR_INTERRUPTED
1947 && pReq->Hdr.rc != VERR_TIMEOUT)
1948 Log(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1949 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1950 else
1951 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1952 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1953 return 0;
1954 }
1955
1956 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1957 {
1958 /* validate */
1959 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1960 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1961
1962 /* execute */
1963 pReq->Hdr.rc = VINF_SUCCESS;
1964 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1965 return 0;
1966 }
1967
1968 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1969 {
1970 /* validate */
1971 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1972 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1973 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1974
1975 /* execute */
1976 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1977 if (RT_FAILURE(pReq->Hdr.rc))
1978 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1979 return 0;
1980 }
1981
1982 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1983 {
1984 /* validate */
1985 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1986 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1987
1988 /* execute */
1989 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1990 return 0;
1991 }
1992
1993 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1994 {
1995 /* validate */
1996 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1997 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1998
1999 /* execute */
2000 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
2001 if (RT_SUCCESS(pReq->Hdr.rc))
2002 pReq->u.Out.pGipR0 = pDevExt->pGip;
2003 return 0;
2004 }
2005
2006 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
2007 {
2008 /* validate */
2009 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
2010 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
2011
2012 /* execute */
2013 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
2014 return 0;
2015 }
2016
2017 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
2018 {
2019 /* validate */
2020 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
2021 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
2022 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
2023 || ( VALID_PTR(pReq->u.In.pVMR0)
2024 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
2025 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
2026
2027 /* execute */
2028 RTSpinlockAcquire(pDevExt->Spinlock);
2029 if (pSession->pSessionVM == pReq->u.In.pVMR0)
2030 {
2031 if (pSession->pFastIoCtrlVM == NULL)
2032 {
2033 pSession->pFastIoCtrlVM = pSession->pSessionVM;
2034 RTSpinlockRelease(pDevExt->Spinlock);
2035 pReq->Hdr.rc = VINF_SUCCESS;
2036 }
2037 else
2038 {
2039 RTSpinlockRelease(pDevExt->Spinlock);
2040 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pFastIoCtrlVM=%p! (pVMR0=%p)\n",
2041 pSession->pFastIoCtrlVM, pReq->u.In.pVMR0));
2042 pReq->Hdr.rc = VERR_ALREADY_EXISTS;
2043 }
2044 }
2045 else
2046 {
2047 RTSpinlockRelease(pDevExt->Spinlock);
2048 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pSessionVM=%p vs pVMR0=%p)\n",
2049 pSession->pSessionVM, pReq->u.In.pVMR0));
2050 pReq->Hdr.rc = pSession->pSessionVM ? VERR_ACCESS_DENIED : VERR_WRONG_ORDER;
2051 }
2052 return 0;
2053 }
2054
2055 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
2056 {
2057 /* validate */
2058 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
2059 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
2060 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
2061 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
2062 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
2063 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
2064 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
2065 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
2066 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
2067
2068 /* execute */
2069 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
2070 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
2071 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
2072 &pReq->u.Out.aPages[0]);
2073 if (RT_FAILURE(pReq->Hdr.rc))
2074 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2075 return 0;
2076 }
2077
2078 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
2079 {
2080 /* validate */
2081 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
2082 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
2083 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
2084 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
2085 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2086 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
2087
2088 /* execute */
2089 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
2090 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
2091 if (RT_FAILURE(pReq->Hdr.rc))
2092 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2093 return 0;
2094 }
2095
2096 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
2097 {
2098 /* validate */
2099 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
2100 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
2101 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
2102 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
2103 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
2104 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2105 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
2106
2107 /* execute */
2108 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
2109 return 0;
2110 }
2111
2112 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
2113 {
2114 /* validate */
2115 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
2116 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
2117
2118 /* execute */
2119 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
2120 return 0;
2121 }
2122
2123 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE_NO_SIZE()):
2124 {
2125 /* validate */
2126 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
2127 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2128 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2129
2130 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
2131 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
2132 else
2133 {
2134 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
2135 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
2136 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
2137 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
2138 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
2139 }
2140 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
2141
2142 /* execute */
2143 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
2144 return 0;
2145 }
2146
2147 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS_NO_SIZE()):
2148 {
2149 /* validate */
2150 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
2151 size_t cbStrTab;
2152 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
2153 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
2154 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
2155 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
2156 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
2157 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
2158 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
2159 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
2160 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
2161 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
2162 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
2163
2164 /* execute */
2165 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pReq);
2166 return 0;
2167 }
2168
2169 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP2):
2170 {
2171 /* validate */
2172 PSUPSEMOP2 pReq = (PSUPSEMOP2)pReqHdr;
2173 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP2, SUP_IOCTL_SEM_OP2_SIZE_IN, SUP_IOCTL_SEM_OP2_SIZE_OUT);
2174 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP2, pReq->u.In.uReserved == 0);
2175
2176 /* execute */
2177 switch (pReq->u.In.uType)
2178 {
2179 case SUP_SEM_TYPE_EVENT:
2180 {
2181 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2182 switch (pReq->u.In.uOp)
2183 {
2184 case SUPSEMOP2_WAIT_MS_REL:
2185 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.uArg.cRelMsTimeout);
2186 break;
2187 case SUPSEMOP2_WAIT_NS_ABS:
2188 pReq->Hdr.rc = SUPSemEventWaitNsAbsIntr(pSession, hEvent, pReq->u.In.uArg.uAbsNsTimeout);
2189 break;
2190 case SUPSEMOP2_WAIT_NS_REL:
2191 pReq->Hdr.rc = SUPSemEventWaitNsRelIntr(pSession, hEvent, pReq->u.In.uArg.cRelNsTimeout);
2192 break;
2193 case SUPSEMOP2_SIGNAL:
2194 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
2195 break;
2196 case SUPSEMOP2_CLOSE:
2197 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
2198 break;
2199 case SUPSEMOP2_RESET:
2200 default:
2201 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2202 break;
2203 }
2204 break;
2205 }
2206
2207 case SUP_SEM_TYPE_EVENT_MULTI:
2208 {
2209 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2210 switch (pReq->u.In.uOp)
2211 {
2212 case SUPSEMOP2_WAIT_MS_REL:
2213 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.uArg.cRelMsTimeout);
2214 break;
2215 case SUPSEMOP2_WAIT_NS_ABS:
2216 pReq->Hdr.rc = SUPSemEventMultiWaitNsAbsIntr(pSession, hEventMulti, pReq->u.In.uArg.uAbsNsTimeout);
2217 break;
2218 case SUPSEMOP2_WAIT_NS_REL:
2219 pReq->Hdr.rc = SUPSemEventMultiWaitNsRelIntr(pSession, hEventMulti, pReq->u.In.uArg.cRelNsTimeout);
2220 break;
2221 case SUPSEMOP2_SIGNAL:
2222 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
2223 break;
2224 case SUPSEMOP2_CLOSE:
2225 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
2226 break;
2227 case SUPSEMOP2_RESET:
2228 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
2229 break;
2230 default:
2231 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2232 break;
2233 }
2234 break;
2235 }
2236
2237 default:
2238 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2239 break;
2240 }
2241 return 0;
2242 }
2243
2244 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP3):
2245 {
2246 /* validate */
2247 PSUPSEMOP3 pReq = (PSUPSEMOP3)pReqHdr;
2248 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP3, SUP_IOCTL_SEM_OP3_SIZE_IN, SUP_IOCTL_SEM_OP3_SIZE_OUT);
2249 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, pReq->u.In.u32Reserved == 0 && pReq->u.In.u64Reserved == 0);
2250
2251 /* execute */
2252 switch (pReq->u.In.uType)
2253 {
2254 case SUP_SEM_TYPE_EVENT:
2255 {
2256 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2257 switch (pReq->u.In.uOp)
2258 {
2259 case SUPSEMOP3_CREATE:
2260 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2261 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
2262 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
2263 break;
2264 case SUPSEMOP3_GET_RESOLUTION:
2265 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2266 pReq->Hdr.rc = VINF_SUCCESS;
2267 pReq->Hdr.cbOut = sizeof(*pReq);
2268 pReq->u.Out.cNsResolution = SUPSemEventGetResolution(pSession);
2269 break;
2270 default:
2271 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2272 break;
2273 }
2274 break;
2275 }
2276
2277 case SUP_SEM_TYPE_EVENT_MULTI:
2278 {
2279 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2280 switch (pReq->u.In.uOp)
2281 {
2282 case SUPSEMOP3_CREATE:
2283 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2284 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
2285 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
2286 break;
2287 case SUPSEMOP3_GET_RESOLUTION:
2288 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2289 pReq->Hdr.rc = VINF_SUCCESS;
2290 pReq->u.Out.cNsResolution = SUPSemEventMultiGetResolution(pSession);
2291 break;
2292 default:
2293 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2294 break;
2295 }
2296 break;
2297 }
2298
2299 default:
2300 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2301 break;
2302 }
2303 return 0;
2304 }
2305
2306 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2307 {
2308 /* validate */
2309 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2310 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2311
2312 /* execute */
2313 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2314 if (RT_FAILURE(pReq->Hdr.rc))
2315 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2316 return 0;
2317 }
2318
2319 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_OPEN):
2320 {
2321 /* validate */
2322 PSUPTRACEROPEN pReq = (PSUPTRACEROPEN)pReqHdr;
2323 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_OPEN);
2324
2325 /* execute */
2326 pReq->Hdr.rc = supdrvIOCtl_TracerOpen(pDevExt, pSession, pReq->u.In.uCookie, pReq->u.In.uArg);
2327 return 0;
2328 }
2329
2330 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_CLOSE):
2331 {
2332 /* validate */
2333 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_CLOSE);
2334
2335 /* execute */
2336 pReqHdr->rc = supdrvIOCtl_TracerClose(pDevExt, pSession);
2337 return 0;
2338 }
2339
2340 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_IOCTL):
2341 {
2342 /* validate */
2343 PSUPTRACERIOCTL pReq = (PSUPTRACERIOCTL)pReqHdr;
2344 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_IOCTL);
2345
2346 /* execute */
2347 pReqHdr->rc = supdrvIOCtl_TracerIOCtl(pDevExt, pSession, pReq->u.In.uCmd, pReq->u.In.uArg, &pReq->u.Out.iRetVal);
2348 return 0;
2349 }
2350
2351 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_REG):
2352 {
2353 /* validate */
2354 PSUPTRACERUMODREG pReq = (PSUPTRACERUMODREG)pReqHdr;
2355 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_REG);
2356 if (!RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)))
2357 return VERR_INVALID_PARAMETER;
2358
2359 /* execute */
2360 pReqHdr->rc = supdrvIOCtl_TracerUmodRegister(pDevExt, pSession,
2361 pReq->u.In.R3PtrVtgHdr, pReq->u.In.uVtgHdrAddr,
2362 pReq->u.In.R3PtrStrTab, pReq->u.In.cbStrTab,
2363 pReq->u.In.szName, pReq->u.In.fFlags);
2364 return 0;
2365 }
2366
2367 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_DEREG):
2368 {
2369 /* validate */
2370 PSUPTRACERUMODDEREG pReq = (PSUPTRACERUMODDEREG)pReqHdr;
2371 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_DEREG);
2372
2373 /* execute */
2374 pReqHdr->rc = supdrvIOCtl_TracerUmodDeregister(pDevExt, pSession, pReq->u.In.pVtgHdr);
2375 return 0;
2376 }
2377
2378 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE):
2379 {
2380 /* validate */
2381 PSUPTRACERUMODFIREPROBE pReq = (PSUPTRACERUMODFIREPROBE)pReqHdr;
2382 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE);
2383
2384 supdrvIOCtl_TracerUmodProbeFire(pDevExt, pSession, &pReq->u.In);
2385 pReqHdr->rc = VINF_SUCCESS;
2386 return 0;
2387 }
2388
2389 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_MSR_PROBER):
2390 {
2391 /* validate */
2392 PSUPMSRPROBER pReq = (PSUPMSRPROBER)pReqHdr;
2393 REQ_CHECK_SIZES(SUP_IOCTL_MSR_PROBER);
2394 REQ_CHECK_EXPR(SUP_IOCTL_MSR_PROBER,
2395 pReq->u.In.enmOp > SUPMSRPROBEROP_INVALID && pReq->u.In.enmOp < SUPMSRPROBEROP_END);
2396
2397 pReqHdr->rc = supdrvIOCtl_MsrProber(pDevExt, pReq);
2398 return 0;
2399 }
2400
2401 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_RESUME_SUSPENDED_KBDS):
2402 {
2403 /* validate */
2404 REQ_CHECK_SIZES(SUP_IOCTL_RESUME_SUSPENDED_KBDS);
2405
2406 pReqHdr->rc = supdrvIOCtl_ResumeSuspendedKbds();
2407 return 0;
2408 }
2409
2410 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_DELTA_MEASURE):
2411 {
2412 /* validate */
2413 PSUPTSCDELTAMEASURE pReq = (PSUPTSCDELTAMEASURE)pReqHdr;
2414 REQ_CHECK_SIZES(SUP_IOCTL_TSC_DELTA_MEASURE);
2415
2416 pReqHdr->rc = supdrvIOCtl_TscDeltaMeasure(pDevExt, pSession, pReq);
2417 return 0;
2418 }
2419
2420 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_READ):
2421 {
2422 /* validate */
2423 PSUPTSCREAD pReq = (PSUPTSCREAD)pReqHdr;
2424 REQ_CHECK_SIZES(SUP_IOCTL_TSC_READ);
2425
2426 pReqHdr->rc = supdrvIOCtl_TscRead(pDevExt, pSession, pReq);
2427 return 0;
2428 }
2429
2430 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_SET_FLAGS):
2431 {
2432 /* validate */
2433 PSUPGIPSETFLAGS pReq = (PSUPGIPSETFLAGS)pReqHdr;
2434 REQ_CHECK_SIZES(SUP_IOCTL_GIP_SET_FLAGS);
2435
2436 pReqHdr->rc = supdrvIOCtl_GipSetFlags(pDevExt, pSession, pReq->u.In.fOrMask, pReq->u.In.fAndMask);
2437 return 0;
2438 }
2439
2440 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_UCODE_REV):
2441 {
2442 /* validate */
2443 PSUPUCODEREV pReq = (PSUPUCODEREV)pReqHdr;
2444 REQ_CHECK_SIZES(SUP_IOCTL_UCODE_REV);
2445
2446 /* execute */
2447 pReq->Hdr.rc = SUPR0QueryUcodeRev(pSession, &pReq->u.Out.MicrocodeRev);
2448 if (RT_FAILURE(pReq->Hdr.rc))
2449 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2450 return 0;
2451 }
2452
2453 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_HWVIRT_MSRS):
2454 {
2455 /* validate */
2456 PSUPGETHWVIRTMSRS pReq = (PSUPGETHWVIRTMSRS)pReqHdr;
2457 REQ_CHECK_SIZES(SUP_IOCTL_GET_HWVIRT_MSRS);
2458 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1 && !pReq->u.In.fReserved2,
2459 ("SUP_IOCTL_GET_HWVIRT_MSRS: fReserved0=%d fReserved1=%d fReserved2=%d\n", pReq->u.In.fReserved0,
2460 pReq->u.In.fReserved1, pReq->u.In.fReserved2));
2461
2462 /* execute */
2463 pReq->Hdr.rc = SUPR0GetHwvirtMsrs(&pReq->u.Out.HwvirtMsrs, 0 /* fCaps */, pReq->u.In.fForce);
2464 if (RT_FAILURE(pReq->Hdr.rc))
2465 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2466 return 0;
2467 }
2468
2469 default:
2470 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2471 break;
2472 }
2473 return VERR_GENERAL_FAILURE;
2474}
2475
2476
/**
 * I/O Control inner worker for the restricted operations.
 *
 * Restricted sessions only get access to a tiny subset of the ioctls: the
 * cookie negotiation and the VT capability query.  Everything else falls
 * through to the default case and fails with VERR_GENERAL_FAILURE.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_PARAMETER if the request is invalid.
 *
 * @param   uIOCtl      Function number.
 * @param   pDevExt     Device extension.
 * @param   pSession    Session data.
 * @param   pReqHdr     The request header.
 */
static int supdrvIOCtlInnerRestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
{
    /*
     * The switch.
     */
    switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
    {
        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
        {
            PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
            /* The client must present the well-known magic string before any
               version negotiation takes place. */
            if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
            {
                OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
                pReq->Hdr.rc = VERR_INVALID_MAGIC;
                return 0;
            }

            /*
             * Match the version.
             * The current logic is very simple, match the major interface version.
             */
            if (    pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
                ||  (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
            {
                OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
                            pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
                pReq->u.Out.u32Cookie         = 0xffffffff;
                pReq->u.Out.u32SessionCookie  = 0xffffffff;
                pReq->u.Out.u32SessionVersion = 0xffffffff;
                pReq->u.Out.u32DriverVersion  = SUPDRV_IOC_VERSION;
                pReq->u.Out.pSession          = NULL;
                pReq->u.Out.cFunctions        = 0;
                pReq->Hdr.rc = VERR_VERSION_MISMATCH;
                return 0;
            }

            /*
             * Fill in return data and be gone.
             * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
             *      u32SessionVersion <= u32ReqVersion!
             */
            /** @todo Somehow validate the client and negotiate a secure cookie... */
            pReq->u.Out.u32Cookie         = pDevExt->u32Cookie;
            pReq->u.Out.u32SessionCookie  = pSession->u32Cookie;
            pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
            pReq->u.Out.u32DriverVersion  = SUPDRV_IOC_VERSION;
            pReq->u.Out.pSession          = pSession;
            /* Restricted sessions are not given a function table. */
            pReq->u.Out.cFunctions        = 0;
            pReq->Hdr.rc = VINF_SUCCESS;
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
        {
            /* validate */
            PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);

            /* execute */
            pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
            /* On failure only the header is returned to the caller. */
            if (RT_FAILURE(pReq->Hdr.rc))
                pReq->Hdr.cbOut = sizeof(pReq->Hdr);
            return 0;
        }

        default:
            Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
            break;
    }
    return VERR_GENERAL_FAILURE;
}
2560
2561
/**
 * I/O Control worker.
 *
 * Validates the request header (buffer size, header magic, cookies) and then
 * dispatches the request to the unrestricted or the restricted inner worker
 * depending on the session's privileges.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_PARAMETER if the request is invalid.
 *
 * @param   uIOCtl      Function number.
 * @param   pDevExt     Device extension.
 * @param   pSession    Session data.
 * @param   pReqHdr     The request header.
 * @param   cbReq       The size of the request buffer.
 */
int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr, size_t cbReq)
{
    int rc;
    VBOXDRV_IOCTL_ENTRY(pSession, uIOCtl, pReqHdr);

    /*
     * Validate the request.
     */
    if (RT_UNLIKELY(cbReq < sizeof(*pReqHdr)))
    {
        OSDBGPRINT(("vboxdrv: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
        VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
        return VERR_INVALID_PARAMETER;
    }
    /* Both the declared input and output sizes must cover at least the header
       and fit within the caller supplied buffer. */
    if (RT_UNLIKELY(    (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
                    ||  pReqHdr->cbIn < sizeof(*pReqHdr)
                    ||  pReqHdr->cbIn > cbReq
                    ||  pReqHdr->cbOut < sizeof(*pReqHdr)
                    ||  pReqHdr->cbOut > cbReq))
    {
        OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
                    (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
        VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
        return VERR_INVALID_PARAMETER;
    }
    if (RT_UNLIKELY(!RT_VALID_PTR(pSession)))
    {
        OSDBGPRINT(("vboxdrv: Invalid pSession value %p (ioctl=%p)\n", pSession, (void *)uIOCtl));
        VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
        return VERR_INVALID_PARAMETER;
    }
    if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
    {
        /* The cookie request carries the well-known initial cookie as no
           per-session cookie has been negotiated yet. */
        if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
        {
            OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
            VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
            return VERR_INVALID_PARAMETER;
        }
    }
    /* All other requests must present both the device and session cookies
       handed out during the cookie negotiation. */
    else if (RT_UNLIKELY(    pReqHdr->u32Cookie != pDevExt->u32Cookie
                         ||  pReqHdr->u32SessionCookie != pSession->u32Cookie))
    {
        OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
        VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Hand it to an inner function to avoid lots of unnecessary return tracepoints.
     */
    if (pSession->fUnrestricted)
        rc = supdrvIOCtlInnerUnrestricted(uIOCtl, pDevExt, pSession, pReqHdr);
    else
        rc = supdrvIOCtlInnerRestricted(uIOCtl, pDevExt, pSession, pReqHdr);

    VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, pReqHdr->rc, rc);
    return rc;
}
2633
2634
/**
 * Inter-Driver Communication (IDC) worker.
 *
 * Handles connect/disconnect, symbol resolution and component factory
 * (de)registration requests from other kernel drivers.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_INVALID_PARAMETER if the request is invalid.
 * @retval  VERR_NOT_SUPPORTED if the request isn't supported.
 *
 * @param   uReq        The request (function) code.
 * @param   pDevExt     Device extension.
 * @param   pSession    Session data.
 * @param   pReqHdr     The request header.
 */
int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
{
    /*
     * The OS specific code has already validated the pSession
     * pointer, and the request size being greater or equal to
     * size of the header.
     *
     * So, just check that pSession is a kernel context session.
     */
    if (RT_UNLIKELY(    pSession
                    &&  pSession->R0Process != NIL_RTR0PROCESS))
        return VERR_INVALID_PARAMETER;

/*
 * Validation macro.
 */
#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
    do { \
        if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
        { \
            OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
                         (long)pReqHdr->cb, (long)(cbExpect))); \
            return pReqHdr->rc = VERR_INVALID_PARAMETER; \
        } \
    } while (0)

    switch (uReq)
    {
        case SUPDRV_IDC_REQ_CONNECT:
        {
            PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
            REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));

            /*
             * Validate the cookie and other input.
             */
            /* A connect request must not already carry a session. */
            if (pReq->Hdr.pSession != NULL)
            {
                OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Hdr.pSession=%p expected NULL!\n", pReq->Hdr.pSession));
                return pReqHdr->rc = VERR_INVALID_PARAMETER;
            }
            if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
            {
                OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
                            (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
                return pReqHdr->rc = VERR_INVALID_PARAMETER;
            }
            /* The requested version range must be sane: min <= req and same
               major version. */
            if (    pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
                ||  (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
            {
                OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
                            pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
                return pReqHdr->rc = VERR_INVALID_PARAMETER;
            }
            if (pSession != NULL)
            {
                OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pSession));
                return pReqHdr->rc = VERR_INVALID_PARAMETER;
            }

            /*
             * Match the version.
             * The current logic is very simple, match the major interface version.
             */
            if (    pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
                ||  (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
            {
                OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
                            pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
                pReq->u.Out.pSession        = NULL;
                pReq->u.Out.uSessionVersion = 0xffffffff;
                pReq->u.Out.uDriverVersion  = SUPDRV_IDC_VERSION;
                pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
                pReq->Hdr.rc = VERR_VERSION_MISMATCH;
                return VINF_SUCCESS;
            }

            pReq->u.Out.pSession        = NULL;
            pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
            pReq->u.Out.uDriverVersion  = SUPDRV_IDC_VERSION;
            pReq->u.Out.uDriverRevision = VBOX_SVN_REV;

            /* IDC clients get an unrestricted kernel session. */
            pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, true /*fUnrestricted*/, &pSession);
            if (RT_FAILURE(pReq->Hdr.rc))
            {
                OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
                return VINF_SUCCESS;
            }

            pReq->u.Out.pSession = pSession;
            pReq->Hdr.pSession = pSession;

            return VINF_SUCCESS;
        }

        case SUPDRV_IDC_REQ_DISCONNECT:
        {
            REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));

            supdrvSessionRelease(pSession);
            return pReqHdr->rc = VINF_SUCCESS;
        }

        case SUPDRV_IDC_REQ_GET_SYMBOL:
        {
            PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
            REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));

            pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
            return VINF_SUCCESS;
        }

        case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
        {
            PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
            REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));

            pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
            return VINF_SUCCESS;
        }

        case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
        {
            PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
            REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));

            pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
            return VINF_SUCCESS;
        }

        default:
            Log(("Unknown IDC %#lx\n", (long)uReq));
            break;
    }

#undef REQ_CHECK_IDC_SIZE
    return VERR_NOT_SUPPORTED;
}
2786
2787
/**
 * Register an object for reference counting.
 * The object is registered with one reference in the specified session.
 *
 * @returns Unique identifier on success (pointer).
 *          All future references must use this identifier.
 * @returns NULL on failure.
 * @param   pSession        The caller's session.
 * @param   enmType         The object type.
 * @param   pfnDestructor   The destructor function which will be called when the reference count reaches 0.
 * @param   pvUser1         The first user argument.
 * @param   pvUser2         The second user argument.
 */
SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
{
    PSUPDRVDEVEXT   pDevExt = pSession->pDevExt;
    PSUPDRVOBJ      pObj;
    PSUPDRVUSAGE    pUsage;

    /*
     * Validate the input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
    AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
    AssertPtrReturn(pfnDestructor, NULL);

    /*
     * Allocate and initialize the object.
     */
    pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
    if (!pObj)
        return NULL;
    pObj->u32Magic       = SUPDRVOBJ_MAGIC;
    pObj->enmType        = enmType;
    pObj->pNext          = NULL;
    pObj->cUsage         = 1;
    pObj->pfnDestructor  = pfnDestructor;
    pObj->pvUser1        = pvUser1;
    pObj->pvUser2        = pvUser2;
    /* Record creator credentials for the default access policy in
       SUPR0ObjVerifyAccess(). */
    pObj->CreatorUid     = pSession->Uid;
    pObj->CreatorGid     = pSession->Gid;
    pObj->CreatorProcess = pSession->Process;
    supdrvOSObjInitCreator(pObj, pSession);

    /*
     * Allocate the usage record.
     * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
     */
    RTSpinlockAcquire(pDevExt->Spinlock);

    pUsage = pDevExt->pUsageFree;
    if (pUsage)
        pDevExt->pUsageFree = pUsage->pNext;
    else
    {
        /* No recycled record available: drop the spinlock while allocating
           a fresh one (allocation may block), then reacquire it. */
        RTSpinlockRelease(pDevExt->Spinlock);
        pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
        if (!pUsage)
        {
            RTMemFree(pObj);
            return NULL;
        }
        RTSpinlockAcquire(pDevExt->Spinlock);
    }

    /*
     * Insert the object and create the session usage record.
     */
    /* The object. */
    pObj->pNext         = pDevExt->pObjs;
    pDevExt->pObjs      = pObj;

    /* The session record. */
    pUsage->cUsage      = 1;
    pUsage->pObj        = pObj;
    pUsage->pNext       = pSession->pUsage;
    /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
    pSession->pUsage    = pUsage;

    RTSpinlockRelease(pDevExt->Spinlock);

    Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
    return pObj;
}
2872
2873
/**
 * Increment the reference counter for the object associating the reference
 * with the specified session.
 *
 * Convenience wrapper around SUPR0ObjAddRefEx() with blocking allocation
 * allowed.
 *
 * @returns IPRT status code.
 * @param   pvObj       The identifier returned by SUPR0ObjRegister().
 * @param   pSession    The session which is referencing the object.
 *
 * @remarks The caller should not own any spinlocks and must carefully protect
 *          itself against potential race with the destructor so freed memory
 *          isn't accessed here.
 */
SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
{
    return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
}
2890
2891
/**
 * Increment the reference counter for the object associating the reference
 * with the specified session.
 *
 * @returns IPRT status code.
 * @retval  VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
 *          couldn't be allocated. (If you see this you're not doing the right
 *          thing and it won't ever work reliably.)
 *
 * @param   pvObj       The identifier returned by SUPR0ObjRegister().
 * @param   pSession    The session which is referencing the object.
 * @param   fNoBlocking Set if it's not OK to block. Never try to make the
 *                      first reference to an object in a session with this
 *                      argument set.
 *
 * @remarks The caller should not own any spinlocks and must carefully protect
 *          itself against potential race with the destructor so freed memory
 *          isn't accessed here.
 */
SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
{
    PSUPDRVDEVEXT   pDevExt = pSession->pDevExt;
    PSUPDRVOBJ      pObj    = (PSUPDRVOBJ)pvObj;
    int             rc      = VINF_SUCCESS;
    PSUPDRVUSAGE    pUsagePre;
    PSUPDRVUSAGE    pUsage;

    /*
     * Validate the input.
     * Be ready for the destruction race (someone might be stuck in the
     * destructor waiting a lock we own).
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrReturn(pObj, VERR_INVALID_POINTER);
    /* SUPDRVOBJ_MAGIC_DEAD is tolerated here (the dead case is rejected
       under the spinlock below) to give a better assertion message in the
       destruction race. */
    AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
                    ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
                    VERR_INVALID_PARAMETER);

    RTSpinlockAcquire(pDevExt->Spinlock);

    if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
    {
        RTSpinlockRelease(pDevExt->Spinlock);

        AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
        return VERR_WRONG_ORDER;
    }

    /*
     * Preallocate the usage record if we can.
     */
    pUsagePre = pDevExt->pUsageFree;
    if (pUsagePre)
        pDevExt->pUsageFree = pUsagePre->pNext;
    else if (!fNoBlocking)
    {
        /* Allocation may block, so drop the spinlock first and re-check the
           object magic afterwards (it may have been destroyed meanwhile). */
        RTSpinlockRelease(pDevExt->Spinlock);
        pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
        if (!pUsagePre)
            return VERR_NO_MEMORY;

        RTSpinlockAcquire(pDevExt->Spinlock);
        if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
        {
            RTSpinlockRelease(pDevExt->Spinlock);

            AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
            return VERR_WRONG_ORDER;
        }
    }

    /*
     * Reference the object.
     */
    pObj->cUsage++;

    /*
     * Look for the session record.
     */
    for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
    {
        /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
        if (pUsage->pObj == pObj)
            break;
    }
    if (pUsage)
        pUsage->cUsage++;
    else if (pUsagePre)
    {
        /* create a new session record. */
        pUsagePre->cUsage   = 1;
        pUsagePre->pObj     = pObj;
        pUsagePre->pNext    = pSession->pUsage;
        pSession->pUsage    = pUsagePre;
        /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/

        pUsagePre = NULL;
    }
    else
    {
        /* fNoBlocking was set and no record could be preallocated:
           undo the reference and tell the caller to retry. */
        pObj->cUsage--;
        rc = VERR_TRY_AGAIN;
    }

    /*
     * Put any unused usage record into the free list..
     */
    if (pUsagePre)
    {
        pUsagePre->pNext    = pDevExt->pUsageFree;
        pDevExt->pUsageFree = pUsagePre;
    }

    RTSpinlockRelease(pDevExt->Spinlock);

    return rc;
}
3009
3010
3011/**
3012 * Decrement / destroy a reference counter record for an object.
3013 *
3014 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
3015 *
3016 * @returns IPRT status code.
3017 * @retval VINF_SUCCESS if not destroyed.
3018 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
3019 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
3020 * string builds.
3021 *
3022 * @param pvObj The identifier returned by SUPR0ObjRegister().
3023 * @param pSession The session which is referencing the object.
3024 */
3025SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
3026{
3027 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3028 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3029 int rc = VERR_INVALID_PARAMETER;
3030 PSUPDRVUSAGE pUsage;
3031 PSUPDRVUSAGE pUsagePrev;
3032
3033 /*
3034 * Validate the input.
3035 */
3036 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3037 AssertMsgReturn(VALID_PTR(pObj)&& pObj->u32Magic == SUPDRVOBJ_MAGIC,
3038 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3039 VERR_INVALID_PARAMETER);
3040
3041 /*
3042 * Acquire the spinlock and look for the usage record.
3043 */
3044 RTSpinlockAcquire(pDevExt->Spinlock);
3045
3046 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
3047 pUsage;
3048 pUsagePrev = pUsage, pUsage = pUsage->pNext)
3049 {
3050 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3051 if (pUsage->pObj == pObj)
3052 {
3053 rc = VINF_SUCCESS;
3054 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
3055 if (pUsage->cUsage > 1)
3056 {
3057 pObj->cUsage--;
3058 pUsage->cUsage--;
3059 }
3060 else
3061 {
3062 /*
3063 * Free the session record.
3064 */
3065 if (pUsagePrev)
3066 pUsagePrev->pNext = pUsage->pNext;
3067 else
3068 pSession->pUsage = pUsage->pNext;
3069 pUsage->pNext = pDevExt->pUsageFree;
3070 pDevExt->pUsageFree = pUsage;
3071
3072 /* What about the object? */
3073 if (pObj->cUsage > 1)
3074 pObj->cUsage--;
3075 else
3076 {
3077 /*
3078 * Object is to be destroyed, unlink it.
3079 */
3080 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
3081 rc = VINF_OBJECT_DESTROYED;
3082 if (pDevExt->pObjs == pObj)
3083 pDevExt->pObjs = pObj->pNext;
3084 else
3085 {
3086 PSUPDRVOBJ pObjPrev;
3087 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
3088 if (pObjPrev->pNext == pObj)
3089 {
3090 pObjPrev->pNext = pObj->pNext;
3091 break;
3092 }
3093 Assert(pObjPrev);
3094 }
3095 }
3096 }
3097 break;
3098 }
3099 }
3100
3101 RTSpinlockRelease(pDevExt->Spinlock);
3102
3103 /*
3104 * Call the destructor and free the object if required.
3105 */
3106 if (rc == VINF_OBJECT_DESTROYED)
3107 {
3108 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
3109 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
3110 if (pObj->pfnDestructor)
3111 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
3112 RTMemFree(pObj);
3113 }
3114
3115 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
3116 return rc;
3117}
3118
3119
3120/**
3121 * Verifies that the current process can access the specified object.
3122 *
3123 * @returns The following IPRT status code:
3124 * @retval VINF_SUCCESS if access was granted.
3125 * @retval VERR_PERMISSION_DENIED if denied access.
3126 * @retval VERR_INVALID_PARAMETER if invalid parameter.
3127 *
3128 * @param pvObj The identifier returned by SUPR0ObjRegister().
3129 * @param pSession The session which wishes to access the object.
3130 * @param pszObjName Object string name. This is optional and depends on the object type.
3131 *
3132 * @remark The caller is responsible for making sure the object isn't removed while
3133 * we're inside this function. If uncertain about this, just call AddRef before calling us.
3134 */
3135SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
3136{
3137 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3138 int rc;
3139
3140 /*
3141 * Validate the input.
3142 */
3143 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3144 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3145 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3146 VERR_INVALID_PARAMETER);
3147
3148 /*
3149 * Check access. (returns true if a decision has been made.)
3150 */
3151 rc = VERR_INTERNAL_ERROR;
3152 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
3153 return rc;
3154
3155 /*
3156 * Default policy is to allow the user to access his own
3157 * stuff but nothing else.
3158 */
3159 if (pObj->CreatorUid == pSession->Uid)
3160 return VINF_SUCCESS;
3161 return VERR_PERMISSION_DENIED;
3162}
3163
3164
/**
 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionVM member.
 *
 * @returns The associated VM pointer (NULL if none or the session is invalid).
 * @param   pSession    The session of the current thread.
 */
SUPR0DECL(PVM) SUPR0GetSessionVM(PSUPDRVSESSION pSession)
{
    AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
    return pSession->pSessionVM;
}
3176
3177
/**
 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionGVM member.
 *
 * @returns The associated GVM pointer (NULL if none or the session is invalid).
 * @param   pSession    The session of the current thread.
 */
SUPR0DECL(PGVM) SUPR0GetSessionGVM(PSUPDRVSESSION pSession)
{
    AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
    return pSession->pSessionGVM;
}
3189
3190
3191/**
3192 * API for the VMMR0 module to work the SUPDRVSESSION::pSessionVM member.
3193 *
3194 * This will fail if there is already a VM associated with the session and pVM
3195 * isn't NULL.
3196 *
3197 * @retval VINF_SUCCESS
3198 * @retval VERR_ALREADY_EXISTS if there already is a VM associated with the
3199 * session.
3200 * @retval VERR_INVALID_PARAMETER if only one of the parameters are NULL or if
3201 * the session is invalid.
3202 *
3203 * @param pSession The session of the current thread.
3204 * @param pGVM The GVM to associate with the session. Pass NULL to
3205 * dissassociate.
3206 * @param pVM The VM to associate with the session. Pass NULL to
3207 * dissassociate.
3208 */
3209SUPR0DECL(int) SUPR0SetSessionVM(PSUPDRVSESSION pSession, PGVM pGVM, PVM pVM)
3210{
3211 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3212 AssertReturn((pGVM != NULL) == (pVM != NULL), VERR_INVALID_PARAMETER);
3213
3214 RTSpinlockAcquire(pSession->pDevExt->Spinlock);
3215 if (pGVM)
3216 {
3217 if (!pSession->pSessionGVM)
3218 {
3219 pSession->pSessionGVM = pGVM;
3220 pSession->pSessionVM = pVM;
3221 pSession->pFastIoCtrlVM = NULL;
3222 }
3223 else
3224 {
3225 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3226 SUPR0Printf("SUPR0SetSessionVM: Unable to associated GVM/VM %p/%p with session %p as it has %p/%p already!\n",
3227 pGVM, pVM, pSession, pSession->pSessionGVM, pSession->pSessionVM);
3228 return VERR_ALREADY_EXISTS;
3229 }
3230 }
3231 else
3232 {
3233 pSession->pSessionGVM = NULL;
3234 pSession->pSessionVM = NULL;
3235 pSession->pFastIoCtrlVM = NULL;
3236 }
3237 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3238 return VINF_SUCCESS;
3239}
3240
3241
/** @copydoc RTLogGetDefaultInstanceEx
 * @remarks To allow overriding RTLogGetDefaultInstanceEx locally. */
SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogInstanceEx(uint32_t fFlagsAndGroup)
{
    return RTLogGetDefaultInstanceEx(fFlagsAndGroup);
}
3248
3249
/** @copydoc RTLogRelGetDefaultInstanceEx
 * @remarks To allow overriding RTLogRelGetDefaultInstanceEx locally. */
SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogRelInstanceEx(uint32_t fFlagsAndGroup)
{
    return RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
}
3256
3257
3258/**
3259 * Lock pages.
3260 *
3261 * @returns IPRT status code.
3262 * @param pSession Session to which the locked memory should be associated.
3263 * @param pvR3 Start of the memory range to lock.
3264 * This must be page aligned.
3265 * @param cPages Number of pages to lock.
3266 * @param paPages Where to put the physical addresses of locked memory.
3267 */
3268SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
3269{
3270 int rc;
3271 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3272 const size_t cb = (size_t)cPages << PAGE_SHIFT;
3273 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
3274
3275 /*
3276 * Verify input.
3277 */
3278 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3279 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
3280 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
3281 || !pvR3)
3282 {
3283 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
3284 return VERR_INVALID_PARAMETER;
3285 }
3286
3287 /*
3288 * Let IPRT do the job.
3289 */
3290 Mem.eType = MEMREF_TYPE_LOCKED;
3291 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3292 if (RT_SUCCESS(rc))
3293 {
3294 uint32_t iPage = cPages;
3295 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
3296 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
3297
3298 while (iPage-- > 0)
3299 {
3300 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3301 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
3302 {
3303 AssertMsgFailed(("iPage=%d\n", iPage));
3304 rc = VERR_INTERNAL_ERROR;
3305 break;
3306 }
3307 }
3308 if (RT_SUCCESS(rc))
3309 rc = supdrvMemAdd(&Mem, pSession);
3310 if (RT_FAILURE(rc))
3311 {
3312 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
3313 AssertRC(rc2);
3314 }
3315 }
3316
3317 return rc;
3318}
3319
3320
/**
 * Unlocks the memory pointed to by pv.
 *
 * The range must previously have been locked via SUPR0LockMem() on the same
 * session; the lookup is done by the ring-3 address.
 *
 * @returns IPRT status code.
 * @param   pSession    Session to which the memory was locked.
 * @param   pvR3        Memory to unlock.
 */
SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
{
    LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
}
3334
3335
3336/**
3337 * Allocates a chunk of page aligned memory with contiguous and fixed physical
3338 * backing.
3339 *
3340 * @returns IPRT status code.
3341 * @param pSession Session data.
3342 * @param cPages Number of pages to allocate.
3343 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
3344 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
3345 * @param pHCPhys Where to put the physical address of allocated memory.
3346 */
3347SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
3348{
3349 int rc;
3350 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3351 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
3352
3353 /*
3354 * Validate input.
3355 */
3356 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3357 if (!ppvR3 || !ppvR0 || !pHCPhys)
3358 {
3359 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
3360 pSession, ppvR0, ppvR3, pHCPhys));
3361 return VERR_INVALID_PARAMETER;
3362
3363 }
3364 if (cPages < 1 || cPages >= 256)
3365 {
3366 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3367 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3368 }
3369
3370 /*
3371 * Let IPRT do the job.
3372 */
3373 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
3374 if (RT_SUCCESS(rc))
3375 {
3376 int rc2;
3377 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3378 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3379 if (RT_SUCCESS(rc))
3380 {
3381 Mem.eType = MEMREF_TYPE_CONT;
3382 rc = supdrvMemAdd(&Mem, pSession);
3383 if (!rc)
3384 {
3385 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3386 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3387 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
3388 return 0;
3389 }
3390
3391 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3392 AssertRC(rc2);
3393 }
3394 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3395 AssertRC(rc2);
3396 }
3397
3398 return rc;
3399}
3400
3401
/**
 * Frees memory allocated using SUPR0ContAlloc().
 *
 * Releases the session's MEMREF_TYPE_CONT record for the address via
 * supdrvMemRelease.
 *
 * @returns IPRT status code.
 * @param   pSession    The session to which the memory was allocated.
 * @param   uPtr        Pointer to the memory (ring-3 or ring-0).
 */
SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
{
    LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
}
3415
3416
3417/**
3418 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
3419 *
3420 * The memory isn't zeroed.
3421 *
3422 * @returns IPRT status code.
3423 * @param pSession Session data.
3424 * @param cPages Number of pages to allocate.
3425 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
3426 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
3427 * @param paPages Where to put the physical addresses of allocated memory.
3428 */
3429SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
3430{
3431 unsigned iPage;
3432 int rc;
3433 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3434 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
3435
3436 /*
3437 * Validate input.
3438 */
3439 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3440 if (!ppvR3 || !ppvR0 || !paPages)
3441 {
3442 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
3443 pSession, ppvR3, ppvR0, paPages));
3444 return VERR_INVALID_PARAMETER;
3445
3446 }
3447 if (cPages < 1 || cPages >= 256)
3448 {
3449 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3450 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3451 }
3452
3453 /*
3454 * Let IPRT do the work.
3455 */
3456 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
3457 if (RT_SUCCESS(rc))
3458 {
3459 int rc2;
3460 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3461 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3462 if (RT_SUCCESS(rc))
3463 {
3464 Mem.eType = MEMREF_TYPE_LOW;
3465 rc = supdrvMemAdd(&Mem, pSession);
3466 if (!rc)
3467 {
3468 for (iPage = 0; iPage < cPages; iPage++)
3469 {
3470 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3471 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", paPages[iPage]));
3472 }
3473 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3474 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3475 return 0;
3476 }
3477
3478 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3479 AssertRC(rc2);
3480 }
3481
3482 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3483 AssertRC(rc2);
3484 }
3485
3486 return rc;
3487}
3488
3489
/**
 * Frees memory allocated using SUPR0LowAlloc().
 *
 * Releases the session's MEMREF_TYPE_LOW record for the address via
 * supdrvMemRelease.
 *
 * @returns IPRT status code.
 * @param   pSession    The session to which the memory was allocated.
 * @param   uPtr        Pointer to the memory (ring-3 or ring-0).
 */
SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
{
    LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
}
3503
3504
3505
3506/**
3507 * Allocates a chunk of memory with both R0 and R3 mappings.
3508 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
3509 *
3510 * @returns IPRT status code.
3511 * @param pSession The session to associated the allocation with.
3512 * @param cb Number of bytes to allocate.
3513 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3514 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3515 */
3516SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
3517{
3518 int rc;
3519 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3520 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
3521
3522 /*
3523 * Validate input.
3524 */
3525 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3526 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
3527 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3528 if (cb < 1 || cb >= _4M)
3529 {
3530 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
3531 return VERR_INVALID_PARAMETER;
3532 }
3533
3534 /*
3535 * Let IPRT do the work.
3536 */
3537 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
3538 if (RT_SUCCESS(rc))
3539 {
3540 int rc2;
3541 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3542 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3543 if (RT_SUCCESS(rc))
3544 {
3545 Mem.eType = MEMREF_TYPE_MEM;
3546 rc = supdrvMemAdd(&Mem, pSession);
3547 if (!rc)
3548 {
3549 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3550 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3551 return VINF_SUCCESS;
3552 }
3553
3554 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3555 AssertRC(rc2);
3556 }
3557
3558 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3559 AssertRC(rc2);
3560 }
3561
3562 return rc;
3563}
3564
3565
/**
 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
 *
 * The address may be either the ring-0 or the ring-3 mapping address of the
 * allocation.  The caller's paPages array must have room for one entry per
 * page of the allocation (size of the memory object >> PAGE_SHIFT).
 *
 * @returns IPRT status code.
 * @param   pSession    The session to which the memory was allocated.
 * @param   uPtr        The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
 * @param   paPages     Where to store the physical addresses.
 */
SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
{
    PSUPDRVBUNDLE pBundle;
    LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));

    /*
     * Validate input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrReturn(paPages, VERR_INVALID_POINTER);
    AssertReturn(uPtr, VERR_INVALID_PARAMETER);

    /*
     * Search for the address.
     *
     * The session spinlock is held across the whole walk; the paPages output
     * is filled in while still holding it, before the early return.
     */
    RTSpinlockAcquire(pSession->Spinlock);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                /* Only MEMREF_TYPE_MEM records qualify; match on either the
                   ring-0 address or (when mapped) the ring-3 address. */
                if (   pBundle->aMem[i].eType == MEMREF_TYPE_MEM
                    && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    && (   (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
                        || (   pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                            && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
                       )
                   )
                {
                    const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
                    size_t iPage;
                    for (iPage = 0; iPage < cPages; iPage++)
                    {
                        paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
                        paPages[iPage].uReserved = 0;
                    }
                    RTSpinlockRelease(pSession->Spinlock);
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock);
    Log(("Failed to find %p!!!\n", (void *)uPtr));
    return VERR_INVALID_PARAMETER;
}
3622
3623
/**
 * Free memory allocated by SUPR0MemAlloc().
 *
 * Releases the session's MEMREF_TYPE_MEM record for the address via
 * supdrvMemRelease.
 *
 * @returns IPRT status code.
 * @param   pSession    The session owning the allocation.
 * @param   uPtr        The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
 */
SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
{
    LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
}
3637
3638
3639/**
3640 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
3641 *
3642 * The memory is fixed and it's possible to query the physical addresses using
3643 * SUPR0MemGetPhys().
3644 *
3645 * @returns IPRT status code.
3646 * @param pSession The session to associated the allocation with.
3647 * @param cPages The number of pages to allocate.
3648 * @param fFlags Flags, reserved for the future. Must be zero.
3649 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3650 * NULL if no ring-3 mapping.
3651 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3652 * NULL if no ring-0 mapping.
3653 * @param paPages Where to store the addresses of the pages. Optional.
3654 */
3655SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
3656{
3657 int rc;
3658 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3659 LogFlow(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
3660
3661 /*
3662 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3663 */
3664 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3665 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
3666 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3667 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
3668 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3669 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
3670 {
3671 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than %uMB (VBOX_MAX_ALLOC_PAGE_COUNT pages).\n", cPages, VBOX_MAX_ALLOC_PAGE_COUNT * (_1M / _4K)));
3672 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3673 }
3674
3675 /*
3676 * Let IPRT do the work.
3677 */
3678 if (ppvR0)
3679 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
3680 else
3681 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
3682 if (RT_SUCCESS(rc))
3683 {
3684 int rc2;
3685 if (ppvR3)
3686 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3687 else
3688 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
3689 if (RT_SUCCESS(rc))
3690 {
3691 Mem.eType = MEMREF_TYPE_PAGE;
3692 rc = supdrvMemAdd(&Mem, pSession);
3693 if (!rc)
3694 {
3695 if (ppvR3)
3696 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3697 if (ppvR0)
3698 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3699 if (paPages)
3700 {
3701 uint32_t iPage = cPages;
3702 while (iPage-- > 0)
3703 {
3704 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
3705 Assert(paPages[iPage] != NIL_RTHCPHYS);
3706 }
3707 }
3708 return VINF_SUCCESS;
3709 }
3710
3711 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3712 AssertRC(rc2);
3713 }
3714
3715 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3716 AssertRC(rc2);
3717 }
3718 return rc;
3719}
3720
3721
/**
 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
 * space.
 *
 * Locked memory (SUPR0LockMem / MEMREF_TYPE_LOCKED) without an existing
 * ring-3 map object is also accepted, in addition to MEMREF_TYPE_PAGE
 * allocations.
 *
 * @returns IPRT status code.
 * @param   pSession    The session to associated the allocation with.
 * @param   pvR3        The ring-3 address returned by SUPR0PageAllocEx.
 * @param   offSub      Where to start mapping. Must be page aligned.
 * @param   cbSub       How much to map. Must be page aligned.
 * @param   fFlags      Flags, MBZ.
 * @param   ppvR0       Where to return the address of the ring-0 mapping on
 *                      success.
 */
SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
                                  uint32_t fFlags, PRTR0PTR ppvR0)
{
    int             rc;
    PSUPDRVBUNDLE   pBundle;
    RTR0MEMOBJ      hMemObj = NIL_RTR0MEMOBJ;
    LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));

    /*
     * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub, VERR_INVALID_PARAMETER);

    /*
     * Find the memory object.
     *
     * NOTE(review): the handle is copied out and used after the spinlock is
     * dropped; this assumes the record isn't freed concurrently by another
     * thread of the same session - confirm against the session life cycle.
     */
    RTSpinlockAcquire(pSession->Spinlock);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (   (   pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
                        && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                        && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                        && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
                    || (   pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
                        && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                        && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
                        && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
                {
                    hMemObj = pBundle->aMem[i].MemObj;
                    break;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock);

    rc = VERR_INVALID_PARAMETER;
    if (hMemObj != NIL_RTR0MEMOBJ)
    {
        /*
         * Do some further input validations before calling IPRT.
         * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
         */
        size_t cbMemObj = RTR0MemObjSize(hMemObj);
        /* All three comparisons are needed: the first two bound each operand
           so the sum check can't be fooled. */
        if (    offSub < cbMemObj
            &&  cbSub <= cbMemObj
            &&  offSub + cbSub <= cbMemObj)
        {
            RTR0MEMOBJ hMapObj;
            rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
                                       RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
            if (RT_SUCCESS(rc))
                *ppvR0 = RTR0MemObjAddress(hMapObj);
        }
        else
            SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);

    }
    return rc;
}
3805
3806
3807/**
3808 * Changes the page level protection of one or more pages previously allocated
3809 * by SUPR0PageAllocEx.
3810 *
3811 * @returns IPRT status code.
3812 * @param pSession The session to associated the allocation with.
3813 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3814 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
3815 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
3816 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
3817 * @param offSub Where to start changing. Must be page aligned.
3818 * @param cbSub How much to change. Must be page aligned.
3819 * @param fProt The new page level protection, see RTMEM_PROT_*.
3820 */
3821SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
3822{
3823 int rc;
3824 PSUPDRVBUNDLE pBundle;
3825 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
3826 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
3827 LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt-%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
3828
3829 /*
3830 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3831 */
3832 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3833 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
3834 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3835 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3836 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3837
3838 /*
3839 * Find the memory object.
3840 */
3841 RTSpinlockAcquire(pSession->Spinlock);
3842 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3843 {
3844 if (pBundle->cUsed > 0)
3845 {
3846 unsigned i;
3847 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3848 {
3849 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3850 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3851 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3852 || pvR3 == NIL_RTR3PTR)
3853 && ( pvR0 == NIL_RTR0PTR
3854 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
3855 && ( pvR3 == NIL_RTR3PTR
3856 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
3857 {
3858 if (pvR0 != NIL_RTR0PTR)
3859 hMemObjR0 = pBundle->aMem[i].MemObj;
3860 if (pvR3 != NIL_RTR3PTR)
3861 hMemObjR3 = pBundle->aMem[i].MapObjR3;
3862 break;
3863 }
3864 }
3865 }
3866 }
3867 RTSpinlockRelease(pSession->Spinlock);
3868
3869 rc = VERR_INVALID_PARAMETER;
3870 if ( hMemObjR0 != NIL_RTR0MEMOBJ
3871 || hMemObjR3 != NIL_RTR0MEMOBJ)
3872 {
3873 /*
3874 * Do some further input validations before calling IPRT.
3875 */
3876 size_t cbMemObj = hMemObjR0 != NIL_RTR0PTR ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
3877 if ( offSub < cbMemObj
3878 && cbSub <= cbMemObj
3879 && offSub + cbSub <= cbMemObj)
3880 {
3881 rc = VINF_SUCCESS;
3882 if (hMemObjR3 != NIL_RTR0PTR)
3883 rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
3884 if (hMemObjR0 != NIL_RTR0PTR && RT_SUCCESS(rc))
3885 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
3886 }
3887 else
3888 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3889
3890 }
3891 return rc;
3892
3893}
3894
3895
/**
 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
 *
 * Releases the session's MEMREF_TYPE_PAGE record for the address via
 * supdrvMemRelease.
 *
 * @returns IPRT status code.
 * @param   pSession    The session owning the allocation.
 * @param   pvR3        The Ring-3 address returned by SUPR0PageAlloc() or
 *                      SUPR0PageAllocEx().
 */
SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
{
    LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
}
3910
3911
3912/**
3913 * Reports a bad context, currenctly that means EFLAGS.AC is 0 instead of 1.
3914 *
3915 * @param pDevExt The device extension.
3916 * @param pszFile The source file where the caller detected the bad
3917 * context.
3918 * @param uLine The line number in @a pszFile.
3919 * @param pszExtra Optional additional message to give further hints.
3920 */
3921void VBOXCALL supdrvBadContext(PSUPDRVDEVEXT pDevExt, const char *pszFile, uint32_t uLine, const char *pszExtra)
3922{
3923 uint32_t cCalls;
3924
3925 /*
3926 * Shorten the filename before displaying the message.
3927 */
3928 for (;;)
3929 {
3930 const char *pszTmp = strchr(pszFile, '/');
3931 if (!pszTmp)
3932 pszTmp = strchr(pszFile, '\\');
3933 if (!pszTmp)
3934 break;
3935 pszFile = pszTmp + 1;
3936 }
3937 if (RT_VALID_PTR(pszExtra) && *pszExtra)
3938 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s: %s\n", uLine, pszFile, pszExtra);
3939 else
3940 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s!\n", uLine, pszFile);
3941
3942 /*
3943 * Record the incident so that we stand a chance of blocking I/O controls
3944 * before panicing the system.
3945 */
3946 cCalls = ASMAtomicIncU32(&pDevExt->cBadContextCalls);
3947 if (cCalls > UINT32_MAX - _1K)
3948 ASMAtomicWriteU32(&pDevExt->cBadContextCalls, UINT32_MAX - _1K);
3949}
3950
3951
/**
 * Reports a bad context, currently that means EFLAGS.AC is 0 instead of 1.
 *
 * Session-validating wrapper around supdrvBadContext().
 *
 * @param   pSession    The session of the caller.
 * @param   pszFile     The source file where the caller detected the bad
 *                      context.
 * @param   uLine       The line number in @a pszFile.
 * @param   pszExtra    Optional additional message to give further hints.
 */
SUPR0DECL(void) SUPR0BadContext(PSUPDRVSESSION pSession, const char *pszFile, uint32_t uLine, const char *pszExtra)
{
    PSUPDRVDEVEXT pDevExt;

    AssertReturnVoid(SUP_IS_SESSION_VALID(pSession));
    pDevExt = pSession->pDevExt;

    supdrvBadContext(pDevExt, pszFile, uLine, pszExtra);
}
3970
3971
/**
 * Gets the paging mode of the current CPU.
 *
 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
 */
SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
{
    SUPPAGINGMODE enmMode;

    /* Paging must be enabled in protected mode, otherwise there is no mode to report. */
    RTR0UINTREG cr0 = ASMGetCR0();
    if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
        enmMode = SUPPAGINGMODE_INVALID;
    else
    {
        RTR0UINTREG cr4 = ASMGetCR4();
        /* fNXEPlusLMA: bit 0 = EFER.NXE active, bit 1 = EFER.LMA (long mode active).
           Only probed when PAE is on, since NX/long mode require PAE. */
        uint32_t fNXEPlusLMA = 0;
        if (cr4 & X86_CR4_PAE)
        {
            uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
            if (fExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
            {
                uint64_t efer = ASMRdMsr(MSR_K6_EFER);
                if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)        && (efer & MSR_K6_EFER_NXE))
                    fNXEPlusLMA |= RT_BIT(0);
                if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
                    fNXEPlusLMA |= RT_BIT(1);
            }
        }

        /* Combine CR4.PAE/PGE with the NXE/LMA bits into a single switch value. */
        switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
        {
            case 0:
                enmMode = SUPPAGINGMODE_32_BIT;
                break;

            case X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
                break;

            case X86_CR4_PAE:
                enmMode = SUPPAGINGMODE_PAE;
                break;

            case X86_CR4_PAE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_PAE_NX;
                break;

            case X86_CR4_PAE | X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_PAE_GLOBAL;
                break;

            case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
                /* NOTE(review): this NX variant maps to the same mode as the
                   non-NX case above rather than SUPPAGINGMODE_PAE_GLOBAL_NX,
                   unlike the AMD64 cases below - confirm this is intended. */
                enmMode = SUPPAGINGMODE_PAE_GLOBAL;
                break;

            case RT_BIT(1) | X86_CR4_PAE:
                enmMode = SUPPAGINGMODE_AMD64;
                break;

            case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_AMD64_NX;
                break;

            case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
                break;

            case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
                break;

            default:
                AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
                enmMode = SUPPAGINGMODE_INVALID;
                break;
        }
    }
    return enmMode;
}
4051
4052
4053/**
4054 * Change CR4 and take care of the kernel CR4 shadow if applicable.
4055 *
4056 * CR4 shadow handling is required for Linux >= 4.0. Calling this function
4057 * instead of ASMSetCR4() is only necessary for semi-permanent CR4 changes
4058 * for code with interrupts enabled.
4059 *
4060 * @returns the old CR4 value.
4061 *
4062 * @param fOrMask bits to be set in CR4.
4063 * @param fAndMask bits to be cleard in CR4.
4064 *
4065 * @remarks Must be called with preemption/interrupts disabled.
4066 */
4067SUPR0DECL(RTCCUINTREG) SUPR0ChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
4068{
4069#ifdef RT_OS_LINUX
4070 return supdrvOSChangeCR4(fOrMask, fAndMask);
4071#else
4072 RTCCUINTREG uOld = ASMGetCR4();
4073 RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
4074 if (uNew != uOld)
4075 ASMSetCR4(uNew);
4076 return uOld;
4077#endif
4078}
4079
4080
/**
 * Enables or disabled hardware virtualization extensions using native OS APIs.
 *
 * Only implemented on Darwin (supdrvOSEnableVTx); all other hosts get
 * VERR_NOT_SUPPORTED.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NOT_SUPPORTED if not supported by the native OS.
 *
 * @param   fEnable         Whether to enable or disable.
 */
SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
{
#ifdef RT_OS_DARWIN
    return supdrvOSEnableVTx(fEnable);
#else
    RT_NOREF1(fEnable);
    return VERR_NOT_SUPPORTED;
#endif
}
4099
4100
/**
 * Suspends hardware virtualization extensions using the native OS API.
 *
 * This is called prior to entering raw-mode context.  Only implemented on
 * Darwin; all other hosts report not-suspended.
 *
 * @returns @c true if suspended, @c false if not.
 */
SUPR0DECL(bool) SUPR0SuspendVTxOnCpu(void)
{
#ifdef RT_OS_DARWIN
    return supdrvOSSuspendVTxOnCpu();
#else
    return false;
#endif
}
4116
4117
/**
 * Resumes hardware virtualization extensions using the native OS API.
 *
 * This is called after entering raw-mode context.  Only implemented on
 * Darwin; on other hosts @a fSuspended must be false (see
 * SUPR0SuspendVTxOnCpu which always returns false there).
 *
 * @param   fSuspended      The return value of SUPR0SuspendVTxOnCpu.
 */
SUPR0DECL(void) SUPR0ResumeVTxOnCpu(bool fSuspended)
{
#ifdef RT_OS_DARWIN
    supdrvOSResumeVTxOnCpu(fSuspended);
#else
    RT_NOREF1(fSuspended);
    Assert(!fSuspended);
#endif
}
4134
4135
/**
 * Gets a writable (read/write) mapping of the current CPU's GDT.
 *
 * Only implemented on Linux (supdrvOSGetCurrentGdtRw); all other hosts get
 * VERR_NOT_IMPLEMENTED.
 *
 * @returns VBox status code.
 * @param   pGdtRw      Where to return the writable GDT address.
 */
SUPR0DECL(int) SUPR0GetCurrentGdtRw(RTHCUINTPTR *pGdtRw)
{
#ifdef RT_OS_LINUX
    return supdrvOSGetCurrentGdtRw(pGdtRw);
#else
    NOREF(pGdtRw);
    return VERR_NOT_IMPLEMENTED;
#endif
}
4145
4146
/**
 * Gets AMD-V and VT-x support for the calling CPU.
 *
 * @returns VBox status code.
 * @retval  VERR_VMX_NO_VMX on an Intel-compatible CPU without usable VT-x.
 * @retval  VERR_SVM_NO_SVM on an AMD-compatible CPU without usable AMD-V.
 * @retval  VERR_UNSUPPORTED_CPU for unknown vendors or pre-CPUID CPUs.
 * @param   pfCaps      Where to store whether VT-x (SUPVTCAPS_VT_X) or AMD-V
 *                      (SUPVTCAPS_AMD_V) is supported.
 */
SUPR0DECL(int) SUPR0GetVTSupport(uint32_t *pfCaps)
{
    Assert(pfCaps);
    *pfCaps = 0;

    /* Check if the CPU even supports CPUID (extremely ancient CPUs). */
    if (ASMHasCpuId())
    {
        /* Check the range of standard CPUID leafs. */
        uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
        ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
        if (ASMIsValidStdRange(uMaxLeaf))
        {
            /* Query the standard CPUID leaf. */
            uint32_t fFeatEcx, fFeatEdx, uDummy;
            ASMCpuId(1, &uDummy, &uDummy, &fFeatEcx, &fFeatEdx);

            /* Check if the vendor is Intel (or compatible). */
            if (   ASMIsIntelCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
                || ASMIsViaCentaurCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
                || ASMIsShanghaiCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
            {
                /* Check VT-x support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
                if (   (fFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
                    && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
                    && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
                {
                    *pfCaps = SUPVTCAPS_VT_X;
                    return VINF_SUCCESS;
                }
                return VERR_VMX_NO_VMX;
            }

            /* Check if the vendor is AMD (or compatible). */
            if (   ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
                || ASMIsHygonCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
            {
                uint32_t fExtFeatEcx, uExtMaxId;
                ASMCpuId(0x80000000, &uExtMaxId, &uDummy, &uDummy, &uDummy);
                ASMCpuId(0x80000001, &uDummy, &uDummy, &fExtFeatEcx, &uDummy);

                /* Check AMD-V support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function.
                   Note that fFeatEdx here still holds the standard leaf 1 EDX features queried above. */
                if (   ASMIsValidExtRange(uExtMaxId)
                    && uExtMaxId >= 0x8000000a
                    && (fExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
                    && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
                    && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
                {
                    *pfCaps = SUPVTCAPS_AMD_V;
                    return VINF_SUCCESS;
                }
                return VERR_SVM_NO_SVM;
            }
        }
    }
    return VERR_UNSUPPORTED_CPU;
}
4211
4212
/**
 * Checks if Intel VT-x feature is usable on this CPU.
 *
 * Reads MSR_IA32_FEATURE_CONTROL and, when the BIOS left it unlocked,
 * attempts to enable and lock VMX operation itself.
 *
 * @returns VBox status code.
 * @param   pfIsSmxModeAmbiguous  Where to return whether the SMX mode causes
 *                                ambiguity that makes us unsure whether we
 *                                really can use VT-x or not.
 *
 * @remarks Must be called with preemption disabled.
 *          The caller is also expected to check that the CPU is an Intel (or
 *          VIA/Shanghai) CPU -and- that it supports VT-x.  Otherwise, this
 *          function might throw a \#GP fault as it tries to read/write MSRs
 *          that may not be present!
 */
SUPR0DECL(int) SUPR0GetVmxUsability(bool *pfIsSmxModeAmbiguous)
{
    uint64_t fFeatMsr;
    bool     fMaybeSmxMode;
    bool     fMsrLocked;
    bool     fSmxVmxAllowed;
    bool     fVmxAllowed;
    bool     fIsSmxModeAmbiguous;
    int      rc;

    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    /* Snapshot the feature control MSR and the CR4.SMXE state up front. */
    fFeatMsr            = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
    fMaybeSmxMode       = RT_BOOL(ASMGetCR4() & X86_CR4_SMXE);
    fMsrLocked          = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
    fSmxVmxAllowed      = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
    fVmxAllowed         = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
    fIsSmxModeAmbiguous = false;
    rc                  = VERR_INTERNAL_ERROR_5;

    /* Check if the LOCK bit is set but excludes the required VMXON bit. */
    if (fMsrLocked)
    {
        if (fVmxAllowed && fSmxVmxAllowed)
            rc = VINF_SUCCESS;
        else if (!fVmxAllowed && !fSmxVmxAllowed)
            rc = VERR_VMX_MSR_ALL_VMX_DISABLED;
        else if (!fMaybeSmxMode)
        {
            if (fVmxAllowed)
                rc = VINF_SUCCESS;
            else
                rc = VERR_VMX_MSR_VMX_DISABLED;
        }
        else
        {
            /*
             * CR4.SMXE is set but this doesn't mean the CPU is necessarily in SMX mode. We shall assume
             * that it is -not- and that it is a stupid BIOS/OS setting CR4.SMXE for no good reason.
             * See @bugref{6873}.
             */
            Assert(fMaybeSmxMode == true);
            fIsSmxModeAmbiguous = true;
            rc = VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * MSR is not yet locked; we can change it ourselves here. Once the lock bit is set,
         * this MSR can no longer be modified.
         *
         * Set both the VMX and SMX_VMX bits (if supported) as we can't determine SMX mode
         * accurately. See @bugref{6873}.
         *
         * We need to check for SMX hardware support here, before writing the MSR as
         * otherwise we will #GP fault on CPUs that do not support it. Callers do not check
         * for it.
         */
        uint32_t fFeaturesECX, uDummy;
#ifdef VBOX_STRICT
        /* Callers should have verified these at some point. */
        uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
        ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
        Assert(ASMIsValidStdRange(uMaxId));
        Assert(   ASMIsIntelCpuEx(     uVendorEBX, uVendorECX, uVendorEDX)
               || ASMIsViaCentaurCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
               || ASMIsShanghaiCpuEx(  uVendorEBX, uVendorECX, uVendorEDX));
#endif
        ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
        bool fSmxVmxHwSupport = false;
        if (   (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
            && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
            fSmxVmxHwSupport = true;

        fFeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
                  | MSR_IA32_FEATURE_CONTROL_VMXON;
        if (fSmxVmxHwSupport)
            fFeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;

        /*
         * Commit.
         */
        ASMWrMsr(MSR_IA32_FEATURE_CONTROL, fFeatMsr);

        /*
         * Verify.
         */
        fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
        fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
        if (fMsrLocked)
        {
            fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
            fVmxAllowed    = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
            if (   fVmxAllowed
                && (   !fSmxVmxHwSupport
                    || fSmxVmxAllowed))
                rc = VINF_SUCCESS;
            else
                rc = !fSmxVmxHwSupport ? VERR_VMX_MSR_VMX_ENABLE_FAILED : VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED;
        }
        else
            rc = VERR_VMX_MSR_LOCKING_FAILED;
    }

    if (pfIsSmxModeAmbiguous)
        *pfIsSmxModeAmbiguous = fIsSmxModeAmbiguous;

    return rc;
}
4337
4338
4339/**
4340 * Checks if AMD-V SVM feature is usable on this CPU.
4341 *
4342 * @returns VBox status code.
4343 * @param fInitSvm If usable, try to initialize SVM on this CPU.
4344 *
4345 * @remarks Must be called with preemption disabled.
4346 */
4347SUPR0DECL(int) SUPR0GetSvmUsability(bool fInitSvm)
4348{
4349 int rc;
4350 uint64_t fVmCr;
4351 uint64_t fEfer;
4352
4353 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4354 fVmCr = ASMRdMsr(MSR_K8_VM_CR);
4355 if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
4356 {
4357 rc = VINF_SUCCESS;
4358 if (fInitSvm)
4359 {
4360 /* Turn on SVM in the EFER MSR. */
4361 fEfer = ASMRdMsr(MSR_K6_EFER);
4362 if (fEfer & MSR_K6_EFER_SVME)
4363 rc = VERR_SVM_IN_USE;
4364 else
4365 {
4366 ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
4367
4368 /* Paranoia. */
4369 fEfer = ASMRdMsr(MSR_K6_EFER);
4370 if (fEfer & MSR_K6_EFER_SVME)
4371 {
4372 /* Restore previous value. */
4373 ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
4374 }
4375 else
4376 rc = VERR_SVM_ILLEGAL_EFER_MSR;
4377 }
4378 }
4379 }
4380 else
4381 rc = VERR_SVM_DISABLED;
4382 return rc;
4383}
4384
4385
4386/**
4387 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4388 *
4389 * @returns VBox status code.
4390 * @retval VERR_VMX_NO_VMX
4391 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4392 * @retval VERR_VMX_MSR_VMX_DISABLED
4393 * @retval VERR_VMX_MSR_LOCKING_FAILED
4394 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4395 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4396 * @retval VERR_SVM_NO_SVM
4397 * @retval VERR_SVM_DISABLED
4398 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4399 * (centaur)/Shanghai CPU.
4400 *
4401 * @param pfCaps Where to store the capabilities.
4402 */
4403int VBOXCALL supdrvQueryVTCapsInternal(uint32_t *pfCaps)
4404{
4405 int rc = VERR_UNSUPPORTED_CPU;
4406 bool fIsSmxModeAmbiguous = false;
4407 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4408
4409 /*
4410 * Input validation.
4411 */
4412 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4413 *pfCaps = 0;
4414
4415 /* We may modify MSRs and re-read them, disable preemption so we make sure we don't migrate CPUs. */
4416 RTThreadPreemptDisable(&PreemptState);
4417
4418 /* Check if VT-x/AMD-V is supported. */
4419 rc = SUPR0GetVTSupport(pfCaps);
4420 if (RT_SUCCESS(rc))
4421 {
4422 /* Check if VT-x is supported. */
4423 if (*pfCaps & SUPVTCAPS_VT_X)
4424 {
4425 /* Check if VT-x is usable. */
4426 rc = SUPR0GetVmxUsability(&fIsSmxModeAmbiguous);
4427 if (RT_SUCCESS(rc))
4428 {
4429 /* Query some basic VT-x capabilities (mainly required by our GUI). */
4430 VMXCTLSMSR vtCaps;
4431 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4432 if (vtCaps.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4433 {
4434 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4435 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_EPT)
4436 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4437 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
4438 *pfCaps |= SUPVTCAPS_VTX_UNRESTRICTED_GUEST;
4439 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4440 *pfCaps |= SUPVTCAPS_VTX_VMCS_SHADOWING;
4441 }
4442 }
4443 }
4444 /* Check if AMD-V is supported. */
4445 else if (*pfCaps & SUPVTCAPS_AMD_V)
4446 {
4447 /* Check is SVM is usable. */
4448 rc = SUPR0GetSvmUsability(false /* fInitSvm */);
4449 if (RT_SUCCESS(rc))
4450 {
4451 /* Query some basic AMD-V capabilities (mainly required by our GUI). */
4452 uint32_t uDummy, fSvmFeatures;
4453 ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
4454 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
4455 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4456 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD)
4457 *pfCaps |= SUPVTCAPS_AMDV_VIRT_VMSAVE_VMLOAD;
4458 }
4459 }
4460 }
4461
4462 /* Restore preemption. */
4463 RTThreadPreemptRestore(&PreemptState);
4464
4465 /* After restoring preemption, if we may be in SMX mode, print a warning as it's difficult to debug such problems. */
4466 if (fIsSmxModeAmbiguous)
4467 SUPR0Printf(("WARNING! CR4 hints SMX mode but your CPU is too secretive. Proceeding anyway... We wish you good luck!\n"));
4468
4469 return rc;
4470}
4471
4472
4473/**
4474 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4475 *
4476 * @returns VBox status code.
4477 * @retval VERR_VMX_NO_VMX
4478 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4479 * @retval VERR_VMX_MSR_VMX_DISABLED
4480 * @retval VERR_VMX_MSR_LOCKING_FAILED
4481 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4482 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4483 * @retval VERR_SVM_NO_SVM
4484 * @retval VERR_SVM_DISABLED
4485 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4486 * (centaur)/Shanghai CPU.
4487 *
4488 * @param pSession The session handle.
4489 * @param pfCaps Where to store the capabilities.
4490 */
4491SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
4492{
4493 /*
4494 * Input validation.
4495 */
4496 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4497 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4498
4499 /*
4500 * Call common worker.
4501 */
4502 return supdrvQueryVTCapsInternal(pfCaps);
4503}
4504
4505
4506/**
4507 * Queries the CPU microcode revision.
4508 *
4509 * @returns VBox status code.
4510 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4511 * readable microcode rev.
4512 *
4513 * @param puRevision Where to store the microcode revision.
4514 */
4515static int VBOXCALL supdrvQueryUcodeRev(uint32_t *puRevision)
4516{
4517 int rc = VERR_UNSUPPORTED_CPU;
4518 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4519
4520 /*
4521 * Input validation.
4522 */
4523 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4524
4525 *puRevision = 0;
4526
4527 /* Disable preemption so we make sure we don't migrate CPUs, just in case. */
4528 /* NB: We assume that there aren't mismatched microcode revs in the system. */
4529 RTThreadPreemptDisable(&PreemptState);
4530
4531 if (ASMHasCpuId())
4532 {
4533 uint32_t uDummy, uTFMSEAX;
4534 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4535
4536 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4537 ASMCpuId(1, &uTFMSEAX, &uDummy, &uDummy, &uDummy);
4538
4539 if (ASMIsValidStdRange(uMaxId))
4540 {
4541 uint64_t uRevMsr;
4542 if (ASMIsIntelCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4543 {
4544 /* Architectural MSR available on Pentium Pro and later. */
4545 if (ASMGetCpuFamily(uTFMSEAX) >= 6)
4546 {
4547 /* Revision is in the high dword. */
4548 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
4549 *puRevision = RT_HIDWORD(uRevMsr);
4550 rc = VINF_SUCCESS;
4551 }
4552 }
4553 else if ( ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4554 || ASMIsHygonCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4555 {
4556 /* Not well documented, but at least all AMD64 CPUs support this. */
4557 if (ASMGetCpuFamily(uTFMSEAX) >= 15)
4558 {
4559 /* Revision is in the low dword. */
4560 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID); /* Same MSR as Intel. */
4561 *puRevision = RT_LODWORD(uRevMsr);
4562 rc = VINF_SUCCESS;
4563 }
4564 }
4565 }
4566 }
4567
4568 RTThreadPreemptRestore(&PreemptState);
4569
4570 return rc;
4571}
4572
4573/**
4574 * Queries the CPU microcode revision.
4575 *
4576 * @returns VBox status code.
4577 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4578 * readable microcode rev.
4579 *
4580 * @param pSession The session handle.
4581 * @param puRevision Where to store the microcode revision.
4582 */
4583SUPR0DECL(int) SUPR0QueryUcodeRev(PSUPDRVSESSION pSession, uint32_t *puRevision)
4584{
4585 /*
4586 * Input validation.
4587 */
4588 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4589 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4590
4591 /*
4592 * Call common worker.
4593 */
4594 return supdrvQueryUcodeRev(puRevision);
4595}
4596
4597
4598/**
4599 * Gets hardware-virtualization MSRs of the calling CPU.
4600 *
4601 * @returns VBox status code.
4602 * @param pMsrs Where to store the hardware-virtualization MSRs.
4603 * @param fCaps Hardware virtualization capabilities (SUPVTCAPS_XXX). Pass 0
4604 * to explicitly check for the presence of VT-x/AMD-V before
4605 * querying MSRs.
4606 * @param fForce Force querying of MSRs from the hardware.
4607 */
4608SUPR0DECL(int) SUPR0GetHwvirtMsrs(PSUPHWVIRTMSRS pMsrs, uint32_t fCaps, bool fForce)
4609{
4610 NOREF(fForce);
4611
4612 int rc;
4613 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4614
4615 /*
4616 * Input validation.
4617 */
4618 AssertPtrReturn(pMsrs, VERR_INVALID_POINTER);
4619
4620 /*
4621 * Disable preemption so we make sure we don't migrate CPUs and because
4622 * we access global data.
4623 */
4624 RTThreadPreemptDisable(&PreemptState);
4625
4626 /*
4627 * Query the MSRs from the hardware.
4628 */
4629 /** @todo Cache MSR values so future accesses can avoid querying the hardware as
4630 * it may be expensive (esp. in nested virtualization scenarios). Do this
4631 * with proper locking and race safety. */
4632 SUPHWVIRTMSRS Msrs;
4633 RT_ZERO(Msrs);
4634
4635 /* If the caller claims VT-x/AMD-V is supported, don't need to recheck it. */
4636 if (!(fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V)))
4637 rc = SUPR0GetVTSupport(&fCaps);
4638 else
4639 rc = VINF_SUCCESS;
4640 if (RT_SUCCESS(rc))
4641 {
4642 if (fCaps & SUPVTCAPS_VT_X)
4643 {
4644 Msrs.u.vmx.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4645 Msrs.u.vmx.u64Basic = ASMRdMsr(MSR_IA32_VMX_BASIC);
4646 Msrs.u.vmx.u64PinCtls = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
4647 Msrs.u.vmx.u64ProcCtls = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4648 Msrs.u.vmx.u64ExitCtls = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
4649 Msrs.u.vmx.u64EntryCtls = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
4650 Msrs.u.vmx.u64Misc = ASMRdMsr(MSR_IA32_VMX_MISC);
4651 Msrs.u.vmx.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
4652 Msrs.u.vmx.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
4653 Msrs.u.vmx.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
4654 Msrs.u.vmx.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
4655 Msrs.u.vmx.u64VmcsEnum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
4656
4657 if (RT_BF_GET(Msrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
4658 {
4659 Msrs.u.vmx.u64TruePinCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
4660 Msrs.u.vmx.u64TrueProcCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
4661 Msrs.u.vmx.u64TrueEntryCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
4662 Msrs.u.vmx.u64TrueExitCtls = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
4663 }
4664
4665 uint32_t const fProcCtlsAllowed1 = RT_HI_U32(Msrs.u.vmx.u64ProcCtls);
4666 if (fProcCtlsAllowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4667 {
4668 Msrs.u.vmx.u64ProcCtls2 = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4669
4670 uint32_t const fProcCtls2Allowed1 = RT_HI_U32(Msrs.u.vmx.u64ProcCtls2);
4671 if (fProcCtls2Allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
4672 Msrs.u.vmx.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
4673
4674 if (fProcCtls2Allowed1 & VMX_PROC_CTLS2_VMFUNC)
4675 Msrs.u.vmx.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
4676 }
4677 }
4678 else if (fCaps & SUPVTCAPS_AMD_V)
4679 {
4680 Msrs.u.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
4681 Msrs.u.svm.u64MsrSmmAddr = ASMRdMsr(MSR_K7_SMM_ADDR);
4682 Msrs.u.svm.u64MsrSmmMask = ASMRdMsr(MSR_K7_SMM_MASK);
4683 }
4684 else
4685 {
4686 RTThreadPreemptRestore(&PreemptState);
4687 AssertMsgFailedReturn(("SUPR0GetVTSupport returns success but neither VT-x nor AMD-V reported!\n"),
4688 VERR_INTERNAL_ERROR_2);
4689 }
4690
4691 /*
4692 * Copy the MSRs out.
4693 */
4694 memcpy(pMsrs, &Msrs, sizeof(*pMsrs));
4695 }
4696
4697 RTThreadPreemptRestore(&PreemptState);
4698
4699 return rc;
4700}
4701
4702
4703/**
4704 * Register a component factory with the support driver.
4705 *
4706 * This is currently restricted to kernel sessions only.
4707 *
4708 * @returns VBox status code.
4709 * @retval VINF_SUCCESS on success.
4710 * @retval VERR_NO_MEMORY if we're out of memory.
4711 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
4712 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4713 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4714 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4715 *
4716 * @param pSession The SUPDRV session (must be a ring-0 session).
4717 * @param pFactory Pointer to the component factory registration structure.
4718 *
4719 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
4720 */
4721SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4722{
4723 PSUPDRVFACTORYREG pNewReg;
4724 const char *psz;
4725 int rc;
4726
4727 /*
4728 * Validate parameters.
4729 */
4730 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4731 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4732 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4733 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
4734 psz = RTStrEnd(pFactory->szName, sizeof(pFactory->szName));
4735 AssertReturn(psz, VERR_INVALID_PARAMETER);
4736
4737 /*
4738 * Allocate and initialize a new registration structure.
4739 */
4740 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
4741 if (pNewReg)
4742 {
4743 pNewReg->pNext = NULL;
4744 pNewReg->pFactory = pFactory;
4745 pNewReg->pSession = pSession;
4746 pNewReg->cchName = psz - &pFactory->szName[0];
4747
4748 /*
4749 * Add it to the tail of the list after checking for prior registration.
4750 */
4751 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4752 if (RT_SUCCESS(rc))
4753 {
4754 PSUPDRVFACTORYREG pPrev = NULL;
4755 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4756 while (pCur && pCur->pFactory != pFactory)
4757 {
4758 pPrev = pCur;
4759 pCur = pCur->pNext;
4760 }
4761 if (!pCur)
4762 {
4763 if (pPrev)
4764 pPrev->pNext = pNewReg;
4765 else
4766 pSession->pDevExt->pComponentFactoryHead = pNewReg;
4767 rc = VINF_SUCCESS;
4768 }
4769 else
4770 rc = VERR_ALREADY_EXISTS;
4771
4772 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4773 }
4774
4775 if (RT_FAILURE(rc))
4776 RTMemFree(pNewReg);
4777 }
4778 else
4779 rc = VERR_NO_MEMORY;
4780 return rc;
4781}
4782
4783
4784/**
4785 * Deregister a component factory.
4786 *
4787 * @returns VBox status code.
4788 * @retval VINF_SUCCESS on success.
4789 * @retval VERR_NOT_FOUND if the factory wasn't registered.
4790 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4791 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4792 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4793 *
4794 * @param pSession The SUPDRV session (must be a ring-0 session).
4795 * @param pFactory Pointer to the component factory registration structure
4796 * previously passed SUPR0ComponentRegisterFactory().
4797 *
4798 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
4799 */
4800SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4801{
4802 int rc;
4803
4804 /*
4805 * Validate parameters.
4806 */
4807 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4808 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4809 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4810
4811 /*
4812 * Take the lock and look for the registration record.
4813 */
4814 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4815 if (RT_SUCCESS(rc))
4816 {
4817 PSUPDRVFACTORYREG pPrev = NULL;
4818 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4819 while (pCur && pCur->pFactory != pFactory)
4820 {
4821 pPrev = pCur;
4822 pCur = pCur->pNext;
4823 }
4824 if (pCur)
4825 {
4826 if (!pPrev)
4827 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
4828 else
4829 pPrev->pNext = pCur->pNext;
4830
4831 pCur->pNext = NULL;
4832 pCur->pFactory = NULL;
4833 pCur->pSession = NULL;
4834 rc = VINF_SUCCESS;
4835 }
4836 else
4837 rc = VERR_NOT_FOUND;
4838
4839 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4840
4841 RTMemFree(pCur);
4842 }
4843 return rc;
4844}
4845
4846
4847/**
4848 * Queries a component factory.
4849 *
4850 * @returns VBox status code.
4851 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4852 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4853 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
4854 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
4855 *
4856 * @param pSession The SUPDRV session.
4857 * @param pszName The name of the component factory.
4858 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
4859 * @param ppvFactoryIf Where to store the factory interface.
4860 */
4861SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
4862{
4863 const char *pszEnd;
4864 size_t cchName;
4865 int rc;
4866
4867 /*
4868 * Validate parameters.
4869 */
4870 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4871
4872 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
4873 pszEnd = RTStrEnd(pszName, RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
4874 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4875 cchName = pszEnd - pszName;
4876
4877 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
4878 pszEnd = RTStrEnd(pszInterfaceUuid, RTUUID_STR_LENGTH);
4879 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4880
4881 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
4882 *ppvFactoryIf = NULL;
4883
4884 /*
4885 * Take the lock and try all factories by this name.
4886 */
4887 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4888 if (RT_SUCCESS(rc))
4889 {
4890 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4891 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
4892 while (pCur)
4893 {
4894 if ( pCur->cchName == cchName
4895 && !memcmp(pCur->pFactory->szName, pszName, cchName))
4896 {
4897 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
4898 if (pvFactory)
4899 {
4900 *ppvFactoryIf = pvFactory;
4901 rc = VINF_SUCCESS;
4902 break;
4903 }
4904 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
4905 }
4906
4907 /* next */
4908 pCur = pCur->pNext;
4909 }
4910
4911 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4912 }
4913 return rc;
4914}
4915
4916
4917/**
4918 * Adds a memory object to the session.
4919 *
4920 * @returns IPRT status code.
4921 * @param pMem Memory tracking structure containing the
4922 * information to track.
4923 * @param pSession The session.
4924 */
4925static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
4926{
4927 PSUPDRVBUNDLE pBundle;
4928
4929 /*
4930 * Find free entry and record the allocation.
4931 */
4932 RTSpinlockAcquire(pSession->Spinlock);
4933 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4934 {
4935 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
4936 {
4937 unsigned i;
4938 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
4939 {
4940 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
4941 {
4942 pBundle->cUsed++;
4943 pBundle->aMem[i] = *pMem;
4944 RTSpinlockRelease(pSession->Spinlock);
4945 return VINF_SUCCESS;
4946 }
4947 }
4948 AssertFailed(); /* !!this can't be happening!!! */
4949 }
4950 }
4951 RTSpinlockRelease(pSession->Spinlock);
4952
4953 /*
4954 * Need to allocate a new bundle.
4955 * Insert into the last entry in the bundle.
4956 */
4957 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
4958 if (!pBundle)
4959 return VERR_NO_MEMORY;
4960
4961 /* take last entry. */
4962 pBundle->cUsed++;
4963 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
4964
4965 /* insert into list. */
4966 RTSpinlockAcquire(pSession->Spinlock);
4967 pBundle->pNext = pSession->Bundle.pNext;
4968 pSession->Bundle.pNext = pBundle;
4969 RTSpinlockRelease(pSession->Spinlock);
4970
4971 return VINF_SUCCESS;
4972}
4973
4974
4975/**
4976 * Releases a memory object referenced by pointer and type.
4977 *
4978 * @returns IPRT status code.
4979 * @param pSession Session data.
4980 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
4981 * @param eType Memory type.
4982 */
4983static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
4984{
4985 PSUPDRVBUNDLE pBundle;
4986
4987 /*
4988 * Validate input.
4989 */
4990 if (!uPtr)
4991 {
4992 Log(("Illegal address %p\n", (void *)uPtr));
4993 return VERR_INVALID_PARAMETER;
4994 }
4995
4996 /*
4997 * Search for the address.
4998 */
4999 RTSpinlockAcquire(pSession->Spinlock);
5000 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
5001 {
5002 if (pBundle->cUsed > 0)
5003 {
5004 unsigned i;
5005 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
5006 {
5007 if ( pBundle->aMem[i].eType == eType
5008 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
5009 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
5010 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
5011 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
5012 )
5013 {
5014 /* Make a copy of it and release it outside the spinlock. */
5015 SUPDRVMEMREF Mem = pBundle->aMem[i];
5016 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
5017 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
5018 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
5019 RTSpinlockRelease(pSession->Spinlock);
5020
5021 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
5022 {
5023 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
5024 AssertRC(rc); /** @todo figure out how to handle this. */
5025 }
5026 if (Mem.MemObj != NIL_RTR0MEMOBJ)
5027 {
5028 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
5029 AssertRC(rc); /** @todo figure out how to handle this. */
5030 }
5031 return VINF_SUCCESS;
5032 }
5033 }
5034 }
5035 }
5036 RTSpinlockRelease(pSession->Spinlock);
5037 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
5038 return VERR_INVALID_PARAMETER;
5039}
5040
5041
5042/**
5043 * Opens an image. If it's the first time it's opened the call must upload
5044 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
5045 *
5046 * This is the 1st step of the loading.
5047 *
5048 * @returns IPRT status code.
5049 * @param pDevExt Device globals.
5050 * @param pSession Session data.
5051 * @param pReq The open request.
5052 */
5053static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
5054{
5055 int rc;
5056 PSUPDRVLDRIMAGE pImage;
5057 void *pv;
5058 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
5059 SUPDRV_CHECK_SMAP_SETUP();
5060 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5061 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithEverything=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithEverything));
5062
5063 /*
5064 * Check if we got an instance of the image already.
5065 */
5066 supdrvLdrLock(pDevExt);
5067 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5068 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5069 {
5070 if ( pImage->szName[cchName] == '\0'
5071 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
5072 {
5073 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
5074 {
5075 /** @todo check cbImageBits and cbImageWithEverything here, if they differs
5076 * that indicates that the images are different. */
5077 pImage->cUsage++;
5078 pReq->u.Out.pvImageBase = pImage->pvImage;
5079 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
5080 pReq->u.Out.fNativeLoader = pImage->fNative;
5081 supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5082 supdrvLdrUnlock(pDevExt);
5083 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5084 return VINF_SUCCESS;
5085 }
5086 supdrvLdrUnlock(pDevExt);
5087 Log(("supdrvIOCtl_LdrOpen: Too many existing references to '%s'!\n", pReq->u.In.szName));
5088 return VERR_TOO_MANY_REFERENCES;
5089 }
5090 }
5091 /* (not found - add it!) */
5092
5093 /* If the loader interface is locked down, make userland fail early */
5094 if (pDevExt->fLdrLockedDown)
5095 {
5096 supdrvLdrUnlock(pDevExt);
5097 Log(("supdrvIOCtl_LdrOpen: Not adding '%s' to image list, loader interface is locked down!\n", pReq->u.In.szName));
5098 return VERR_PERMISSION_DENIED;
5099 }
5100
5101 /*
5102 * Allocate memory.
5103 */
5104 Assert(cchName < sizeof(pImage->szName));
5105 pv = RTMemAlloc(sizeof(SUPDRVLDRIMAGE));
5106 if (!pv)
5107 {
5108 supdrvLdrUnlock(pDevExt);
5109 Log(("supdrvIOCtl_LdrOpen: RTMemAlloc() failed\n"));
5110 return /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_2;
5111 }
5112 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5113
5114 /*
5115 * Setup and link in the LDR stuff.
5116 */
5117 pImage = (PSUPDRVLDRIMAGE)pv;
5118 pImage->pvImage = NULL;
5119#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5120 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
5121#else
5122 pImage->pvImageAlloc = NULL;
5123#endif
5124 pImage->cbImageWithEverything = pReq->u.In.cbImageWithEverything;
5125 pImage->cbImageBits = pReq->u.In.cbImageBits;
5126 pImage->cSymbols = 0;
5127 pImage->paSymbols = NULL;
5128 pImage->pachStrTab = NULL;
5129 pImage->cbStrTab = 0;
5130 pImage->cSegments = 0;
5131 pImage->paSegments = NULL;
5132 pImage->pfnModuleInit = NULL;
5133 pImage->pfnModuleTerm = NULL;
5134 pImage->pfnServiceReqHandler = NULL;
5135 pImage->uState = SUP_IOCTL_LDR_OPEN;
5136 pImage->cUsage = 1;
5137 pImage->pDevExt = pDevExt;
5138 pImage->pImageImport = NULL;
5139 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC;
5140 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
5141
5142 /*
5143 * Try load it using the native loader, if that isn't supported, fall back
5144 * on the older method.
5145 */
5146 pImage->fNative = true;
5147 rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
5148 if (rc == VERR_NOT_SUPPORTED)
5149 {
5150#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5151 rc = RTR0MemObjAllocPage(&pImage->hMemObjImage, pImage->cbImageBits, true /*fExecutable*/);
5152 if (RT_SUCCESS(rc))
5153 {
5154 pImage->pvImage = RTR0MemObjAddress(pImage->hMemObjImage);
5155 pImage->fNative = false;
5156 }
5157#else
5158 pImage->pvImageAlloc = RTMemExecAlloc(pImage->cbImageBits + 31);
5159 pImage->pvImage = RT_ALIGN_P(pImage->pvImageAlloc, 32);
5160 pImage->fNative = false;
5161 rc = pImage->pvImageAlloc ? VINF_SUCCESS : VERR_NO_EXEC_MEMORY;
5162#endif
5163 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5164 }
5165 if (RT_FAILURE(rc))
5166 {
5167 supdrvLdrUnlock(pDevExt);
5168 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
5169 RTMemFree(pImage);
5170 Log(("supdrvIOCtl_LdrOpen(%s): failed - %Rrc\n", pReq->u.In.szName, rc));
5171 return rc;
5172 }
5173 Assert(VALID_PTR(pImage->pvImage) || RT_FAILURE(rc));
5174
5175 /*
5176 * Link it.
5177 */
5178 pImage->pNext = pDevExt->pLdrImages;
5179 pDevExt->pLdrImages = pImage;
5180
5181 supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5182
5183 pReq->u.Out.pvImageBase = pImage->pvImage;
5184 pReq->u.Out.fNeedsLoading = true;
5185 pReq->u.Out.fNativeLoader = pImage->fNative;
5186 supdrvOSLdrNotifyOpened(pDevExt, pImage, pReq->u.In.szFilename);
5187
5188 supdrvLdrUnlock(pDevExt);
5189 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5190 return VINF_SUCCESS;
5191}
5192
5193
5194/**
5195 * Formats a load error message.
5196 *
5197 * @returns @a rc
5198 * @param rc Return code.
5199 * @param pReq The request.
5200 * @param pszFormat The error message format string.
5201 * @param ... Argument to the format string.
5202 */
5203int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
5204{
5205 va_list va;
5206 va_start(va, pszFormat);
5207 pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
5208 RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
5209 va_end(va);
5210 Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
5211 return rc;
5212}
5213
5214
5215/**
5216 * Worker that validates a pointer to an image entrypoint.
5217 *
5218 * Calls supdrvLdrLoadError on error.
5219 *
5220 * @returns IPRT status code.
5221 * @param pDevExt The device globals.
5222 * @param pImage The loader image.
5223 * @param pv The pointer into the image.
5224 * @param fMayBeNull Whether it may be NULL.
5225 * @param pszSymbol The entrypoint name or log name. If the symbol is
5226 * capitalized it signifies a specific symbol, otherwise it
5227 * for logging.
5228 * @param pbImageBits The image bits prepared by ring-3.
5229 * @param pReq The request for passing to supdrvLdrLoadError.
5230 *
5231 * @note Will leave the loader lock on failure!
5232 */
5233static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv, bool fMayBeNull,
5234 const uint8_t *pbImageBits, const char *pszSymbol, PSUPLDRLOAD pReq)
5235{
5236 if (!fMayBeNull || pv)
5237 {
5238 uint32_t iSeg;
5239
5240 /* Must be within the image bits: */
5241 uintptr_t const uRva = (uintptr_t)pv - (uintptr_t)pImage->pvImage;
5242 if (uRva >= pImage->cbImageBits)
5243 {
5244 supdrvLdrUnlock(pDevExt);
5245 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5246 "Invalid entry point address %p given for %s: RVA %#zx, image size %#zx",
5247 pv, pszSymbol, uRva, pImage->cbImageBits);
5248 }
5249
5250 /* Must be in an executable segment: */
5251 for (iSeg = 0; iSeg < pImage->cSegments; iSeg++)
5252 if (uRva - pImage->paSegments[iSeg].off < (uintptr_t)pImage->paSegments[iSeg].cb)
5253 {
5254 if (pImage->paSegments[iSeg].fProt & SUPLDR_PROT_EXEC)
5255 break;
5256 supdrvLdrUnlock(pDevExt);
5257 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5258 "Bad entry point %p given for %s: not executable (seg #%u: %#RX32 LB %#RX32 prot %#x)",
5259 pv, pszSymbol, iSeg, pImage->paSegments[iSeg].off, pImage->paSegments[iSeg].cb,
5260 pImage->paSegments[iSeg].fProt);
5261 }
5262 if (iSeg >= pImage->cSegments)
5263 {
5264 supdrvLdrUnlock(pDevExt);
5265 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5266 "Bad entry point %p given for %s: no matching segment found (RVA %#zx)!",
5267 pv, pszSymbol, uRva);
5268 }
5269
5270 if (pImage->fNative)
5271 {
5272 /** @todo pass pReq along to the native code. */
5273 int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits, pszSymbol);
5274 if (RT_FAILURE(rc))
5275 {
5276 supdrvLdrUnlock(pDevExt);
5277 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5278 "Bad entry point address %p for %s: rc=%Rrc\n", pv, pszSymbol, rc);
5279 }
5280 }
5281 }
5282 return VINF_SUCCESS;
5283}
5284
5285
/**
 * Loads the image bits.
 *
 * This is the 2nd step of the loading.  The image must previously have been
 * opened (SUP_IOCTL_LDR_OPEN) by the same session; the request carries the
 * image bits, segment/symbol/string tables and the entry points.
 *
 * @returns IPRT status code.
 * @param   pDevExt     Device globals.
 * @param   pSession    Session data.
 * @param   pReq        The request.  On failure a human readable message is
 *                      returned via supdrvLdrLoadError() in the output part.
 */
static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
{
    PSUPDRVLDRUSAGE pUsage;
    PSUPDRVLDRIMAGE pImage;
    PSUPDRVLDRIMAGE pImageImport;
    int             rc;
    SUPDRV_CHECK_SMAP_SETUP();
    LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithEverything=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithEverything));
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);

    /*
     * Find the ldr image.
     *
     * The loader lock is held from here until the final unlock at the bottom;
     * every error path below must release it before returning.
     */
    supdrvLdrLock(pDevExt);
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);

    /* Look the image up via this session's usage records (keyed by load address). */
    pUsage = pSession->pLdrUsage;
    while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
        pUsage = pUsage->pNext;
    if (!pUsage)
    {
        supdrvLdrUnlock(pDevExt);
        return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image not found");
    }
    pImage = pUsage->pImage;

    /*
     * Validate input.
     */
    /* The sizes must match what was specified at open time. */
    if (   pImage->cbImageWithEverything != pReq->u.In.cbImageWithEverything
        || pImage->cbImageBits != pReq->u.In.cbImageBits)
    {
        supdrvLdrUnlock(pDevExt);
        return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %u(prep) != %u(load) or %u != %u",
                                  pImage->cbImageWithEverything, pReq->u.In.cbImageWithEverything, pImage->cbImageBits, pReq->u.In.cbImageBits);
    }

    /* Only an image in the 'open' state can be loaded; any other state means
       someone already loaded it (assert unless it is the 'load' state). */
    if (pImage->uState != SUP_IOCTL_LDR_OPEN)
    {
        unsigned uState = pImage->uState;
        supdrvLdrUnlock(pDevExt);
        if (uState != SUP_IOCTL_LDR_LOAD)
            AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
        pReq->u.Out.uErrorMagic = 0;
        return VERR_ALREADY_LOADED;
    }

    /* If the loader interface is locked down, don't load new images */
    if (pDevExt->fLdrLockedDown)
    {
        supdrvLdrUnlock(pDevExt);
        return supdrvLdrLoadError(VERR_PERMISSION_DENIED, pReq, "Loader is locked down");
    }

    /*
     * If the new image is a dependant of VMMR0.r0, resolve it via the
     * caller's usage list and make sure it's in ready state.
     */
    pImageImport = NULL;
    if (pReq->u.In.fFlags & SUPLDRLOAD_F_DEP_VMMR0)
    {
        PSUPDRVLDRUSAGE pUsageDependency = pSession->pLdrUsage;
        while (pUsageDependency && pUsageDependency->pImage->pvImage != pDevExt->pvVMMR0)
            pUsageDependency = pUsageDependency->pNext;
        if (!pUsageDependency || !pDevExt->pvVMMR0)
        {
            supdrvLdrUnlock(pDevExt);
            return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 not loaded by session");
        }
        pImageImport = pUsageDependency->pImage;
        if (pImageImport->uState != SUP_IOCTL_LDR_LOAD)
        {
            supdrvLdrUnlock(pDevExt);
            return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 is not ready (state %#x)", pImageImport->uState);
        }
    }

    /*
     * Copy the segments before we start using supdrvLdrValidatePointer for entrypoint validation.
     */
    pImage->cSegments = pReq->u.In.cSegments;
    {
        size_t cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
        pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSegments], cbSegments);
        if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
            pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
        else
        {
            supdrvLdrUnlock(pDevExt);
            return supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
        }
        SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    }

    /*
     * Validate entrypoints.
     *
     * Note! supdrvLdrValidatePointer releases the loader lock itself on
     *       failure (see its error paths), so the bare 'return rc' below
     *       is correct.
     */
    switch (pReq->u.In.eEPType)
    {
        case SUPLDRLOADEP_NOTHING:
            break;

        case SUPLDRLOADEP_VMMR0:
            if (pReq->u.In.EP.VMMR0.pvVMMR0 != pImage->pvImage)
            {
                supdrvLdrUnlock(pDevExt);
                return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid pvVMMR0 pointer: %p, expected %p", pReq->u.In.EP.VMMR0.pvVMMR0, pImage->pvImage);
            }
            rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, pReq->u.In.abImage, "VMMR0EntryFast", pReq);
            if (RT_FAILURE(rc))
                return rc;
            rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, pReq->u.In.abImage, "VMMR0EntryEx", pReq);
            if (RT_FAILURE(rc))
                return rc;
            break;

        case SUPLDRLOADEP_SERVICE:
            rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, pReq->u.In.abImage, "pfnServiceReq", pReq);
            if (RT_FAILURE(rc))
                return rc;
            if (   pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
                || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
                || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
            {
                supdrvLdrUnlock(pDevExt);
                return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "apvReserved={%p,%p,%p} MBZ!",
                                          pReq->u.In.EP.Service.apvReserved[0], pReq->u.In.EP.Service.apvReserved[1],
                                          pReq->u.In.EP.Service.apvReserved[2]);
            }
            break;

        default:
            supdrvLdrUnlock(pDevExt);
            return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid eEPType=%d", pReq->u.In.eEPType);
    }

    /* ModuleInit/ModuleTerm are optional (fMayBeNull = true). */
    rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, pReq->u.In.abImage, "ModuleInit", pReq);
    if (RT_FAILURE(rc))
        return rc;
    rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, pReq->u.In.abImage, "ModuleTerm", pReq);
    if (RT_FAILURE(rc))
        return rc;
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);

    /*
     * Allocate and copy the tables if non-native.
     * (No need to do try/except as this is a buffered request.)
     */
    if (!pImage->fNative)
    {
        pImage->cbStrTab = pReq->u.In.cbStrTab;
        if (pImage->cbStrTab)
        {
            pImage->pachStrTab = (char *)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offStrTab], pImage->cbStrTab);
            if (!pImage->pachStrTab)
                rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for string table: %#x", pImage->cbStrTab);
            SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
        }

        pImage->cSymbols = pReq->u.In.cSymbols;
        if (RT_SUCCESS(rc) && pImage->cSymbols)
        {
            size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
            pImage->paSymbols = (PSUPLDRSYM)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSymbols], cbSymbols);
            if (!pImage->paSymbols)
                rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
            SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
        }
    }

    /*
     * Copy the bits and apply permissions / complete native loading.
     */
    if (RT_SUCCESS(rc))
    {
        pImage->uState = SUP_IOCTL_LDR_LOAD;
        pImage->pfnModuleInit = (PFNR0MODULEINIT)(uintptr_t)pReq->u.In.pfnModuleInit;
        pImage->pfnModuleTerm = (PFNR0MODULETERM)(uintptr_t)pReq->u.In.pfnModuleTerm;

        if (pImage->fNative)
            rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.abImage, pReq);
        else
        {
#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
            uint32_t i;
            memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);

            /* Apply the per-segment protections; VERR_NOT_SUPPORTED from the
               host is tolerated and treated as success. */
            for (i = 0; i < pImage->cSegments; i++)
            {
                rc = RTR0MemObjProtect(pImage->hMemObjImage, pImage->paSegments[i].off, pImage->paSegments[i].cb,
                                       pImage->paSegments[i].fProt);
                if (RT_SUCCESS(rc))
                    continue;
                if (rc == VERR_NOT_SUPPORTED)
                    rc = VINF_SUCCESS;
                else
                    rc = supdrvLdrLoadError(rc, pReq, "RTR0MemObjProtect failed on seg#%u %#RX32 LB %#RX32 fProt=%#x",
                                            i, pImage->paSegments[i].off, pImage->paSegments[i].cb, pImage->paSegments[i].fProt);
                break;
            }
#else
            memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
#endif
            Log(("vboxdrv: Loaded '%s' at %p\n", pImage->szName, pImage->pvImage));
        }
        SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    }

    /*
     * Update any entry points.
     */
    if (RT_SUCCESS(rc))
    {
        switch (pReq->u.In.eEPType)
        {
            default:
            case SUPLDRLOADEP_NOTHING:
                rc = VINF_SUCCESS;
                break;
            case SUPLDRLOADEP_VMMR0:
                rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0,
                                          pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
                break;
            case SUPLDRLOADEP_SERVICE:
                pImage->pfnServiceReqHandler = (PFNSUPR0SERVICEREQHANDLER)(uintptr_t)pReq->u.In.EP.Service.pfnServiceReq;
                rc = VINF_SUCCESS;
                break;
        }
    }

    /*
     * On success call the module initialization.
     *
     * Note! This is done while still holding the loader lock;
     *       pLdrInitImage/hLdrInitThread are recorded around the call so
     *       lock-ownership checks (SUPR0LdrIsLockOwnerByMod) work during init.
     */
    LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
    if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
    {
        Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
        pDevExt->pLdrInitImage  = pImage;
        pDevExt->hLdrInitThread = RTThreadNativeSelf();
        SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
        rc = pImage->pfnModuleInit(pImage);
        SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
        pDevExt->pLdrInitImage  = NULL;
        pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
        if (RT_FAILURE(rc))
        {
            /* Roll back the VMMR0 entry points if this image provided them. */
            if (pDevExt->pvVMMR0 == pImage->pvImage)
                supdrvLdrUnsetVMMR0EPs(pDevExt);
            supdrvLdrLoadError(rc, pReq, "ModuleInit failed: %Rrc", rc);
        }
    }
    if (RT_SUCCESS(rc))
    {
        /* Increase the usage counter of any import image. */
        if (pImageImport)
        {
            pImageImport->cUsage++;
            pImage->pImageImport = pImageImport;
        }

        /* Done! */
        SUPR0Printf("vboxdrv: %RKv %s\n", pImage->pvImage, pImage->szName);
        pReq->u.Out.uErrorMagic = 0;
        pReq->u.Out.szError[0]  = '\0';
    }
    else
    {
        /* Inform the tracing component in case ModuleInit registered TPs. */
        supdrvTracerModuleUnloading(pDevExt, pImage);

        /* Revert the image to the 'open' state and free the tables so a
           subsequent load attempt starts from a clean slate. */
        pImage->uState              = SUP_IOCTL_LDR_OPEN;
        pImage->pfnModuleInit       = NULL;
        pImage->pfnModuleTerm       = NULL;
        pImage->pfnServiceReqHandler= NULL;
        pImage->cbStrTab            = 0;
        RTMemFree(pImage->pachStrTab);
        pImage->pachStrTab          = NULL;
        RTMemFree(pImage->paSymbols);
        pImage->paSymbols           = NULL;
        pImage->cSymbols            = 0;
    }

    supdrvLdrUnlock(pDevExt);
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
    return rc;
}
5582
5583
/**
 * Frees a previously loaded (prep'ed) image.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_HANDLE if the session has no usage record for the
 *          given image base address.
 * @retval  VERR_CALLER_NO_REFERENCE if the session holds no ring-3 reference.
 * @retval  VERR_DANGLING_OBJECTS if objects with destructors inside the image
 *          still exist; the image is then left for session cleanup.
 * @param   pDevExt     Device globals.
 * @param   pSession    Session data.
 * @param   pReq        The request.
 */
static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
{
    int             rc;
    PSUPDRVLDRUSAGE pUsagePrev;
    PSUPDRVLDRUSAGE pUsage;
    PSUPDRVLDRIMAGE pImage;
    LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));

    /*
     * Find the ldr image.
     *
     * Singly linked list, so remember the predecessor for unlinking.
     */
    supdrvLdrLock(pDevExt);
    pUsagePrev = NULL;
    pUsage = pSession->pLdrUsage;
    while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
    {
        pUsagePrev = pUsage;
        pUsage = pUsage->pNext;
    }
    if (!pUsage)
    {
        supdrvLdrUnlock(pDevExt);
        Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
        return VERR_INVALID_HANDLE;
    }
    /* This IOCtl only releases ring-3 references. */
    if (pUsage->cRing3Usage == 0)
    {
        supdrvLdrUnlock(pDevExt);
        Log(("SUP_IOCTL_LDR_FREE: No ring-3 reference to the image!\n"));
        return VERR_CALLER_NO_REFERENCE;
    }

    /*
     * Check if we can remove anything.
     */
    rc = VINF_SUCCESS;
    pImage = pUsage->pImage;
    Log(("SUP_IOCTL_LDR_FREE: pImage=%p %s cUsage=%d r3=%d r0=%u\n",
         pImage, pImage->szName, pImage->cUsage, pUsage->cRing3Usage, pUsage->cRing0Usage));
    if (pImage->cUsage <= 1 || pUsage->cRing3Usage + pUsage->cRing0Usage <= 1)
    {
        /*
         * Check if there are any objects with destructors in the image, if
         * so leave it for the session cleanup routine so we get a chance to
         * clean things up in the right order and not leave them all dangling.
         */
        RTSpinlockAcquire(pDevExt->Spinlock);
        if (pImage->cUsage <= 1)
        {
            /* Last reference overall: scan the global object list.  The
               unsigned subtraction doubles as a "destructor address inside
               the image" range check. */
            PSUPDRVOBJ pObj;
            for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
                if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
                {
                    rc = VERR_DANGLING_OBJECTS;
                    break;
                }
        }
        else
        {
            /* Only this session's last reference: scanning the session's
               object usage records suffices. */
            PSUPDRVUSAGE pGenUsage;
            for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
                if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
                {
                    rc = VERR_DANGLING_OBJECTS;
                    break;
                }
        }
        RTSpinlockRelease(pDevExt->Spinlock);
        if (rc == VINF_SUCCESS)
        {
            /* unlink it */
            if (pUsagePrev)
                pUsagePrev->pNext = pUsage->pNext;
            else
                pSession->pLdrUsage = pUsage->pNext;

            /* free it */
            pUsage->pImage = NULL;
            pUsage->pNext = NULL;
            RTMemFree(pUsage);

            /*
             * Dereference the image.
             */
            if (pImage->cUsage <= 1)
                supdrvLdrFree(pDevExt, pImage);
            else
                pImage->cUsage--;
        }
        else
            Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
    }
    else
    {
        /*
         * Dereference both image and usage.
         */
        pImage->cUsage--;
        pUsage->cRing3Usage--;
    }

    supdrvLdrUnlock(pDevExt);
    return rc;
}
5696
5697
5698/**
5699 * Lock down the image loader interface.
5700 *
5701 * @returns IPRT status code.
5702 * @param pDevExt Device globals.
5703 */
5704static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt)
5705{
5706 LogFlow(("supdrvIOCtl_LdrLockDown:\n"));
5707
5708 supdrvLdrLock(pDevExt);
5709 if (!pDevExt->fLdrLockedDown)
5710 {
5711 pDevExt->fLdrLockedDown = true;
5712 Log(("supdrvIOCtl_LdrLockDown: Image loader interface locked down\n"));
5713 }
5714 supdrvLdrUnlock(pDevExt);
5715
5716 return VINF_SUCCESS;
5717}
5718
5719
/**
 * Queries the address of a symbol in an open image.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_HANDLE if the session has no usage record for the image.
 * @retval  VERR_ALREADY_LOADED if the image isn't in the 'load' state.
 * @retval  VERR_SYMBOL_NOT_FOUND if the symbol isn't exported by the image.
 * @param   pDevExt     Device globals.
 * @param   pSession    Session data.
 * @param   pReq        The request buffer (symbol name in, address out).
 */
static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
{
    PSUPDRVLDRIMAGE pImage;
    PSUPDRVLDRUSAGE pUsage;
    uint32_t        i;
    PSUPLDRSYM      paSyms;
    const char     *pchStrings;
    const size_t    cbSymbol = strlen(pReq->u.In.szSymbol) + 1; /* incl. terminator */
    void           *pvSymbol = NULL;
    int             rc = VERR_SYMBOL_NOT_FOUND;
    Log3(("supdrvIOCtl_LdrQuerySymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));

    /*
     * Find the ldr image.
     */
    supdrvLdrLock(pDevExt);
    pUsage = pSession->pLdrUsage;
    while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
        pUsage = pUsage->pNext;
    if (!pUsage)
    {
        supdrvLdrUnlock(pDevExt);
        Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
        return VERR_INVALID_HANDLE;
    }
    pImage = pUsage->pImage;
    if (pImage->uState != SUP_IOCTL_LDR_LOAD)
    {
        unsigned uState = pImage->uState;
        supdrvLdrUnlock(pDevExt);
        Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
        return VERR_ALREADY_LOADED;
    }

    /*
     * Search the image exports / symbol strings.
     *
     * Note! The int32_t is for native loading on solaris where the data
     *       and text segments are in very different places.
     */
    if (pImage->fNative)
        rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pReq->u.In.szSymbol, cbSymbol - 1, &pvSymbol);
    else
    {
        pchStrings = pImage->pachStrTab;
        paSyms     = pImage->paSymbols;
        for (i = 0; i < pImage->cSymbols; i++)
        {
            /* The bounds check guards the memcmp; comparing cbSymbol bytes
               includes the NUL, so this is an exact (not prefix) match. */
            if (    paSyms[i].offName + cbSymbol <= pImage->cbStrTab
                &&  !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
            {
                pvSymbol = (uint8_t *)pImage->pvImage + (int32_t)paSyms[i].offSymbol;
                rc = VINF_SUCCESS;
                break;
            }
        }
    }
    supdrvLdrUnlock(pDevExt);
    pReq->u.Out.pvSymbol = pvSymbol;
    return rc;
}
5789
5790
5791/**
5792 * Gets the address of a symbol in an open image or the support driver.
5793 *
5794 * @returns VINF_SUCCESS on success.
5795 * @returns
5796 * @param pDevExt Device globals.
5797 * @param pSession Session data.
5798 * @param pReq The request buffer.
5799 */
5800static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
5801{
5802 int rc = VINF_SUCCESS;
5803 const char *pszSymbol = pReq->u.In.pszSymbol;
5804 const char *pszModule = pReq->u.In.pszModule;
5805 size_t cbSymbol;
5806 char const *pszEnd;
5807 uint32_t i;
5808
5809 /*
5810 * Input validation.
5811 */
5812 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
5813 pszEnd = RTStrEnd(pszSymbol, 512);
5814 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5815 cbSymbol = pszEnd - pszSymbol + 1;
5816
5817 if (pszModule)
5818 {
5819 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
5820 pszEnd = RTStrEnd(pszModule, 64);
5821 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5822 }
5823 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
5824
5825
5826 if ( !pszModule
5827 || !strcmp(pszModule, "SupDrv"))
5828 {
5829 /*
5830 * Search the support driver export table.
5831 */
5832 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
5833 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
5834 {
5835 pReq->u.Out.pfnSymbol = (PFNRT)(uintptr_t)g_aFunctions[i].pfn;
5836 break;
5837 }
5838 }
5839 else
5840 {
5841 /*
5842 * Find the loader image.
5843 */
5844 PSUPDRVLDRIMAGE pImage;
5845
5846 supdrvLdrLock(pDevExt);
5847
5848 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5849 if (!strcmp(pImage->szName, pszModule))
5850 break;
5851 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
5852 {
5853 /*
5854 * Search the image exports / symbol strings.
5855 */
5856 if (pImage->fNative)
5857 {
5858 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pszSymbol, cbSymbol - 1, (void **)&pReq->u.Out.pfnSymbol);
5859 if (RT_SUCCESS(rc))
5860 rc = supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5861 }
5862 else
5863 {
5864 const char *pchStrings = pImage->pachStrTab;
5865 PCSUPLDRSYM paSyms = pImage->paSymbols;
5866 rc = VERR_SYMBOL_NOT_FOUND;
5867 for (i = 0; i < pImage->cSymbols; i++)
5868 {
5869 if ( paSyms[i].offName + cbSymbol <= pImage->cbStrTab
5870 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
5871 {
5872 /*
5873 * Found it! Calc the symbol address and add a reference to the module.
5874 */
5875 pReq->u.Out.pfnSymbol = (PFNRT)((uintptr_t)pImage->pvImage + (int32_t)paSyms[i].offSymbol);
5876 rc = supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5877 break;
5878 }
5879 }
5880 }
5881 }
5882 else
5883 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
5884
5885 supdrvLdrUnlock(pDevExt);
5886 }
5887 return rc;
5888}
5889
5890
5891/**
5892 * Looks up a symbol in g_aFunctions
5893 *
5894 * @returns VINF_SUCCESS on success, VERR_SYMBOL_NOT_FOUND on failure.
5895 * @param pszSymbol The symbol to look up.
5896 * @param puValue Where to return the value.
5897 */
5898int VBOXCALL supdrvLdrGetExportedSymbol(const char *pszSymbol, uintptr_t *puValue)
5899{
5900 uint32_t i;
5901 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
5902 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
5903 {
5904 *puValue = (uintptr_t)g_aFunctions[i].pfn;
5905 return VINF_SUCCESS;
5906 }
5907
5908 if (!strcmp(pszSymbol, "g_SUPGlobalInfoPage"))
5909 {
5910 *puValue = (uintptr_t)g_pSUPGlobalInfoPage;
5911 return VINF_SUCCESS;
5912 }
5913
5914 return VERR_SYMBOL_NOT_FOUND;
5915}
5916
5917
/**
 * Updates the VMMR0 entry point pointers.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_PARAMETER if the entry points are already set and
 *          point to a different module.
 * @param   pDevExt             Device globals.
 * @param   pvVMMR0             VMMR0 image handle.
 * @param   pvVMMR0EntryFast    VMMR0EntryFast address.
 * @param   pvVMMR0EntryEx      VMMR0EntryEx address.
 * @remark  Caller must own the loader mutex.
 */
static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
{
    int rc = VINF_SUCCESS;
    LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryFast=%p\n", pvVMMR0, pvVMMR0EntryFast));


    /*
     * Check if not yet set.
     */
    if (!pDevExt->pvVMMR0)
    {
        pDevExt->pvVMMR0 = pvVMMR0;
        /* Write the data pointers into the function-pointer members by
           punning through void** rather than casting the values. */
        *(void **)&pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
        *(void **)&pDevExt->pfnVMMR0EntryEx   = pvVMMR0EntryEx;
        ASMCompilerBarrier(); /* the above isn't nice, so be careful... */
    }
    else
    {
        /*
         * Return failure or success depending on whether the values match or not.
         */
        if (    pDevExt->pvVMMR0 != pvVMMR0
            ||  (uintptr_t)pDevExt->pfnVMMR0EntryFast != (uintptr_t)pvVMMR0EntryFast
            ||  (uintptr_t)pDevExt->pfnVMMR0EntryEx   != (uintptr_t)pvVMMR0EntryEx)
        {
            AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    return rc;
}
5959
5960
/**
 * Unsets the VMMR0 entry point installed by supdrvLdrSetR0EP.
 *
 * Clears the image handle and both entry point pointers.
 *
 * @param   pDevExt     Device globals.
 */
static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
{
    pDevExt->pvVMMR0            = NULL;
    pDevExt->pfnVMMR0EntryFast  = NULL;
    pDevExt->pfnVMMR0EntryEx    = NULL;
}
5972
5973
5974/**
5975 * Adds a usage reference in the specified session of an image.
5976 *
5977 * Called while owning the loader semaphore.
5978 *
5979 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
5980 * @param pSession Session in question.
5981 * @param pImage Image which the session is using.
5982 * @param fRing3Usage Set if it's ring-3 usage, clear if ring-0.
5983 */
5984static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage)
5985{
5986 PSUPDRVLDRUSAGE pUsage;
5987 LogFlow(("supdrvLdrAddUsage: pImage=%p %d\n", pImage, fRing3Usage));
5988
5989 /*
5990 * Referenced it already?
5991 */
5992 pUsage = pSession->pLdrUsage;
5993 while (pUsage)
5994 {
5995 if (pUsage->pImage == pImage)
5996 {
5997 if (fRing3Usage)
5998 pUsage->cRing3Usage++;
5999 else
6000 pUsage->cRing0Usage++;
6001 return VINF_SUCCESS;
6002 }
6003 pUsage = pUsage->pNext;
6004 }
6005
6006 /*
6007 * Allocate new usage record.
6008 */
6009 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
6010 AssertReturn(pUsage, /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_5);
6011 pUsage->cRing3Usage = fRing3Usage ? 1 : 0;
6012 pUsage->cRing0Usage = fRing3Usage ? 0 : 1;
6013 pUsage->pImage = pImage;
6014 pUsage->pNext = pSession->pLdrUsage;
6015 pSession->pLdrUsage = pUsage;
6016 return VINF_SUCCESS;
6017}
6018
6019
/**
 * Frees a load image.
 *
 * Unlinks the image from the global list, calls its termination function,
 * notifies the tracer and native loader, releases all memory, and finally
 * dereferences (and possibly frees) the import image it depended on.
 *
 * @param   pDevExt     Pointer to device extension.
 * @param   pImage      Pointer to the image we're gonna free.
 *                      This image must exit!
 * @remark  The caller MUST own SUPDRVDEVEXT::mtxLdr!
 */
static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
{
    unsigned cLoops;
    /* The loop exists so the freed image's import (at most one, and at most
       two iterations are expected - see the AssertBreak) can be freed too
       when this was its last reference. */
    for (cLoops = 0; ; cLoops++)
    {
        PSUPDRVLDRIMAGE pImagePrev;
        PSUPDRVLDRIMAGE pImageImport;
        LogFlow(("supdrvLdrFree: pImage=%p %s [loop %u]\n", pImage, pImage->szName, cLoops));
        AssertBreak(cLoops < 2);

        /*
         * Warn if we're releasing images while the image loader interface is
         * locked down -- we won't be able to reload them!
         */
        if (pDevExt->fLdrLockedDown)
            Log(("supdrvLdrFree: Warning: unloading '%s' image, while loader interface is locked down!\n", pImage->szName));

        /* find it - arg. should've used doubly linked list. */
        Assert(pDevExt->pLdrImages);
        pImagePrev = NULL;
        if (pDevExt->pLdrImages != pImage)
        {
            pImagePrev = pDevExt->pLdrImages;
            while (pImagePrev->pNext != pImage)
                pImagePrev = pImagePrev->pNext;
            Assert(pImagePrev->pNext == pImage);
        }

        /* unlink */
        if (pImagePrev)
            pImagePrev->pNext = pImage->pNext;
        else
            pDevExt->pLdrImages = pImage->pNext;

        /* check if this is VMMR0.r0 unset its entry point pointers. */
        if (pDevExt->pvVMMR0 == pImage->pvImage)
            supdrvLdrUnsetVMMR0EPs(pDevExt);

        /* check for objects with destructors in this image. (Shouldn't happen.) */
        if (pDevExt->pObjs)
        {
            unsigned        cObjs = 0;
            PSUPDRVOBJ      pObj;
            RTSpinlockAcquire(pDevExt->Spinlock);
            for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
                if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
                {
                    /* Neutralize the destructor instead of calling into an
                       image that is about to disappear. */
                    pObj->pfnDestructor = NULL;
                    cObjs++;
                }
            RTSpinlockRelease(pDevExt->Spinlock);
            if (cObjs)
                OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
        }

        /* call termination function if fully loaded. */
        if (    pImage->pfnModuleTerm
            &&  pImage->uState == SUP_IOCTL_LDR_LOAD)
        {
            LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
            /* hLdrTermThread is recorded so lock-ownership checks work during term. */
            pDevExt->hLdrTermThread = RTThreadNativeSelf();
            pImage->pfnModuleTerm(pImage);
            pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
        }

        /* Inform the tracing component. */
        supdrvTracerModuleUnloading(pDevExt, pImage);

        /* Do native unload if appropriate, then inform the native code about the
           unloading (mainly for non-native loading case). */
        if (pImage->fNative)
            supdrvOSLdrUnload(pDevExt, pImage);
        supdrvOSLdrNotifyUnloaded(pDevExt, pImage);

        /* free the image */
        pImage->uMagic      = SUPDRVLDRIMAGE_MAGIC_DEAD; /* poison the magic so stale handles are caught */
        pImage->cUsage      = 0;
        pImage->pDevExt     = NULL;
        pImage->pNext       = NULL;
        pImage->uState      = SUP_IOCTL_LDR_FREE;
#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
        RTR0MemObjFree(pImage->hMemObjImage, true /*fMappings*/);
        pImage->hMemObjImage = NIL_RTR0MEMOBJ;
#else
        RTMemExecFree(pImage->pvImageAlloc, pImage->cbImageBits + 31);
        pImage->pvImageAlloc = NULL;
#endif
        pImage->pvImage     = NULL;
        RTMemFree(pImage->pachStrTab);
        pImage->pachStrTab  = NULL;
        RTMemFree(pImage->paSymbols);
        pImage->paSymbols   = NULL;
        RTMemFree(pImage->paSegments);
        pImage->paSegments  = NULL;

        pImageImport = pImage->pImageImport;
        pImage->pImageImport = NULL;

        RTMemFree(pImage);

        /*
         * Deal with any import image.
         */
        if (!pImageImport)
            break;
        if (pImageImport->cUsage > 1)
        {
            pImageImport->cUsage--;
            break;
        }
        /* Last reference to the import - free it too (next loop iteration). */
        pImage = pImageImport;
    }
}
6141
6142
/**
 * Acquires the loader lock.
 *
 * Waits indefinitely; the mutex flavour depends on the platform build
 * (SUPDRV_USE_MUTEX_FOR_LDR).
 *
 * @returns IPRT status code.
 * @param   pDevExt     The device extension.
 * @note    Not recursive on all platforms yet.
 */
DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt)
{
#ifdef SUPDRV_USE_MUTEX_FOR_LDR
    int rc = RTSemMutexRequest(pDevExt->mtxLdr, RT_INDEFINITE_WAIT);
#else
    int rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
#endif
    AssertRC(rc);
    return rc;
}
6160
6161
/**
 * Releases the loader lock.
 *
 * Counterpart to supdrvLdrLock(); uses the matching mutex flavour.
 *
 * @returns IPRT status code.
 * @param   pDevExt     The device extension.
 */
DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt)
{
#ifdef SUPDRV_USE_MUTEX_FOR_LDR
    return RTSemMutexRelease(pDevExt->mtxLdr);
#else
    return RTSemFastMutexRelease(pDevExt->mtxLdr);
#endif
}
6176
6177
6178/**
6179 * Acquires the global loader lock.
6180 *
6181 * This can be useful when accessing structures being modified by the ModuleInit
6182 * and ModuleTerm. Use SUPR0LdrUnlock() to unlock.
6183 *
6184 * @returns VBox status code.
6185 * @param pSession The session doing the locking.
6186 *
6187 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6188 */
6189SUPR0DECL(int) SUPR0LdrLock(PSUPDRVSESSION pSession)
6190{
6191 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6192 return supdrvLdrLock(pSession->pDevExt);
6193}
6194
6195
6196/**
6197 * Releases the global loader lock.
6198 *
6199 * Must correspond to a SUPR0LdrLock call!
6200 *
6201 * @returns VBox status code.
6202 * @param pSession The session doing the locking.
6203 *
6204 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6205 */
6206SUPR0DECL(int) SUPR0LdrUnlock(PSUPDRVSESSION pSession)
6207{
6208 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6209 return supdrvLdrUnlock(pSession->pDevExt);
6210}
6211
6212
/**
 * For checking lock ownership in Assert() statements during ModuleInit and
 * ModuleTerm.
 *
 * @returns Whether we own the loader lock or not.
 * @param   hMod            The module in question.
 * @param   fWantToHear     For hosts where it is difficult to know who owns the
 *                          lock, this will be returned instead.
 */
SUPR0DECL(bool) SUPR0LdrIsLockOwnerByMod(void *hMod, bool fWantToHear)
{
    PSUPDRVDEVEXT   pDevExt;
    RTNATIVETHREAD  hOwner;

    /* Bad handles yield the caller's preferred answer rather than asserting hard. */
    PSUPDRVLDRIMAGE pImage = (PSUPDRVLDRIMAGE)hMod;
    AssertPtrReturn(pImage, fWantToHear);
    AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, fWantToHear);

    pDevExt = pImage->pDevExt;
    AssertPtrReturn(pDevExt, fWantToHear);

    /*
     * Expecting this to be called at init/term time only, so this will be sufficient.
     *
     * hLdrInitThread/hLdrTermThread are recorded around the ModuleInit and
     * ModuleTerm callbacks; if either is set, compare against the caller.
     */
    hOwner = pDevExt->hLdrInitThread;
    if (hOwner == NIL_RTNATIVETHREAD)
        hOwner = pDevExt->hLdrTermThread;
    if (hOwner != NIL_RTNATIVETHREAD)
        return hOwner == RTThreadNativeSelf();

    /*
     * Neither of the two semaphore variants currently offers very good
     * introspection, so we wing it for now.  This API is VBOX_STRICT only.
     */
#ifdef SUPDRV_USE_MUTEX_FOR_LDR
    return RTSemMutexIsOwned(pDevExt->mtxLdr) && fWantToHear;
#else
    return fWantToHear;
#endif
}
6253
6254
/**
 * Locates and retains the given module for ring-0 usage.
 *
 * @returns VBox status code.
 * @retval  VERR_MODULE_NOT_FOUND if no image with that name is loaded.
 * @retval  VERR_INVALID_STATE if the image isn't in the 'load' state.
 * @retval  VERR_TOO_MANY_REFERENCES if the usage counter would overflow.
 * @param   pSession    The session to associate the module reference with.
 * @param   pszName     The module name (no path).
 * @param   phMod       Where to return the module handle.  The module is
 *                      referenced and a call to SUPR0LdrModRelease() is
 *                      necessary when done with it.
 */
SUPR0DECL(int) SUPR0LdrModByName(PSUPDRVSESSION pSession, const char *pszName, void **phMod)
{
    int             rc;
    size_t          cchName;
    PSUPDRVDEVEXT   pDevExt;

    /*
     * Validate input.
     */
    AssertPtrReturn(phMod, VERR_INVALID_POINTER);
    *phMod = NULL;
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszName, VERR_INVALID_POINTER);
    cchName = strlen(pszName);
    AssertReturn(cchName > 0, VERR_EMPTY_STRING);
    AssertReturn(cchName < RT_SIZEOFMEMB(SUPDRVLDRIMAGE, szName), VERR_MODULE_NOT_FOUND);

    /*
     * Do the lookup.
     */
    pDevExt = pSession->pDevExt;
    rc = supdrvLdrLock(pDevExt);
    if (RT_SUCCESS(rc))
    {
        PSUPDRVLDRIMAGE pImage;
        for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
        {
            /* szName[cchName] == '\0' + memcmp gives an exact-length match
               without a second strlen. */
            if (    pImage->szName[cchName] == '\0'
                &&  !memcmp(pImage->szName, pszName, cchName))
            {
                /*
                 * Check the state and make sure we don't overflow the reference counter before return it.
                 */
                uint32_t uState = pImage->uState;
                if (uState == SUP_IOCTL_LDR_LOAD)
                {
                    if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
                    {
                        /* Take both the image reference and a ring-0 session usage record. */
                        pImage->cUsage++;
                        supdrvLdrAddUsage(pSession, pImage, false /*fRing3Usage*/);
                        *phMod = pImage;
                        supdrvLdrUnlock(pDevExt);
                        return VINF_SUCCESS;
                    }
                    supdrvLdrUnlock(pDevExt);
                    Log(("SUPR0LdrModByName: Too many existing references to '%s'!\n", pszName));
                    return VERR_TOO_MANY_REFERENCES;
                }
                supdrvLdrUnlock(pDevExt);
                Log(("SUPR0LdrModByName: Module '%s' is not in the loaded state (%d)!\n", pszName, uState));
                return VERR_INVALID_STATE;
            }
        }
        supdrvLdrUnlock(pDevExt);
        Log(("SUPR0LdrModByName: Module '%s' not found!\n", pszName));
        rc = VERR_MODULE_NOT_FOUND;
    }
    return rc;
}
6324
6325
6326/**
6327 * Retains a ring-0 module reference.
6328 *
6329 * Release reference when done by calling SUPR0LdrModRelease().
6330 *
6331 * @returns VBox status code.
6332 * @param pSession The session to reference the module in. A usage
6333 * record is added if needed.
6334 * @param hMod The handle to the module to retain.
6335 */
6336SUPR0DECL(int) SUPR0LdrModRetain(PSUPDRVSESSION pSession, void *hMod)
6337{
6338 PSUPDRVDEVEXT pDevExt;
6339 PSUPDRVLDRIMAGE pImage;
6340 int rc;
6341
6342 /* Validate input a little. */
6343 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6344 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6345 pImage = (PSUPDRVLDRIMAGE)hMod;
6346 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6347
6348 /* Reference the module: */
6349 pDevExt = pSession->pDevExt;
6350 rc = supdrvLdrLock(pDevExt);
6351 if (RT_SUCCESS(rc))
6352 {
6353 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6354 {
6355 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
6356 {
6357 rc = supdrvLdrAddUsage(pSession, pImage, false /*fRing3Usage*/);
6358 if (RT_SUCCESS(rc))
6359 {
6360 pImage->cUsage++;
6361 rc = VINF_SUCCESS;
6362 }
6363 }
6364 else
6365 AssertFailedStmt(rc = VERR_TOO_MANY_REFERENCES);
6366 }
6367 else
6368 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6369 supdrvLdrUnlock(pDevExt);
6370 }
6371 return rc;
6372}
6373
6374
6375/**
6376 * Releases a ring-0 module reference retained by SUPR0LdrModByName() or
6377 * SUPR0LdrModRetain().
6378 *
6379 * @returns VBox status code.
6380 * @param pSession The session that the module was retained in.
6381 * @param hMod The module handle. NULL is silently ignored.
6382 */
6383SUPR0DECL(int) SUPR0LdrModRelease(PSUPDRVSESSION pSession, void *hMod)
6384{
6385 PSUPDRVDEVEXT pDevExt;
6386 PSUPDRVLDRIMAGE pImage;
6387 int rc;
6388
6389 /*
6390 * Validate input.
6391 */
6392 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6393 if (!hMod)
6394 return VINF_SUCCESS;
6395 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6396 pImage = (PSUPDRVLDRIMAGE)hMod;
6397 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6398
6399 /*
6400 * Take the loader lock and revalidate the module:
6401 */
6402 pDevExt = pSession->pDevExt;
6403 rc = supdrvLdrLock(pDevExt);
6404 if (RT_SUCCESS(rc))
6405 {
6406 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6407 {
6408 /*
6409 * Find the usage record for the module:
6410 */
6411 PSUPDRVLDRUSAGE pPrevUsage = NULL;
6412 PSUPDRVLDRUSAGE pUsage;
6413
6414 rc = VERR_MODULE_NOT_FOUND;
6415 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6416 {
6417 if (pUsage->pImage == pImage)
6418 {
6419 /*
6420 * Drop a ring-0 reference:
6421 */
6422 Assert(pImage->cUsage >= pUsage->cRing0Usage + pUsage->cRing3Usage);
6423 if (pUsage->cRing0Usage > 0)
6424 {
6425 if (pImage->cUsage > 1)
6426 {
6427 pImage->cUsage -= 1;
6428 pUsage->cRing0Usage -= 1;
6429 rc = VINF_SUCCESS;
6430 }
6431 else
6432 {
6433 supdrvLdrFree(pDevExt, pImage);
6434
6435 if (pPrevUsage)
6436 pPrevUsage->pNext = pUsage->pNext;
6437 else
6438 pSession->pLdrUsage = pUsage->pNext;
6439 pUsage->pNext = NULL;
6440 pUsage->pImage = NULL;
6441 pUsage->cRing0Usage = 0;
6442 pUsage->cRing3Usage = 0;
6443 RTMemFree(pUsage);
6444
6445 rc = VINF_OBJECT_DESTROYED;
6446 }
6447 }
6448 else
6449 AssertFailedStmt(rc = VERR_CALLER_NO_REFERENCE);
6450 break;
6451 }
6452 pPrevUsage = pUsage;
6453 }
6454 }
6455 else
6456 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6457 supdrvLdrUnlock(pDevExt);
6458 }
6459 return rc;
6460
6461}
6462
6463
6464/**
6465 * Implements the service call request.
6466 *
6467 * @returns VBox status code.
6468 * @param pDevExt The device extension.
6469 * @param pSession The calling session.
6470 * @param pReq The request packet, valid.
6471 */
6472static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
6473{
6474#if !defined(RT_OS_WINDOWS) || defined(RT_ARCH_AMD64) || defined(DEBUG)
6475 int rc;
6476
6477 /*
6478 * Find the module first in the module referenced by the calling session.
6479 */
6480 rc = supdrvLdrLock(pDevExt);
6481 if (RT_SUCCESS(rc))
6482 {
6483 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
6484 PSUPDRVLDRUSAGE pUsage;
6485
6486 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6487 if ( pUsage->pImage->pfnServiceReqHandler
6488 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
6489 {
6490 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
6491 break;
6492 }
6493 supdrvLdrUnlock(pDevExt);
6494
6495 if (pfnServiceReqHandler)
6496 {
6497 /*
6498 * Call it.
6499 */
6500 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
6501 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
6502 else
6503 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
6504 }
6505 else
6506 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
6507 }
6508
6509 /* log it */
6510 if ( RT_FAILURE(rc)
6511 && rc != VERR_INTERRUPTED
6512 && rc != VERR_TIMEOUT)
6513 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6514 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6515 else
6516 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6517 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6518 return rc;
6519#else /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6520 RT_NOREF3(pDevExt, pSession, pReq);
6521 return VERR_NOT_IMPLEMENTED;
6522#endif /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6523}
6524
6525
6526/**
6527 * Implements the logger settings request.
6528 *
6529 * @returns VBox status code.
6530 * @param pReq The request.
6531 */
6532static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq)
6533{
6534 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
6535 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
6536 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
6537 PRTLOGGER pLogger = NULL;
6538 int rc;
6539
6540 /*
6541 * Some further validation.
6542 */
6543 switch (pReq->u.In.fWhat)
6544 {
6545 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6546 case SUPLOGGERSETTINGS_WHAT_CREATE:
6547 break;
6548
6549 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6550 if (*pszGroup || *pszFlags || *pszDest)
6551 return VERR_INVALID_PARAMETER;
6552 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
6553 return VERR_ACCESS_DENIED;
6554 break;
6555
6556 default:
6557 return VERR_INTERNAL_ERROR;
6558 }
6559
6560 /*
6561 * Get the logger.
6562 */
6563 switch (pReq->u.In.fWhich)
6564 {
6565 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6566 pLogger = RTLogGetDefaultInstance();
6567 break;
6568
6569 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6570 pLogger = RTLogRelGetDefaultInstance();
6571 break;
6572
6573 default:
6574 return VERR_INTERNAL_ERROR;
6575 }
6576
6577 /*
6578 * Do the job.
6579 */
6580 switch (pReq->u.In.fWhat)
6581 {
6582 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6583 if (pLogger)
6584 {
6585 rc = RTLogFlags(pLogger, pszFlags);
6586 if (RT_SUCCESS(rc))
6587 rc = RTLogGroupSettings(pLogger, pszGroup);
6588 NOREF(pszDest);
6589 }
6590 else
6591 rc = VERR_NOT_FOUND;
6592 break;
6593
6594 case SUPLOGGERSETTINGS_WHAT_CREATE:
6595 {
6596 if (pLogger)
6597 rc = VERR_ALREADY_EXISTS;
6598 else
6599 {
6600 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
6601
6602 rc = RTLogCreate(&pLogger,
6603 0 /* fFlags */,
6604 pszGroup,
6605 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
6606 ? "VBOX_LOG"
6607 : "VBOX_RELEASE_LOG",
6608 RT_ELEMENTS(s_apszGroups),
6609 s_apszGroups,
6610 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
6611 NULL);
6612 if (RT_SUCCESS(rc))
6613 {
6614 rc = RTLogFlags(pLogger, pszFlags);
6615 NOREF(pszDest);
6616 if (RT_SUCCESS(rc))
6617 {
6618 switch (pReq->u.In.fWhich)
6619 {
6620 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6621 pLogger = RTLogSetDefaultInstance(pLogger);
6622 break;
6623 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6624 pLogger = RTLogRelSetDefaultInstance(pLogger);
6625 break;
6626 }
6627 }
6628 RTLogDestroy(pLogger);
6629 }
6630 }
6631 break;
6632 }
6633
6634 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6635 switch (pReq->u.In.fWhich)
6636 {
6637 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6638 pLogger = RTLogSetDefaultInstance(NULL);
6639 break;
6640 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6641 pLogger = RTLogRelSetDefaultInstance(NULL);
6642 break;
6643 }
6644 rc = RTLogDestroy(pLogger);
6645 break;
6646
6647 default:
6648 {
6649 rc = VERR_INTERNAL_ERROR;
6650 break;
6651 }
6652 }
6653
6654 return rc;
6655}
6656
6657
6658/**
6659 * Implements the MSR prober operations.
6660 *
6661 * @returns VBox status code.
6662 * @param pDevExt The device extension.
6663 * @param pReq The request.
6664 */
6665static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq)
6666{
6667#ifdef SUPDRV_WITH_MSR_PROBER
6668 RTCPUID const idCpu = pReq->u.In.idCpu == UINT32_MAX ? NIL_RTCPUID : pReq->u.In.idCpu;
6669 int rc;
6670
6671 switch (pReq->u.In.enmOp)
6672 {
6673 case SUPMSRPROBEROP_READ:
6674 {
6675 uint64_t uValue;
6676 rc = supdrvOSMsrProberRead(pReq->u.In.uMsr, idCpu, &uValue);
6677 if (RT_SUCCESS(rc))
6678 {
6679 pReq->u.Out.uResults.Read.uValue = uValue;
6680 pReq->u.Out.uResults.Read.fGp = false;
6681 }
6682 else if (rc == VERR_ACCESS_DENIED)
6683 {
6684 pReq->u.Out.uResults.Read.uValue = 0;
6685 pReq->u.Out.uResults.Read.fGp = true;
6686 rc = VINF_SUCCESS;
6687 }
6688 break;
6689 }
6690
6691 case SUPMSRPROBEROP_WRITE:
6692 rc = supdrvOSMsrProberWrite(pReq->u.In.uMsr, idCpu, pReq->u.In.uArgs.Write.uToWrite);
6693 if (RT_SUCCESS(rc))
6694 pReq->u.Out.uResults.Write.fGp = false;
6695 else if (rc == VERR_ACCESS_DENIED)
6696 {
6697 pReq->u.Out.uResults.Write.fGp = true;
6698 rc = VINF_SUCCESS;
6699 }
6700 break;
6701
6702 case SUPMSRPROBEROP_MODIFY:
6703 case SUPMSRPROBEROP_MODIFY_FASTER:
6704 rc = supdrvOSMsrProberModify(idCpu, pReq);
6705 break;
6706
6707 default:
6708 return VERR_INVALID_FUNCTION;
6709 }
6710 RT_NOREF1(pDevExt);
6711 return rc;
6712#else
6713 RT_NOREF2(pDevExt, pReq);
6714 return VERR_NOT_IMPLEMENTED;
6715#endif
6716}
6717
6718
6719/**
6720 * Resume built-in keyboard on MacBook Air and Pro hosts.
6721 * If there is no built-in keyboard device, return success anyway.
6722 *
6723 * @returns 0 on Mac OS X platform, VERR_NOT_IMPLEMENTED on the other ones.
6724 */
6725static int supdrvIOCtl_ResumeSuspendedKbds(void)
6726{
6727#if defined(RT_OS_DARWIN)
6728 return supdrvDarwinResumeSuspendedKbds();
6729#else
6730 return VERR_NOT_IMPLEMENTED;
6731#endif
6732}
6733
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette