
source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp@87593

Last change on this file since 87593 was 87542, checked in by vboxsync, 4 years ago

SUPDrv,VMM/HM: Modified the representation of the 9 VT-x controls MSRs in SUPHWVIRTMSRS. bugref:9217

1/* $Id: SUPDrv.cpp 87542 2021-02-02 16:51:25Z vboxsync $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_SUP_DRV
32#define SUPDRV_AGNOSTIC
33#include "SUPDrvInternal.h"
34#ifndef PAGE_SHIFT
35# include <iprt/param.h>
36#endif
37#include <iprt/asm.h>
38#include <iprt/asm-amd64-x86.h>
39#include <iprt/asm-math.h>
40#include <iprt/cpuset.h>
41#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
42# include <iprt/dbg.h>
43#endif
44#include <iprt/handletable.h>
45#include <iprt/mem.h>
46#include <iprt/mp.h>
47#include <iprt/power.h>
48#include <iprt/process.h>
49#include <iprt/semaphore.h>
50#include <iprt/spinlock.h>
51#include <iprt/thread.h>
52#include <iprt/uuid.h>
53#include <iprt/net.h>
54#include <iprt/crc.h>
55#include <iprt/string.h>
56#include <iprt/timer.h>
57#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
58# include <iprt/rand.h>
59# include <iprt/path.h>
60#endif
61#include <iprt/uint128.h>
62#include <iprt/x86.h>
63
64#include <VBox/param.h>
65#include <VBox/log.h>
66#include <VBox/err.h>
67#include <VBox/vmm/hm_vmx.h>
68
69#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
70# include "dtrace/SUPDrv.h"
71#else
72# define VBOXDRV_SESSION_CREATE(pvSession, fUser) do { } while (0)
73# define VBOXDRV_SESSION_CLOSE(pvSession) do { } while (0)
74# define VBOXDRV_IOCTL_ENTRY(pvSession, uIOCtl, pvReqHdr) do { } while (0)
75# define VBOXDRV_IOCTL_RETURN(pvSession, uIOCtl, pvReqHdr, rcRet, rcReq) do { } while (0)
76#endif
77
78/*
79 * Logging assignments:
80 * Log - useful stuff, like failures.
81 * LogFlow - program flow, except the really noisy bits.
82 * Log2 - Cleanup.
83 * Log3 - Loader flow noise.
84 * Log4 - Call VMMR0 flow noise.
85 * Log5 - Native yet-to-be-defined noise.
86 * Log6 - Native ioctl flow noise.
87 *
88 * Logging requires KBUILD_TYPE=debug and possibly changes to the logger
89 * instantiation in log-vbox.c(pp).
90 */
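
/*
 * Illustrative example of the level assignments above (hypothetical call
 * sites, shown only as a sketch):
 *      LogFlow(("supdrvSomeFunction: pSession=%p\n", pSession));           - flow
 *      Log(("supdrvSomeFunction: RTSpinlockCreate failed: rc=%d\n", rc));  - failure
 *      Log2(("supdrvSomeFunction: freeing temporary buffers\n"));          - cleanup
 *      Log6(("supdrvSomeFunction: native ioctl %#x\n", uIOCtl));           - native ioctl noise
 */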
91
92
93/*********************************************************************************************************************************
94* Defined Constants And Macros *
95*********************************************************************************************************************************/
96/** @def VBOX_SVN_REV
97 * The makefile should define this if it can. */
98#ifndef VBOX_SVN_REV
99# define VBOX_SVN_REV 0
100#endif
101
102/** @def SUPDRV_CHECK_SMAP_SETUP
103 * SMAP check setup. */
104/** @def SUPDRV_CHECK_SMAP_CHECK
105 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
106 * will be logged and @a a_BadExpr is executed. */
107#if (defined(RT_OS_DARWIN) || defined(RT_OS_LINUX)) && !defined(VBOX_WITHOUT_EFLAGS_AC_SET_IN_VBOXDRV)
108# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
109# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) \
110 do { \
111 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
112 { \
113 RTCCUINTREG fEfl = ASMGetFlags(); \
114 if (RT_LIKELY(fEfl & X86_EFL_AC)) \
115 { /* likely */ } \
116 else \
117 { \
118 supdrvBadContext(a_pDevExt, "SUPDrv.cpp", __LINE__, "EFLAGS.AC is 0!"); \
119 a_BadExpr; \
120 } \
121 } \
122 } while (0)
123#else
124# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
125# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) NOREF(fKernelFeatures)
126#endif
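
/*
 * Illustrative sketch (compiled out): how the two macros above are meant to
 * be paired inside a driver entry point.  The function name and the status
 * code used for a_BadExpr are made up for this example.
 */
#if 0
static int supdrvExampleEntryPoint(PSUPDRVDEVEXT pDevExt)
{
    SUPDRV_CHECK_SMAP_SETUP();                                        /* capture the kernel feature flags */
    SUPDRV_CHECK_SMAP_CHECK(pDevExt, return VERR_INVALID_PARAMETER);  /* bail out if EFLAGS.AC was cleared */

    /* ... the actual work would go here ... */

    SUPDRV_CHECK_SMAP_CHECK(pDevExt, return VERR_INVALID_PARAMETER);  /* re-check before returning */
    return VINF_SUCCESS;
}
#endif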
127
128
129/*********************************************************************************************************************************
130* Internal Functions *
131*********************************************************************************************************************************/
132static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
133static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
134static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
135static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
136static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
137static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
138static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
139static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt);
140static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
141static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
142static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
143static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
144static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage);
145static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
146DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt);
147DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt);
148static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
149static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq);
150static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq);
151static int supdrvIOCtl_ResumeSuspendedKbds(void);
152
153
154/*********************************************************************************************************************************
155* Global Variables *
156*********************************************************************************************************************************/
157/**
158 * Array of the R0 SUP API.
159 *
160 * While making changes to these exports, make sure to update the IOC
161 * minor version (SUPDRV_IOC_VERSION).
162 *
163 * @remarks This array is processed by SUPR0-def-pe.sed and SUPR0-def-lx.sed to
164 * produce definition files from which import libraries are generated.
165 * Take care when commenting things and especially with \#ifdef'ing.
166 */
167static SUPFUNC g_aFunctions[] =
168{
169/* SED: START */
170 /* name function */
171 /* Entries with absolute addresses determined at runtime; the fixup
172 code makes ugly ASSUMPTIONS about the order here: */
173 { "SUPR0AbsIs64bit", (void *)0 },
174 { "SUPR0Abs64bitKernelCS", (void *)0 },
175 { "SUPR0Abs64bitKernelSS", (void *)0 },
176 { "SUPR0Abs64bitKernelDS", (void *)0 },
177 { "SUPR0AbsKernelCS", (void *)0 },
178 { "SUPR0AbsKernelSS", (void *)0 },
179 { "SUPR0AbsKernelDS", (void *)0 },
180 { "SUPR0AbsKernelES", (void *)0 },
181 { "SUPR0AbsKernelFS", (void *)0 },
182 { "SUPR0AbsKernelGS", (void *)0 },
183 /* Normal function pointers: */
184 { "g_pSUPGlobalInfoPage", (void *)&g_pSUPGlobalInfoPage }, /* SED: DATA */
185 { "SUPGetGIP", (void *)(uintptr_t)SUPGetGIP },
186 { "SUPReadTscWithDelta", (void *)(uintptr_t)SUPReadTscWithDelta },
187 { "SUPGetTscDeltaSlow", (void *)(uintptr_t)SUPGetTscDeltaSlow },
188 { "SUPGetCpuHzFromGipForAsyncMode", (void *)(uintptr_t)SUPGetCpuHzFromGipForAsyncMode },
189 { "SUPIsTscFreqCompatible", (void *)(uintptr_t)SUPIsTscFreqCompatible },
190 { "SUPIsTscFreqCompatibleEx", (void *)(uintptr_t)SUPIsTscFreqCompatibleEx },
191 { "SUPR0BadContext", (void *)(uintptr_t)SUPR0BadContext },
192 { "SUPR0ComponentDeregisterFactory", (void *)(uintptr_t)SUPR0ComponentDeregisterFactory },
193 { "SUPR0ComponentQueryFactory", (void *)(uintptr_t)SUPR0ComponentQueryFactory },
194 { "SUPR0ComponentRegisterFactory", (void *)(uintptr_t)SUPR0ComponentRegisterFactory },
195 { "SUPR0ContAlloc", (void *)(uintptr_t)SUPR0ContAlloc },
196 { "SUPR0ContFree", (void *)(uintptr_t)SUPR0ContFree },
197 { "SUPR0ChangeCR4", (void *)(uintptr_t)SUPR0ChangeCR4 },
198 { "SUPR0EnableVTx", (void *)(uintptr_t)SUPR0EnableVTx },
199 { "SUPR0SuspendVTxOnCpu", (void *)(uintptr_t)SUPR0SuspendVTxOnCpu },
200 { "SUPR0ResumeVTxOnCpu", (void *)(uintptr_t)SUPR0ResumeVTxOnCpu },
201 { "SUPR0GetCurrentGdtRw", (void *)(uintptr_t)SUPR0GetCurrentGdtRw },
202 { "SUPR0GetKernelFeatures", (void *)(uintptr_t)SUPR0GetKernelFeatures },
203 { "SUPR0GetHwvirtMsrs", (void *)(uintptr_t)SUPR0GetHwvirtMsrs },
204 { "SUPR0GetPagingMode", (void *)(uintptr_t)SUPR0GetPagingMode },
205 { "SUPR0GetSvmUsability", (void *)(uintptr_t)SUPR0GetSvmUsability },
206 { "SUPR0GetVTSupport", (void *)(uintptr_t)SUPR0GetVTSupport },
207 { "SUPR0GetVmxUsability", (void *)(uintptr_t)SUPR0GetVmxUsability },
208 { "SUPR0LdrIsLockOwnerByMod", (void *)(uintptr_t)SUPR0LdrIsLockOwnerByMod },
209 { "SUPR0LdrLock", (void *)(uintptr_t)SUPR0LdrLock },
210 { "SUPR0LdrUnlock", (void *)(uintptr_t)SUPR0LdrUnlock },
211 { "SUPR0LdrModByName", (void *)(uintptr_t)SUPR0LdrModByName },
212 { "SUPR0LdrModRelease", (void *)(uintptr_t)SUPR0LdrModRelease },
213 { "SUPR0LdrModRetain", (void *)(uintptr_t)SUPR0LdrModRetain },
214 { "SUPR0LockMem", (void *)(uintptr_t)SUPR0LockMem },
215 { "SUPR0LowAlloc", (void *)(uintptr_t)SUPR0LowAlloc },
216 { "SUPR0LowFree", (void *)(uintptr_t)SUPR0LowFree },
217 { "SUPR0MemAlloc", (void *)(uintptr_t)SUPR0MemAlloc },
218 { "SUPR0MemFree", (void *)(uintptr_t)SUPR0MemFree },
219 { "SUPR0MemGetPhys", (void *)(uintptr_t)SUPR0MemGetPhys },
220 { "SUPR0ObjAddRef", (void *)(uintptr_t)SUPR0ObjAddRef },
221 { "SUPR0ObjAddRefEx", (void *)(uintptr_t)SUPR0ObjAddRefEx },
222 { "SUPR0ObjRegister", (void *)(uintptr_t)SUPR0ObjRegister },
223 { "SUPR0ObjRelease", (void *)(uintptr_t)SUPR0ObjRelease },
224 { "SUPR0ObjVerifyAccess", (void *)(uintptr_t)SUPR0ObjVerifyAccess },
225 { "SUPR0PageAllocEx", (void *)(uintptr_t)SUPR0PageAllocEx },
226 { "SUPR0PageFree", (void *)(uintptr_t)SUPR0PageFree },
227 { "SUPR0PageMapKernel", (void *)(uintptr_t)SUPR0PageMapKernel },
228 { "SUPR0PageProtect", (void *)(uintptr_t)SUPR0PageProtect },
229#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
230 { "SUPR0HCPhysToVirt", (void *)(uintptr_t)SUPR0HCPhysToVirt }, /* only-linux, only solaris */
231#endif
232 { "SUPR0Printf", (void *)(uintptr_t)SUPR0Printf },
233 { "SUPR0GetSessionGVM", (void *)(uintptr_t)SUPR0GetSessionGVM },
234 { "SUPR0GetSessionVM", (void *)(uintptr_t)SUPR0GetSessionVM },
235 { "SUPR0SetSessionVM", (void *)(uintptr_t)SUPR0SetSessionVM },
236 { "SUPR0TscDeltaMeasureBySetIndex", (void *)(uintptr_t)SUPR0TscDeltaMeasureBySetIndex },
237 { "SUPR0TracerDeregisterDrv", (void *)(uintptr_t)SUPR0TracerDeregisterDrv },
238 { "SUPR0TracerDeregisterImpl", (void *)(uintptr_t)SUPR0TracerDeregisterImpl },
239 { "SUPR0TracerFireProbe", (void *)(uintptr_t)SUPR0TracerFireProbe },
240 { "SUPR0TracerRegisterDrv", (void *)(uintptr_t)SUPR0TracerRegisterDrv },
241 { "SUPR0TracerRegisterImpl", (void *)(uintptr_t)SUPR0TracerRegisterImpl },
242 { "SUPR0TracerRegisterModule", (void *)(uintptr_t)SUPR0TracerRegisterModule },
243 { "SUPR0TracerUmodProbeFire", (void *)(uintptr_t)SUPR0TracerUmodProbeFire },
244 { "SUPR0UnlockMem", (void *)(uintptr_t)SUPR0UnlockMem },
245#ifdef RT_OS_WINDOWS
246 { "SUPR0IoCtlSetupForHandle", (void *)(uintptr_t)SUPR0IoCtlSetupForHandle }, /* only-windows */
247 { "SUPR0IoCtlPerform", (void *)(uintptr_t)SUPR0IoCtlPerform }, /* only-windows */
248 { "SUPR0IoCtlCleanup", (void *)(uintptr_t)SUPR0IoCtlCleanup }, /* only-windows */
249#endif
250 { "SUPSemEventClose", (void *)(uintptr_t)SUPSemEventClose },
251 { "SUPSemEventCreate", (void *)(uintptr_t)SUPSemEventCreate },
252 { "SUPSemEventGetResolution", (void *)(uintptr_t)SUPSemEventGetResolution },
253 { "SUPSemEventMultiClose", (void *)(uintptr_t)SUPSemEventMultiClose },
254 { "SUPSemEventMultiCreate", (void *)(uintptr_t)SUPSemEventMultiCreate },
255 { "SUPSemEventMultiGetResolution", (void *)(uintptr_t)SUPSemEventMultiGetResolution },
256 { "SUPSemEventMultiReset", (void *)(uintptr_t)SUPSemEventMultiReset },
257 { "SUPSemEventMultiSignal", (void *)(uintptr_t)SUPSemEventMultiSignal },
258 { "SUPSemEventMultiWait", (void *)(uintptr_t)SUPSemEventMultiWait },
259 { "SUPSemEventMultiWaitNoResume", (void *)(uintptr_t)SUPSemEventMultiWaitNoResume },
260 { "SUPSemEventMultiWaitNsAbsIntr", (void *)(uintptr_t)SUPSemEventMultiWaitNsAbsIntr },
261 { "SUPSemEventMultiWaitNsRelIntr", (void *)(uintptr_t)SUPSemEventMultiWaitNsRelIntr },
262 { "SUPSemEventSignal", (void *)(uintptr_t)SUPSemEventSignal },
263 { "SUPSemEventWait", (void *)(uintptr_t)SUPSemEventWait },
264 { "SUPSemEventWaitNoResume", (void *)(uintptr_t)SUPSemEventWaitNoResume },
265 { "SUPSemEventWaitNsAbsIntr", (void *)(uintptr_t)SUPSemEventWaitNsAbsIntr },
266 { "SUPSemEventWaitNsRelIntr", (void *)(uintptr_t)SUPSemEventWaitNsRelIntr },
267
268 { "RTAssertAreQuiet", (void *)(uintptr_t)RTAssertAreQuiet },
269 { "RTAssertMayPanic", (void *)(uintptr_t)RTAssertMayPanic },
270 { "RTAssertMsg1", (void *)(uintptr_t)RTAssertMsg1 },
271 { "RTAssertMsg2AddV", (void *)(uintptr_t)RTAssertMsg2AddV },
272 { "RTAssertMsg2V", (void *)(uintptr_t)RTAssertMsg2V },
273 { "RTAssertSetMayPanic", (void *)(uintptr_t)RTAssertSetMayPanic },
274 { "RTAssertSetQuiet", (void *)(uintptr_t)RTAssertSetQuiet },
275 { "RTCrc32", (void *)(uintptr_t)RTCrc32 },
276 { "RTCrc32Finish", (void *)(uintptr_t)RTCrc32Finish },
277 { "RTCrc32Process", (void *)(uintptr_t)RTCrc32Process },
278 { "RTCrc32Start", (void *)(uintptr_t)RTCrc32Start },
279 { "RTErrConvertFromErrno", (void *)(uintptr_t)RTErrConvertFromErrno },
280 { "RTErrConvertToErrno", (void *)(uintptr_t)RTErrConvertToErrno },
281 { "RTHandleTableAllocWithCtx", (void *)(uintptr_t)RTHandleTableAllocWithCtx },
282 { "RTHandleTableCreate", (void *)(uintptr_t)RTHandleTableCreate },
283 { "RTHandleTableCreateEx", (void *)(uintptr_t)RTHandleTableCreateEx },
284 { "RTHandleTableDestroy", (void *)(uintptr_t)RTHandleTableDestroy },
285 { "RTHandleTableFreeWithCtx", (void *)(uintptr_t)RTHandleTableFreeWithCtx },
286 { "RTHandleTableLookupWithCtx", (void *)(uintptr_t)RTHandleTableLookupWithCtx },
287 { "RTLogDefaultInstance", (void *)(uintptr_t)RTLogDefaultInstance },
288 { "RTLogDefaultInstanceEx", (void *)(uintptr_t)RTLogDefaultInstanceEx },
289 { "RTLogGetDefaultInstance", (void *)(uintptr_t)RTLogGetDefaultInstance },
290 { "RTLogGetDefaultInstanceEx", (void *)(uintptr_t)RTLogGetDefaultInstanceEx },
291 { "SUPR0GetDefaultLogInstanceEx", (void *)(uintptr_t)SUPR0GetDefaultLogInstanceEx },
292 { "RTLogLoggerExV", (void *)(uintptr_t)RTLogLoggerExV },
293 { "RTLogPrintfV", (void *)(uintptr_t)RTLogPrintfV },
294 { "RTLogRelGetDefaultInstance", (void *)(uintptr_t)RTLogRelGetDefaultInstance },
295 { "RTLogRelGetDefaultInstanceEx", (void *)(uintptr_t)RTLogRelGetDefaultInstanceEx },
296 { "SUPR0GetDefaultLogRelInstanceEx", (void *)(uintptr_t)SUPR0GetDefaultLogRelInstanceEx },
297 { "RTLogSetDefaultInstanceThread", (void *)(uintptr_t)RTLogSetDefaultInstanceThread },
298 { "RTMemAllocExTag", (void *)(uintptr_t)RTMemAllocExTag },
299 { "RTMemAllocTag", (void *)(uintptr_t)RTMemAllocTag },
300 { "RTMemAllocVarTag", (void *)(uintptr_t)RTMemAllocVarTag },
301 { "RTMemAllocZTag", (void *)(uintptr_t)RTMemAllocZTag },
302 { "RTMemAllocZVarTag", (void *)(uintptr_t)RTMemAllocZVarTag },
303 { "RTMemDupExTag", (void *)(uintptr_t)RTMemDupExTag },
304 { "RTMemDupTag", (void *)(uintptr_t)RTMemDupTag },
305 { "RTMemFree", (void *)(uintptr_t)RTMemFree },
306 { "RTMemFreeEx", (void *)(uintptr_t)RTMemFreeEx },
307 { "RTMemReallocTag", (void *)(uintptr_t)RTMemReallocTag },
308 { "RTMpCpuId", (void *)(uintptr_t)RTMpCpuId },
309 { "RTMpCpuIdFromSetIndex", (void *)(uintptr_t)RTMpCpuIdFromSetIndex },
310 { "RTMpCpuIdToSetIndex", (void *)(uintptr_t)RTMpCpuIdToSetIndex },
311 { "RTMpCurSetIndex", (void *)(uintptr_t)RTMpCurSetIndex },
312 { "RTMpCurSetIndexAndId", (void *)(uintptr_t)RTMpCurSetIndexAndId },
313 { "RTMpGetArraySize", (void *)(uintptr_t)RTMpGetArraySize },
314 { "RTMpGetCount", (void *)(uintptr_t)RTMpGetCount },
315 { "RTMpGetMaxCpuId", (void *)(uintptr_t)RTMpGetMaxCpuId },
316 { "RTMpGetOnlineCount", (void *)(uintptr_t)RTMpGetOnlineCount },
317 { "RTMpGetOnlineSet", (void *)(uintptr_t)RTMpGetOnlineSet },
318 { "RTMpGetSet", (void *)(uintptr_t)RTMpGetSet },
319 { "RTMpIsCpuOnline", (void *)(uintptr_t)RTMpIsCpuOnline },
320 { "RTMpIsCpuPossible", (void *)(uintptr_t)RTMpIsCpuPossible },
321 { "RTMpIsCpuWorkPending", (void *)(uintptr_t)RTMpIsCpuWorkPending },
322 { "RTMpNotificationDeregister", (void *)(uintptr_t)RTMpNotificationDeregister },
323 { "RTMpNotificationRegister", (void *)(uintptr_t)RTMpNotificationRegister },
324 { "RTMpOnAll", (void *)(uintptr_t)RTMpOnAll },
325 { "RTMpOnOthers", (void *)(uintptr_t)RTMpOnOthers },
326 { "RTMpOnSpecific", (void *)(uintptr_t)RTMpOnSpecific },
327 { "RTMpPokeCpu", (void *)(uintptr_t)RTMpPokeCpu },
328 { "RTNetIPv4AddDataChecksum", (void *)(uintptr_t)RTNetIPv4AddDataChecksum },
329 { "RTNetIPv4AddTCPChecksum", (void *)(uintptr_t)RTNetIPv4AddTCPChecksum },
330 { "RTNetIPv4AddUDPChecksum", (void *)(uintptr_t)RTNetIPv4AddUDPChecksum },
331 { "RTNetIPv4FinalizeChecksum", (void *)(uintptr_t)RTNetIPv4FinalizeChecksum },
332 { "RTNetIPv4HdrChecksum", (void *)(uintptr_t)RTNetIPv4HdrChecksum },
333 { "RTNetIPv4IsDHCPValid", (void *)(uintptr_t)RTNetIPv4IsDHCPValid },
334 { "RTNetIPv4IsHdrValid", (void *)(uintptr_t)RTNetIPv4IsHdrValid },
335 { "RTNetIPv4IsTCPSizeValid", (void *)(uintptr_t)RTNetIPv4IsTCPSizeValid },
336 { "RTNetIPv4IsTCPValid", (void *)(uintptr_t)RTNetIPv4IsTCPValid },
337 { "RTNetIPv4IsUDPSizeValid", (void *)(uintptr_t)RTNetIPv4IsUDPSizeValid },
338 { "RTNetIPv4IsUDPValid", (void *)(uintptr_t)RTNetIPv4IsUDPValid },
339 { "RTNetIPv4PseudoChecksum", (void *)(uintptr_t)RTNetIPv4PseudoChecksum },
340 { "RTNetIPv4PseudoChecksumBits", (void *)(uintptr_t)RTNetIPv4PseudoChecksumBits },
341 { "RTNetIPv4TCPChecksum", (void *)(uintptr_t)RTNetIPv4TCPChecksum },
342 { "RTNetIPv4UDPChecksum", (void *)(uintptr_t)RTNetIPv4UDPChecksum },
343 { "RTNetIPv6PseudoChecksum", (void *)(uintptr_t)RTNetIPv6PseudoChecksum },
344 { "RTNetIPv6PseudoChecksumBits", (void *)(uintptr_t)RTNetIPv6PseudoChecksumBits },
345 { "RTNetIPv6PseudoChecksumEx", (void *)(uintptr_t)RTNetIPv6PseudoChecksumEx },
346 { "RTNetTCPChecksum", (void *)(uintptr_t)RTNetTCPChecksum },
347 { "RTNetUDPChecksum", (void *)(uintptr_t)RTNetUDPChecksum },
348 { "RTPowerNotificationDeregister", (void *)(uintptr_t)RTPowerNotificationDeregister },
349 { "RTPowerNotificationRegister", (void *)(uintptr_t)RTPowerNotificationRegister },
350 { "RTProcSelf", (void *)(uintptr_t)RTProcSelf },
351 { "RTR0AssertPanicSystem", (void *)(uintptr_t)RTR0AssertPanicSystem },
352#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_WINDOWS)
353 { "RTR0DbgKrnlInfoOpen", (void *)(uintptr_t)RTR0DbgKrnlInfoOpen }, /* only-darwin, only-solaris, only-windows */
354 { "RTR0DbgKrnlInfoQueryMember", (void *)(uintptr_t)RTR0DbgKrnlInfoQueryMember }, /* only-darwin, only-solaris, only-windows */
355# if defined(RT_OS_SOLARIS)
356 { "RTR0DbgKrnlInfoQuerySize", (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySize }, /* only-solaris */
357# endif
358 { "RTR0DbgKrnlInfoQuerySymbol", (void *)(uintptr_t)RTR0DbgKrnlInfoQuerySymbol }, /* only-darwin, only-solaris, only-windows */
359 { "RTR0DbgKrnlInfoRelease", (void *)(uintptr_t)RTR0DbgKrnlInfoRelease }, /* only-darwin, only-solaris, only-windows */
360 { "RTR0DbgKrnlInfoRetain", (void *)(uintptr_t)RTR0DbgKrnlInfoRetain }, /* only-darwin, only-solaris, only-windows */
361#endif
362 { "RTR0MemAreKrnlAndUsrDifferent", (void *)(uintptr_t)RTR0MemAreKrnlAndUsrDifferent },
363 { "RTR0MemKernelIsValidAddr", (void *)(uintptr_t)RTR0MemKernelIsValidAddr },
364 { "RTR0MemKernelCopyFrom", (void *)(uintptr_t)RTR0MemKernelCopyFrom },
365 { "RTR0MemKernelCopyTo", (void *)(uintptr_t)RTR0MemKernelCopyTo },
366 { "RTR0MemObjAddress", (void *)(uintptr_t)RTR0MemObjAddress },
367 { "RTR0MemObjAddressR3", (void *)(uintptr_t)RTR0MemObjAddressR3 },
368 { "RTR0MemObjAllocContTag", (void *)(uintptr_t)RTR0MemObjAllocContTag },
369 { "RTR0MemObjAllocLowTag", (void *)(uintptr_t)RTR0MemObjAllocLowTag },
370 { "RTR0MemObjAllocPageTag", (void *)(uintptr_t)RTR0MemObjAllocPageTag },
371 { "RTR0MemObjAllocPhysExTag", (void *)(uintptr_t)RTR0MemObjAllocPhysExTag },
372 { "RTR0MemObjAllocPhysNCTag", (void *)(uintptr_t)RTR0MemObjAllocPhysNCTag },
373 { "RTR0MemObjAllocPhysTag", (void *)(uintptr_t)RTR0MemObjAllocPhysTag },
374 { "RTR0MemObjEnterPhysTag", (void *)(uintptr_t)RTR0MemObjEnterPhysTag },
375 { "RTR0MemObjFree", (void *)(uintptr_t)RTR0MemObjFree },
376 { "RTR0MemObjGetPagePhysAddr", (void *)(uintptr_t)RTR0MemObjGetPagePhysAddr },
377 { "RTR0MemObjIsMapping", (void *)(uintptr_t)RTR0MemObjIsMapping },
378 { "RTR0MemObjLockUserTag", (void *)(uintptr_t)RTR0MemObjLockUserTag },
379 { "RTR0MemObjMapKernelExTag", (void *)(uintptr_t)RTR0MemObjMapKernelExTag },
380 { "RTR0MemObjMapKernelTag", (void *)(uintptr_t)RTR0MemObjMapKernelTag },
381 { "RTR0MemObjMapUserTag", (void *)(uintptr_t)RTR0MemObjMapUserTag },
382 { "RTR0MemObjMapUserExTag", (void *)(uintptr_t)RTR0MemObjMapUserExTag },
383 { "RTR0MemObjProtect", (void *)(uintptr_t)RTR0MemObjProtect },
384 { "RTR0MemObjSize", (void *)(uintptr_t)RTR0MemObjSize },
385 { "RTR0MemUserCopyFrom", (void *)(uintptr_t)RTR0MemUserCopyFrom },
386 { "RTR0MemUserCopyTo", (void *)(uintptr_t)RTR0MemUserCopyTo },
387 { "RTR0MemUserIsValidAddr", (void *)(uintptr_t)RTR0MemUserIsValidAddr },
388 { "RTR0ProcHandleSelf", (void *)(uintptr_t)RTR0ProcHandleSelf },
389 { "RTSemEventCreate", (void *)(uintptr_t)RTSemEventCreate },
390 { "RTSemEventDestroy", (void *)(uintptr_t)RTSemEventDestroy },
391 { "RTSemEventGetResolution", (void *)(uintptr_t)RTSemEventGetResolution },
392 { "RTSemEventMultiCreate", (void *)(uintptr_t)RTSemEventMultiCreate },
393 { "RTSemEventMultiDestroy", (void *)(uintptr_t)RTSemEventMultiDestroy },
394 { "RTSemEventMultiGetResolution", (void *)(uintptr_t)RTSemEventMultiGetResolution },
395 { "RTSemEventMultiReset", (void *)(uintptr_t)RTSemEventMultiReset },
396 { "RTSemEventMultiSignal", (void *)(uintptr_t)RTSemEventMultiSignal },
397 { "RTSemEventMultiWait", (void *)(uintptr_t)RTSemEventMultiWait },
398 { "RTSemEventMultiWaitEx", (void *)(uintptr_t)RTSemEventMultiWaitEx },
399 { "RTSemEventMultiWaitExDebug", (void *)(uintptr_t)RTSemEventMultiWaitExDebug },
400 { "RTSemEventMultiWaitNoResume", (void *)(uintptr_t)RTSemEventMultiWaitNoResume },
401 { "RTSemEventSignal", (void *)(uintptr_t)RTSemEventSignal },
402 { "RTSemEventWait", (void *)(uintptr_t)RTSemEventWait },
403 { "RTSemEventWaitEx", (void *)(uintptr_t)RTSemEventWaitEx },
404 { "RTSemEventWaitExDebug", (void *)(uintptr_t)RTSemEventWaitExDebug },
405 { "RTSemEventWaitNoResume", (void *)(uintptr_t)RTSemEventWaitNoResume },
406 { "RTSemFastMutexCreate", (void *)(uintptr_t)RTSemFastMutexCreate },
407 { "RTSemFastMutexDestroy", (void *)(uintptr_t)RTSemFastMutexDestroy },
408 { "RTSemFastMutexRelease", (void *)(uintptr_t)RTSemFastMutexRelease },
409 { "RTSemFastMutexRequest", (void *)(uintptr_t)RTSemFastMutexRequest },
410 { "RTSemMutexCreate", (void *)(uintptr_t)RTSemMutexCreate },
411 { "RTSemMutexDestroy", (void *)(uintptr_t)RTSemMutexDestroy },
412 { "RTSemMutexRelease", (void *)(uintptr_t)RTSemMutexRelease },
413 { "RTSemMutexRequest", (void *)(uintptr_t)RTSemMutexRequest },
414 { "RTSemMutexRequestDebug", (void *)(uintptr_t)RTSemMutexRequestDebug },
415 { "RTSemMutexRequestNoResume", (void *)(uintptr_t)RTSemMutexRequestNoResume },
416 { "RTSemMutexRequestNoResumeDebug", (void *)(uintptr_t)RTSemMutexRequestNoResumeDebug },
417 { "RTSpinlockAcquire", (void *)(uintptr_t)RTSpinlockAcquire },
418 { "RTSpinlockCreate", (void *)(uintptr_t)RTSpinlockCreate },
419 { "RTSpinlockDestroy", (void *)(uintptr_t)RTSpinlockDestroy },
420 { "RTSpinlockRelease", (void *)(uintptr_t)RTSpinlockRelease },
421 { "RTStrCopy", (void *)(uintptr_t)RTStrCopy },
422 { "RTStrDupTag", (void *)(uintptr_t)RTStrDupTag },
423 { "RTStrFormat", (void *)(uintptr_t)RTStrFormat },
424 { "RTStrFormatNumber", (void *)(uintptr_t)RTStrFormatNumber },
425 { "RTStrFormatTypeDeregister", (void *)(uintptr_t)RTStrFormatTypeDeregister },
426 { "RTStrFormatTypeRegister", (void *)(uintptr_t)RTStrFormatTypeRegister },
427 { "RTStrFormatTypeSetUser", (void *)(uintptr_t)RTStrFormatTypeSetUser },
428 { "RTStrFormatV", (void *)(uintptr_t)RTStrFormatV },
429 { "RTStrFree", (void *)(uintptr_t)RTStrFree },
430 { "RTStrNCmp", (void *)(uintptr_t)RTStrNCmp },
431 { "RTStrPrintf", (void *)(uintptr_t)RTStrPrintf },
432 { "RTStrPrintfEx", (void *)(uintptr_t)RTStrPrintfEx },
433 { "RTStrPrintfExV", (void *)(uintptr_t)RTStrPrintfExV },
434 { "RTStrPrintfV", (void *)(uintptr_t)RTStrPrintfV },
435 { "RTThreadCreate", (void *)(uintptr_t)RTThreadCreate },
436 { "RTThreadCtxHookIsEnabled", (void *)(uintptr_t)RTThreadCtxHookIsEnabled },
437 { "RTThreadCtxHookCreate", (void *)(uintptr_t)RTThreadCtxHookCreate },
438 { "RTThreadCtxHookDestroy", (void *)(uintptr_t)RTThreadCtxHookDestroy },
439 { "RTThreadCtxHookDisable", (void *)(uintptr_t)RTThreadCtxHookDisable },
440 { "RTThreadCtxHookEnable", (void *)(uintptr_t)RTThreadCtxHookEnable },
441 { "RTThreadGetName", (void *)(uintptr_t)RTThreadGetName },
442 { "RTThreadGetNative", (void *)(uintptr_t)RTThreadGetNative },
443 { "RTThreadGetType", (void *)(uintptr_t)RTThreadGetType },
444 { "RTThreadIsInInterrupt", (void *)(uintptr_t)RTThreadIsInInterrupt },
445 { "RTThreadNativeSelf", (void *)(uintptr_t)RTThreadNativeSelf },
446 { "RTThreadPreemptDisable", (void *)(uintptr_t)RTThreadPreemptDisable },
447 { "RTThreadPreemptIsEnabled", (void *)(uintptr_t)RTThreadPreemptIsEnabled },
448 { "RTThreadPreemptIsPending", (void *)(uintptr_t)RTThreadPreemptIsPending },
449 { "RTThreadPreemptIsPendingTrusty", (void *)(uintptr_t)RTThreadPreemptIsPendingTrusty },
450 { "RTThreadPreemptIsPossible", (void *)(uintptr_t)RTThreadPreemptIsPossible },
451 { "RTThreadPreemptRestore", (void *)(uintptr_t)RTThreadPreemptRestore },
452 { "RTThreadSelf", (void *)(uintptr_t)RTThreadSelf },
453 { "RTThreadSelfName", (void *)(uintptr_t)RTThreadSelfName },
454 { "RTThreadSleep", (void *)(uintptr_t)RTThreadSleep },
455 { "RTThreadUserReset", (void *)(uintptr_t)RTThreadUserReset },
456 { "RTThreadUserSignal", (void *)(uintptr_t)RTThreadUserSignal },
457 { "RTThreadUserWait", (void *)(uintptr_t)RTThreadUserWait },
458 { "RTThreadUserWaitNoResume", (void *)(uintptr_t)RTThreadUserWaitNoResume },
459 { "RTThreadWait", (void *)(uintptr_t)RTThreadWait },
460 { "RTThreadWaitNoResume", (void *)(uintptr_t)RTThreadWaitNoResume },
461 { "RTThreadYield", (void *)(uintptr_t)RTThreadYield },
462 { "RTTimeNow", (void *)(uintptr_t)RTTimeNow },
463 { "RTTimerCanDoHighResolution", (void *)(uintptr_t)RTTimerCanDoHighResolution },
464 { "RTTimerChangeInterval", (void *)(uintptr_t)RTTimerChangeInterval },
465 { "RTTimerCreate", (void *)(uintptr_t)RTTimerCreate },
466 { "RTTimerCreateEx", (void *)(uintptr_t)RTTimerCreateEx },
467 { "RTTimerDestroy", (void *)(uintptr_t)RTTimerDestroy },
468 { "RTTimerGetSystemGranularity", (void *)(uintptr_t)RTTimerGetSystemGranularity },
469 { "RTTimerReleaseSystemGranularity", (void *)(uintptr_t)RTTimerReleaseSystemGranularity },
470 { "RTTimerRequestSystemGranularity", (void *)(uintptr_t)RTTimerRequestSystemGranularity },
471 { "RTTimerStart", (void *)(uintptr_t)RTTimerStart },
472 { "RTTimerStop", (void *)(uintptr_t)RTTimerStop },
473 { "RTTimeSystemMilliTS", (void *)(uintptr_t)RTTimeSystemMilliTS },
474 { "RTTimeSystemNanoTS", (void *)(uintptr_t)RTTimeSystemNanoTS },
475 { "RTUuidCompare", (void *)(uintptr_t)RTUuidCompare },
476 { "RTUuidCompareStr", (void *)(uintptr_t)RTUuidCompareStr },
477 { "RTUuidFromStr", (void *)(uintptr_t)RTUuidFromStr },
478/* SED: END */
479};
480
481#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
482/**
483 * Drag in the rest of IPRT since we share it with the
484 * rest of the kernel modules on darwin.
485 */
486struct CLANG11WERIDNESS { PFNRT pfn; } g_apfnVBoxDrvIPRTDeps[] =
487{
488 /* VBoxNetAdp */
489 { (PFNRT)RTRandBytes },
490 /* VBoxUSB */
491 { (PFNRT)RTPathStripFilename },
492#if !defined(RT_OS_FREEBSD)
493 { (PFNRT)RTHandleTableAlloc },
494 { (PFNRT)RTStrPurgeEncoding },
495#endif
496 { NULL }
497};
498#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
499
500
501
502/**
503 * Initializes the device extension structure.
504 *
505 * @returns IPRT status code.
506 * @param pDevExt The device extension to initialize.
507 * @param cbSession The size of the session structure. The size of
508 * SUPDRVSESSION may be smaller when SUPDRV_AGNOSTIC is
509 * defined because we're skipping the OS specific members
510 * then.
511 */
512int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt, size_t cbSession)
513{
514 int rc;
515
516#ifdef SUPDRV_WITH_RELEASE_LOGGER
517 /*
518 * Create the release log.
519 */
520 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
521 PRTLOGGER pRelLogger;
522 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
523 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
524 if (RT_SUCCESS(rc))
525 RTLogRelSetDefaultInstance(pRelLogger);
526 /** @todo Add native hook for getting logger config parameters and setting
527 * them. On linux we should use the module parameter stuff... */
528#endif
529
530#if (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)) && !defined(VBOX_WITH_OLD_CPU_SUPPORT)
531 /*
532 * Require SSE2 to be present.
533 */
534 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2))
535 {
536 SUPR0Printf("vboxdrv: Requires SSE2 (cpuid(0).EDX=%#x)\n", ASMCpuId_EDX(1));
537 return VERR_UNSUPPORTED_CPU;
538 }
539#endif
540
541 /*
542 * Initialize it.
543 */
544 memset(pDevExt, 0, sizeof(*pDevExt)); /* Does not wipe OS specific tail section of the structure. */
545 pDevExt->Spinlock = NIL_RTSPINLOCK;
546 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
547 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
548#ifdef SUPDRV_USE_MUTEX_FOR_LDR
549 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
550#else
551 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
552#endif
553#ifdef SUPDRV_USE_MUTEX_FOR_GIP
554 pDevExt->mtxGip = NIL_RTSEMMUTEX;
555 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
556#else
557 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
558 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
559#endif
560
561 rc = RTSpinlockCreate(&pDevExt->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvDevExt");
562 if (RT_SUCCESS(rc))
563 rc = RTSpinlockCreate(&pDevExt->hGipSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvGip");
564 if (RT_SUCCESS(rc))
565 rc = RTSpinlockCreate(&pDevExt->hSessionHashTabSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvSession");
566
567 if (RT_SUCCESS(rc))
568#ifdef SUPDRV_USE_MUTEX_FOR_LDR
569 rc = RTSemMutexCreate(&pDevExt->mtxLdr);
570#else
571 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
572#endif
573 if (RT_SUCCESS(rc))
574#ifdef SUPDRV_USE_MUTEX_FOR_GIP
575 rc = RTSemMutexCreate(&pDevExt->mtxTscDelta);
576#else
577 rc = RTSemFastMutexCreate(&pDevExt->mtxTscDelta);
578#endif
579 if (RT_SUCCESS(rc))
580 {
581 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
582 if (RT_SUCCESS(rc))
583 {
584#ifdef SUPDRV_USE_MUTEX_FOR_GIP
585 rc = RTSemMutexCreate(&pDevExt->mtxGip);
586#else
587 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
588#endif
589 if (RT_SUCCESS(rc))
590 {
591 rc = supdrvGipCreate(pDevExt);
592 if (RT_SUCCESS(rc))
593 {
594 rc = supdrvTracerInit(pDevExt);
595 if (RT_SUCCESS(rc))
596 {
597 pDevExt->pLdrInitImage = NULL;
598 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
599 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
600 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
601 pDevExt->cbSession = (uint32_t)cbSession;
602
603 /*
604 * Fixup the absolute symbols.
605 *
606 * Because of the table indexing assumptions we'll have a little #ifdef orgy
607 * here rather than distributing this to OS specific files. At least for now.
608 */
609#ifdef RT_OS_DARWIN
610# if ARCH_BITS == 32
611 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
612 {
613 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
614 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
615 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
616 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
617 }
618 else
619 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
620 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
621 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
622 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
623 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
624 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
625 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
626# else /* 64-bit darwin: */
627 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
628 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
629 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
630 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
631 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
632 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
633 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
634 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
635 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
636 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
637
638# endif
639#else /* !RT_OS_DARWIN */
640# if ARCH_BITS == 64
641 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
642 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
643 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
644 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
645# else
646 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
647# endif
648 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
649 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
650 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
651 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
652 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
653 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
654#endif /* !RT_OS_DARWIN */
655 return VINF_SUCCESS;
656 }
657
658 supdrvGipDestroy(pDevExt);
659 }
660
661#ifdef SUPDRV_USE_MUTEX_FOR_GIP
662 RTSemMutexDestroy(pDevExt->mtxGip);
663 pDevExt->mtxGip = NIL_RTSEMMUTEX;
664#else
665 RTSemFastMutexDestroy(pDevExt->mtxGip);
666 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
667#endif
668 }
669 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
670 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
671 }
672 }
673
674#ifdef SUPDRV_USE_MUTEX_FOR_GIP
675 RTSemMutexDestroy(pDevExt->mtxTscDelta);
676 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
677#else
678 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
679 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
680#endif
681#ifdef SUPDRV_USE_MUTEX_FOR_LDR
682 RTSemMutexDestroy(pDevExt->mtxLdr);
683 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
684#else
685 RTSemFastMutexDestroy(pDevExt->mtxLdr);
686 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
687#endif
688 RTSpinlockDestroy(pDevExt->Spinlock);
689 pDevExt->Spinlock = NIL_RTSPINLOCK;
690 RTSpinlockDestroy(pDevExt->hGipSpinlock);
691 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
692 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
693 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
694
695#ifdef SUPDRV_WITH_RELEASE_LOGGER
696 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
697 RTLogDestroy(RTLogSetDefaultInstance(NULL));
698#endif
699
700 return rc;
701}
702
703
704/**
705 * Delete the device extension (e.g. cleanup members).
706 *
707 * @param pDevExt The device extension to delete.
708 */
709void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
710{
711 PSUPDRVOBJ pObj;
712 PSUPDRVUSAGE pUsage;
713
714 /*
715 * Kill mutexes and spinlocks.
716 */
717#ifdef SUPDRV_USE_MUTEX_FOR_GIP
718 RTSemMutexDestroy(pDevExt->mtxGip);
719 pDevExt->mtxGip = NIL_RTSEMMUTEX;
720 RTSemMutexDestroy(pDevExt->mtxTscDelta);
721 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
722#else
723 RTSemFastMutexDestroy(pDevExt->mtxGip);
724 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
725 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
726 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
727#endif
728#ifdef SUPDRV_USE_MUTEX_FOR_LDR
729 RTSemMutexDestroy(pDevExt->mtxLdr);
730 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
731#else
732 RTSemFastMutexDestroy(pDevExt->mtxLdr);
733 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
734#endif
735 RTSpinlockDestroy(pDevExt->Spinlock);
736 pDevExt->Spinlock = NIL_RTSPINLOCK;
737 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
738 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
739 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
740 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
741
742 /*
743 * Free lists.
744 */
745 /* objects. */
746 pObj = pDevExt->pObjs;
747 Assert(!pObj); /* (can trigger on forced unloads) */
748 pDevExt->pObjs = NULL;
749 while (pObj)
750 {
751 void *pvFree = pObj;
752 pObj = pObj->pNext;
753 RTMemFree(pvFree);
754 }
755
756 /* usage records. */
757 pUsage = pDevExt->pUsageFree;
758 pDevExt->pUsageFree = NULL;
759 while (pUsage)
760 {
761 void *pvFree = pUsage;
762 pUsage = pUsage->pNext;
763 RTMemFree(pvFree);
764 }
765
766 /* kill the GIP. */
767 supdrvGipDestroy(pDevExt);
768 RTSpinlockDestroy(pDevExt->hGipSpinlock);
769 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
770
771 supdrvTracerTerm(pDevExt);
772
773#ifdef SUPDRV_WITH_RELEASE_LOGGER
774 /* destroy the loggers. */
775 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
776 RTLogDestroy(RTLogSetDefaultInstance(NULL));
777#endif
778}
779
780
781/**
782 * Create session.
783 *
784 * @returns IPRT status code.
785 * @param pDevExt Device extension.
786 * @param fUser Flag indicating whether this is a user or kernel
787 * session.
788 * @param fUnrestricted Unrestricted access (system) or restricted access
789 * (user)?
790 * @param ppSession Where to store the pointer to the session data.
791 */
792int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, bool fUnrestricted, PSUPDRVSESSION *ppSession)
793{
794 int rc;
795 PSUPDRVSESSION pSession;
796
797 if (!SUP_IS_DEVEXT_VALID(pDevExt))
798 return VERR_INVALID_PARAMETER;
799
800 /*
801 * Allocate memory for the session data.
802 */
803 pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(pDevExt->cbSession);
804 if (pSession)
805 {
806 /* Initialize session data. */
807 rc = RTSpinlockCreate(&pSession->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "SUPDrvSession");
808 if (!rc)
809 {
810 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
811 RTHANDLETABLE_FLAGS_LOCKED_IRQ_SAFE | RTHANDLETABLE_FLAGS_CONTEXT,
812 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
813 if (RT_SUCCESS(rc))
814 {
815 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
816 pSession->pDevExt = pDevExt;
817 pSession->u32Cookie = BIRD_INV;
818 pSession->fUnrestricted = fUnrestricted;
819 /*pSession->fInHashTable = false; */
820 pSession->cRefs = 1;
821 /*pSession->pCommonNextHash = NULL;
822 pSession->ppOsSessionPtr = NULL; */
823 if (fUser)
824 {
825 pSession->Process = RTProcSelf();
826 pSession->R0Process = RTR0ProcHandleSelf();
827 }
828 else
829 {
830 pSession->Process = NIL_RTPROCESS;
831 pSession->R0Process = NIL_RTR0PROCESS;
832 }
833 /*pSession->pLdrUsage = NULL;
834 pSession->pVM = NULL;
835 pSession->pUsage = NULL;
836 pSession->pGip = NULL;
837 pSession->fGipReferenced = false;
838 pSession->Bundle.cUsed = 0; */
839 pSession->Uid = NIL_RTUID;
840 pSession->Gid = NIL_RTGID;
841 /*pSession->uTracerData = 0;*/
842 pSession->hTracerCaller = NIL_RTNATIVETHREAD;
843 RTListInit(&pSession->TpProviders);
844 /*pSession->cTpProviders = 0;*/
845 /*pSession->cTpProbesFiring = 0;*/
846 RTListInit(&pSession->TpUmods);
847 /*RT_ZERO(pSession->apTpLookupTable);*/
848
849 VBOXDRV_SESSION_CREATE(pSession, fUser);
850 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
851 return VINF_SUCCESS;
852 }
853
854 RTSpinlockDestroy(pSession->Spinlock);
855 }
856 RTMemFree(pSession);
857 *ppSession = NULL;
858 Log(("Failed to create spinlock, rc=%d!\n", rc));
859 }
860 else
861 rc = VERR_NO_MEMORY;
862
863 return rc;
864}
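
/*
 * Illustrative sketch (compiled out): the calling pattern the OS specific
 * device-open/close code is expected to follow, i.e. create the session on
 * open and drop the initial reference via supdrvSessionRelease when the
 * handle is closed.  The supdrvExample* names are made up for this example.
 */
#if 0
static int supdrvExampleDeviceOpen(PSUPDRVDEVEXT pDevExt, bool fUnrestricted, PSUPDRVSESSION *ppSession)
{
    int rc = supdrvCreateSession(pDevExt, true /*fUser*/, fUnrestricted, ppSession);
    if (RT_SUCCESS(rc))
    {
        /* ... associate *ppSession with the OS specific file/device context ... */
    }
    return rc;
}

static void supdrvExampleDeviceClose(PSUPDRVSESSION pSession)
{
    supdrvSessionRelease(pSession); /* drops the initial reference taken in supdrvCreateSession */
}
#endif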
865
866
867/**
868 * Cleans up the session in the context of the process to which it belongs; the
869 * caller will free the session and the session spinlock.
870 *
871 * This should normally occur when the session is closed or as the process
872 * exits. Careful reference counting in the OS specific code makes sure that
873 * there cannot be any races between process/handle cleanup callbacks and
874 * threads doing I/O control calls.
875 *
876 * @param pDevExt The device extension.
877 * @param pSession Session data.
878 */
879static void supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
880{
881 int rc;
882 PSUPDRVBUNDLE pBundle;
883 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
884
885 Assert(!pSession->fInHashTable);
886 Assert(!pSession->ppOsSessionPtr);
887 AssertLogRelMsg(pSession->R0Process == RTR0ProcHandleSelf() || pSession->R0Process == NIL_RTR0PROCESS,
888 ("R0Process=%p cur=%p; curpid=%u\n",
889 pSession->R0Process, RTR0ProcHandleSelf(), RTProcSelf()));
890
891 /*
892 * Remove logger instances related to this session.
893 */
894 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
895
896 /*
897 * Destroy the handle table.
898 */
899 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
900 AssertRC(rc);
901 pSession->hHandleTable = NIL_RTHANDLETABLE;
902
903 /*
904 * Release object references made in this session.
905 * In theory there should be no one racing us in this session.
906 */
907 Log2(("release objects - start\n"));
908 if (pSession->pUsage)
909 {
910 PSUPDRVUSAGE pUsage;
911 RTSpinlockAcquire(pDevExt->Spinlock);
912
913 while ((pUsage = pSession->pUsage) != NULL)
914 {
915 PSUPDRVOBJ pObj = pUsage->pObj;
916 pSession->pUsage = pUsage->pNext;
917
918 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
919 if (pUsage->cUsage < pObj->cUsage)
920 {
921 pObj->cUsage -= pUsage->cUsage;
922 RTSpinlockRelease(pDevExt->Spinlock);
923 }
924 else
925 {
926 /* Destroy the object and free the record. */
927 if (pDevExt->pObjs == pObj)
928 pDevExt->pObjs = pObj->pNext;
929 else
930 {
931 PSUPDRVOBJ pObjPrev;
932 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
933 if (pObjPrev->pNext == pObj)
934 {
935 pObjPrev->pNext = pObj->pNext;
936 break;
937 }
938 Assert(pObjPrev);
939 }
940 RTSpinlockRelease(pDevExt->Spinlock);
941
942 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
943 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
944 if (pObj->pfnDestructor)
945 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
946 RTMemFree(pObj);
947 }
948
949 /* free it and continue. */
950 RTMemFree(pUsage);
951
952 RTSpinlockAcquire(pDevExt->Spinlock);
953 }
954
955 RTSpinlockRelease(pDevExt->Spinlock);
956 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
957 }
958 Log2(("release objects - done\n"));
959
960 /*
961 * Make sure the associated VM pointers are NULL.
962 */
963 if (pSession->pSessionGVM || pSession->pSessionVM || pSession->pFastIoCtrlVM)
964 {
965 SUPR0Printf("supdrvCleanupSession: VM not disassociated! pSessionGVM=%p pSessionVM=%p pFastIoCtrlVM=%p\n",
966 pSession->pSessionGVM, pSession->pSessionVM, pSession->pFastIoCtrlVM);
967 pSession->pSessionGVM = NULL;
968 pSession->pSessionVM = NULL;
969 pSession->pFastIoCtrlVM = NULL;
970 }
971
972 /*
973 * Do tracer cleanups related to this session.
974 */
975 Log2(("release tracer stuff - start\n"));
976 supdrvTracerCleanupSession(pDevExt, pSession);
977 Log2(("release tracer stuff - end\n"));
978
979 /*
980 * Release memory allocated in the session.
981 *
982 * We do not serialize this as we assume that the application will
983 * not allocate memory while closing the file handle object.
984 */
985 Log2(("freeing memory:\n"));
986 pBundle = &pSession->Bundle;
987 while (pBundle)
988 {
989 PSUPDRVBUNDLE pToFree;
990 unsigned i;
991
992 /*
993 * Check and unlock all entries in the bundle.
994 */
995 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
996 {
997 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
998 {
999 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
1000 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
1001 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
1002 {
1003 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
1004 AssertRC(rc); /** @todo figure out how to handle this. */
1005 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
1006 }
1007 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
1008 AssertRC(rc); /** @todo figure out how to handle this. */
1009 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
1010 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
1011 }
1012 }
1013
1014 /*
1015 * Advance and free previous bundle.
1016 */
1017 pToFree = pBundle;
1018 pBundle = pBundle->pNext;
1019
1020 pToFree->pNext = NULL;
1021 pToFree->cUsed = 0;
1022 if (pToFree != &pSession->Bundle)
1023 RTMemFree(pToFree);
1024 }
1025 Log2(("freeing memory - done\n"));
1026
1027 /*
1028 * Deregister component factories.
1029 */
1030 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
1031 Log2(("deregistering component factories:\n"));
1032 if (pDevExt->pComponentFactoryHead)
1033 {
1034 PSUPDRVFACTORYREG pPrev = NULL;
1035 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
1036 while (pCur)
1037 {
1038 if (pCur->pSession == pSession)
1039 {
1040 /* unlink it */
1041 PSUPDRVFACTORYREG pNext = pCur->pNext;
1042 if (pPrev)
1043 pPrev->pNext = pNext;
1044 else
1045 pDevExt->pComponentFactoryHead = pNext;
1046
1047 /* free it */
1048 pCur->pNext = NULL;
1049 pCur->pSession = NULL;
1050 pCur->pFactory = NULL;
1051 RTMemFree(pCur);
1052
1053 /* next */
1054 pCur = pNext;
1055 }
1056 else
1057 {
1058 /* next */
1059 pPrev = pCur;
1060 pCur = pCur->pNext;
1061 }
1062 }
1063 }
1064 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
1065 Log2(("deregistering component factories - done\n"));
1066
1067 /*
1068 * Loaded images need to be dereferenced and possibly freed up.
1069 */
1070 supdrvLdrLock(pDevExt);
1071 Log2(("freeing images:\n"));
1072 if (pSession->pLdrUsage)
1073 {
1074 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
1075 pSession->pLdrUsage = NULL;
1076 while (pUsage)
1077 {
1078 void *pvFree = pUsage;
1079 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
1080 uint32_t cUsage = pUsage->cRing0Usage + pUsage->cRing3Usage;
1081 if (pImage->cUsage > cUsage)
1082 pImage->cUsage -= cUsage;
1083 else
1084 supdrvLdrFree(pDevExt, pImage);
1085 pUsage->pImage = NULL;
1086 pUsage = pUsage->pNext;
1087 RTMemFree(pvFree);
1088 }
1089 }
1090 supdrvLdrUnlock(pDevExt);
1091 Log2(("freeing images - done\n"));
1092
1093 /*
1094 * Unmap the GIP.
1095 */
1096 Log2(("umapping GIP:\n"));
1097 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
1098 {
1099 SUPR0GipUnmap(pSession);
1100 pSession->fGipReferenced = 0;
1101 }
1102 Log2(("umapping GIP - done\n"));
1103}
1104
1105
1106/**
1107 * Common code for freeing a session when the reference count reaches zero.
1108 *
1109 * @param pDevExt Device extension.
1110 * @param pSession Session data.
1111 * This data will be freed by this routine.
1112 */
1113static void supdrvDestroySession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1114{
1115 VBOXDRV_SESSION_CLOSE(pSession);
1116
1117 /*
1118 * Cleanup the session first.
1119 */
1120 supdrvCleanupSession(pDevExt, pSession);
1121 supdrvOSCleanupSession(pDevExt, pSession);
1122
1123 /*
1124 * Free the rest of the session stuff.
1125 */
1126 RTSpinlockDestroy(pSession->Spinlock);
1127 pSession->Spinlock = NIL_RTSPINLOCK;
1128 pSession->pDevExt = NULL;
1129 RTMemFree(pSession);
1130 LogFlow(("supdrvDestroySession: returns\n"));
1131}
1132
1133
1134/**
1135 * Inserts the session into the global hash table.
1136 *
1137 * @retval VINF_SUCCESS on success.
1138 * @retval VERR_WRONG_ORDER if the session was already inserted (asserted).
1139 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1140 * session (asserted).
1141 * @retval VERR_DUPLICATE if there is already a session for that pid.
1142 *
1143 * @param pDevExt The device extension.
1144 * @param pSession The session.
1145 * @param ppOsSessionPtr Pointer to the OS session pointer, if any is
1146 * available and used. This will be set to point to the
1147 * session while under the protection of the session
1148 * hash table spinlock. It will also be kept in
1149 * PSUPDRVSESSION::ppOsSessionPtr for lookup and
1150 * cleanup use.
1151 * @param pvUser Argument for supdrvOSSessionHashTabInserted.
1152 */
1153int VBOXCALL supdrvSessionHashTabInsert(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVSESSION *ppOsSessionPtr,
1154 void *pvUser)
1155{
1156 PSUPDRVSESSION pCur;
1157 unsigned iHash;
1158
1159 /*
1160 * Validate input.
1161 */
1162 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1163 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1164
1165 /*
1166 * Calculate the hash table index and acquire the spinlock.
1167 */
1168 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1169
1170 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1171
1172 /*
1173 * If there is a collision, we need to carefully check whether we got a
1174 * duplicate. There can only be one open session per process.
1175 */
1176 pCur = pDevExt->apSessionHashTab[iHash];
1177 if (pCur)
1178 {
1179 while (pCur && pCur->Process != pSession->Process)
1180 pCur = pCur->pCommonNextHash;
1181
1182 if (pCur)
1183 {
1184 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1185 if (pCur == pSession)
1186 {
1187 Assert(pSession->fInHashTable);
1188 AssertFailed();
1189 return VERR_WRONG_ORDER;
1190 }
1191 Assert(!pSession->fInHashTable);
1192 if (pCur->R0Process == pSession->R0Process)
1193 return VERR_RESOURCE_IN_USE;
1194 return VERR_DUPLICATE;
1195 }
1196 }
1197 Assert(!pSession->fInHashTable);
1198 Assert(!pSession->ppOsSessionPtr);
1199
1200 /*
1201 * Insert it, doing a callout to the OS specific code in case it has
1202 * anything it wishes to do while we're holding the spinlock.
1203 */
1204 pSession->pCommonNextHash = pDevExt->apSessionHashTab[iHash];
1205 pDevExt->apSessionHashTab[iHash] = pSession;
1206 pSession->fInHashTable = true;
1207 ASMAtomicIncS32(&pDevExt->cSessions);
1208
1209 pSession->ppOsSessionPtr = ppOsSessionPtr;
1210 if (ppOsSessionPtr)
1211 ASMAtomicWritePtr(ppOsSessionPtr, pSession);
1212
1213 supdrvOSSessionHashTabInserted(pDevExt, pSession, pvUser);
1214
1215 /*
1216 * Retain a reference for the pointer in the session table.
1217 */
1218 ASMAtomicIncU32(&pSession->cRefs);
1219
1220 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1221 return VINF_SUCCESS;
1222}
1223
1224
1225/**
1226 * Removes the session from the global hash table.
1227 *
1228 * @retval VINF_SUCCESS on success.
1229 * @retval VERR_NOT_FOUND if the session was already removed (asserted).
1230 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1231 * session (asserted).
1232 *
1233 * @param pDevExt The device extension.
1234 * @param pSession The session. The caller is expected to have a reference
1235 * to this so it won't croak on us when we release the hash
1236 * table reference.
1237 * @param pvUser OS specific context value for the
1238 * supdrvOSSessionHashTabRemoved callback.
1239 */
1240int VBOXCALL supdrvSessionHashTabRemove(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, void *pvUser)
1241{
1242 PSUPDRVSESSION pCur;
1243 unsigned iHash;
1244 int32_t cRefs;
1245
1246 /*
1247 * Validate input.
1248 */
1249 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1250 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1251
1252 /*
1253 * Calculate the hash table index and acquire the spinlock.
1254 */
1255 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1256
1257 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1258
1259 /*
1260 * Unlink it.
1261 */
1262 pCur = pDevExt->apSessionHashTab[iHash];
1263 if (pCur == pSession)
1264 pDevExt->apSessionHashTab[iHash] = pSession->pCommonNextHash;
1265 else
1266 {
1267 PSUPDRVSESSION pPrev = pCur;
1268 while (pCur && pCur != pSession)
1269 {
1270 pPrev = pCur;
1271 pCur = pCur->pCommonNextHash;
1272 }
1273 if (pCur)
1274 pPrev->pCommonNextHash = pCur->pCommonNextHash;
1275 else
1276 {
1277 Assert(!pSession->fInHashTable);
1278 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1279 return VERR_NOT_FOUND;
1280 }
1281 }
1282
1283 pSession->pCommonNextHash = NULL;
1284 pSession->fInHashTable = false;
1285
1286 ASMAtomicDecS32(&pDevExt->cSessions);
1287
1288 /*
1289 * Clear OS specific session pointer if available and do the OS callback.
1290 */
1291 if (pSession->ppOsSessionPtr)
1292 {
1293 ASMAtomicCmpXchgPtr(pSession->ppOsSessionPtr, NULL, pSession);
1294 pSession->ppOsSessionPtr = NULL;
1295 }
1296
1297 supdrvOSSessionHashTabRemoved(pDevExt, pSession, pvUser);
1298
1299 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1300
1301 /*
1302 * Drop the reference the hash table had to the session. This shouldn't
1303 * be the last reference!
1304 */
1305 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1306 Assert(cRefs > 0 && cRefs < _1M);
1307 if (cRefs == 0)
1308 supdrvDestroySession(pDevExt, pSession);
1309
1310 return VINF_SUCCESS;
1311}
1312
1313
1314/**
1315 * Looks up the session for the current process in the global hash table or in
1316 * the OS specific pointer.
1317 *
1318 * @returns Pointer to the session with a reference that the caller must
1319 * release. If no valid session was found, NULL is returned.
1320 *
1321 * @param pDevExt The device extension.
1322 * @param Process The process ID.
1323 * @param R0Process The ring-0 process handle.
1324 * @param ppOsSessionPtr The OS session pointer if available. If not NULL,
1325 * this is used instead of the hash table. For
1326 * additional safety it must then be equal to the
1327 * SUPDRVSESSION::ppOsSessionPtr member.
1328 * This can be NULL even if the OS has a session
1329 * pointer.
1330 */
1331PSUPDRVSESSION VBOXCALL supdrvSessionHashTabLookup(PSUPDRVDEVEXT pDevExt, RTPROCESS Process, RTR0PROCESS R0Process,
1332 PSUPDRVSESSION *ppOsSessionPtr)
1333{
1334 PSUPDRVSESSION pCur;
1335 unsigned iHash;
1336
1337 /*
1338 * Validate input.
1339 */
1340 AssertReturn(R0Process != NIL_RTR0PROCESS, NULL);
1341
1342 /*
1343 * Calculate the hash table index and acquire the spinlock.
1344 */
1345 iHash = SUPDRV_SESSION_HASH(Process);
1346
1347 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1348
1349 /*
1350 * If an OS session pointer is provided, always use it.
1351 */
1352 if (ppOsSessionPtr)
1353 {
1354 pCur = *ppOsSessionPtr;
1355 if ( pCur
1356 && ( pCur->ppOsSessionPtr != ppOsSessionPtr
1357 || pCur->Process != Process
1358 || pCur->R0Process != R0Process) )
1359 pCur = NULL;
1360 }
1361 else
1362 {
1363 /*
1364 * Otherwise, do the hash table lookup.
1365 */
1366 pCur = pDevExt->apSessionHashTab[iHash];
1367 while ( pCur
1368 && ( pCur->Process != Process
1369 || pCur->R0Process != R0Process) )
1370 pCur = pCur->pCommonNextHash;
1371 }
1372
1373 /*
1374 * Retain the session.
1375 */
1376 if (pCur)
1377 {
1378 uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
1379 NOREF(cRefs);
1380 Assert(cRefs > 1 && cRefs < _1M);
1381 }
1382
1383 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1384
1385 return pCur;
1386}
1387
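/*
 * Editorial sketch, not part of the driver: typical use of the lookup above.
 * The returned session carries a reference the caller must drop with
 * supdrvSessionRelease; passing NULL for ppOsSessionPtr forces the hash table
 * path.  pDevExt is assumed to be in scope.
 *
 *     PSUPDRVSESSION pSession = supdrvSessionHashTabLookup(pDevExt, RTProcSelf(),
 *                                                          RTR0ProcHandleSelf(), NULL);
 *     if (pSession)
 *     {
 *         // ... use the session ...
 *         supdrvSessionRelease(pSession);
 *     }
 */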
1388
1389/**
1390 * Retain a session to make sure it doesn't go away while it is in use.
1391 *
1392 * @returns New reference count on success, UINT32_MAX on failure.
1393 * @param pSession Session data.
1394 */
1395uint32_t VBOXCALL supdrvSessionRetain(PSUPDRVSESSION pSession)
1396{
1397 uint32_t cRefs;
1398 AssertPtrReturn(pSession, UINT32_MAX);
1399 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1400
1401 cRefs = ASMAtomicIncU32(&pSession->cRefs);
1402 AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1403 return cRefs;
1404}
1405
1406
1407/**
1408 * Releases a given session.
1409 *
1410 * @returns New reference count on success (0 if closed), UINT32_MAX on failure.
1411 * @param pSession Session data.
1412 */
1413uint32_t VBOXCALL supdrvSessionRelease(PSUPDRVSESSION pSession)
1414{
1415 uint32_t cRefs;
1416 AssertPtrReturn(pSession, UINT32_MAX);
1417 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1418
1419 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1420 AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1421 if (cRefs == 0)
1422 supdrvDestroySession(pSession->pDevExt, pSession);
1423 return cRefs;
1424}
1425
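/*
 * Editorial sketch, not part of the driver: supdrvSessionRetain/Release form a
 * plain reference counting pair; every successful retain must be matched by
 * exactly one release, and the session is destroyed when the count hits zero.
 *
 *     uint32_t cRefs = supdrvSessionRetain(pSession);
 *     if (cRefs != UINT32_MAX)
 *     {
 *         // ... pSession is safe to use here ...
 *         supdrvSessionRelease(pSession);
 *     }
 */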
1426
1427/**
1428 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1429 *
1430 * @returns IPRT status code, see SUPR0ObjAddRef.
1431 * @param hHandleTable The handle table handle. Ignored.
1432 * @param pvObj The object pointer.
1433 * @param pvCtx Context, the handle type. Ignored.
1434 * @param pvUser Session pointer.
1435 */
1436static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
1437{
1438 NOREF(pvCtx);
1439 NOREF(hHandleTable);
1440 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
1441}
1442
1443
1444/**
1445 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1446 *
1447 * @param hHandleTable The handle table handle. Ignored.
1448 * @param h The handle value. Ignored.
1449 * @param pvObj The object pointer.
1450 * @param pvCtx Context, the handle type. Ignored.
1451 * @param pvUser Session pointer.
1452 */
1453static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
1454{
1455 NOREF(pvCtx);
1456 NOREF(h);
1457 NOREF(hHandleTable);
1458 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
1459}
1460
1461
1462/**
1463 * Fast path I/O Control worker.
1464 *
1465 * @returns VBox status code that should be passed down to ring-3 unchanged.
1466 * @param uOperation SUP_VMMR0_DO_XXX (not the I/O control number!).
1467 * @param idCpu VMCPU id.
1468 * @param pDevExt Device extension.
1469 * @param pSession Session data.
1470 */
1471int VBOXCALL supdrvIOCtlFast(uintptr_t uOperation, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1472{
1473 /*
1474 * Validate input and check that the VM has a session.
1475 */
1476 if (RT_LIKELY(RT_VALID_PTR(pSession)))
1477 {
1478 PVM pVM = pSession->pSessionVM;
1479 PGVM pGVM = pSession->pSessionGVM;
1480 if (RT_LIKELY( pGVM != NULL
1481 && pVM != NULL
1482 && pVM == pSession->pFastIoCtrlVM))
1483 {
1484 if (RT_LIKELY(pDevExt->pfnVMMR0EntryFast))
1485 {
1486 /*
1487 * Make the call.
1488 */
1489 pDevExt->pfnVMMR0EntryFast(pGVM, pVM, idCpu, uOperation);
1490 return VINF_SUCCESS;
1491 }
1492
1493 SUPR0Printf("supdrvIOCtlFast: pfnVMMR0EntryFast is NULL\n");
1494 }
1495 else
1496 SUPR0Printf("supdrvIOCtlFast: Misconfig session: pGVM=%p pVM=%p pFastIoCtrlVM=%p\n",
1497 pGVM, pVM, pSession->pFastIoCtrlVM);
1498 }
1499 else
1500 SUPR0Printf("supdrvIOCtlFast: Bad session pointer %p\n", pSession);
1501 return VERR_INTERNAL_ERROR;
1502}
1503
1504
1505/**
1506 * Helper for supdrvIOCtl used to validate module names passed to SUP_IOCTL_LDR_OPEN.
1507 *
1508 * Checks whether pszName contains any character from a fixed set of invalid
1509 * characters. We would use strpbrk here if it were included in the RedHat
1510 * kABI whitelist, see http://www.kerneldrivers.org/RHEL5.
1511 *
1512 * @returns true if fine, false if not.
1513 * @param pszName The module name to check.
1514 */
1515static bool supdrvIsLdrModuleNameValid(const char *pszName)
1516{
1517 int chCur;
1518 while ((chCur = *pszName++) != '\0')
1519 {
1520 static const char s_szInvalidChars[] = ";:()[]{}/\\|&*%#@!~`\"'";
1521 unsigned offInv = RT_ELEMENTS(s_szInvalidChars);
1522 while (offInv-- > 0)
1523 if (s_szInvalidChars[offInv] == chCur)
1524 return false;
1525 }
1526 return true;
1527}
1528
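/*
 * Editorial sketch, not part of the driver: the checker above rejects names
 * containing shell and path metacharacters.  Hypothetical inputs:
 *
 *     supdrvIsLdrModuleNameValid("VMMR0.r0");      // true  - plain module name
 *     supdrvIsLdrModuleNameValid("foo;reboot");    // false - ';' is in the invalid set
 *     supdrvIsLdrModuleNameValid("bad/path.r0");   // false - '/' is in the invalid set
 */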
1529
1530
1531/**
1532 * I/O Control inner worker (split out for tracing reasons).
1533 *
1534 * @returns IPRT status code.
1535 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1536 *
1537 * @param uIOCtl Function number.
1538 * @param pDevExt Device extension.
1539 * @param pSession Session data.
1540 * @param pReqHdr The request header.
1541 */
1542static int supdrvIOCtlInnerUnrestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1543{
1544 /*
1545 * Validation macros
1546 */
1547#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1548 do { \
1549 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1550 { \
1551 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1552 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1553 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1554 } \
1555 } while (0)
1556
1557#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1558
1559#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1560 do { \
1561 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1562 { \
1563 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1564 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
1565 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1566 } \
1567 } while (0)
1568
1569#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1570 do { \
1571 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1572 { \
1573 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1574 (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1575 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1576 } \
1577 } while (0)
1578
1579#define REQ_CHECK_EXPR(Name, expr) \
1580 do { \
1581 if (RT_UNLIKELY(!(expr))) \
1582 { \
1583 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1584 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1585 } \
1586 } while (0)
1587
1588#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1589 do { \
1590 if (RT_UNLIKELY(!(expr))) \
1591 { \
1592 OSDBGPRINT( fmt ); \
1593 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1594 } \
1595 } while (0)
1596
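/*
 * Editorial sketch, not part of the driver: what the macros above boil down to
 * for a fixed-size request.  REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE), for example,
 * expands to roughly:
 *
 *     if (RT_UNLIKELY(   pReqHdr->cbIn  != SUP_IOCTL_LDR_FREE_SIZE_IN
 *                     || pReqHdr->cbOut != SUP_IOCTL_LDR_FREE_SIZE_OUT))
 *     {
 *         OSDBGPRINT(("SUP_IOCTL_LDR_FREE: Invalid input/output sizes. ..."));
 *         return pReqHdr->rc = VERR_INVALID_PARAMETER;
 *     }
 */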
1597 /*
1598 * The switch.
1599 */
1600 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1601 {
1602 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1603 {
1604 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1605 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1606 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1607 {
1608 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1609 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1610 return 0;
1611 }
1612
1613#if 0
1614 /*
1615 * Call out to the OS specific code and let it do permission checks on the
1616 * client process.
1617 */
1618 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1619 {
1620 pReq->u.Out.u32Cookie = 0xffffffff;
1621 pReq->u.Out.u32SessionCookie = 0xffffffff;
1622 pReq->u.Out.u32SessionVersion = 0xffffffff;
1623 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1624 pReq->u.Out.pSession = NULL;
1625 pReq->u.Out.cFunctions = 0;
1626 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1627 return 0;
1628 }
1629#endif
1630
1631 /*
1632 * Match the version.
1633 * The current logic is very simple, match the major interface version.
1634 */
1635 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1636 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1637 {
1638 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1639 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1640 pReq->u.Out.u32Cookie = 0xffffffff;
1641 pReq->u.Out.u32SessionCookie = 0xffffffff;
1642 pReq->u.Out.u32SessionVersion = 0xffffffff;
1643 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1644 pReq->u.Out.pSession = NULL;
1645 pReq->u.Out.cFunctions = 0;
1646 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1647 return 0;
1648 }
1649
1650 /*
1651 * Fill in return data and be gone.
1652 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1653 * u32SessionVersion <= u32ReqVersion!
1654 */
1655 /** @todo Somehow validate the client and negotiate a secure cookie... */
1656 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1657 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1658 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1659 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1660 pReq->u.Out.pSession = pSession;
1661 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1662 pReq->Hdr.rc = VINF_SUCCESS;
1663 return 0;
1664 }
1665
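/*
 * Editorial sketch, not part of the driver: the "match the major interface
 * version" rule above only compares the high 16 bits.  With hypothetical
 * values, a client built against 0x00290001 is accepted by a 0x00290005
 * driver (same 0x0029 major), while 0x00280005 would be rejected:
 *
 *     bool fOk =    u32MinVersion <= SUPDRV_IOC_VERSION
 *                && (u32MinVersion & 0xffff0000) == (SUPDRV_IOC_VERSION & 0xffff0000);
 */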
1666 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1667 {
1668 /* validate */
1669 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1670 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1671
1672 /* execute */
1673 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1674 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1675 pReq->Hdr.rc = VINF_SUCCESS;
1676 return 0;
1677 }
1678
1679 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1680 {
1681 /* validate */
1682 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1683 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1684 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1685 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1686 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1687
1688 /* execute */
1689 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1690 if (RT_FAILURE(pReq->Hdr.rc))
1691 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1692 return 0;
1693 }
1694
1695 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1696 {
1697 /* validate */
1698 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1699 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1700
1701 /* execute */
1702 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1703 return 0;
1704 }
1705
1706 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1707 {
1708 /* validate */
1709 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1710 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1711
1712 /* execute */
1713 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1714 if (RT_FAILURE(pReq->Hdr.rc))
1715 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1716 return 0;
1717 }
1718
1719 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1720 {
1721 /* validate */
1722 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1723 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1724
1725 /* execute */
1726 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1727 return 0;
1728 }
1729
1730 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1731 {
1732 /* validate */
1733 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1734 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1735 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything > 0);
1736 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithEverything < 16*_1M);
1737 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1738 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithEverything);
1739 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1740 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
1741 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, supdrvIsLdrModuleNameValid(pReq->u.In.szName));
1742 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szFilename, sizeof(pReq->u.In.szFilename)));
1743
1744 /* execute */
1745 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1746 return 0;
1747 }
1748
1749 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1750 {
1751 /* validate */
1752 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1753 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= SUP_IOCTL_LDR_LOAD_SIZE_IN(32));
1754 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithEverything), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1755 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1756 || ( pReq->u.In.cSymbols <= 16384
1757 && pReq->u.In.offSymbols >= pReq->u.In.cbImageBits
1758 && pReq->u.In.offSymbols < pReq->u.In.cbImageWithEverything
1759 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithEverything),
1760 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSymbols,
1761 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithEverything));
1762 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1763 || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithEverything
1764 && pReq->u.In.offStrTab >= pReq->u.In.cbImageBits
1765 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything
1766 && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithEverything),
1767 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offStrTab,
1768 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithEverything));
1769 REQ_CHECK_EXPR_FMT( pReq->u.In.cSegments >= 1
1770 && pReq->u.In.cSegments <= 128
1771 && pReq->u.In.cSegments <= (pReq->u.In.cbImageBits + PAGE_SIZE - 1) / PAGE_SIZE
1772 && pReq->u.In.offSegments >= pReq->u.In.cbImageBits
1773 && pReq->u.In.offSegments < pReq->u.In.cbImageWithEverything
1774 && pReq->u.In.offSegments + pReq->u.In.cSegments * sizeof(SUPLDRSEG) <= pReq->u.In.cbImageWithEverything,
1775 ("SUP_IOCTL_LDR_LOAD: offSegments=%#lx cSegments=%#lx cbImageWithEverything=%#lx\n", (long)pReq->u.In.offSegments,
1776 (long)pReq->u.In.cSegments, (long)pReq->u.In.cbImageWithEverything));
1777
1778 if (pReq->u.In.cSymbols)
1779 {
1780 uint32_t i;
1781 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.abImage[pReq->u.In.offSymbols];
1782 for (i = 0; i < pReq->u.In.cSymbols; i++)
1783 {
1784 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithEverything,
1785 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithEverything));
1786 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1787 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbStrTab));
1788 REQ_CHECK_EXPR_FMT(RTStrEnd((char const *)&pReq->u.In.abImage[pReq->u.In.offStrTab + paSyms[i].offName],
1789 pReq->u.In.cbStrTab - paSyms[i].offName),
1790 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithEverything));
1791 }
1792 }
1793 {
1794 uint32_t i;
1795 uint32_t offPrevEnd = 0;
1796 PSUPLDRSEG paSegs = (PSUPLDRSEG)&pReq->u.In.abImage[pReq->u.In.offSegments];
1797 for (i = 0; i < pReq->u.In.cSegments; i++)
1798 {
1799 REQ_CHECK_EXPR_FMT(paSegs[i].off < pReq->u.In.cbImageBits && !(paSegs[i].off & PAGE_OFFSET_MASK),
1800 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)pReq->u.In.cbImageBits));
1801 REQ_CHECK_EXPR_FMT(paSegs[i].cb <= pReq->u.In.cbImageBits,
1802 ("SUP_IOCTL_LDR_LOAD: seg #%ld: cb %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].cb, (long)pReq->u.In.cbImageBits));
1803 REQ_CHECK_EXPR_FMT(paSegs[i].off + paSegs[i].cb <= pReq->u.In.cbImageBits,
1804 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx = %#lx (max=%#lx)\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb, (long)(paSegs[i].off + paSegs[i].cb), (long)pReq->u.In.cbImageBits));
1805 REQ_CHECK_EXPR_FMT(paSegs[i].fProt != 0,
1806 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx + cb %#lx\n", (long)i, (long)paSegs[i].off, (long)paSegs[i].cb));
1807 REQ_CHECK_EXPR_FMT(paSegs[i].fUnused == 0, ("SUP_IOCTL_LDR_LOAD: seg #%ld: fUnused=1\n", (long)i));
1808 REQ_CHECK_EXPR_FMT(offPrevEnd == paSegs[i].off,
1809 ("SUP_IOCTL_LDR_LOAD: seg #%ld: off %#lx offPrevEnd %#lx\n", (long)i, (long)paSegs[i].off, (long)offPrevEnd));
1810 offPrevEnd = paSegs[i].off + paSegs[i].cb;
1811 }
1812 REQ_CHECK_EXPR_FMT(offPrevEnd == pReq->u.In.cbImageBits,
1813 ("SUP_IOCTL_LDR_LOAD: offPrevEnd %#lx cbImageBits %#lx\n", (long)offPrevEnd, (long)pReq->u.In.cbImageBits));
1814 }
1815 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fFlags & ~SUPLDRLOAD_F_VALID_MASK),
1816 ("SUP_IOCTL_LDR_LOAD: fFlags=%#x\n", (unsigned)pReq->u.In.fFlags));
1817
1818 /* execute */
1819 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1820 return 0;
1821 }
1822
1823 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1824 {
1825 /* validate */
1826 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1827 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1828
1829 /* execute */
1830 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1831 return 0;
1832 }
1833
1834 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOCK_DOWN):
1835 {
1836 /* validate */
1837 REQ_CHECK_SIZES(SUP_IOCTL_LDR_LOCK_DOWN);
1838
1839 /* execute */
1840 pReqHdr->rc = supdrvIOCtl_LdrLockDown(pDevExt);
1841 return 0;
1842 }
1843
1844 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1845 {
1846 /* validate */
1847 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1848 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1849 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, RTStrEnd(pReq->u.In.szSymbol, sizeof(pReq->u.In.szSymbol)));
1850
1851 /* execute */
1852 pReq->Hdr.rc = supdrvIOCtl_LdrQuerySymbol(pDevExt, pSession, pReq);
1853 return 0;
1854 }
1855
1856 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_NO_SIZE()):
1857 {
1858 /* validate */
1859 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1860 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1861 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1862
1863 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1864 {
1865 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1866
1867 /* execute */
1868 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1869 {
1870 if (pReq->u.In.pVMR0 == NULL)
1871 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1872 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1873 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1874 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1875 pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1876 else
1877 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1878 }
1879 else
1880 pReq->Hdr.rc = VERR_WRONG_ORDER;
1881 }
1882 else
1883 {
1884 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1885 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1886 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1887 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1888 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1889
1890 /* execute */
1891 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1892 {
1893 if (pReq->u.In.pVMR0 == NULL)
1894 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu,
1895 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1896 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1897 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1898 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1899 else
1900 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1901 }
1902 else
1903 pReq->Hdr.rc = VERR_WRONG_ORDER;
1904 }
1905
1906 if ( RT_FAILURE(pReq->Hdr.rc)
1907 && pReq->Hdr.rc != VERR_INTERRUPTED
1908 && pReq->Hdr.rc != VERR_TIMEOUT)
1909 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1910 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1911 else
1912 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1913 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1914 return 0;
1915 }
1916
1917 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_BIG):
1918 {
1919 /* validate */
1920 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1921 PSUPVMMR0REQHDR pVMMReq;
1922 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1923 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1924
1925 pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1926 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR)),
1927 ("SUP_IOCTL_CALL_VMMR0_BIG: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR))));
1928 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0_BIG, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1929 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0_BIG, SUP_IOCTL_CALL_VMMR0_BIG_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_BIG_SIZE_OUT(pVMMReq->cbReq));
1930
1931 /* execute */
1932 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1933 {
1934 if (pReq->u.In.pVMR0 == NULL)
1935 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(NULL, NULL, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1936 else if (pReq->u.In.pVMR0 == pSession->pSessionVM)
1937 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pSession->pSessionGVM, pSession->pSessionVM, pReq->u.In.idCpu,
1938 pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1939 else
1940 pReq->Hdr.rc = VERR_INVALID_VM_HANDLE;
1941 }
1942 else
1943 pReq->Hdr.rc = VERR_WRONG_ORDER;
1944
1945 if ( RT_FAILURE(pReq->Hdr.rc)
1946 && pReq->Hdr.rc != VERR_INTERRUPTED
1947 && pReq->Hdr.rc != VERR_TIMEOUT)
1948 Log(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1949 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1950 else
1951 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1952 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1953 return 0;
1954 }
1955
1956 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1957 {
1958 /* validate */
1959 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1960 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1961
1962 /* execute */
1963 pReq->Hdr.rc = VINF_SUCCESS;
1964 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1965 return 0;
1966 }
1967
1968 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1969 {
1970 /* validate */
1971 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1972 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1973 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1974
1975 /* execute */
1976 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1977 if (RT_FAILURE(pReq->Hdr.rc))
1978 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1979 return 0;
1980 }
1981
1982 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1983 {
1984 /* validate */
1985 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1986 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1987
1988 /* execute */
1989 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1990 return 0;
1991 }
1992
1993 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1994 {
1995 /* validate */
1996 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1997 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1998
1999 /* execute */
2000 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
2001 if (RT_SUCCESS(pReq->Hdr.rc))
2002 pReq->u.Out.pGipR0 = pDevExt->pGip;
2003 return 0;
2004 }
2005
2006 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
2007 {
2008 /* validate */
2009 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
2010 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
2011
2012 /* execute */
2013 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
2014 return 0;
2015 }
2016
2017 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
2018 {
2019 /* validate */
2020 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
2021 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
2022 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
2023 || ( VALID_PTR(pReq->u.In.pVMR0)
2024 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
2025 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
2026
2027 /* execute */
2028 RTSpinlockAcquire(pDevExt->Spinlock);
2029 if (pSession->pSessionVM == pReq->u.In.pVMR0)
2030 {
2031 if (pSession->pFastIoCtrlVM == NULL)
2032 {
2033 pSession->pFastIoCtrlVM = pSession->pSessionVM;
2034 RTSpinlockRelease(pDevExt->Spinlock);
2035 pReq->Hdr.rc = VINF_SUCCESS;
2036 }
2037 else
2038 {
2039 RTSpinlockRelease(pDevExt->Spinlock);
2040 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pFastIoCtrlVM=%p! (pVMR0=%p)\n",
2041 pSession->pFastIoCtrlVM, pReq->u.In.pVMR0));
2042 pReq->Hdr.rc = VERR_ALREADY_EXISTS;
2043 }
2044 }
2045 else
2046 {
2047 RTSpinlockRelease(pDevExt->Spinlock);
2048 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: pSession->pSessionVM=%p vs pVMR0=%p)\n",
2049 pSession->pSessionVM, pReq->u.In.pVMR0));
2050 pReq->Hdr.rc = pSession->pSessionVM ? VERR_ACCESS_DENIED : VERR_WRONG_ORDER;
2051 }
2052 return 0;
2053 }
2054
2055 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
2056 {
2057 /* validate */
2058 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
2059 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
2060 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
2061 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
2062 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
2063 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
2064 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
2065 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
2066 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
2067
2068 /* execute */
2069 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
2070 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
2071 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
2072 &pReq->u.Out.aPages[0]);
2073 if (RT_FAILURE(pReq->Hdr.rc))
2074 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2075 return 0;
2076 }
2077
2078 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
2079 {
2080 /* validate */
2081 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
2082 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
2083 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
2084 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
2085 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2086 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
2087
2088 /* execute */
2089 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
2090 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
2091 if (RT_FAILURE(pReq->Hdr.rc))
2092 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2093 return 0;
2094 }
2095
2096 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
2097 {
2098 /* validate */
2099 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
2100 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
2101 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
2102 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
2103 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
2104 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
2105 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
2106
2107 /* execute */
2108 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
2109 return 0;
2110 }
2111
2112 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
2113 {
2114 /* validate */
2115 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
2116 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
2117
2118 /* execute */
2119 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
2120 return 0;
2121 }
2122
2123 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE_NO_SIZE()):
2124 {
2125 /* validate */
2126 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
2127 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
2128 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
2129
2130 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
2131 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
2132 else
2133 {
2134 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
2135 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
2136 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
2137 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
2138 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
2139 }
2140 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
2141
2142 /* execute */
2143 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
2144 return 0;
2145 }
2146
2147 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS_NO_SIZE()):
2148 {
2149 /* validate */
2150 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
2151 size_t cbStrTab;
2152 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
2153 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
2154 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
2155 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
2156 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
2157 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
2158 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
2159 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
2160 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
2161 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
2162 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
2163
2164 /* execute */
2165 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pReq);
2166 return 0;
2167 }
2168
2169 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP2):
2170 {
2171 /* validate */
2172 PSUPSEMOP2 pReq = (PSUPSEMOP2)pReqHdr;
2173 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP2, SUP_IOCTL_SEM_OP2_SIZE_IN, SUP_IOCTL_SEM_OP2_SIZE_OUT);
2174 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP2, pReq->u.In.uReserved == 0);
2175
2176 /* execute */
2177 switch (pReq->u.In.uType)
2178 {
2179 case SUP_SEM_TYPE_EVENT:
2180 {
2181 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2182 switch (pReq->u.In.uOp)
2183 {
2184 case SUPSEMOP2_WAIT_MS_REL:
2185 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.uArg.cRelMsTimeout);
2186 break;
2187 case SUPSEMOP2_WAIT_NS_ABS:
2188 pReq->Hdr.rc = SUPSemEventWaitNsAbsIntr(pSession, hEvent, pReq->u.In.uArg.uAbsNsTimeout);
2189 break;
2190 case SUPSEMOP2_WAIT_NS_REL:
2191 pReq->Hdr.rc = SUPSemEventWaitNsRelIntr(pSession, hEvent, pReq->u.In.uArg.cRelNsTimeout);
2192 break;
2193 case SUPSEMOP2_SIGNAL:
2194 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
2195 break;
2196 case SUPSEMOP2_CLOSE:
2197 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
2198 break;
2199 case SUPSEMOP2_RESET:
2200 default:
2201 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2202 break;
2203 }
2204 break;
2205 }
2206
2207 case SUP_SEM_TYPE_EVENT_MULTI:
2208 {
2209 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2210 switch (pReq->u.In.uOp)
2211 {
2212 case SUPSEMOP2_WAIT_MS_REL:
2213 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.uArg.cRelMsTimeout);
2214 break;
2215 case SUPSEMOP2_WAIT_NS_ABS:
2216 pReq->Hdr.rc = SUPSemEventMultiWaitNsAbsIntr(pSession, hEventMulti, pReq->u.In.uArg.uAbsNsTimeout);
2217 break;
2218 case SUPSEMOP2_WAIT_NS_REL:
2219 pReq->Hdr.rc = SUPSemEventMultiWaitNsRelIntr(pSession, hEventMulti, pReq->u.In.uArg.cRelNsTimeout);
2220 break;
2221 case SUPSEMOP2_SIGNAL:
2222 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
2223 break;
2224 case SUPSEMOP2_CLOSE:
2225 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
2226 break;
2227 case SUPSEMOP2_RESET:
2228 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
2229 break;
2230 default:
2231 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2232 break;
2233 }
2234 break;
2235 }
2236
2237 default:
2238 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2239 break;
2240 }
2241 return 0;
2242 }
2243
2244 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP3):
2245 {
2246 /* validate */
2247 PSUPSEMOP3 pReq = (PSUPSEMOP3)pReqHdr;
2248 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP3, SUP_IOCTL_SEM_OP3_SIZE_IN, SUP_IOCTL_SEM_OP3_SIZE_OUT);
2249 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, pReq->u.In.u32Reserved == 0 && pReq->u.In.u64Reserved == 0);
2250
2251 /* execute */
2252 switch (pReq->u.In.uType)
2253 {
2254 case SUP_SEM_TYPE_EVENT:
2255 {
2256 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2257 switch (pReq->u.In.uOp)
2258 {
2259 case SUPSEMOP3_CREATE:
2260 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2261 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
2262 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
2263 break;
2264 case SUPSEMOP3_GET_RESOLUTION:
2265 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2266 pReq->Hdr.rc = VINF_SUCCESS;
2267 pReq->Hdr.cbOut = sizeof(*pReq);
2268 pReq->u.Out.cNsResolution = SUPSemEventGetResolution(pSession);
2269 break;
2270 default:
2271 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2272 break;
2273 }
2274 break;
2275 }
2276
2277 case SUP_SEM_TYPE_EVENT_MULTI:
2278 {
2279 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2280 switch (pReq->u.In.uOp)
2281 {
2282 case SUPSEMOP3_CREATE:
2283 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2284 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
2285 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
2286 break;
2287 case SUPSEMOP3_GET_RESOLUTION:
2288 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2289 pReq->Hdr.rc = VINF_SUCCESS;
2290 pReq->u.Out.cNsResolution = SUPSemEventMultiGetResolution(pSession);
2291 break;
2292 default:
2293 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2294 break;
2295 }
2296 break;
2297 }
2298
2299 default:
2300 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2301 break;
2302 }
2303 return 0;
2304 }
2305
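/*
 * Editorial sketch, not part of the driver: the ring-0 event semaphore API the
 * two SUP_IOCTL_SEM_OP* cases above fan out to, in its simplest single-thread
 * form (the 10 ms timeout is an arbitrary example value):
 *
 *     SUPSEMEVENT hEvent = NIL_SUPSEMEVENT;
 *     int rc = SUPSemEventCreate(pSession, &hEvent);
 *     if (RT_SUCCESS(rc))
 *     {
 *         SUPSemEventSignal(pSession, hEvent);
 *         rc = SUPSemEventWaitNoResume(pSession, hEvent, 10 /* ms */);  // returns at once
 *         SUPSemEventClose(pSession, hEvent);
 *     }
 */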
2306 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2307 {
2308 /* validate */
2309 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2310 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2311
2312 /* execute */
2313 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2314 if (RT_FAILURE(pReq->Hdr.rc))
2315 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2316 return 0;
2317 }
2318
2319 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_OPEN):
2320 {
2321 /* validate */
2322 PSUPTRACEROPEN pReq = (PSUPTRACEROPEN)pReqHdr;
2323 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_OPEN);
2324
2325 /* execute */
2326 pReq->Hdr.rc = supdrvIOCtl_TracerOpen(pDevExt, pSession, pReq->u.In.uCookie, pReq->u.In.uArg);
2327 return 0;
2328 }
2329
2330 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_CLOSE):
2331 {
2332 /* validate */
2333 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_CLOSE);
2334
2335 /* execute */
2336 pReqHdr->rc = supdrvIOCtl_TracerClose(pDevExt, pSession);
2337 return 0;
2338 }
2339
2340 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_IOCTL):
2341 {
2342 /* validate */
2343 PSUPTRACERIOCTL pReq = (PSUPTRACERIOCTL)pReqHdr;
2344 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_IOCTL);
2345
2346 /* execute */
2347 pReqHdr->rc = supdrvIOCtl_TracerIOCtl(pDevExt, pSession, pReq->u.In.uCmd, pReq->u.In.uArg, &pReq->u.Out.iRetVal);
2348 return 0;
2349 }
2350
2351 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_REG):
2352 {
2353 /* validate */
2354 PSUPTRACERUMODREG pReq = (PSUPTRACERUMODREG)pReqHdr;
2355 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_REG);
2356 if (!RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)))
2357 return VERR_INVALID_PARAMETER;
2358
2359 /* execute */
2360 pReqHdr->rc = supdrvIOCtl_TracerUmodRegister(pDevExt, pSession,
2361 pReq->u.In.R3PtrVtgHdr, pReq->u.In.uVtgHdrAddr,
2362 pReq->u.In.R3PtrStrTab, pReq->u.In.cbStrTab,
2363 pReq->u.In.szName, pReq->u.In.fFlags);
2364 return 0;
2365 }
2366
2367 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_DEREG):
2368 {
2369 /* validate */
2370 PSUPTRACERUMODDEREG pReq = (PSUPTRACERUMODDEREG)pReqHdr;
2371 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_DEREG);
2372
2373 /* execute */
2374 pReqHdr->rc = supdrvIOCtl_TracerUmodDeregister(pDevExt, pSession, pReq->u.In.pVtgHdr);
2375 return 0;
2376 }
2377
2378 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE):
2379 {
2380 /* validate */
2381 PSUPTRACERUMODFIREPROBE pReq = (PSUPTRACERUMODFIREPROBE)pReqHdr;
2382 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE);
2383
2384 supdrvIOCtl_TracerUmodProbeFire(pDevExt, pSession, &pReq->u.In);
2385 pReqHdr->rc = VINF_SUCCESS;
2386 return 0;
2387 }
2388
2389 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_MSR_PROBER):
2390 {
2391 /* validate */
2392 PSUPMSRPROBER pReq = (PSUPMSRPROBER)pReqHdr;
2393 REQ_CHECK_SIZES(SUP_IOCTL_MSR_PROBER);
2394 REQ_CHECK_EXPR(SUP_IOCTL_MSR_PROBER,
2395 pReq->u.In.enmOp > SUPMSRPROBEROP_INVALID && pReq->u.In.enmOp < SUPMSRPROBEROP_END);
2396
2397 pReqHdr->rc = supdrvIOCtl_MsrProber(pDevExt, pReq);
2398 return 0;
2399 }
2400
2401 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_RESUME_SUSPENDED_KBDS):
2402 {
2403 /* validate */
2404 REQ_CHECK_SIZES(SUP_IOCTL_RESUME_SUSPENDED_KBDS);
2405
2406 pReqHdr->rc = supdrvIOCtl_ResumeSuspendedKbds();
2407 return 0;
2408 }
2409
2410 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_DELTA_MEASURE):
2411 {
2412 /* validate */
2413 PSUPTSCDELTAMEASURE pReq = (PSUPTSCDELTAMEASURE)pReqHdr;
2414 REQ_CHECK_SIZES(SUP_IOCTL_TSC_DELTA_MEASURE);
2415
2416 pReqHdr->rc = supdrvIOCtl_TscDeltaMeasure(pDevExt, pSession, pReq);
2417 return 0;
2418 }
2419
2420 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_READ):
2421 {
2422 /* validate */
2423 PSUPTSCREAD pReq = (PSUPTSCREAD)pReqHdr;
2424 REQ_CHECK_SIZES(SUP_IOCTL_TSC_READ);
2425
2426 pReqHdr->rc = supdrvIOCtl_TscRead(pDevExt, pSession, pReq);
2427 return 0;
2428 }
2429
2430 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_SET_FLAGS):
2431 {
2432 /* validate */
2433 PSUPGIPSETFLAGS pReq = (PSUPGIPSETFLAGS)pReqHdr;
2434 REQ_CHECK_SIZES(SUP_IOCTL_GIP_SET_FLAGS);
2435
2436 pReqHdr->rc = supdrvIOCtl_GipSetFlags(pDevExt, pSession, pReq->u.In.fOrMask, pReq->u.In.fAndMask);
2437 return 0;
2438 }
2439
2440 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_UCODE_REV):
2441 {
2442 /* validate */
2443 PSUPUCODEREV pReq = (PSUPUCODEREV)pReqHdr;
2444 REQ_CHECK_SIZES(SUP_IOCTL_UCODE_REV);
2445
2446 /* execute */
2447 pReq->Hdr.rc = SUPR0QueryUcodeRev(pSession, &pReq->u.Out.MicrocodeRev);
2448 if (RT_FAILURE(pReq->Hdr.rc))
2449 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2450 return 0;
2451 }
2452
2453 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_HWVIRT_MSRS):
2454 {
2455 /* validate */
2456 PSUPGETHWVIRTMSRS pReq = (PSUPGETHWVIRTMSRS)pReqHdr;
2457 REQ_CHECK_SIZES(SUP_IOCTL_GET_HWVIRT_MSRS);
2458 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1 && !pReq->u.In.fReserved2,
2459 ("SUP_IOCTL_GET_HWVIRT_MSRS: fReserved0=%d fReserved1=%d fReserved2=%d\n", pReq->u.In.fReserved0,
2460 pReq->u.In.fReserved1, pReq->u.In.fReserved2));
2461
2462 /* execute */
2463 pReq->Hdr.rc = SUPR0GetHwvirtMsrs(&pReq->u.Out.HwvirtMsrs, 0 /* fCaps */, pReq->u.In.fForce);
2464 if (RT_FAILURE(pReq->Hdr.rc))
2465 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2466 return 0;
2467 }
2468
2469 default:
2470 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2471 break;
2472 }
2473 return VERR_GENERAL_FAILURE;
2474}
2475
2476
2477/**
2478 * I/O Control inner worker for the restricted operations.
2479 *
2480 * @returns IPRT status code.
2481 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2482 *
2483 * @param uIOCtl Function number.
2484 * @param pDevExt Device extension.
2485 * @param pSession Session data.
2486 * @param pReqHdr The request header.
2487 */
2488static int supdrvIOCtlInnerRestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
2489{
2490 /*
2491 * The switch.
2492 */
2493 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
2494 {
2495 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
2496 {
2497 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
2498 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
2499 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
2500 {
2501 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
2502 pReq->Hdr.rc = VERR_INVALID_MAGIC;
2503 return 0;
2504 }
2505
2506 /*
2507 * Match the version.
2508 * The current logic is very simple, match the major interface version.
2509 */
2510 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
2511 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
2512 {
2513 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2514 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
2515 pReq->u.Out.u32Cookie = 0xffffffff;
2516 pReq->u.Out.u32SessionCookie = 0xffffffff;
2517 pReq->u.Out.u32SessionVersion = 0xffffffff;
2518 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2519 pReq->u.Out.pSession = NULL;
2520 pReq->u.Out.cFunctions = 0;
2521 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2522 return 0;
2523 }
2524
2525 /*
2526 * Fill in return data and be gone.
2527 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
2528 * u32SessionVersion <= u32ReqVersion!
2529 */
2530 /** @todo Somehow validate the client and negotiate a secure cookie... */
2531 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
2532 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
2533 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
2534 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2535 pReq->u.Out.pSession = pSession;
2536 pReq->u.Out.cFunctions = 0;
2537 pReq->Hdr.rc = VINF_SUCCESS;
2538 return 0;
2539 }
2540
2541 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2542 {
2543 /* validate */
2544 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2545 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2546
2547 /* execute */
2548 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.fCaps);
2549 if (RT_FAILURE(pReq->Hdr.rc))
2550 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2551 return 0;
2552 }
2553
2554 default:
2555 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2556 break;
2557 }
2558 return VERR_GENERAL_FAILURE;
2559}
2560
2561
2562/**
2563 * I/O Control worker.
2564 *
2565 * @returns IPRT status code.
2566 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2567 *
2568 * @param uIOCtl Function number.
2569 * @param pDevExt Device extension.
2570 * @param pSession Session data.
2571 * @param pReqHdr The request header.
2572 * @param cbReq The size of the request buffer.
2573 */
2574int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr, size_t cbReq)
2575{
2576 int rc;
2577 VBOXDRV_IOCTL_ENTRY(pSession, uIOCtl, pReqHdr);
2578
2579 /*
2580 * Validate the request.
2581 */
2582 if (RT_UNLIKELY(cbReq < sizeof(*pReqHdr)))
2583 {
2584 OSDBGPRINT(("vboxdrv: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
2585 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2586 return VERR_INVALID_PARAMETER;
2587 }
2588 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
2589 || pReqHdr->cbIn < sizeof(*pReqHdr)
2590 || pReqHdr->cbIn > cbReq
2591 || pReqHdr->cbOut < sizeof(*pReqHdr)
2592 || pReqHdr->cbOut > cbReq))
2593 {
2594 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
2595 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
2596 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2597 return VERR_INVALID_PARAMETER;
2598 }
2599 if (RT_UNLIKELY(!RT_VALID_PTR(pSession)))
2600 {
2601 OSDBGPRINT(("vboxdrv: Invalid pSession value %p (ioctl=%p)\n", pSession, (void *)uIOCtl));
2602 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2603 return VERR_INVALID_PARAMETER;
2604 }
2605 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
2606 {
2607 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
2608 {
2609 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
2610 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2611 return VERR_INVALID_PARAMETER;
2612 }
2613 }
2614 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
2615 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
2616 {
2617 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
2618 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2619 return VERR_INVALID_PARAMETER;
2620 }
2621
2622 /*
2623 * Hand it to an inner function to avoid lots of unnecessary return tracepoints.
2624 */
2625 if (pSession->fUnrestricted)
2626 rc = supdrvIOCtlInnerUnrestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2627 else
2628 rc = supdrvIOCtlInnerRestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2629
2630 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, pReqHdr->rc, rc);
2631 return rc;
2632}
2633
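/*
 * Editorial sketch, not part of the driver: the header invariants supdrvIOCtl
 * enforces before dispatching, expressed as one predicate (cookie checks
 * follow separately; cbReq is the buffer size supplied by the OS glue):
 *
 *     bool fValidHdr =    cbReq >= sizeof(SUPREQHDR)
 *                      && (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) == SUPREQHDR_FLAGS_MAGIC
 *                      && pReqHdr->cbIn  >= sizeof(SUPREQHDR) && pReqHdr->cbIn  <= cbReq
 *                      && pReqHdr->cbOut >= sizeof(SUPREQHDR) && pReqHdr->cbOut <= cbReq;
 */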
2634
2635/**
2636 * Inter-Driver Communication (IDC) worker.
2637 *
2638 * @returns VBox status code.
2639 * @retval VINF_SUCCESS on success.
2640 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2641 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
2642 *
2643 * @param uReq The request (function) code.
2644 * @param pDevExt Device extension.
2645 * @param pSession Session data.
2646 * @param pReqHdr The request header.
2647 */
2648int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
2649{
2650 /*
2651 * The OS specific code has already validated the pSession
2652 * pointer, and the request size being greater than or equal to
2653 * the size of the header.
2654 *
2655 * So, just check that pSession is a kernel context session.
2656 */
2657 if (RT_UNLIKELY( pSession
2658 && pSession->R0Process != NIL_RTR0PROCESS))
2659 return VERR_INVALID_PARAMETER;
2660
2661/*
2662 * Validation macro.
2663 */
2664#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
2665 do { \
2666 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
2667 { \
2668 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
2669 (long)pReqHdr->cb, (long)(cbExpect))); \
2670 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
2671 } \
2672 } while (0)
2673
2674 switch (uReq)
2675 {
2676 case SUPDRV_IDC_REQ_CONNECT:
2677 {
2678 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
2679 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
2680
2681 /*
2682 * Validate the cookie and other input.
2683 */
2684 if (pReq->Hdr.pSession != NULL)
2685 {
2686 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Hdr.pSession=%p expected NULL!\n", pReq->Hdr.pSession));
2687 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2688 }
2689 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
2690 {
2691 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
2692 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
2693 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2694 }
2695 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
2696 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
2697 {
2698 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uReqVersion=%#x doesn't match!\n",
2699 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
2700 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2701 }
2702 if (pSession != NULL)
2703 {
2704 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pSession));
2705 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2706 }
2707
2708 /*
2709 * Match the version.
2710 * The current logic is very simple, match the major interface version.
2711 */
2712 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
2713 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
2714 {
2715 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2716 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
2717 pReq->u.Out.pSession = NULL;
2718 pReq->u.Out.uSessionVersion = 0xffffffff;
2719 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2720 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2721 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2722 return VINF_SUCCESS;
2723 }
2724
2725 pReq->u.Out.pSession = NULL;
2726 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
2727 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2728 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2729
2730 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, true /*fUnrestricted*/, &pSession);
2731 if (RT_FAILURE(pReq->Hdr.rc))
2732 {
2733 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
2734 return VINF_SUCCESS;
2735 }
2736
2737 pReq->u.Out.pSession = pSession;
2738 pReq->Hdr.pSession = pSession;
2739
2740 return VINF_SUCCESS;
2741 }
2742
2743 case SUPDRV_IDC_REQ_DISCONNECT:
2744 {
2745 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
2746
2747 supdrvSessionRelease(pSession);
2748 return pReqHdr->rc = VINF_SUCCESS;
2749 }
2750
2751 case SUPDRV_IDC_REQ_GET_SYMBOL:
2752 {
2753 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
2754 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
2755
2756 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
2757 return VINF_SUCCESS;
2758 }
2759
2760 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
2761 {
2762 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
2763 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
2764
2765 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
2766 return VINF_SUCCESS;
2767 }
2768
2769 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
2770 {
2771 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
2772 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
2773
2774 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
2775 return VINF_SUCCESS;
2776 }
2777
2778 default:
2779 Log(("Unknown IDC %#lx\n", (long)uReq));
2780 break;
2781 }
2782
2783#undef REQ_CHECK_IDC_SIZE
2784 return VERR_NOT_SUPPORTED;
2785}
2786
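/*
 * Editorial sketch, not part of the driver: the shape of an IDC connect
 * request as validated above.  uMyMinVer/uMyReqVer stand in for the client's
 * version constants, and the request would normally reach supdrvIDC through
 * the OS specific IDC entry point rather than a direct call.
 *
 *     SUPDRVIDCREQCONNECT Req;
 *     RT_ZERO(Req);
 *     Req.Hdr.cb              = sizeof(Req);
 *     Req.Hdr.pSession        = NULL;                               // must be NULL on connect
 *     Req.u.In.u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE;
 *     Req.u.In.uMinVersion    = uMyMinVer;
 *     Req.u.In.uReqVersion    = uMyReqVer;
 *     // On success Req.Hdr.rc is VINF_SUCCESS and Req.u.Out.pSession holds
 *     // the newly created kernel session.
 */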
2787
2788/**
2789 * Register an object for reference counting.
2790 * The object is registered with one reference in the specified session.
2791 *
2792 * @returns Unique identifier on success (pointer).
2793 * All future references must use this identifier.
2794 * @returns NULL on failure.
2795 * @param pSession The caller's session.
2796 * @param enmType The object type.
2797 * @param pfnDestructor The destructor function which will be called when the reference count reaches 0.
2798 * @param pvUser1 The first user argument.
2799 * @param pvUser2 The second user argument.
2800 */
2801SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
2802{
2803 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2804 PSUPDRVOBJ pObj;
2805 PSUPDRVUSAGE pUsage;
2806
2807 /*
2808 * Validate the input.
2809 */
2810 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
2811 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
2812 AssertPtrReturn(pfnDestructor, NULL);
2813
2814 /*
2815 * Allocate and initialize the object.
2816 */
2817 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
2818 if (!pObj)
2819 return NULL;
2820 pObj->u32Magic = SUPDRVOBJ_MAGIC;
2821 pObj->enmType = enmType;
2822 pObj->pNext = NULL;
2823 pObj->cUsage = 1;
2824 pObj->pfnDestructor = pfnDestructor;
2825 pObj->pvUser1 = pvUser1;
2826 pObj->pvUser2 = pvUser2;
2827 pObj->CreatorUid = pSession->Uid;
2828 pObj->CreatorGid = pSession->Gid;
2829 pObj->CreatorProcess= pSession->Process;
2830 supdrvOSObjInitCreator(pObj, pSession);
2831
2832 /*
2833 * Allocate the usage record.
2834 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
2835 */
2836 RTSpinlockAcquire(pDevExt->Spinlock);
2837
2838 pUsage = pDevExt->pUsageFree;
2839 if (pUsage)
2840 pDevExt->pUsageFree = pUsage->pNext;
2841 else
2842 {
2843 RTSpinlockRelease(pDevExt->Spinlock);
2844 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
2845 if (!pUsage)
2846 {
2847 RTMemFree(pObj);
2848 return NULL;
2849 }
2850 RTSpinlockAcquire(pDevExt->Spinlock);
2851 }
2852
2853 /*
2854 * Insert the object and create the session usage record.
2855 */
2856 /* The object. */
2857 pObj->pNext = pDevExt->pObjs;
2858 pDevExt->pObjs = pObj;
2859
2860 /* The session record. */
2861 pUsage->cUsage = 1;
2862 pUsage->pObj = pObj;
2863 pUsage->pNext = pSession->pUsage;
2864 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
2865 pSession->pUsage = pUsage;
2866
2867 RTSpinlockRelease(pDevExt->Spinlock);
2868
2869 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
2870 return pObj;
2871}
2872
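/*
 * Editorial sketch, not part of the driver: the register/addref/release life
 * cycle of a shared object.  SUPDRVOBJTYPE_VM is used as a representative
 * type and the destructor signature is assumed to follow PFNSUPDRVDESTRUCTOR
 * (object pointer plus the two user arguments).
 *
 *     static DECLCALLBACK(void) myObjDtor(void *pvObj, void *pvUser1, void *pvUser2)
 *     {
 *         RTMemFree(pvUser1);             // free whatever payload the object wraps
 *         NOREF(pvObj); NOREF(pvUser2);
 *     }
 *
 *     void *pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, myObjDtor, pvPayload, NULL);
 *     if (pvObj)
 *     {
 *         int rc = SUPR0ObjAddRef(pvObj, pOtherSession);   // a second session shares it
 *         ...
 *         SUPR0ObjRelease(pvObj, pOtherSession);
 *         SUPR0ObjRelease(pvObj, pSession);                // last release invokes myObjDtor
 *     }
 */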
2873
2874/**
2875 * Increment the reference counter for the object associating the reference
2876 * with the specified session.
2877 *
2878 * @returns IPRT status code.
2879 * @param pvObj The identifier returned by SUPR0ObjRegister().
2880 * @param pSession The session which is referencing the object.
2881 *
2882 * @remarks The caller should not own any spinlocks and must carefully protect
2883 * itself against potential race with the destructor so freed memory
2884 * isn't accessed here.
2885 */
2886SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
2887{
2888 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
2889}
2890
2891
2892/**
2893 * Increment the reference counter for the object associating the reference
2894 * with the specified session.
2895 *
2896 * @returns IPRT status code.
2897 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
2898 * couldn't be allocated. (If you see this you're not doing the right
2899 * thing and it won't ever work reliably.)
2900 *
2901 * @param pvObj The identifier returned by SUPR0ObjRegister().
2902 * @param pSession The session which is referencing the object.
2903 * @param fNoBlocking Set if it's not OK to block. Never try to make the
2904 * first reference to an object in a session with this
2905 * argument set.
2906 *
2907 * @remarks The caller should not own any spinlocks and must carefully protect
2908 * itself against potential race with the destructor so freed memory
2909 * isn't accessed here.
2910 */
2911SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
2912{
2913 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2914 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2915 int rc = VINF_SUCCESS;
2916 PSUPDRVUSAGE pUsagePre;
2917 PSUPDRVUSAGE pUsage;
2918
2919 /*
2920 * Validate the input.
2921 * Be ready for the destruction race (someone might be stuck in the
2922 * destructor waiting on a lock we own).
2923 */
2924 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2925 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
2926 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
2927 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
2928 VERR_INVALID_PARAMETER);
2929
2930 RTSpinlockAcquire(pDevExt->Spinlock);
2931
2932 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2933 {
2934 RTSpinlockRelease(pDevExt->Spinlock);
2935
2936 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2937 return VERR_WRONG_ORDER;
2938 }
2939
2940 /*
2941 * Preallocate the usage record if we can.
2942 */
2943 pUsagePre = pDevExt->pUsageFree;
2944 if (pUsagePre)
2945 pDevExt->pUsageFree = pUsagePre->pNext;
2946 else if (!fNoBlocking)
2947 {
2948 RTSpinlockRelease(pDevExt->Spinlock);
2949 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2950 if (!pUsagePre)
2951 return VERR_NO_MEMORY;
2952
2953 RTSpinlockAcquire(pDevExt->Spinlock);
2954 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2955 {
2956 RTSpinlockRelease(pDevExt->Spinlock);
2957
2958 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2959 return VERR_WRONG_ORDER;
2960 }
2961 }
2962
2963 /*
2964 * Reference the object.
2965 */
2966 pObj->cUsage++;
2967
2968 /*
2969 * Look for the session record.
2970 */
2971 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
2972 {
2973 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
2974 if (pUsage->pObj == pObj)
2975 break;
2976 }
2977 if (pUsage)
2978 pUsage->cUsage++;
2979 else if (pUsagePre)
2980 {
2981 /* create a new session record. */
2982 pUsagePre->cUsage = 1;
2983 pUsagePre->pObj = pObj;
2984 pUsagePre->pNext = pSession->pUsage;
2985 pSession->pUsage = pUsagePre;
2986 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
2987
2988 pUsagePre = NULL;
2989 }
2990 else
2991 {
2992 pObj->cUsage--;
2993 rc = VERR_TRY_AGAIN;
2994 }
2995
2996 /*
2997 * Put any unused usage record into the free list.
2998 */
2999 if (pUsagePre)
3000 {
3001 pUsagePre->pNext = pDevExt->pUsageFree;
3002 pDevExt->pUsageFree = pUsagePre;
3003 }
3004
3005 RTSpinlockRelease(pDevExt->Spinlock);
3006
3007 return rc;
3008}
3009
3010
3011/**
3012 * Decrement / destroy a reference counter record for an object.
3013 *
3014 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
3015 *
3016 * @returns IPRT status code.
3017 * @retval VINF_SUCCESS if not destroyed.
3018 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
3019 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
3020 * strict builds.
3021 *
3022 * @param pvObj The identifier returned by SUPR0ObjRegister().
3023 * @param pSession The session which is referencing the object.
3024 */
3025SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
3026{
3027 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3028 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3029 int rc = VERR_INVALID_PARAMETER;
3030 PSUPDRVUSAGE pUsage;
3031 PSUPDRVUSAGE pUsagePrev;
3032
3033 /*
3034 * Validate the input.
3035 */
3036 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3037 AssertMsgReturn(VALID_PTR(pObj)&& pObj->u32Magic == SUPDRVOBJ_MAGIC,
3038 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3039 VERR_INVALID_PARAMETER);
3040
3041 /*
3042 * Acquire the spinlock and look for the usage record.
3043 */
3044 RTSpinlockAcquire(pDevExt->Spinlock);
3045
3046 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
3047 pUsage;
3048 pUsagePrev = pUsage, pUsage = pUsage->pNext)
3049 {
3050 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
3051 if (pUsage->pObj == pObj)
3052 {
3053 rc = VINF_SUCCESS;
3054 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
3055 if (pUsage->cUsage > 1)
3056 {
3057 pObj->cUsage--;
3058 pUsage->cUsage--;
3059 }
3060 else
3061 {
3062 /*
3063 * Free the session record.
3064 */
3065 if (pUsagePrev)
3066 pUsagePrev->pNext = pUsage->pNext;
3067 else
3068 pSession->pUsage = pUsage->pNext;
3069 pUsage->pNext = pDevExt->pUsageFree;
3070 pDevExt->pUsageFree = pUsage;
3071
3072 /* What about the object? */
3073 if (pObj->cUsage > 1)
3074 pObj->cUsage--;
3075 else
3076 {
3077 /*
3078 * Object is to be destroyed, unlink it.
3079 */
3080 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
3081 rc = VINF_OBJECT_DESTROYED;
3082 if (pDevExt->pObjs == pObj)
3083 pDevExt->pObjs = pObj->pNext;
3084 else
3085 {
3086 PSUPDRVOBJ pObjPrev;
3087 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
3088 if (pObjPrev->pNext == pObj)
3089 {
3090 pObjPrev->pNext = pObj->pNext;
3091 break;
3092 }
3093 Assert(pObjPrev);
3094 }
3095 }
3096 }
3097 break;
3098 }
3099 }
3100
3101 RTSpinlockRelease(pDevExt->Spinlock);
3102
3103 /*
3104 * Call the destructor and free the object if required.
3105 */
3106 if (rc == VINF_OBJECT_DESTROYED)
3107 {
3108 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
3109 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
3110 if (pObj->pfnDestructor)
3111 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
3112 RTMemFree(pObj);
3113 }
3114
3115 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
3116 return rc;
3117}
3118
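/*
 * Editor's note: reference life-cycle sketch, not part of the driver.  pvObj is
 * the identifier returned by SUPR0ObjRegister(); pSessionA registered it and
 * pSessionB is a second, hypothetical session wanting to use it.
 *
 *   rc = SUPR0ObjAddRef(pvObj, pSessionB);          // take a reference for session B
 *   if (RT_SUCCESS(rc))
 *   {
 *       ... use the object ...
 *       SUPR0ObjRelease(pvObj, pSessionB);          // drop session B's reference
 *   }
 *   rc = SUPR0ObjRelease(pvObj, pSessionA);         // drops the registration reference;
 *                                                   // VINF_OBJECT_DESTROYED when it was the last
 */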
3119
3120/**
3121 * Verifies that the current process can access the specified object.
3122 *
3123 * @returns The following IPRT status code:
3124 * @retval VINF_SUCCESS if access was granted.
3125 * @retval VERR_PERMISSION_DENIED if denied access.
3126 * @retval VERR_INVALID_PARAMETER if invalid parameter.
3127 *
3128 * @param pvObj The identifier returned by SUPR0ObjRegister().
3129 * @param pSession The session which wishes to access the object.
3130 * @param pszObjName Object string name. This is optional and depends on the object type.
3131 *
3132 * @remark The caller is responsible for making sure the object isn't removed while
3133 * we're inside this function. If uncertain about this, just call AddRef before calling us.
3134 */
3135SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
3136{
3137 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
3138 int rc;
3139
3140 /*
3141 * Validate the input.
3142 */
3143 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3144 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
3145 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
3146 VERR_INVALID_PARAMETER);
3147
3148 /*
3149 * Check access. (returns true if a decision has been made.)
3150 */
3151 rc = VERR_INTERNAL_ERROR;
3152 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
3153 return rc;
3154
3155 /*
3156 * Default policy is to allow the user to access his own
3157 * stuff but nothing else.
3158 */
3159 if (pObj->CreatorUid == pSession->Uid)
3160 return VINF_SUCCESS;
3161 return VERR_PERMISSION_DENIED;
3162}
3163
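/*
 * Editor's note: access-check sketch, not part of the driver.  A component that
 * hands out a shared object would typically reference it first and then let the
 * driver apply its policy; the object name "example-net" is purely illustrative.
 *
 *   rc = SUPR0ObjAddRef(pvObj, pSession);
 *   if (RT_SUCCESS(rc))
 *   {
 *       rc = SUPR0ObjVerifyAccess(pvObj, pSession, "example-net");
 *       if (RT_FAILURE(rc))
 *           SUPR0ObjRelease(pvObj, pSession);       // access denied, drop the reference again
 *   }
 */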
3164
3165/**
3166 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionVM member.
3167 *
3168 * @returns The associated VM pointer.
3169 * @param pSession The session of the current thread.
3170 */
3171SUPR0DECL(PVM) SUPR0GetSessionVM(PSUPDRVSESSION pSession)
3172{
3173 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3174 return pSession->pSessionVM;
3175}
3176
3177
3178/**
3179 * API for the VMMR0 module to get the SUPDRVSESSION::pSessionGVM member.
3180 *
3181 * @returns The associated GVM pointer.
3182 * @param pSession The session of the current thread.
3183 */
3184SUPR0DECL(PGVM) SUPR0GetSessionGVM(PSUPDRVSESSION pSession)
3185{
3186 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
3187 return pSession->pSessionGVM;
3188}
3189
3190
3191/**
3192 * API for the VMMR0 module to work the SUPDRVSESSION::pSessionVM member.
3193 *
3194 * This will fail if there is already a VM associated with the session and pVM
3195 * isn't NULL.
3196 *
3197 * @retval VINF_SUCCESS
3198 * @retval VERR_ALREADY_EXISTS if there already is a VM associated with the
3199 * session.
3200 * @retval VERR_INVALID_PARAMETER if only one of the parameters is NULL or if
3201 * the session is invalid.
3202 *
3203 * @param pSession The session of the current thread.
3204 * @param pGVM The GVM to associate with the session. Pass NULL to
3205 * disassociate.
3206 * @param pVM The VM to associate with the session. Pass NULL to
3207 * disassociate.
3208 */
3209SUPR0DECL(int) SUPR0SetSessionVM(PSUPDRVSESSION pSession, PGVM pGVM, PVM pVM)
3210{
3211 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3212 AssertReturn((pGVM != NULL) == (pVM != NULL), VERR_INVALID_PARAMETER);
3213
3214 RTSpinlockAcquire(pSession->pDevExt->Spinlock);
3215 if (pGVM)
3216 {
3217 if (!pSession->pSessionGVM)
3218 {
3219 pSession->pSessionGVM = pGVM;
3220 pSession->pSessionVM = pVM;
3221 pSession->pFastIoCtrlVM = NULL;
3222 }
3223 else
3224 {
3225 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3226 SUPR0Printf("SUPR0SetSessionVM: Unable to associated GVM/VM %p/%p with session %p as it has %p/%p already!\n",
3227 pGVM, pVM, pSession, pSession->pSessionGVM, pSession->pSessionVM);
3228 return VERR_ALREADY_EXISTS;
3229 }
3230 }
3231 else
3232 {
3233 pSession->pSessionGVM = NULL;
3234 pSession->pSessionVM = NULL;
3235 pSession->pFastIoCtrlVM = NULL;
3236 }
3237 RTSpinlockRelease(pSession->pDevExt->Spinlock);
3238 return VINF_SUCCESS;
3239}
3240
3241
3242/** @copydoc RTLogGetDefaultInstanceEx
3243 * @remarks To allow overriding RTLogGetDefaultInstanceEx locally. */
3244SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogInstanceEx(uint32_t fFlagsAndGroup)
3245{
3246 return RTLogGetDefaultInstanceEx(fFlagsAndGroup);
3247}
3248
3249
3250/** @copydoc RTLogRelGetDefaultInstanceEx
3251 * @remarks To allow overriding RTLogRelGetDefaultInstanceEx locally. */
3252SUPR0DECL(struct RTLOGGER *) SUPR0GetDefaultLogRelInstanceEx(uint32_t fFlagsAndGroup)
3253{
3254 return RTLogRelGetDefaultInstanceEx(fFlagsAndGroup);
3255}
3256
3257
3258/**
3259 * Lock pages.
3260 *
3261 * @returns IPRT status code.
3262 * @param pSession Session to which the locked memory should be associated.
3263 * @param pvR3 Start of the memory range to lock.
3264 * This must be page aligned.
3265 * @param cPages Number of pages to lock.
3266 * @param paPages Where to put the physical addresses of locked memory.
3267 */
3268SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
3269{
3270 int rc;
3271 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3272 const size_t cb = (size_t)cPages << PAGE_SHIFT;
3273 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
3274
3275 /*
3276 * Verify input.
3277 */
3278 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3279 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
3280 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
3281 || !pvR3)
3282 {
3283 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
3284 return VERR_INVALID_PARAMETER;
3285 }
3286
3287 /*
3288 * Let IPRT do the job.
3289 */
3290 Mem.eType = MEMREF_TYPE_LOCKED;
3291 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3292 if (RT_SUCCESS(rc))
3293 {
3294 uint32_t iPage = cPages;
3295 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
3296 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
3297
3298 while (iPage-- > 0)
3299 {
3300 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3301 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
3302 {
3303 AssertMsgFailed(("iPage=%d\n", iPage));
3304 rc = VERR_INTERNAL_ERROR;
3305 break;
3306 }
3307 }
3308 if (RT_SUCCESS(rc))
3309 rc = supdrvMemAdd(&Mem, pSession);
3310 if (RT_FAILURE(rc))
3311 {
3312 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
3313 AssertRC(rc2);
3314 }
3315 }
3316
3317 return rc;
3318}
3319
3320
3321/**
3322 * Unlocks the memory pointed to by pv.
3323 *
3324 * @returns IPRT status code.
3325 * @param pSession Session to which the memory was locked.
3326 * @param pvR3 Memory to unlock.
3327 */
3328SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3329{
3330 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3331 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3332 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
3333}
3334
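/*
 * Editor's note: lock/unlock sketch, not part of the driver.  pvR3 is assumed to
 * be a page aligned ring-3 buffer of 16 pages belonging to the calling process.
 *
 *   RTHCPHYS aPhys[16];
 *   rc = SUPR0LockMem(pSession, pvR3, RT_ELEMENTS(aPhys), &aPhys[0]);
 *   if (RT_SUCCESS(rc))
 *   {
 *       ... hand aPhys[] to the VMM or a device ...
 *       rc = SUPR0UnlockMem(pSession, pvR3);        // or leave it to session cleanup
 *   }
 */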
3335
3336/**
3337 * Allocates a chunk of page aligned memory with contiguous and fixed physical
3338 * backing.
3339 *
3340 * @returns IPRT status code.
3341 * @param pSession Session data.
3342 * @param cPages Number of pages to allocate.
3343 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
3344 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
3345 * @param pHCPhys Where to put the physical address of allocated memory.
3346 */
3347SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
3348{
3349 int rc;
3350 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3351 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
3352
3353 /*
3354 * Validate input.
3355 */
3356 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3357 if (!ppvR3 || !ppvR0 || !pHCPhys)
3358 {
3359 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
3360 pSession, ppvR0, ppvR3, pHCPhys));
3361 return VERR_INVALID_PARAMETER;
3362
3363 }
3364 if (cPages < 1 || cPages >= 256)
3365 {
3366 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3367 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3368 }
3369
3370 /*
3371 * Let IPRT do the job.
3372 */
3373 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
3374 if (RT_SUCCESS(rc))
3375 {
3376 int rc2;
3377 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3378 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3379 if (RT_SUCCESS(rc))
3380 {
3381 Mem.eType = MEMREF_TYPE_CONT;
3382 rc = supdrvMemAdd(&Mem, pSession);
3383 if (!rc)
3384 {
3385 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3386 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3387 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
3388 return 0;
3389 }
3390
3391 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3392 AssertRC(rc2);
3393 }
3394 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3395 AssertRC(rc2);
3396 }
3397
3398 return rc;
3399}
3400
3401
3402/**
3403 * Frees memory allocated using SUPR0ContAlloc().
3404 *
3405 * @returns IPRT status code.
3406 * @param pSession The session to which the memory was allocated.
3407 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3408 */
3409SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3410{
3411 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3412 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3413 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
3414}
3415
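/*
 * Editor's note: contiguous allocation sketch, not part of the driver; the page
 * count is arbitrary but must stay within the 1..255 range checked above.
 *
 *   RTR0PTR pvR0; RTR3PTR pvR3; RTHCPHYS HCPhys;
 *   rc = SUPR0ContAlloc(pSession, 4, &pvR0, &pvR3, &HCPhys);    // 4 pages
 *   if (RT_SUCCESS(rc))
 *   {
 *       ... program hardware with HCPhys, access the memory via pvR0 or pvR3 ...
 *       rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pvR3);        // either address works
 *   }
 */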
3416
3417/**
3418 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
3419 *
3420 * The memory isn't zeroed.
3421 *
3422 * @returns IPRT status code.
3423 * @param pSession Session data.
3424 * @param cPages Number of pages to allocate.
3425 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
3426 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
3427 * @param paPages Where to put the physical addresses of allocated memory.
3428 */
3429SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
3430{
3431 unsigned iPage;
3432 int rc;
3433 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3434 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
3435
3436 /*
3437 * Validate input.
3438 */
3439 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3440 if (!ppvR3 || !ppvR0 || !paPages)
3441 {
3442 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
3443 pSession, ppvR3, ppvR0, paPages));
3444 return VERR_INVALID_PARAMETER;
3445
3446 }
3447 if (cPages < 1 || cPages >= 256)
3448 {
3449 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3450 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3451 }
3452
3453 /*
3454 * Let IPRT do the work.
3455 */
3456 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
3457 if (RT_SUCCESS(rc))
3458 {
3459 int rc2;
3460 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3461 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3462 if (RT_SUCCESS(rc))
3463 {
3464 Mem.eType = MEMREF_TYPE_LOW;
3465 rc = supdrvMemAdd(&Mem, pSession);
3466 if (!rc)
3467 {
3468 for (iPage = 0; iPage < cPages; iPage++)
3469 {
3470 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3471 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
3472 }
3473 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3474 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3475 return 0;
3476 }
3477
3478 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3479 AssertRC(rc2);
3480 }
3481
3482 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3483 AssertRC(rc2);
3484 }
3485
3486 return rc;
3487}
3488
3489
3490/**
3491 * Frees memory allocated using SUPR0LowAlloc().
3492 *
3493 * @returns IPRT status code.
3494 * @param pSession The session to which the memory was allocated.
3495 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3496 */
3497SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3498{
3499 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3500 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3501 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
3502}
3503
3504
3505
3506/**
3507 * Allocates a chunk of memory with both R0 and R3 mappings.
3508 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
3509 *
3510 * @returns IPRT status code.
3511 * @param pSession The session to associate the allocation with.
3512 * @param cb Number of bytes to allocate.
3513 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3514 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3515 */
3516SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
3517{
3518 int rc;
3519 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3520 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
3521
3522 /*
3523 * Validate input.
3524 */
3525 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3526 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
3527 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3528 if (cb < 1 || cb >= _4M)
3529 {
3530 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
3531 return VERR_INVALID_PARAMETER;
3532 }
3533
3534 /*
3535 * Let IPRT do the work.
3536 */
3537 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
3538 if (RT_SUCCESS(rc))
3539 {
3540 int rc2;
3541 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3542 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3543 if (RT_SUCCESS(rc))
3544 {
3545 Mem.eType = MEMREF_TYPE_MEM;
3546 rc = supdrvMemAdd(&Mem, pSession);
3547 if (!rc)
3548 {
3549 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3550 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3551 return VINF_SUCCESS;
3552 }
3553
3554 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3555 AssertRC(rc2);
3556 }
3557
3558 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3559 AssertRC(rc2);
3560 }
3561
3562 return rc;
3563}
3564
3565
3566/**
3567 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
3568 *
3569 * @returns IPRT status code.
3570 * @param pSession The session to which the memory was allocated.
3571 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3572 * @param paPages Where to store the physical addresses.
3573 */
3574SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
3575{
3576 PSUPDRVBUNDLE pBundle;
3577 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
3578
3579 /*
3580 * Validate input.
3581 */
3582 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3583 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
3584 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
3585
3586 /*
3587 * Search for the address.
3588 */
3589 RTSpinlockAcquire(pSession->Spinlock);
3590 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3591 {
3592 if (pBundle->cUsed > 0)
3593 {
3594 unsigned i;
3595 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3596 {
3597 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
3598 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3599 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3600 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3601 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
3602 )
3603 )
3604 {
3605 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
3606 size_t iPage;
3607 for (iPage = 0; iPage < cPages; iPage++)
3608 {
3609 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
3610 paPages[iPage].uReserved = 0;
3611 }
3612 RTSpinlockRelease(pSession->Spinlock);
3613 return VINF_SUCCESS;
3614 }
3615 }
3616 }
3617 }
3618 RTSpinlockRelease(pSession->Spinlock);
3619 Log(("Failed to find %p!!!\n", (void *)uPtr));
3620 return VERR_INVALID_PARAMETER;
3621}
3622
3623
3624/**
3625 * Free memory allocated by SUPR0MemAlloc().
3626 *
3627 * @returns IPRT status code.
3628 * @param pSession The session owning the allocation.
3629 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3630 */
3631SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3632{
3633 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3634 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3635 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
3636}
3637
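/*
 * Editor's note: SUPR0MemAlloc() sketch, not part of the driver.  The SUPPAGE
 * array is sized for the two pages requested below.
 *
 *   RTR0PTR pvR0; RTR3PTR pvR3;
 *   rc = SUPR0MemAlloc(pSession, 2 * PAGE_SIZE, &pvR0, &pvR3);
 *   if (RT_SUCCESS(rc))
 *   {
 *       SUPPAGE aPages[2];
 *       rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
 *       ...
 *       rc = SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
 *   }
 */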
3638
3639/**
3640 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
3641 *
3642 * The memory is fixed and it's possible to query the physical addresses using
3643 * SUPR0MemGetPhys().
3644 *
3645 * @returns IPRT status code.
3646 * @param pSession The session to associate the allocation with.
3647 * @param cPages The number of pages to allocate.
3648 * @param fFlags Flags, reserved for the future. Must be zero.
3649 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3650 * NULL if no ring-3 mapping.
3651 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3652 * NULL if no ring-0 mapping.
3653 * @param paPages Where to store the addresses of the pages. Optional.
3654 */
3655SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
3656{
3657 int rc;
3658 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3659 LogFlow(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
3660
3661 /*
3662 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3663 */
3664 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3665 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
3666 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3667 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
3668 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3669 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
3670 {
3671 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than %uMB (VBOX_MAX_ALLOC_PAGE_COUNT pages).\n", cPages, VBOX_MAX_ALLOC_PAGE_COUNT * (_1M / _4K)));
3672 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3673 }
3674
3675 /*
3676 * Let IPRT do the work.
3677 */
3678 if (ppvR0)
3679 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
3680 else
3681 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
3682 if (RT_SUCCESS(rc))
3683 {
3684 int rc2;
3685 if (ppvR3)
3686 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_WRITE | RTMEM_PROT_READ, NIL_RTR0PROCESS);
3687 else
3688 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
3689 if (RT_SUCCESS(rc))
3690 {
3691 Mem.eType = MEMREF_TYPE_PAGE;
3692 rc = supdrvMemAdd(&Mem, pSession);
3693 if (!rc)
3694 {
3695 if (ppvR3)
3696 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3697 if (ppvR0)
3698 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3699 if (paPages)
3700 {
3701 uint32_t iPage = cPages;
3702 while (iPage-- > 0)
3703 {
3704 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
3705 Assert(paPages[iPage] != NIL_RTHCPHYS);
3706 }
3707 }
3708 return VINF_SUCCESS;
3709 }
3710
3711 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3712 AssertRC(rc2);
3713 }
3714
3715 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3716 AssertRC(rc2);
3717 }
3718 return rc;
3719}
3720
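/*
 * Editor's note: SUPR0PageAllocEx() sketch, not part of the driver, requesting
 * both mappings and the physical addresses of a small hypothetical buffer.
 *
 *   RTR3PTR pvR3; RTR0PTR pvR0; RTHCPHYS aPhys[8];
 *   rc = SUPR0PageAllocEx(pSession, RT_ELEMENTS(aPhys), 0, &pvR3, &pvR0, &aPhys[0]);   // fFlags must be zero
 *   if (RT_SUCCESS(rc))
 *   {
 *       ... share pvR3 with ring-3 code, use pvR0 in ring-0 ...
 *       rc = SUPR0PageFree(pSession, pvR3);
 *   }
 */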
3721
3722/**
3723 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
3724 * space.
3725 *
3726 * @returns IPRT status code.
3727 * @param pSession The session to associate the allocation with.
3728 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3729 * @param offSub Where to start mapping. Must be page aligned.
3730 * @param cbSub How much to map. Must be page aligned.
3731 * @param fFlags Flags, MBZ.
3732 * @param ppvR0 Where to return the address of the ring-0 mapping on
3733 * success.
3734 */
3735SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
3736 uint32_t fFlags, PRTR0PTR ppvR0)
3737{
3738 int rc;
3739 PSUPDRVBUNDLE pBundle;
3740 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
3741 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
3742
3743 /*
3744 * Validate input.
3745 */
3746 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3747 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3748 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3749 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3750 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3751 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3752
3753 /*
3754 * Find the memory object.
3755 */
3756 RTSpinlockAcquire(pSession->Spinlock);
3757 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3758 {
3759 if (pBundle->cUsed > 0)
3760 {
3761 unsigned i;
3762 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3763 {
3764 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3765 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3766 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3767 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
3768 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
3769 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3770 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
3771 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
3772 {
3773 hMemObj = pBundle->aMem[i].MemObj;
3774 break;
3775 }
3776 }
3777 }
3778 }
3779 RTSpinlockRelease(pSession->Spinlock);
3780
3781 rc = VERR_INVALID_PARAMETER;
3782 if (hMemObj != NIL_RTR0MEMOBJ)
3783 {
3784 /*
3785 * Do some further input validations before calling IPRT.
3786 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
3787 */
3788 size_t cbMemObj = RTR0MemObjSize(hMemObj);
3789 if ( offSub < cbMemObj
3790 && cbSub <= cbMemObj
3791 && offSub + cbSub <= cbMemObj)
3792 {
3793 RTR0MEMOBJ hMapObj;
3794 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
3795 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
3796 if (RT_SUCCESS(rc))
3797 *ppvR0 = RTR0MemObjAddress(hMapObj);
3798 }
3799 else
3800 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3801
3802 }
3803 return rc;
3804}
3805
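/*
 * Editor's note: kernel mapping sketch, not part of the driver.  pvR3 is the
 * ring-3 address of an earlier SUPR0PageAllocEx() allocation; the sub-range is
 * an arbitrary page aligned example.
 *
 *   RTR0PTR pvR0Map;
 *   rc = SUPR0PageMapKernel(pSession, pvR3, 0, 2 * PAGE_SIZE, 0, &pvR0Map);    // offSub=0, cbSub=2 pages
 *   if (RT_SUCCESS(rc))
 *       ... access the pages through pvR0Map; the mapping goes away when the
 *           allocation is freed ...
 */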
3806
3807/**
3808 * Changes the page level protection of one or more pages previously allocated
3809 * by SUPR0PageAllocEx.
3810 *
3811 * @returns IPRT status code.
3812 * @param pSession The session to associate the allocation with.
3813 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3814 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
3815 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
3816 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
3817 * @param offSub Where to start changing. Must be page aligned.
3818 * @param cbSub How much to change. Must be page aligned.
3819 * @param fProt The new page level protection, see RTMEM_PROT_*.
3820 */
3821SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
3822{
3823 int rc;
3824 PSUPDRVBUNDLE pBundle;
3825 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
3826 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
3827 LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt-%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
3828
3829 /*
3830 * Validate input.
3831 */
3832 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3833 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
3834 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3835 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3836 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3837
3838 /*
3839 * Find the memory object.
3840 */
3841 RTSpinlockAcquire(pSession->Spinlock);
3842 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3843 {
3844 if (pBundle->cUsed > 0)
3845 {
3846 unsigned i;
3847 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3848 {
3849 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3850 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3851 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3852 || pvR3 == NIL_RTR3PTR)
3853 && ( pvR0 == NIL_RTR0PTR
3854 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
3855 && ( pvR3 == NIL_RTR3PTR
3856 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
3857 {
3858 if (pvR0 != NIL_RTR0PTR)
3859 hMemObjR0 = pBundle->aMem[i].MemObj;
3860 if (pvR3 != NIL_RTR3PTR)
3861 hMemObjR3 = pBundle->aMem[i].MapObjR3;
3862 break;
3863 }
3864 }
3865 }
3866 }
3867 RTSpinlockRelease(pSession->Spinlock);
3868
3869 rc = VERR_INVALID_PARAMETER;
3870 if ( hMemObjR0 != NIL_RTR0MEMOBJ
3871 || hMemObjR3 != NIL_RTR0MEMOBJ)
3872 {
3873 /*
3874 * Do some further input validations before calling IPRT.
3875 */
3876 size_t cbMemObj = hMemObjR0 != NIL_RTR0PTR ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
3877 if ( offSub < cbMemObj
3878 && cbSub <= cbMemObj
3879 && offSub + cbSub <= cbMemObj)
3880 {
3881 rc = VINF_SUCCESS;
3882 if (hMemObjR3 != NIL_RTR0PTR)
3883 rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
3884 if (hMemObjR0 != NIL_RTR0PTR && RT_SUCCESS(rc))
3885 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
3886 }
3887 else
3888 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3889
3890 }
3891 return rc;
3892
3893}
3894
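/*
 * Editor's note: protection change sketch, not part of the driver, e.g. turning
 * the first page of a SUPR0PageAllocEx() allocation into a read-only guard page
 * in both mappings and restoring it afterwards.
 *
 *   rc = SUPR0PageProtect(pSession, pvR3, pvR0, 0, PAGE_SIZE, RTMEM_PROT_READ);
 *   ...
 *   rc = SUPR0PageProtect(pSession, pvR3, pvR0, 0, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
 */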
3895
3896/**
3897 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
3898 *
3899 * @returns IPRT status code.
3900 * @param pSession The session owning the allocation.
3901 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
3902 * SUPR0PageAllocEx().
3903 */
3904SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3905{
3906 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3907 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3908 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
3909}
3910
3911
3912/**
3913 * Reports a bad context, currently that means EFLAGS.AC is 0 instead of 1.
3914 *
3915 * @param pDevExt The device extension.
3916 * @param pszFile The source file where the caller detected the bad
3917 * context.
3918 * @param uLine The line number in @a pszFile.
3919 * @param pszExtra Optional additional message to give further hints.
3920 */
3921void VBOXCALL supdrvBadContext(PSUPDRVDEVEXT pDevExt, const char *pszFile, uint32_t uLine, const char *pszExtra)
3922{
3923 uint32_t cCalls;
3924
3925 /*
3926 * Shorten the filename before displaying the message.
3927 */
3928 for (;;)
3929 {
3930 const char *pszTmp = strchr(pszFile, '/');
3931 if (!pszTmp)
3932 pszTmp = strchr(pszFile, '\\');
3933 if (!pszTmp)
3934 break;
3935 pszFile = pszTmp + 1;
3936 }
3937 if (RT_VALID_PTR(pszExtra) && *pszExtra)
3938 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s: %s\n", uLine, pszFile, pszExtra);
3939 else
3940 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s!\n", uLine, pszFile);
3941
3942 /*
3943 * Record the incident so that we stand a chance of blocking I/O controls
3944 * before panicking the system.
3945 */
3946 cCalls = ASMAtomicIncU32(&pDevExt->cBadContextCalls);
3947 if (cCalls > UINT32_MAX - _1K)
3948 ASMAtomicWriteU32(&pDevExt->cBadContextCalls, UINT32_MAX - _1K);
3949}
3950
3951
3952/**
3953 * Reports a bad context, currently that means EFLAGS.AC is 0 instead of 1.
3954 *
3955 * @param pSession The session of the caller.
3956 * @param pszFile The source file where the caller detected the bad
3957 * context.
3958 * @param uLine The line number in @a pszFile.
3959 * @param pszExtra Optional additional message to give further hints.
3960 */
3961SUPR0DECL(void) SUPR0BadContext(PSUPDRVSESSION pSession, const char *pszFile, uint32_t uLine, const char *pszExtra)
3962{
3963 PSUPDRVDEVEXT pDevExt;
3964
3965 AssertReturnVoid(SUP_IS_SESSION_VALID(pSession));
3966 pDevExt = pSession->pDevExt;
3967
3968 supdrvBadContext(pDevExt, pszFile, uLine, pszExtra);
3969}
3970
3971
3972/**
3973 * Gets the paging mode of the current CPU.
3974 *
3975 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
3976 */
3977SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
3978{
3979 SUPPAGINGMODE enmMode;
3980
3981 RTR0UINTREG cr0 = ASMGetCR0();
3982 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3983 enmMode = SUPPAGINGMODE_INVALID;
3984 else
3985 {
3986 RTR0UINTREG cr4 = ASMGetCR4();
3987 uint32_t fNXEPlusLMA = 0;
3988 if (cr4 & X86_CR4_PAE)
3989 {
3990 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
3991 if (fExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
3992 {
3993 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
3994 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
3995 fNXEPlusLMA |= RT_BIT(0);
3996 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
3997 fNXEPlusLMA |= RT_BIT(1);
3998 }
3999 }
4000
4001 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
4002 {
4003 case 0:
4004 enmMode = SUPPAGINGMODE_32_BIT;
4005 break;
4006
4007 case X86_CR4_PGE:
4008 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
4009 break;
4010
4011 case X86_CR4_PAE:
4012 enmMode = SUPPAGINGMODE_PAE;
4013 break;
4014
4015 case X86_CR4_PAE | RT_BIT(0):
4016 enmMode = SUPPAGINGMODE_PAE_NX;
4017 break;
4018
4019 case X86_CR4_PAE | X86_CR4_PGE:
4020 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4021 break;
4022
4023 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4024 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4025 break;
4026
4027 case RT_BIT(1) | X86_CR4_PAE:
4028 enmMode = SUPPAGINGMODE_AMD64;
4029 break;
4030
4031 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
4032 enmMode = SUPPAGINGMODE_AMD64_NX;
4033 break;
4034
4035 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
4036 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
4037 break;
4038
4039 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4040 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4041 break;
4042
4043 default:
4044 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4045 enmMode = SUPPAGINGMODE_INVALID;
4046 break;
4047 }
4048 }
4049 return enmMode;
4050}
4051
4052
4053/**
4054 * Change CR4 and take care of the kernel CR4 shadow if applicable.
4055 *
4056 * CR4 shadow handling is required for Linux >= 4.0. Calling this function
4057 * instead of ASMSetCR4() is only necessary for semi-permanent CR4 changes
4058 * for code with interrupts enabled.
4059 *
4060 * @returns the old CR4 value.
4061 *
4062 * @param fOrMask bits to be set in CR4.
4063 * @param fAndMask Mask of bits to keep in CR4; bits not set here are cleared.
4064 *
4065 * @remarks Must be called with preemption/interrupts disabled.
4066 */
4067SUPR0DECL(RTCCUINTREG) SUPR0ChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
4068{
4069#ifdef RT_OS_LINUX
4070 return supdrvOSChangeCR4(fOrMask, fAndMask);
4071#else
4072 RTCCUINTREG uOld = ASMGetCR4();
4073 RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
4074 if (uNew != uOld)
4075 ASMSetCR4(uNew);
4076 return uOld;
4077#endif
4078}
4079
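/*
 * Editor's note: sketch, not part of the driver, of how a caller might set
 * CR4.VMXE while keeping the kernel's CR4 shadow coherent.  Preemption and
 * interrupts are assumed to be disabled as required above.
 *
 *   RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, ~(RTCCUINTREG)0);
 *   ...
 *   if (!(uOldCr4 & X86_CR4_VMXE))
 *       SUPR0ChangeCR4(0, ~(RTCCUINTREG)X86_CR4_VMXE);       // restore the previous state
 */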
4080
4081/**
4082 * Enables or disables hardware virtualization extensions using native OS APIs.
4083 *
4084 * @returns VBox status code.
4085 * @retval VINF_SUCCESS on success.
4086 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4087 *
4088 * @param fEnable Whether to enable or disable.
4089 */
4090SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4091{
4092#ifdef RT_OS_DARWIN
4093 return supdrvOSEnableVTx(fEnable);
4094#else
4095 RT_NOREF1(fEnable);
4096 return VERR_NOT_SUPPORTED;
4097#endif
4098}
4099
4100
4101/**
4102 * Suspends hardware virtualization extensions using the native OS API.
4103 *
4104 * This is called prior to entering raw-mode context.
4105 *
4106 * @returns @c true if suspended, @c false if not.
4107 */
4108SUPR0DECL(bool) SUPR0SuspendVTxOnCpu(void)
4109{
4110#ifdef RT_OS_DARWIN
4111 return supdrvOSSuspendVTxOnCpu();
4112#else
4113 return false;
4114#endif
4115}
4116
4117
4118/**
4119 * Resumes hardware virtualization extensions using the native OS API.
4120 *
4121 * This is called after returning from raw-mode context.
4122 *
4123 * @param fSuspended The return value of SUPR0SuspendVTxOnCpu.
4124 */
4125SUPR0DECL(void) SUPR0ResumeVTxOnCpu(bool fSuspended)
4126{
4127#ifdef RT_OS_DARWIN
4128 supdrvOSResumeVTxOnCpu(fSuspended);
4129#else
4130 RT_NOREF1(fSuspended);
4131 Assert(!fSuspended);
4132#endif
4133}
4134
4135
4136SUPR0DECL(int) SUPR0GetCurrentGdtRw(RTHCUINTPTR *pGdtRw)
4137{
4138#ifdef RT_OS_LINUX
4139 return supdrvOSGetCurrentGdtRw(pGdtRw);
4140#else
4141 NOREF(pGdtRw);
4142 return VERR_NOT_IMPLEMENTED;
4143#endif
4144}
4145
4146
4147/**
4148 * Gets AMD-V and VT-x support for the calling CPU.
4149 *
4150 * @returns VBox status code.
4151 * @param pfCaps Where to store whether VT-x (SUPVTCAPS_VT_X) or AMD-V
4152 * (SUPVTCAPS_AMD_V) is supported.
4153 */
4154SUPR0DECL(int) SUPR0GetVTSupport(uint32_t *pfCaps)
4155{
4156 Assert(pfCaps);
4157 *pfCaps = 0;
4158
4159 /* Check if the CPU even supports CPUID (extremely ancient CPUs). */
4160 if (ASMHasCpuId())
4161 {
4162 /* Check the range of standard CPUID leafs. */
4163 uint32_t uMaxLeaf, uVendorEbx, uVendorEcx, uVendorEdx;
4164 ASMCpuId(0, &uMaxLeaf, &uVendorEbx, &uVendorEcx, &uVendorEdx);
4165 if (ASMIsValidStdRange(uMaxLeaf))
4166 {
4167 /* Query the standard CPUID leaf. */
4168 uint32_t fFeatEcx, fFeatEdx, uDummy;
4169 ASMCpuId(1, &uDummy, &uDummy, &fFeatEcx, &fFeatEdx);
4170
4171 /* Check if the vendor is Intel (or compatible). */
4172 if ( ASMIsIntelCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4173 || ASMIsViaCentaurCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4174 || ASMIsShanghaiCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4175 {
4176 /* Check VT-x support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4177 if ( (fFeatEcx & X86_CPUID_FEATURE_ECX_VMX)
4178 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4179 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4180 {
4181 *pfCaps = SUPVTCAPS_VT_X;
4182 return VINF_SUCCESS;
4183 }
4184 return VERR_VMX_NO_VMX;
4185 }
4186
4187 /* Check if the vendor is AMD (or compatible). */
4188 if ( ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)
4189 || ASMIsHygonCpuEx(uVendorEbx, uVendorEcx, uVendorEdx))
4190 {
4191 uint32_t fExtFeatEcx, uExtMaxId;
4192 ASMCpuId(0x80000000, &uExtMaxId, &uDummy, &uDummy, &uDummy);
4193 ASMCpuId(0x80000001, &uDummy, &uDummy, &fExtFeatEcx, &uDummy);
4194
4195 /* Check AMD-V support. In addition, VirtualBox requires MSR and FXSAVE/FXRSTOR to function. */
4196 if ( ASMIsValidExtRange(uExtMaxId)
4197 && uExtMaxId >= 0x8000000a
4198 && (fExtFeatEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
4199 && (fFeatEdx & X86_CPUID_FEATURE_EDX_MSR)
4200 && (fFeatEdx & X86_CPUID_FEATURE_EDX_FXSR))
4201 {
4202 *pfCaps = SUPVTCAPS_AMD_V;
4203 return VINF_SUCCESS;
4204 }
4205 return VERR_SVM_NO_SVM;
4206 }
4207 }
4208 }
4209 return VERR_UNSUPPORTED_CPU;
4210}
4211
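/*
 * Editor's note: probing sketch, not part of the driver.
 *
 *   uint32_t fCaps = 0;
 *   int rc = SUPR0GetVTSupport(&fCaps);
 *   if (RT_SUCCESS(rc))
 *   {
 *       if (fCaps & SUPVTCAPS_VT_X)
 *           ... Intel/VIA/Shanghai: continue with SUPR0GetVmxUsability() ...
 *       else if (fCaps & SUPVTCAPS_AMD_V)
 *           ... AMD/Hygon: continue with SUPR0GetSvmUsability() ...
 *   }
 */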
4212
4213/**
4214 * Checks if Intel VT-x feature is usable on this CPU.
4215 *
4216 * @returns VBox status code.
4217 * @param pfIsSmxModeAmbiguous Where to return whether the SMX mode causes
4218 * ambiguity that makes us unsure whether we
4219 * really can use VT-x or not.
4220 *
4221 * @remarks Must be called with preemption disabled.
4222 * The caller is also expected to check that the CPU is an Intel (or
4223 * VIA/Shanghai) CPU -and- that it supports VT-x. Otherwise, this
4224 * function might throw a \#GP fault as it tries to read/write MSRs
4225 * that may not be present!
4226 */
4227SUPR0DECL(int) SUPR0GetVmxUsability(bool *pfIsSmxModeAmbiguous)
4228{
4229 uint64_t fFeatMsr;
4230 bool fMaybeSmxMode;
4231 bool fMsrLocked;
4232 bool fSmxVmxAllowed;
4233 bool fVmxAllowed;
4234 bool fIsSmxModeAmbiguous;
4235 int rc;
4236
4237 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4238
4239 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4240 fMaybeSmxMode = RT_BOOL(ASMGetCR4() & X86_CR4_SMXE);
4241 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4242 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4243 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4244 fIsSmxModeAmbiguous = false;
4245 rc = VERR_INTERNAL_ERROR_5;
4246
4247 /* Check if the LOCK bit is set but excludes the required VMXON bit. */
4248 if (fMsrLocked)
4249 {
4250 if (fVmxAllowed && fSmxVmxAllowed)
4251 rc = VINF_SUCCESS;
4252 else if (!fVmxAllowed && !fSmxVmxAllowed)
4253 rc = VERR_VMX_MSR_ALL_VMX_DISABLED;
4254 else if (!fMaybeSmxMode)
4255 {
4256 if (fVmxAllowed)
4257 rc = VINF_SUCCESS;
4258 else
4259 rc = VERR_VMX_MSR_VMX_DISABLED;
4260 }
4261 else
4262 {
4263 /*
4264 * CR4.SMXE is set but this doesn't mean the CPU is necessarily in SMX mode. We shall assume
4265 * that it is -not- and that it is a stupid BIOS/OS setting CR4.SMXE for no good reason.
4266 * See @bugref{6873}.
4267 */
4268 Assert(fMaybeSmxMode == true);
4269 fIsSmxModeAmbiguous = true;
4270 rc = VINF_SUCCESS;
4271 }
4272 }
4273 else
4274 {
4275 /*
4276 * MSR is not yet locked; we can change it ourselves here. Once the lock bit is set,
4277 * this MSR can no longer be modified.
4278 *
4279 * Set both the VMX and SMX_VMX bits (if supported) as we can't determine SMX mode
4280 * accurately. See @bugref{6873}.
4281 *
4282 * We need to check for SMX hardware support here, before writing the MSR as
4283 * otherwise we will #GP fault on CPUs that do not support it. Callers do not check
4284 * for it.
4285 */
4286 uint32_t fFeaturesECX, uDummy;
4287#ifdef VBOX_STRICT
4288 /* Callers should have verified these at some point. */
4289 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4290 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4291 Assert(ASMIsValidStdRange(uMaxId));
4292 Assert( ASMIsIntelCpuEx( uVendorEBX, uVendorECX, uVendorEDX)
4293 || ASMIsViaCentaurCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4294 || ASMIsShanghaiCpuEx( uVendorEBX, uVendorECX, uVendorEDX));
4295#endif
4296 ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
4297 bool fSmxVmxHwSupport = false;
4298 if ( (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
4299 && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
4300 fSmxVmxHwSupport = true;
4301
4302 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
4303 | MSR_IA32_FEATURE_CONTROL_VMXON;
4304 if (fSmxVmxHwSupport)
4305 fFeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;
4306
4307 /*
4308 * Commit.
4309 */
4310 ASMWrMsr(MSR_IA32_FEATURE_CONTROL, fFeatMsr);
4311
4312 /*
4313 * Verify.
4314 */
4315 fFeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4316 fMsrLocked = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
4317 if (fMsrLocked)
4318 {
4319 fSmxVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
4320 fVmxAllowed = RT_BOOL(fFeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
4321 if ( fVmxAllowed
4322 && ( !fSmxVmxHwSupport
4323 || fSmxVmxAllowed))
4324 rc = VINF_SUCCESS;
4325 else
4326 rc = !fSmxVmxHwSupport ? VERR_VMX_MSR_VMX_ENABLE_FAILED : VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED;
4327 }
4328 else
4329 rc = VERR_VMX_MSR_LOCKING_FAILED;
4330 }
4331
4332 if (pfIsSmxModeAmbiguous)
4333 *pfIsSmxModeAmbiguous = fIsSmxModeAmbiguous;
4334
4335 return rc;
4336}
4337
4338
4339/**
4340 * Checks if AMD-V SVM feature is usable on this CPU.
4341 *
4342 * @returns VBox status code.
4343 * @param fInitSvm If usable, try to initialize SVM on this CPU.
4344 *
4345 * @remarks Must be called with preemption disabled.
4346 */
4347SUPR0DECL(int) SUPR0GetSvmUsability(bool fInitSvm)
4348{
4349 int rc;
4350 uint64_t fVmCr;
4351 uint64_t fEfer;
4352
4353 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4354 fVmCr = ASMRdMsr(MSR_K8_VM_CR);
4355 if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
4356 {
4357 rc = VINF_SUCCESS;
4358 if (fInitSvm)
4359 {
4360 /* Turn on SVM in the EFER MSR. */
4361 fEfer = ASMRdMsr(MSR_K6_EFER);
4362 if (fEfer & MSR_K6_EFER_SVME)
4363 rc = VERR_SVM_IN_USE;
4364 else
4365 {
4366 ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
4367
4368 /* Paranoia. */
4369 fEfer = ASMRdMsr(MSR_K6_EFER);
4370 if (fEfer & MSR_K6_EFER_SVME)
4371 {
4372 /* Restore previous value. */
4373 ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
4374 }
4375 else
4376 rc = VERR_SVM_ILLEGAL_EFER_MSR;
4377 }
4378 }
4379 }
4380 else
4381 rc = VERR_SVM_DISABLED;
4382 return rc;
4383}
4384
4385
4386/**
4387 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4388 *
4389 * @returns VBox status code.
4390 * @retval VERR_VMX_NO_VMX
4391 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4392 * @retval VERR_VMX_MSR_VMX_DISABLED
4393 * @retval VERR_VMX_MSR_LOCKING_FAILED
4394 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4395 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4396 * @retval VERR_SVM_NO_SVM
4397 * @retval VERR_SVM_DISABLED
4398 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4399 * (centaur)/Shanghai CPU.
4400 *
4401 * @param pfCaps Where to store the capabilities.
4402 */
4403int VBOXCALL supdrvQueryVTCapsInternal(uint32_t *pfCaps)
4404{
4405 int rc = VERR_UNSUPPORTED_CPU;
4406 bool fIsSmxModeAmbiguous = false;
4407 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4408
4409 /*
4410 * Input validation.
4411 */
4412 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4413 *pfCaps = 0;
4414
4415 /* We may modify MSRs and re-read them, disable preemption so we make sure we don't migrate CPUs. */
4416 RTThreadPreemptDisable(&PreemptState);
4417
4418 /* Check if VT-x/AMD-V is supported. */
4419 rc = SUPR0GetVTSupport(pfCaps);
4420 if (RT_SUCCESS(rc))
4421 {
4422 /* Check if VT-x is supported. */
4423 if (*pfCaps & SUPVTCAPS_VT_X)
4424 {
4425 /* Check if VT-x is usable. */
4426 rc = SUPR0GetVmxUsability(&fIsSmxModeAmbiguous);
4427 if (RT_SUCCESS(rc))
4428 {
4429 /* Query some basic VT-x capabilities (mainly required by our GUI). */
4430 VMXCTLSMSR vtCaps;
4431 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4432 if (vtCaps.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4433 {
4434 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4435 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_EPT)
4436 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4437 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
4438 *pfCaps |= SUPVTCAPS_VTX_UNRESTRICTED_GUEST;
4439 if (vtCaps.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING)
4440 *pfCaps |= SUPVTCAPS_VTX_VMCS_SHADOWING;
4441 }
4442 }
4443 }
4444 /* Check if AMD-V is supported. */
4445 else if (*pfCaps & SUPVTCAPS_AMD_V)
4446 {
4447 /* Check if SVM is usable. */
4448 rc = SUPR0GetSvmUsability(false /* fInitSvm */);
4449 if (RT_SUCCESS(rc))
4450 {
4451 /* Query some basic AMD-V capabilities (mainly required by our GUI). */
4452 uint32_t uDummy, fSvmFeatures;
4453 ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
4454 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
4455 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4456 if (fSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VIRT_VMSAVE_VMLOAD)
4457 *pfCaps |= SUPVTCAPS_AMDV_VIRT_VMSAVE_VMLOAD;
4458 }
4459 }
4460 }
4461
4462 /* Restore preemption. */
4463 RTThreadPreemptRestore(&PreemptState);
4464
4465 /* After restoring preemption, if we may be in SMX mode, print a warning as it's difficult to debug such problems. */
4466 if (fIsSmxModeAmbiguous)
4467 SUPR0Printf(("WARNING! CR4 hints SMX mode but your CPU is too secretive. Proceeding anyway... We wish you good luck!\n"));
4468
4469 return rc;
4470}
4471
4472
4473/**
4474 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4475 *
4476 * @returns VBox status code.
4477 * @retval VERR_VMX_NO_VMX
4478 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4479 * @retval VERR_VMX_MSR_VMX_DISABLED
4480 * @retval VERR_VMX_MSR_LOCKING_FAILED
4481 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4482 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4483 * @retval VERR_SVM_NO_SVM
4484 * @retval VERR_SVM_DISABLED
4485 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4486 * (centaur)/Shanghai CPU.
4487 *
4488 * @param pSession The session handle.
4489 * @param pfCaps Where to store the capabilities.
4490 */
4491SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
4492{
4493 /*
4494 * Input validation.
4495 */
4496 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4497 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4498
4499 /*
4500 * Call common worker.
4501 */
4502 return supdrvQueryVTCapsInternal(pfCaps);
4503}
4504
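/*
 * Editor's note: ring-0 caller sketch, not part of the driver, showing how the
 * capability bits returned by SUPR0QueryVTCaps() are typically interpreted.
 *
 *   uint32_t fCaps = 0;
 *   int rc = SUPR0QueryVTCaps(pSession, &fCaps);
 *   if (RT_SUCCESS(rc))
 *   {
 *       bool const fNestedPaging = RT_BOOL(fCaps & SUPVTCAPS_NESTED_PAGING);
 *       bool const fUnrestricted = RT_BOOL(fCaps & SUPVTCAPS_VTX_UNRESTRICTED_GUEST);
 *       ...
 *   }
 */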
4505
4506/**
4507 * Queries the CPU microcode revision.
4508 *
4509 * @returns VBox status code.
4510 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4511 * readable microcode rev.
4512 *
4513 * @param puRevision Where to store the microcode revision.
4514 */
4515static int VBOXCALL supdrvQueryUcodeRev(uint32_t *puRevision)
4516{
4517 int rc = VERR_UNSUPPORTED_CPU;
4518 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4519
4520 /*
4521 * Input validation.
4522 */
4523 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4524
4525 *puRevision = 0;
4526
4527 /* Disable preemption so we make sure we don't migrate CPUs, just in case. */
4528 /* NB: We assume that there aren't mismatched microcode revs in the system. */
4529 RTThreadPreemptDisable(&PreemptState);
4530
4531 if (ASMHasCpuId())
4532 {
4533 uint32_t uDummy, uTFMSEAX;
4534 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4535
4536 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4537 ASMCpuId(1, &uTFMSEAX, &uDummy, &uDummy, &uDummy);
4538
4539 if (ASMIsValidStdRange(uMaxId))
4540 {
4541 uint64_t uRevMsr;
4542 if (ASMIsIntelCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4543 {
4544 /* Architectural MSR available on Pentium Pro and later. */
4545 if (ASMGetCpuFamily(uTFMSEAX) >= 6)
4546 {
4547 /* Revision is in the high dword. */
4548 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID);
4549 *puRevision = RT_HIDWORD(uRevMsr);
4550 rc = VINF_SUCCESS;
4551 }
4552 }
4553 else if ( ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4554 || ASMIsHygonCpuEx(uVendorEBX, uVendorECX, uVendorEDX))
4555 {
4556 /* Not well documented, but at least all AMD64 CPUs support this. */
4557 if (ASMGetCpuFamily(uTFMSEAX) >= 15)
4558 {
4559 /* Revision is in the low dword. */
4560 uRevMsr = ASMRdMsr(MSR_IA32_BIOS_SIGN_ID); /* Same MSR as Intel. */
4561 *puRevision = RT_LODWORD(uRevMsr);
4562 rc = VINF_SUCCESS;
4563 }
4564 }
4565 }
4566 }
4567
4568 RTThreadPreemptRestore(&PreemptState);
4569
4570 return rc;
4571}
4572
4573/**
4574 * Queries the CPU microcode revision.
4575 *
4576 * @returns VBox status code.
4577 * @retval VERR_UNSUPPORTED_CPU if not identifiable as a processor with
4578 * readable microcode rev.
4579 *
4580 * @param pSession The session handle.
4581 * @param puRevision Where to store the microcode revision.
4582 */
4583SUPR0DECL(int) SUPR0QueryUcodeRev(PSUPDRVSESSION pSession, uint32_t *puRevision)
4584{
4585 /*
4586 * Input validation.
4587 */
4588 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4589 AssertPtrReturn(puRevision, VERR_INVALID_POINTER);
4590
4591 /*
4592 * Call common worker.
4593 */
4594 return supdrvQueryUcodeRev(puRevision);
4595}
4596
4597
4598/**
4599 * Gets hardware-virtualization MSRs of the calling CPU.
4600 *
4601 * @returns VBox status code.
4602 * @param pMsrs Where to store the hardware-virtualization MSRs.
4603 * @param fCaps Hardware virtualization capabilities (SUPVTCAPS_XXX). Pass 0
4604 * to explicitly check for the presence of VT-x/AMD-V before
4605 * querying MSRs.
4606 * @param fForce Force querying of MSRs from the hardware.
4607 */
4608SUPR0DECL(int) SUPR0GetHwvirtMsrs(PSUPHWVIRTMSRS pMsrs, uint32_t fCaps, bool fForce)
4609{
4610 NOREF(fForce);
4611
4612 int rc;
4613 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4614
4615 /*
4616 * Input validation.
4617 */
4618 AssertPtrReturn(pMsrs, VERR_INVALID_POINTER);
4619
4620 /*
4621 * Disable preemption so we make sure we don't migrate CPUs and because
4622 * we access global data.
4623 */
4624 RTThreadPreemptDisable(&PreemptState);
4625
4626 /*
4627 * Query the MSRs from the hardware.
4628 */
4629 SUPHWVIRTMSRS Msrs;
4630 RT_ZERO(Msrs);
4631
4632 /* If the caller claims VT-x/AMD-V is supported, we don't need to recheck it. */
4633 if (!(fCaps & (SUPVTCAPS_VT_X | SUPVTCAPS_AMD_V)))
4634 rc = SUPR0GetVTSupport(&fCaps);
4635 else
4636 rc = VINF_SUCCESS;
4637 if (RT_SUCCESS(rc))
4638 {
4639 if (fCaps & SUPVTCAPS_VT_X)
4640 {
4641 Msrs.u.vmx.u64FeatCtrl = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
4642 Msrs.u.vmx.u64Basic = ASMRdMsr(MSR_IA32_VMX_BASIC);
4643 Msrs.u.vmx.PinCtls.u = ASMRdMsr(MSR_IA32_VMX_PINBASED_CTLS);
4644 Msrs.u.vmx.ProcCtls.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4645 Msrs.u.vmx.ExitCtls.u = ASMRdMsr(MSR_IA32_VMX_EXIT_CTLS);
4646 Msrs.u.vmx.EntryCtls.u = ASMRdMsr(MSR_IA32_VMX_ENTRY_CTLS);
4647 Msrs.u.vmx.u64Misc = ASMRdMsr(MSR_IA32_VMX_MISC);
4648 Msrs.u.vmx.u64Cr0Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED0);
4649 Msrs.u.vmx.u64Cr0Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR0_FIXED1);
4650 Msrs.u.vmx.u64Cr4Fixed0 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED0);
4651 Msrs.u.vmx.u64Cr4Fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
4652 Msrs.u.vmx.u64VmcsEnum = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
4653
4654 if (RT_BF_GET(Msrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
4655 {
4656 Msrs.u.vmx.TruePinCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS);
4657 Msrs.u.vmx.TrueProcCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
4658 Msrs.u.vmx.TrueEntryCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_ENTRY_CTLS);
4659 Msrs.u.vmx.TrueExitCtls.u = ASMRdMsr(MSR_IA32_VMX_TRUE_EXIT_CTLS);
4660 }
4661
4662 if (Msrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
4663 {
4664 Msrs.u.vmx.ProcCtls2.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4665
4666 if (Msrs.u.vmx.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
4667 Msrs.u.vmx.u64EptVpidCaps = ASMRdMsr(MSR_IA32_VMX_EPT_VPID_CAP);
4668
4669 if (Msrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMFUNC)
4670 Msrs.u.vmx.u64VmFunc = ASMRdMsr(MSR_IA32_VMX_VMFUNC);
4671 }
4672 }
4673 else if (fCaps & SUPVTCAPS_AMD_V)
4674 {
4675 Msrs.u.svm.u64MsrHwcr = ASMRdMsr(MSR_K8_HWCR);
4676 Msrs.u.svm.u64MsrSmmAddr = ASMRdMsr(MSR_K7_SMM_ADDR);
4677 Msrs.u.svm.u64MsrSmmMask = ASMRdMsr(MSR_K7_SMM_MASK);
4678 }
4679 else
4680 {
4681 RTThreadPreemptRestore(&PreemptState);
4682 AssertMsgFailedReturn(("SUPR0GetVTSupport returns success but neither VT-x nor AMD-V reported!\n"),
4683 VERR_INTERNAL_ERROR_2);
4684 }
4685
4686 /*
4687 * Copy the MSRs out.
4688 */
4689 memcpy(pMsrs, &Msrs, sizeof(*pMsrs));
4690 }
4691
4692 RTThreadPreemptRestore(&PreemptState);
4693
4694 return rc;
4695}
4696
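/* Usage sketch (editorial illustration, not part of the driver): detect VT-x/AMD-V
 * first and then fetch the raw hardware-virtualization MSRs of the current CPU.
 * The caller context is hypothetical; fForce is currently unused.
 *
 *     SUPHWVIRTMSRS HwvirtMsrs;
 *     uint32_t      fCaps = 0;
 *     int rc = SUPR0GetVTSupport(&fCaps);
 *     if (RT_SUCCESS(rc))
 *         rc = SUPR0GetHwvirtMsrs(&HwvirtMsrs, fCaps, false);
 *     if (RT_SUCCESS(rc) && (fCaps & SUPVTCAPS_VT_X))
 *         SUPR0Printf("vboxdrv: VMX basic MSR %#RX64\n", HwvirtMsrs.u.vmx.u64Basic);
 */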
4697
4698/**
4699 * Register a component factory with the support driver.
4700 *
4701 * This is currently restricted to kernel sessions only.
4702 *
4703 * @returns VBox status code.
4704 * @retval VINF_SUCCESS on success.
4705 * @retval VERR_NO_MEMORY if we're out of memory.
4706 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
4707 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4708 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4709 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4710 *
4711 * @param pSession The SUPDRV session (must be a ring-0 session).
4712 * @param pFactory Pointer to the component factory registration structure.
4713 *
4714 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
4715 */
4716SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4717{
4718 PSUPDRVFACTORYREG pNewReg;
4719 const char *psz;
4720 int rc;
4721
4722 /*
4723 * Validate parameters.
4724 */
4725 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4726 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4727 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4728 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
4729 psz = RTStrEnd(pFactory->szName, sizeof(pFactory->szName));
4730 AssertReturn(psz, VERR_INVALID_PARAMETER);
4731
4732 /*
4733 * Allocate and initialize a new registration structure.
4734 */
4735 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
4736 if (pNewReg)
4737 {
4738 pNewReg->pNext = NULL;
4739 pNewReg->pFactory = pFactory;
4740 pNewReg->pSession = pSession;
4741 pNewReg->cchName = psz - &pFactory->szName[0];
4742
4743 /*
4744 * Add it to the tail of the list after checking for prior registration.
4745 */
4746 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4747 if (RT_SUCCESS(rc))
4748 {
4749 PSUPDRVFACTORYREG pPrev = NULL;
4750 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4751 while (pCur && pCur->pFactory != pFactory)
4752 {
4753 pPrev = pCur;
4754 pCur = pCur->pNext;
4755 }
4756 if (!pCur)
4757 {
4758 if (pPrev)
4759 pPrev->pNext = pNewReg;
4760 else
4761 pSession->pDevExt->pComponentFactoryHead = pNewReg;
4762 rc = VINF_SUCCESS;
4763 }
4764 else
4765 rc = VERR_ALREADY_EXISTS;
4766
4767 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4768 }
4769
4770 if (RT_FAILURE(rc))
4771 RTMemFree(pNewReg);
4772 }
4773 else
4774 rc = VERR_NO_MEMORY;
4775 return rc;
4776}
4777
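/* Registration sketch (editorial illustration, not part of the driver): a ring-0
 * component with a kernel session could expose a factory as below.  The component
 * name, the interface UUID string and g_ExampleInterface are hypothetical.
 *
 *     static DECLCALLBACK(void *)
 *     exampleQueryFactoryInterface(PCSUPDRVFACTORY pFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid)
 *     {
 *         if (!RTStrICmp(pszInterfaceUuid, "12345678-1234-1234-1234-123456789abc"))
 *             return (void *)&g_ExampleInterface;   // hypothetical interface table
 *         return NULL;
 *     }
 *
 *     static SUPDRVFACTORY g_ExampleFactory;
 *     ...
 *     RTStrCopy(g_ExampleFactory.szName, sizeof(g_ExampleFactory.szName), "ExampleComponent");
 *     g_ExampleFactory.pfnQueryFactoryInterface = exampleQueryFactoryInterface;
 *     rc = SUPR0ComponentRegisterFactory(pR0Session, &g_ExampleFactory);
 *     ...
 *     SUPR0ComponentDeregisterFactory(pR0Session, &g_ExampleFactory);
 */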
4778
4779/**
4780 * Deregister a component factory.
4781 *
4782 * @returns VBox status code.
4783 * @retval VINF_SUCCESS on success.
4784 * @retval VERR_NOT_FOUND if the factory wasn't registered.
4785 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4786 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4787 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4788 *
4789 * @param pSession The SUPDRV session (must be a ring-0 session).
4790 * @param pFactory Pointer to the component factory registration structure
4791 * previously passed SUPR0ComponentRegisterFactory().
4792 *
4793 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
4794 */
4795SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4796{
4797 int rc;
4798
4799 /*
4800 * Validate parameters.
4801 */
4802 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4803 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4804 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4805
4806 /*
4807 * Take the lock and look for the registration record.
4808 */
4809 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4810 if (RT_SUCCESS(rc))
4811 {
4812 PSUPDRVFACTORYREG pPrev = NULL;
4813 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4814 while (pCur && pCur->pFactory != pFactory)
4815 {
4816 pPrev = pCur;
4817 pCur = pCur->pNext;
4818 }
4819 if (pCur)
4820 {
4821 if (!pPrev)
4822 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
4823 else
4824 pPrev->pNext = pCur->pNext;
4825
4826 pCur->pNext = NULL;
4827 pCur->pFactory = NULL;
4828 pCur->pSession = NULL;
4829 rc = VINF_SUCCESS;
4830 }
4831 else
4832 rc = VERR_NOT_FOUND;
4833
4834 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4835
4836 RTMemFree(pCur);
4837 }
4838 return rc;
4839}
4840
4841
4842/**
4843 * Queries a component factory.
4844 *
4845 * @returns VBox status code.
4846 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4847 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4848 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
4849 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
4850 *
4851 * @param pSession The SUPDRV session.
4852 * @param pszName The name of the component factory.
4853 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
4854 * @param ppvFactoryIf Where to store the factory interface.
4855 */
4856SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
4857{
4858 const char *pszEnd;
4859 size_t cchName;
4860 int rc;
4861
4862 /*
4863 * Validate parameters.
4864 */
4865 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4866
4867 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
4868 pszEnd = RTStrEnd(pszName, RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
4869 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4870 cchName = pszEnd - pszName;
4871
4872 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
4873 pszEnd = RTStrEnd(pszInterfaceUuid, RTUUID_STR_LENGTH);
4874 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4875
4876 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
4877 *ppvFactoryIf = NULL;
4878
4879 /*
4880 * Take the lock and try all factories by this name.
4881 */
4882 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4883 if (RT_SUCCESS(rc))
4884 {
4885 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4886 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
4887 while (pCur)
4888 {
4889 if ( pCur->cchName == cchName
4890 && !memcmp(pCur->pFactory->szName, pszName, cchName))
4891 {
4892 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
4893 if (pvFactory)
4894 {
4895 *ppvFactoryIf = pvFactory;
4896 rc = VINF_SUCCESS;
4897 break;
4898 }
4899 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
4900 }
4901
4902 /* next */
4903 pCur = pCur->pNext;
4904 }
4905
4906 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4907 }
4908 return rc;
4909}
4910
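/* Lookup sketch (editorial illustration, not part of the driver), matching the
 * hypothetical registration example above:
 *
 *     void *pvFactoryIf = NULL;
 *     rc = SUPR0ComponentQueryFactory(pSession, "ExampleComponent",
 *                                     "12345678-1234-1234-1234-123456789abc", &pvFactoryIf);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // cast pvFactoryIf to the hypothetical interface type and use it
 *     }
 */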
4911
4912/**
4913 * Adds a memory object to the session.
4914 *
4915 * @returns IPRT status code.
4916 * @param pMem Memory tracking structure containing the
4917 * information to track.
4918 * @param pSession The session.
4919 */
4920static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
4921{
4922 PSUPDRVBUNDLE pBundle;
4923
4924 /*
4925 * Find free entry and record the allocation.
4926 */
4927 RTSpinlockAcquire(pSession->Spinlock);
4928 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4929 {
4930 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
4931 {
4932 unsigned i;
4933 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
4934 {
4935 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
4936 {
4937 pBundle->cUsed++;
4938 pBundle->aMem[i] = *pMem;
4939 RTSpinlockRelease(pSession->Spinlock);
4940 return VINF_SUCCESS;
4941 }
4942 }
4943 AssertFailed(); /* !!this can't be happening!!! */
4944 }
4945 }
4946 RTSpinlockRelease(pSession->Spinlock);
4947
4948 /*
4949 * Need to allocate a new bundle.
4950 * Insert into the last entry in the bundle.
4951 */
4952 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
4953 if (!pBundle)
4954 return VERR_NO_MEMORY;
4955
4956 /* take last entry. */
4957 pBundle->cUsed++;
4958 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
4959
4960 /* insert into list. */
4961 RTSpinlockAcquire(pSession->Spinlock);
4962 pBundle->pNext = pSession->Bundle.pNext;
4963 pSession->Bundle.pNext = pBundle;
4964 RTSpinlockRelease(pSession->Spinlock);
4965
4966 return VINF_SUCCESS;
4967}
4968
4969
4970/**
4971 * Releases a memory object referenced by pointer and type.
4972 *
4973 * @returns IPRT status code.
4974 * @param pSession Session data.
4975 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
4976 * @param eType Memory type.
4977 */
4978static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
4979{
4980 PSUPDRVBUNDLE pBundle;
4981
4982 /*
4983 * Validate input.
4984 */
4985 if (!uPtr)
4986 {
4987 Log(("Illegal address %p\n", (void *)uPtr));
4988 return VERR_INVALID_PARAMETER;
4989 }
4990
4991 /*
4992 * Search for the address.
4993 */
4994 RTSpinlockAcquire(pSession->Spinlock);
4995 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4996 {
4997 if (pBundle->cUsed > 0)
4998 {
4999 unsigned i;
5000 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
5001 {
5002 if ( pBundle->aMem[i].eType == eType
5003 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
5004 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
5005 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
5006 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
5007 )
5008 {
5009 /* Make a copy of it and release it outside the spinlock. */
5010 SUPDRVMEMREF Mem = pBundle->aMem[i];
5011 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
5012 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
5013 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
5014 RTSpinlockRelease(pSession->Spinlock);
5015
5016 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
5017 {
5018 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
5019 AssertRC(rc); /** @todo figure out how to handle this. */
5020 }
5021 if (Mem.MemObj != NIL_RTR0MEMOBJ)
5022 {
5023 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
5024 AssertRC(rc); /** @todo figure out how to handle this. */
5025 }
5026 return VINF_SUCCESS;
5027 }
5028 }
5029 }
5030 }
5031 RTSpinlockRelease(pSession->Spinlock);
5032 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
5033 return VERR_INVALID_PARAMETER;
5034}
5035
5036
5037/**
5038 * Opens an image. If it's the first time it's opened, the caller must upload
5039 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
5040 *
5041 * This is the 1st step of the loading.
5042 *
5043 * @returns IPRT status code.
5044 * @param pDevExt Device globals.
5045 * @param pSession Session data.
5046 * @param pReq The open request.
5047 */
5048static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
5049{
5050 int rc;
5051 PSUPDRVLDRIMAGE pImage;
5052 void *pv;
5053 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
5054 SUPDRV_CHECK_SMAP_SETUP();
5055 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5056 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithEverything=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithEverything));
5057
5058 /*
5059 * Check if we got an instance of the image already.
5060 */
5061 supdrvLdrLock(pDevExt);
5062 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5063 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5064 {
5065 if ( pImage->szName[cchName] == '\0'
5066 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
5067 {
5068 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
5069 {
5070                /** @todo check cbImageBits and cbImageWithEverything here; if they differ,
5071                 * that indicates the images are different. */
5072 pImage->cUsage++;
5073 pReq->u.Out.pvImageBase = pImage->pvImage;
5074 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
5075 pReq->u.Out.fNativeLoader = pImage->fNative;
5076 supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5077 supdrvLdrUnlock(pDevExt);
5078 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5079 return VINF_SUCCESS;
5080 }
5081 supdrvLdrUnlock(pDevExt);
5082 Log(("supdrvIOCtl_LdrOpen: Too many existing references to '%s'!\n", pReq->u.In.szName));
5083 return VERR_TOO_MANY_REFERENCES;
5084 }
5085 }
5086 /* (not found - add it!) */
5087
5088 /* If the loader interface is locked down, make userland fail early */
5089 if (pDevExt->fLdrLockedDown)
5090 {
5091 supdrvLdrUnlock(pDevExt);
5092 Log(("supdrvIOCtl_LdrOpen: Not adding '%s' to image list, loader interface is locked down!\n", pReq->u.In.szName));
5093 return VERR_PERMISSION_DENIED;
5094 }
5095
5096 /*
5097 * Allocate memory.
5098 */
5099 Assert(cchName < sizeof(pImage->szName));
5100 pv = RTMemAlloc(sizeof(SUPDRVLDRIMAGE));
5101 if (!pv)
5102 {
5103 supdrvLdrUnlock(pDevExt);
5104 Log(("supdrvIOCtl_LdrOpen: RTMemAlloc() failed\n"));
5105 return /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_2;
5106 }
5107 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5108
5109 /*
5110 * Setup and link in the LDR stuff.
5111 */
5112 pImage = (PSUPDRVLDRIMAGE)pv;
5113 pImage->pvImage = NULL;
5114#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5115 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
5116#else
5117 pImage->pvImageAlloc = NULL;
5118#endif
5119 pImage->cbImageWithEverything = pReq->u.In.cbImageWithEverything;
5120 pImage->cbImageBits = pReq->u.In.cbImageBits;
5121 pImage->cSymbols = 0;
5122 pImage->paSymbols = NULL;
5123 pImage->pachStrTab = NULL;
5124 pImage->cbStrTab = 0;
5125 pImage->cSegments = 0;
5126 pImage->paSegments = NULL;
5127 pImage->pfnModuleInit = NULL;
5128 pImage->pfnModuleTerm = NULL;
5129 pImage->pfnServiceReqHandler = NULL;
5130 pImage->uState = SUP_IOCTL_LDR_OPEN;
5131 pImage->cUsage = 1;
5132 pImage->pDevExt = pDevExt;
5133 pImage->pImageImport = NULL;
5134 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC;
5135 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
5136
5137 /*
5138     * Try to load it using the native loader; if that isn't supported, fall back
5139 * on the older method.
5140 */
5141 pImage->fNative = true;
5142 rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
5143 if (rc == VERR_NOT_SUPPORTED)
5144 {
5145#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5146 rc = RTR0MemObjAllocPage(&pImage->hMemObjImage, pImage->cbImageBits, true /*fExecutable*/);
5147 if (RT_SUCCESS(rc))
5148 {
5149 pImage->pvImage = RTR0MemObjAddress(pImage->hMemObjImage);
5150 pImage->fNative = false;
5151 }
5152#else
5153 pImage->pvImageAlloc = RTMemExecAlloc(pImage->cbImageBits + 31);
5154 pImage->pvImage = RT_ALIGN_P(pImage->pvImageAlloc, 32);
5155 pImage->fNative = false;
5156 rc = pImage->pvImageAlloc ? VINF_SUCCESS : VERR_NO_EXEC_MEMORY;
5157#endif
5158 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5159 }
5160 if (RT_FAILURE(rc))
5161 {
5162 supdrvLdrUnlock(pDevExt);
5163 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
5164 RTMemFree(pImage);
5165 Log(("supdrvIOCtl_LdrOpen(%s): failed - %Rrc\n", pReq->u.In.szName, rc));
5166 return rc;
5167 }
5168 Assert(VALID_PTR(pImage->pvImage) || RT_FAILURE(rc));
5169
5170 /*
5171 * Link it.
5172 */
5173 pImage->pNext = pDevExt->pLdrImages;
5174 pDevExt->pLdrImages = pImage;
5175
5176 supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5177
5178 pReq->u.Out.pvImageBase = pImage->pvImage;
5179 pReq->u.Out.fNeedsLoading = true;
5180 pReq->u.Out.fNativeLoader = pImage->fNative;
5181 supdrvOSLdrNotifyOpened(pDevExt, pImage, pReq->u.In.szFilename);
5182
5183 supdrvLdrUnlock(pDevExt);
5184 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5185 return VINF_SUCCESS;
5186}
5187
5188
5189/**
5190 * Formats a load error message.
5191 *
5192 * @returns @a rc
5193 * @param rc Return code.
5194 * @param pReq The request.
5195 * @param pszFormat The error message format string.
5196 * @param ... Argument to the format string.
5197 */
5198int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
5199{
5200 va_list va;
5201 va_start(va, pszFormat);
5202 pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
5203 RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
5204 va_end(va);
5205 Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
5206 return rc;
5207}
5208
5209
5210/**
5211 * Worker that validates a pointer to an image entrypoint.
5212 *
5213 * Calls supdrvLdrLoadError on error.
5214 *
5215 * @returns IPRT status code.
5216 * @param pDevExt The device globals.
5217 * @param pImage The loader image.
5218 * @param pv The pointer into the image.
5219 * @param fMayBeNull Whether it may be NULL.
5220 * @param pszSymbol The entrypoint name or log name. If the symbol is
5221 *                          capitalized it signifies a specific symbol, otherwise it is
5222 *                          only used for logging.
5223 * @param pbImageBits The image bits prepared by ring-3.
5224 * @param pReq The request for passing to supdrvLdrLoadError.
5225 *
5226 * @note Will leave the loader lock on failure!
5227 */
5228static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv, bool fMayBeNull,
5229 const uint8_t *pbImageBits, const char *pszSymbol, PSUPLDRLOAD pReq)
5230{
5231 if (!fMayBeNull || pv)
5232 {
5233 uint32_t iSeg;
5234
5235 /* Must be within the image bits: */
5236 uintptr_t const uRva = (uintptr_t)pv - (uintptr_t)pImage->pvImage;
5237 if (uRva >= pImage->cbImageBits)
5238 {
5239 supdrvLdrUnlock(pDevExt);
5240 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5241 "Invalid entry point address %p given for %s: RVA %#zx, image size %#zx",
5242 pv, pszSymbol, uRva, pImage->cbImageBits);
5243 }
5244
5245 /* Must be in an executable segment: */
5246 for (iSeg = 0; iSeg < pImage->cSegments; iSeg++)
5247 if (uRva - pImage->paSegments[iSeg].off < (uintptr_t)pImage->paSegments[iSeg].cb)
5248 {
5249 if (pImage->paSegments[iSeg].fProt & SUPLDR_PROT_EXEC)
5250 break;
5251 supdrvLdrUnlock(pDevExt);
5252 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5253 "Bad entry point %p given for %s: not executable (seg #%u: %#RX32 LB %#RX32 prot %#x)",
5254 pv, pszSymbol, iSeg, pImage->paSegments[iSeg].off, pImage->paSegments[iSeg].cb,
5255 pImage->paSegments[iSeg].fProt);
5256 }
5257 if (iSeg >= pImage->cSegments)
5258 {
5259 supdrvLdrUnlock(pDevExt);
5260 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5261 "Bad entry point %p given for %s: no matching segment found (RVA %#zx)!",
5262 pv, pszSymbol, uRva);
5263 }
5264
5265 if (pImage->fNative)
5266 {
5267 /** @todo pass pReq along to the native code. */
5268 int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits, pszSymbol);
5269 if (RT_FAILURE(rc))
5270 {
5271 supdrvLdrUnlock(pDevExt);
5272 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
5273 "Bad entry point address %p for %s: rc=%Rrc\n", pv, pszSymbol, rc);
5274 }
5275 }
5276 }
5277 return VINF_SUCCESS;
5278}
5279
5280
5281/**
5282 * Loads the image bits.
5283 *
5284 * This is the 2nd step of the loading.
5285 *
5286 * @returns IPRT status code.
5287 * @param pDevExt Device globals.
5288 * @param pSession Session data.
5289 * @param pReq The request.
5290 */
5291static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
5292{
5293 PSUPDRVLDRUSAGE pUsage;
5294 PSUPDRVLDRIMAGE pImage;
5295 PSUPDRVLDRIMAGE pImageImport;
5296 int rc;
5297 SUPDRV_CHECK_SMAP_SETUP();
5298 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithEverything=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithEverything));
5299 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5300
5301 /*
5302 * Find the ldr image.
5303 */
5304 supdrvLdrLock(pDevExt);
5305 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5306
5307 pUsage = pSession->pLdrUsage;
5308 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5309 pUsage = pUsage->pNext;
5310 if (!pUsage)
5311 {
5312 supdrvLdrUnlock(pDevExt);
5313 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image not found");
5314 }
5315 pImage = pUsage->pImage;
5316
5317 /*
5318 * Validate input.
5319 */
5320 if ( pImage->cbImageWithEverything != pReq->u.In.cbImageWithEverything
5321 || pImage->cbImageBits != pReq->u.In.cbImageBits)
5322 {
5323 supdrvLdrUnlock(pDevExt);
5324 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %u(prep) != %u(load) or %u != %u",
5325 pImage->cbImageWithEverything, pReq->u.In.cbImageWithEverything, pImage->cbImageBits, pReq->u.In.cbImageBits);
5326 }
5327
5328 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
5329 {
5330 unsigned uState = pImage->uState;
5331 supdrvLdrUnlock(pDevExt);
5332 if (uState != SUP_IOCTL_LDR_LOAD)
5333 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
5334 pReq->u.Out.uErrorMagic = 0;
5335 return VERR_ALREADY_LOADED;
5336 }
5337
5338 /* If the loader interface is locked down, don't load new images */
5339 if (pDevExt->fLdrLockedDown)
5340 {
5341 supdrvLdrUnlock(pDevExt);
5342 return supdrvLdrLoadError(VERR_PERMISSION_DENIED, pReq, "Loader is locked down");
5343 }
5344
5345 /*
5346     * If the new image is a dependent of VMMR0.r0, resolve it via the
5347 * caller's usage list and make sure it's in ready state.
5348 */
5349 pImageImport = NULL;
5350 if (pReq->u.In.fFlags & SUPLDRLOAD_F_DEP_VMMR0)
5351 {
5352 PSUPDRVLDRUSAGE pUsageDependency = pSession->pLdrUsage;
5353 while (pUsageDependency && pUsageDependency->pImage->pvImage != pDevExt->pvVMMR0)
5354 pUsageDependency = pUsageDependency->pNext;
5355 if (!pUsageDependency || !pDevExt->pvVMMR0)
5356 {
5357 supdrvLdrUnlock(pDevExt);
5358 return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 not loaded by session");
5359 }
5360 pImageImport = pUsageDependency->pImage;
5361 if (pImageImport->uState != SUP_IOCTL_LDR_LOAD)
5362 {
5363 supdrvLdrUnlock(pDevExt);
5364 return supdrvLdrLoadError(VERR_MODULE_NOT_FOUND, pReq, "VMMR0.r0 is not ready (state %#x)", pImageImport->uState);
5365 }
5366 }
5367
5368 /*
5369 * Copy the segments before we start using supdrvLdrValidatePointer for entrypoint validation.
5370 */
5371 pImage->cSegments = pReq->u.In.cSegments;
5372 {
5373 size_t cbSegments = pImage->cSegments * sizeof(SUPLDRSEG);
5374 pImage->paSegments = (PSUPLDRSEG)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSegments], cbSegments);
5375 if (pImage->paSegments) /* Align the last segment size to avoid upsetting RTR0MemObjProtect. */ /** @todo relax RTR0MemObjProtect */
5376 pImage->paSegments[pImage->cSegments - 1].cb = RT_ALIGN_32(pImage->paSegments[pImage->cSegments - 1].cb, PAGE_SIZE);
5377 else
5378 {
5379 supdrvLdrUnlock(pDevExt);
5380 return supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for segment table: %#x", cbSegments);
5381 }
5382 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5383 }
5384
5385 /*
5386 * Validate entrypoints.
5387 */
5388 switch (pReq->u.In.eEPType)
5389 {
5390 case SUPLDRLOADEP_NOTHING:
5391 break;
5392
5393 case SUPLDRLOADEP_VMMR0:
5394 if (pReq->u.In.EP.VMMR0.pvVMMR0 != pImage->pvImage)
5395 {
5396 supdrvLdrUnlock(pDevExt);
5397 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid pvVMMR0 pointer: %p, expected %p", pReq->u.In.EP.VMMR0.pvVMMR0, pImage->pvImage);
5398 }
5399 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, pReq->u.In.abImage, "VMMR0EntryFast", pReq);
5400 if (RT_FAILURE(rc))
5401 return rc;
5402 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, pReq->u.In.abImage, "VMMR0EntryEx", pReq);
5403 if (RT_FAILURE(rc))
5404 return rc;
5405 break;
5406
5407 case SUPLDRLOADEP_SERVICE:
5408 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, pReq->u.In.abImage, "pfnServiceReq", pReq);
5409 if (RT_FAILURE(rc))
5410 return rc;
5411 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
5412 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
5413 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
5414 {
5415 supdrvLdrUnlock(pDevExt);
5416 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "apvReserved={%p,%p,%p} MBZ!",
5417 pReq->u.In.EP.Service.apvReserved[0], pReq->u.In.EP.Service.apvReserved[1],
5418 pReq->u.In.EP.Service.apvReserved[2]);
5419 }
5420 break;
5421
5422 default:
5423 supdrvLdrUnlock(pDevExt);
5424 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid eEPType=%d", pReq->u.In.eEPType);
5425 }
5426
5427 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, pReq->u.In.abImage, "ModuleInit", pReq);
5428 if (RT_FAILURE(rc))
5429 return rc;
5430 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, pReq->u.In.abImage, "ModuleTerm", pReq);
5431 if (RT_FAILURE(rc))
5432 return rc;
5433 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5434
5435 /*
5436 * Allocate and copy the tables if non-native.
5437 * (No need to do try/except as this is a buffered request.)
5438 */
5439 if (!pImage->fNative)
5440 {
5441 pImage->cbStrTab = pReq->u.In.cbStrTab;
5442 if (pImage->cbStrTab)
5443 {
5444 pImage->pachStrTab = (char *)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offStrTab], pImage->cbStrTab);
5445 if (!pImage->pachStrTab)
5446 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for string table: %#x", pImage->cbStrTab);
5447 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5448 }
5449
5450 pImage->cSymbols = pReq->u.In.cSymbols;
5451 if (RT_SUCCESS(rc) && pImage->cSymbols)
5452 {
5453 size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
5454 pImage->paSymbols = (PSUPLDRSYM)RTMemDup(&pReq->u.In.abImage[pReq->u.In.offSymbols], cbSymbols);
5455 if (!pImage->paSymbols)
5456 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
5457 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5458 }
5459 }
5460
5461 /*
5462 * Copy the bits and apply permissions / complete native loading.
5463 */
5464 if (RT_SUCCESS(rc))
5465 {
5466 pImage->uState = SUP_IOCTL_LDR_LOAD;
5467 pImage->pfnModuleInit = (PFNR0MODULEINIT)(uintptr_t)pReq->u.In.pfnModuleInit;
5468 pImage->pfnModuleTerm = (PFNR0MODULETERM)(uintptr_t)pReq->u.In.pfnModuleTerm;
5469
5470 if (pImage->fNative)
5471 rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.abImage, pReq);
5472 else
5473 {
5474#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
5475 uint32_t i;
5476 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5477
5478 for (i = 0; i < pImage->cSegments; i++)
5479 {
5480 rc = RTR0MemObjProtect(pImage->hMemObjImage, pImage->paSegments[i].off, pImage->paSegments[i].cb,
5481 pImage->paSegments[i].fProt);
5482 if (RT_SUCCESS(rc))
5483 continue;
5484 if (rc == VERR_NOT_SUPPORTED)
5485 rc = VINF_SUCCESS;
5486 else
5487 rc = supdrvLdrLoadError(rc, pReq, "RTR0MemObjProtect failed on seg#%u %#RX32 LB %#RX32 fProt=%#x",
5488 i, pImage->paSegments[i].off, pImage->paSegments[i].cb, pImage->paSegments[i].fProt);
5489 break;
5490 }
5491#else
5492 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
5493#endif
5494 Log(("vboxdrv: Loaded '%s' at %p\n", pImage->szName, pImage->pvImage));
5495 }
5496 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5497 }
5498
5499 /*
5500 * Update any entry points.
5501 */
5502 if (RT_SUCCESS(rc))
5503 {
5504 switch (pReq->u.In.eEPType)
5505 {
5506 default:
5507 case SUPLDRLOADEP_NOTHING:
5508 rc = VINF_SUCCESS;
5509 break;
5510 case SUPLDRLOADEP_VMMR0:
5511 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0,
5512 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
5513 break;
5514 case SUPLDRLOADEP_SERVICE:
5515 pImage->pfnServiceReqHandler = (PFNSUPR0SERVICEREQHANDLER)(uintptr_t)pReq->u.In.EP.Service.pfnServiceReq;
5516 rc = VINF_SUCCESS;
5517 break;
5518 }
5519 }
5520
5521 /*
5522 * On success call the module initialization.
5523 */
5524 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
5525 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
5526 {
5527 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
5528 pDevExt->pLdrInitImage = pImage;
5529 pDevExt->hLdrInitThread = RTThreadNativeSelf();
5530 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5531 rc = pImage->pfnModuleInit(pImage);
5532 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5533 pDevExt->pLdrInitImage = NULL;
5534 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
5535 if (RT_FAILURE(rc))
5536 {
5537 if (pDevExt->pvVMMR0 == pImage->pvImage)
5538 supdrvLdrUnsetVMMR0EPs(pDevExt);
5539 supdrvLdrLoadError(rc, pReq, "ModuleInit failed: %Rrc", rc);
5540 }
5541 }
5542 if (RT_SUCCESS(rc))
5543 {
5544 /* Increase the usage counter of any import image. */
5545 if (pImageImport)
5546 {
5547 pImageImport->cUsage++;
5548 pImage->pImageImport = pImageImport;
5549 }
5550
5551 /* Done! */
5552 SUPR0Printf("vboxdrv: %RKv %s\n", pImage->pvImage, pImage->szName);
5553 pReq->u.Out.uErrorMagic = 0;
5554 pReq->u.Out.szError[0] = '\0';
5555 }
5556 else
5557 {
5558 /* Inform the tracing component in case ModuleInit registered TPs. */
5559 supdrvTracerModuleUnloading(pDevExt, pImage);
5560
5561 pImage->uState = SUP_IOCTL_LDR_OPEN;
5562 pImage->pfnModuleInit = NULL;
5563 pImage->pfnModuleTerm = NULL;
5564        pImage->pfnServiceReqHandler = NULL;
5565 pImage->cbStrTab = 0;
5566 RTMemFree(pImage->pachStrTab);
5567 pImage->pachStrTab = NULL;
5568 RTMemFree(pImage->paSymbols);
5569 pImage->paSymbols = NULL;
5570 pImage->cSymbols = 0;
5571 }
5572
5573 supdrvLdrUnlock(pDevExt);
5574 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
5575 return rc;
5576}
5577
5578
5579/**
5580 * Frees a previously loaded (prep'ed) image.
5581 *
5582 * @returns IPRT status code.
5583 * @param pDevExt Device globals.
5584 * @param pSession Session data.
5585 * @param pReq The request.
5586 */
5587static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
5588{
5589 int rc;
5590 PSUPDRVLDRUSAGE pUsagePrev;
5591 PSUPDRVLDRUSAGE pUsage;
5592 PSUPDRVLDRIMAGE pImage;
5593 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
5594
5595 /*
5596 * Find the ldr image.
5597 */
5598 supdrvLdrLock(pDevExt);
5599 pUsagePrev = NULL;
5600 pUsage = pSession->pLdrUsage;
5601 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5602 {
5603 pUsagePrev = pUsage;
5604 pUsage = pUsage->pNext;
5605 }
5606 if (!pUsage)
5607 {
5608 supdrvLdrUnlock(pDevExt);
5609 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
5610 return VERR_INVALID_HANDLE;
5611 }
5612 if (pUsage->cRing3Usage == 0)
5613 {
5614 supdrvLdrUnlock(pDevExt);
5615 Log(("SUP_IOCTL_LDR_FREE: No ring-3 reference to the image!\n"));
5616 return VERR_CALLER_NO_REFERENCE;
5617 }
5618
5619 /*
5620 * Check if we can remove anything.
5621 */
5622 rc = VINF_SUCCESS;
5623 pImage = pUsage->pImage;
5624 Log(("SUP_IOCTL_LDR_FREE: pImage=%p %s cUsage=%d r3=%d r0=%u\n",
5625 pImage, pImage->szName, pImage->cUsage, pUsage->cRing3Usage, pUsage->cRing0Usage));
5626 if (pImage->cUsage <= 1 || pUsage->cRing3Usage + pUsage->cRing0Usage <= 1)
5627 {
5628 /*
5629 * Check if there are any objects with destructors in the image, if
5630 * so leave it for the session cleanup routine so we get a chance to
5631 * clean things up in the right order and not leave them all dangling.
5632 */
5633 RTSpinlockAcquire(pDevExt->Spinlock);
5634 if (pImage->cUsage <= 1)
5635 {
5636 PSUPDRVOBJ pObj;
5637 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
5638 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5639 {
5640 rc = VERR_DANGLING_OBJECTS;
5641 break;
5642 }
5643 }
5644 else
5645 {
5646 PSUPDRVUSAGE pGenUsage;
5647 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
5648 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5649 {
5650 rc = VERR_DANGLING_OBJECTS;
5651 break;
5652 }
5653 }
5654 RTSpinlockRelease(pDevExt->Spinlock);
5655 if (rc == VINF_SUCCESS)
5656 {
5657 /* unlink it */
5658 if (pUsagePrev)
5659 pUsagePrev->pNext = pUsage->pNext;
5660 else
5661 pSession->pLdrUsage = pUsage->pNext;
5662
5663 /* free it */
5664 pUsage->pImage = NULL;
5665 pUsage->pNext = NULL;
5666 RTMemFree(pUsage);
5667
5668 /*
5669 * Dereference the image.
5670 */
5671 if (pImage->cUsage <= 1)
5672 supdrvLdrFree(pDevExt, pImage);
5673 else
5674 pImage->cUsage--;
5675 }
5676 else
5677 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
5678 }
5679 else
5680 {
5681 /*
5682 * Dereference both image and usage.
5683 */
5684 pImage->cUsage--;
5685 pUsage->cRing3Usage--;
5686 }
5687
5688 supdrvLdrUnlock(pDevExt);
5689 return rc;
5690}
5691
5692
5693/**
5694 * Lock down the image loader interface.
5695 *
5696 * @returns IPRT status code.
5697 * @param pDevExt Device globals.
5698 */
5699static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt)
5700{
5701 LogFlow(("supdrvIOCtl_LdrLockDown:\n"));
5702
5703 supdrvLdrLock(pDevExt);
5704 if (!pDevExt->fLdrLockedDown)
5705 {
5706 pDevExt->fLdrLockedDown = true;
5707 Log(("supdrvIOCtl_LdrLockDown: Image loader interface locked down\n"));
5708 }
5709 supdrvLdrUnlock(pDevExt);
5710
5711 return VINF_SUCCESS;
5712}
5713
5714
5715/**
5716 * Queries the address of a symbol in an open image.
5717 *
5718 * @returns IPRT status code.
5719 * @param pDevExt Device globals.
5720 * @param pSession Session data.
5721 * @param pReq The request buffer.
5722 */
5723static int supdrvIOCtl_LdrQuerySymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
5724{
5725 PSUPDRVLDRIMAGE pImage;
5726 PSUPDRVLDRUSAGE pUsage;
5727 uint32_t i;
5728 PSUPLDRSYM paSyms;
5729 const char *pchStrings;
5730 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
5731 void *pvSymbol = NULL;
5732 int rc = VERR_SYMBOL_NOT_FOUND;
5733 Log3(("supdrvIOCtl_LdrQuerySymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
5734
5735 /*
5736 * Find the ldr image.
5737 */
5738 supdrvLdrLock(pDevExt);
5739 pUsage = pSession->pLdrUsage;
5740 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5741 pUsage = pUsage->pNext;
5742 if (!pUsage)
5743 {
5744 supdrvLdrUnlock(pDevExt);
5745 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
5746 return VERR_INVALID_HANDLE;
5747 }
5748 pImage = pUsage->pImage;
5749 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
5750 {
5751 unsigned uState = pImage->uState;
5752 supdrvLdrUnlock(pDevExt);
5753 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
5754 return VERR_ALREADY_LOADED;
5755 }
5756
5757 /*
5758 * Search the image exports / symbol strings.
5759 *
5760 * Note! The int32_t is for native loading on solaris where the data
5761 * and text segments are in very different places.
5762 */
5763 if (pImage->fNative)
5764 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pReq->u.In.szSymbol, cbSymbol - 1, &pvSymbol);
5765 else
5766 {
5767 pchStrings = pImage->pachStrTab;
5768 paSyms = pImage->paSymbols;
5769 for (i = 0; i < pImage->cSymbols; i++)
5770 {
5771 if ( paSyms[i].offName + cbSymbol <= pImage->cbStrTab
5772 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
5773 {
5774 pvSymbol = (uint8_t *)pImage->pvImage + (int32_t)paSyms[i].offSymbol;
5775 rc = VINF_SUCCESS;
5776 break;
5777 }
5778 }
5779 }
5780 supdrvLdrUnlock(pDevExt);
5781 pReq->u.Out.pvSymbol = pvSymbol;
5782 return rc;
5783}
5784
5785
5786/**
5787 * Gets the address of a symbol in an open image or the support driver.
5788 *
5789 * @returns VINF_SUCCESS on success.
5790 * @returns Appropriate VBox status code on failure.
5791 * @param pDevExt Device globals.
5792 * @param pSession Session data.
5793 * @param pReq The request buffer.
5794 */
5795static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
5796{
5797 int rc = VINF_SUCCESS;
5798 const char *pszSymbol = pReq->u.In.pszSymbol;
5799 const char *pszModule = pReq->u.In.pszModule;
5800 size_t cbSymbol;
5801 char const *pszEnd;
5802 uint32_t i;
5803
5804 /*
5805 * Input validation.
5806 */
5807 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
5808 pszEnd = RTStrEnd(pszSymbol, 512);
5809 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5810 cbSymbol = pszEnd - pszSymbol + 1;
5811
5812 if (pszModule)
5813 {
5814 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
5815 pszEnd = RTStrEnd(pszModule, 64);
5816 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5817 }
5818 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
5819
5820
5821 if ( !pszModule
5822 || !strcmp(pszModule, "SupDrv"))
5823 {
5824 /*
5825 * Search the support driver export table.
5826 */
5827 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
5828 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
5829 {
5830 pReq->u.Out.pfnSymbol = (PFNRT)(uintptr_t)g_aFunctions[i].pfn;
5831 break;
5832 }
5833 }
5834 else
5835 {
5836 /*
5837 * Find the loader image.
5838 */
5839 PSUPDRVLDRIMAGE pImage;
5840
5841 supdrvLdrLock(pDevExt);
5842
5843 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5844 if (!strcmp(pImage->szName, pszModule))
5845 break;
5846 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
5847 {
5848 /*
5849 * Search the image exports / symbol strings.
5850 */
5851 if (pImage->fNative)
5852 {
5853 rc = supdrvOSLdrQuerySymbol(pDevExt, pImage, pszSymbol, cbSymbol - 1, (void **)&pReq->u.Out.pfnSymbol);
5854 if (RT_SUCCESS(rc))
5855 rc = supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5856 }
5857 else
5858 {
5859 const char *pchStrings = pImage->pachStrTab;
5860 PCSUPLDRSYM paSyms = pImage->paSymbols;
5861 rc = VERR_SYMBOL_NOT_FOUND;
5862 for (i = 0; i < pImage->cSymbols; i++)
5863 {
5864 if ( paSyms[i].offName + cbSymbol <= pImage->cbStrTab
5865 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
5866 {
5867 /*
5868 * Found it! Calc the symbol address and add a reference to the module.
5869 */
5870 pReq->u.Out.pfnSymbol = (PFNRT)((uintptr_t)pImage->pvImage + (int32_t)paSyms[i].offSymbol);
5871 rc = supdrvLdrAddUsage(pSession, pImage, true /*fRing3Usage*/);
5872 break;
5873 }
5874 }
5875 }
5876 }
5877 else
5878 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
5879
5880 supdrvLdrUnlock(pDevExt);
5881 }
5882 return rc;
5883}
5884
5885
5886/**
5887 * Looks up a symbol in g_aFunctions
5888 *
5889 * @returns VINF_SUCCESS on success, VERR_SYMBOL_NOT_FOUND on failure.
5890 * @param pszSymbol The symbol to look up.
5891 * @param puValue Where to return the value.
5892 */
5893int VBOXCALL supdrvLdrGetExportedSymbol(const char *pszSymbol, uintptr_t *puValue)
5894{
5895 uint32_t i;
5896 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
5897 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
5898 {
5899 *puValue = (uintptr_t)g_aFunctions[i].pfn;
5900 return VINF_SUCCESS;
5901 }
5902
5903 if (!strcmp(pszSymbol, "g_SUPGlobalInfoPage"))
5904 {
5905 *puValue = (uintptr_t)g_pSUPGlobalInfoPage;
5906 return VINF_SUCCESS;
5907 }
5908
5909 return VERR_SYMBOL_NOT_FOUND;
5910}
5911
5912
5913/**
5914 * Updates the VMMR0 entry point pointers.
5915 *
5916 * @returns IPRT status code.
5917 * @param pDevExt Device globals.
5918 * @param pvVMMR0 VMMR0 image handle.
5919 * @param pvVMMR0EntryFast VMMR0EntryFast address.
5920 * @param pvVMMR0EntryEx VMMR0EntryEx address.
5921 * @remark Caller must own the loader mutex.
5922 */
5923static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
5924{
5925 int rc = VINF_SUCCESS;
5926 LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryFast=%p\n", pvVMMR0, pvVMMR0EntryFast));
5927
5928
5929 /*
5930 * Check if not yet set.
5931 */
5932 if (!pDevExt->pvVMMR0)
5933 {
5934 pDevExt->pvVMMR0 = pvVMMR0;
5935 *(void **)&pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
5936 *(void **)&pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
5937 ASMCompilerBarrier(); /* the above isn't nice, so be careful... */
5938 }
5939 else
5940 {
5941 /*
5942 * Return failure or success depending on whether the values match or not.
5943 */
5944 if ( pDevExt->pvVMMR0 != pvVMMR0
5945 || (uintptr_t)pDevExt->pfnVMMR0EntryFast != (uintptr_t)pvVMMR0EntryFast
5946 || (uintptr_t)pDevExt->pfnVMMR0EntryEx != (uintptr_t)pvVMMR0EntryEx)
5947 {
5948 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
5949 rc = VERR_INVALID_PARAMETER;
5950 }
5951 }
5952 return rc;
5953}
5954
5955
5956/**
5957 * Unsets the VMMR0 entry point installed by supdrvLdrSetR0EP.
5958 *
5959 * @param pDevExt Device globals.
5960 */
5961static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
5962{
5963 pDevExt->pvVMMR0 = NULL;
5964 pDevExt->pfnVMMR0EntryFast = NULL;
5965 pDevExt->pfnVMMR0EntryEx = NULL;
5966}
5967
5968
5969/**
5970 * Adds a usage reference in the specified session of an image.
5971 *
5972 * Called while owning the loader semaphore.
5973 *
5974 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
5975 * @param pSession Session in question.
5976 * @param pImage Image which the session is using.
5977 * @param fRing3Usage Set if it's ring-3 usage, clear if ring-0.
5978 */
5979static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage, bool fRing3Usage)
5980{
5981 PSUPDRVLDRUSAGE pUsage;
5982 LogFlow(("supdrvLdrAddUsage: pImage=%p %d\n", pImage, fRing3Usage));
5983
5984 /*
5985 * Referenced it already?
5986 */
5987 pUsage = pSession->pLdrUsage;
5988 while (pUsage)
5989 {
5990 if (pUsage->pImage == pImage)
5991 {
5992 if (fRing3Usage)
5993 pUsage->cRing3Usage++;
5994 else
5995 pUsage->cRing0Usage++;
5996 return VINF_SUCCESS;
5997 }
5998 pUsage = pUsage->pNext;
5999 }
6000
6001 /*
6002 * Allocate new usage record.
6003 */
6004 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
6005 AssertReturn(pUsage, /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_5);
6006 pUsage->cRing3Usage = fRing3Usage ? 1 : 0;
6007 pUsage->cRing0Usage = fRing3Usage ? 0 : 1;
6008 pUsage->pImage = pImage;
6009 pUsage->pNext = pSession->pLdrUsage;
6010 pSession->pLdrUsage = pUsage;
6011 return VINF_SUCCESS;
6012}
6013
6014
6015/**
6016 * Frees a load image.
6017 *
6018 * @param pDevExt Pointer to device extension.
6019 * @param pImage Pointer to the image we're gonna free.
6020 *                      This image must exist!
6021 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
6022 */
6023static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
6024{
6025 unsigned cLoops;
6026 for (cLoops = 0; ; cLoops++)
6027 {
6028 PSUPDRVLDRIMAGE pImagePrev;
6029 PSUPDRVLDRIMAGE pImageImport;
6030 LogFlow(("supdrvLdrFree: pImage=%p %s [loop %u]\n", pImage, pImage->szName, cLoops));
6031 AssertBreak(cLoops < 2);
6032
6033 /*
6034 * Warn if we're releasing images while the image loader interface is
6035 * locked down -- we won't be able to reload them!
6036 */
6037 if (pDevExt->fLdrLockedDown)
6038 Log(("supdrvLdrFree: Warning: unloading '%s' image, while loader interface is locked down!\n", pImage->szName));
6039
6040 /* find it - arg. should've used doubly linked list. */
6041 Assert(pDevExt->pLdrImages);
6042 pImagePrev = NULL;
6043 if (pDevExt->pLdrImages != pImage)
6044 {
6045 pImagePrev = pDevExt->pLdrImages;
6046 while (pImagePrev->pNext != pImage)
6047 pImagePrev = pImagePrev->pNext;
6048 Assert(pImagePrev->pNext == pImage);
6049 }
6050
6051 /* unlink */
6052 if (pImagePrev)
6053 pImagePrev->pNext = pImage->pNext;
6054 else
6055 pDevExt->pLdrImages = pImage->pNext;
6056
6057        /* if this is VMMR0.r0, unset its entry point pointers. */
6058 if (pDevExt->pvVMMR0 == pImage->pvImage)
6059 supdrvLdrUnsetVMMR0EPs(pDevExt);
6060
6061 /* check for objects with destructors in this image. (Shouldn't happen.) */
6062 if (pDevExt->pObjs)
6063 {
6064 unsigned cObjs = 0;
6065 PSUPDRVOBJ pObj;
6066 RTSpinlockAcquire(pDevExt->Spinlock);
6067 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
6068 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
6069 {
6070 pObj->pfnDestructor = NULL;
6071 cObjs++;
6072 }
6073 RTSpinlockRelease(pDevExt->Spinlock);
6074 if (cObjs)
6075 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
6076 }
6077
6078 /* call termination function if fully loaded. */
6079 if ( pImage->pfnModuleTerm
6080 && pImage->uState == SUP_IOCTL_LDR_LOAD)
6081 {
6082 LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
6083 pDevExt->hLdrTermThread = RTThreadNativeSelf();
6084 pImage->pfnModuleTerm(pImage);
6085 pDevExt->hLdrTermThread = NIL_RTNATIVETHREAD;
6086 }
6087
6088 /* Inform the tracing component. */
6089 supdrvTracerModuleUnloading(pDevExt, pImage);
6090
6091 /* Do native unload if appropriate, then inform the native code about the
6092 unloading (mainly for non-native loading case). */
6093 if (pImage->fNative)
6094 supdrvOSLdrUnload(pDevExt, pImage);
6095 supdrvOSLdrNotifyUnloaded(pDevExt, pImage);
6096
6097 /* free the image */
6098 pImage->uMagic = SUPDRVLDRIMAGE_MAGIC_DEAD;
6099 pImage->cUsage = 0;
6100 pImage->pDevExt = NULL;
6101 pImage->pNext = NULL;
6102 pImage->uState = SUP_IOCTL_LDR_FREE;
6103#ifdef SUPDRV_USE_MEMOBJ_FOR_LDR_IMAGE
6104 RTR0MemObjFree(pImage->hMemObjImage, true /*fMappings*/);
6105 pImage->hMemObjImage = NIL_RTR0MEMOBJ;
6106#else
6107 RTMemExecFree(pImage->pvImageAlloc, pImage->cbImageBits + 31);
6108 pImage->pvImageAlloc = NULL;
6109#endif
6110 pImage->pvImage = NULL;
6111 RTMemFree(pImage->pachStrTab);
6112 pImage->pachStrTab = NULL;
6113 RTMemFree(pImage->paSymbols);
6114 pImage->paSymbols = NULL;
6115 RTMemFree(pImage->paSegments);
6116 pImage->paSegments = NULL;
6117
6118 pImageImport = pImage->pImageImport;
6119 pImage->pImageImport = NULL;
6120
6121 RTMemFree(pImage);
6122
6123 /*
6124 * Deal with any import image.
6125 */
6126 if (!pImageImport)
6127 break;
6128 if (pImageImport->cUsage > 1)
6129 {
6130 pImageImport->cUsage--;
6131 break;
6132 }
6133 pImage = pImageImport;
6134 }
6135}
6136
6137
6138/**
6139 * Acquires the loader lock.
6140 *
6141 * @returns IPRT status code.
6142 * @param pDevExt The device extension.
6143 * @note Not recursive on all platforms yet.
6144 */
6145DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt)
6146{
6147#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6148 int rc = RTSemMutexRequest(pDevExt->mtxLdr, RT_INDEFINITE_WAIT);
6149#else
6150 int rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
6151#endif
6152 AssertRC(rc);
6153 return rc;
6154}
6155
6156
6157/**
6158 * Releases the loader lock.
6159 *
6160 * @returns IPRT status code.
6161 * @param pDevExt The device extension.
6162 */
6163DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt)
6164{
6165#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6166 return RTSemMutexRelease(pDevExt->mtxLdr);
6167#else
6168 return RTSemFastMutexRelease(pDevExt->mtxLdr);
6169#endif
6170}
6171
6172
6173/**
6174 * Acquires the global loader lock.
6175 *
6176 * This can be useful when accessing structures being modified by the ModuleInit
6177 * and ModuleTerm callbacks. Use SUPR0LdrUnlock() to unlock.
6178 *
6179 * @returns VBox status code.
6180 * @param pSession The session doing the locking.
6181 *
6182 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6183 */
6184SUPR0DECL(int) SUPR0LdrLock(PSUPDRVSESSION pSession)
6185{
6186 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6187 return supdrvLdrLock(pSession->pDevExt);
6188}
6189
6190
6191/**
6192 * Releases the global loader lock.
6193 *
6194 * Must correspond to a SUPR0LdrLock call!
6195 *
6196 * @returns VBox status code.
6197 * @param pSession The session doing the locking.
6198 *
6199 * @note Cannot be used during ModuleInit or ModuleTerm callbacks.
6200 */
6201SUPR0DECL(int) SUPR0LdrUnlock(PSUPDRVSESSION pSession)
6202{
6203 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6204 return supdrvLdrUnlock(pSession->pDevExt);
6205}
6206
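/* Locking sketch (editorial illustration, not part of the driver): bracket access
 * to structures that the ModuleInit/ModuleTerm callbacks may also modify.  Must not
 * be used from within ModuleInit/ModuleTerm themselves; the protected work below is
 * hypothetical.
 *
 *     rc = SUPR0LdrLock(pSession);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... access the shared structures ...
 *         SUPR0LdrUnlock(pSession);
 *     }
 */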
6207
6208/**
6209 * For checking lock ownership in Assert() statements during ModuleInit and
6210 * ModuleTerm.
6211 *
6212 * @returns Whether we own the loader lock or not.
6213 * @param hMod The module in question.
6214 * @param fWantToHear For hosts where it is difficult to know who owns the
6215 * lock, this will be returned instead.
6216 */
6217SUPR0DECL(bool) SUPR0LdrIsLockOwnerByMod(void *hMod, bool fWantToHear)
6218{
6219 PSUPDRVDEVEXT pDevExt;
6220 RTNATIVETHREAD hOwner;
6221
6222 PSUPDRVLDRIMAGE pImage = (PSUPDRVLDRIMAGE)hMod;
6223 AssertPtrReturn(pImage, fWantToHear);
6224 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, fWantToHear);
6225
6226 pDevExt = pImage->pDevExt;
6227 AssertPtrReturn(pDevExt, fWantToHear);
6228
6229 /*
6230 * Expecting this to be called at init/term time only, so this will be sufficient.
6231 */
6232 hOwner = pDevExt->hLdrInitThread;
6233 if (hOwner == NIL_RTNATIVETHREAD)
6234 hOwner = pDevExt->hLdrTermThread;
6235 if (hOwner != NIL_RTNATIVETHREAD)
6236 return hOwner == RTThreadNativeSelf();
6237
6238 /*
6239 * Neither of the two semaphore variants currently offers very good
6240 * introspection, so we wing it for now. This API is VBOX_STRICT only.
6241 */
6242#ifdef SUPDRV_USE_MUTEX_FOR_LDR
6243 return RTSemMutexIsOwned(pDevExt->mtxLdr) && fWantToHear;
6244#else
6245 return fWantToHear;
6246#endif
6247}
6248
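/* Assertion sketch (editorial illustration, not part of the driver): ModuleInit /
 * ModuleTerm code can sanity check lock ownership with the handle it was given:
 *
 *     Assert(SUPR0LdrIsLockOwnerByMod(hMod, true));   // true = what to assume when ownership can't be determined
 */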
6249
6250/**
6251 * Locates and retains the given module for ring-0 usage.
6252 *
6253 * @returns VBox status code.
6254 * @param pSession The session to associate the module reference with.
6255 * @param pszName The module name (no path).
6256 * @param phMod Where to return the module handle. The module is
6257 * referenced and a call to SUPR0LdrModRelease() is
6258 * necessary when done with it.
6259 */
6260SUPR0DECL(int) SUPR0LdrModByName(PSUPDRVSESSION pSession, const char *pszName, void **phMod)
6261{
6262 int rc;
6263 size_t cchName;
6264 PSUPDRVDEVEXT pDevExt;
6265
6266 /*
6267 * Validate input.
6268 */
6269 AssertPtrReturn(phMod, VERR_INVALID_POINTER);
6270 *phMod = NULL;
6271 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6272 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
6273 cchName = strlen(pszName);
6274 AssertReturn(cchName > 0, VERR_EMPTY_STRING);
6275 AssertReturn(cchName < RT_SIZEOFMEMB(SUPDRVLDRIMAGE, szName), VERR_MODULE_NOT_FOUND);
6276
6277 /*
6278 * Do the lookup.
6279 */
6280 pDevExt = pSession->pDevExt;
6281 rc = supdrvLdrLock(pDevExt);
6282 if (RT_SUCCESS(rc))
6283 {
6284 PSUPDRVLDRIMAGE pImage;
6285 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
6286 {
6287 if ( pImage->szName[cchName] == '\0'
6288 && !memcmp(pImage->szName, pszName, cchName))
6289 {
6290 /*
6291             * Check the state and make sure we don't overflow the reference counter before returning it.
6292 */
6293 uint32_t uState = pImage->uState;
6294 if (uState == SUP_IOCTL_LDR_LOAD)
6295 {
6296 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
6297 {
6298 pImage->cUsage++;
6299 supdrvLdrAddUsage(pSession, pImage, false /*fRing3Usage*/);
6300 *phMod = pImage;
6301 supdrvLdrUnlock(pDevExt);
6302 return VINF_SUCCESS;
6303 }
6304 supdrvLdrUnlock(pDevExt);
6305 Log(("SUPR0LdrModByName: Too many existing references to '%s'!\n", pszName));
6306 return VERR_TOO_MANY_REFERENCES;
6307 }
6308 supdrvLdrUnlock(pDevExt);
6309 Log(("SUPR0LdrModByName: Module '%s' is not in the loaded state (%d)!\n", pszName, uState));
6310 return VERR_INVALID_STATE;
6311 }
6312 }
6313 supdrvLdrUnlock(pDevExt);
6314 Log(("SUPR0LdrModByName: Module '%s' not found!\n", pszName));
6315 rc = VERR_MODULE_NOT_FOUND;
6316 }
6317 return rc;
6318}
6319
6320
6321/**
6322 * Retains a ring-0 module reference.
6323 *
6324 * Release reference when done by calling SUPR0LdrModRelease().
6325 *
6326 * @returns VBox status code.
6327 * @param pSession The session to reference the module in. A usage
6328 * record is added if needed.
6329 * @param hMod The handle to the module to retain.
6330 */
6331SUPR0DECL(int) SUPR0LdrModRetain(PSUPDRVSESSION pSession, void *hMod)
6332{
6333 PSUPDRVDEVEXT pDevExt;
6334 PSUPDRVLDRIMAGE pImage;
6335 int rc;
6336
6337 /* Validate input a little. */
6338 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6339 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6340 pImage = (PSUPDRVLDRIMAGE)hMod;
6341 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6342
6343 /* Reference the module: */
6344 pDevExt = pSession->pDevExt;
6345 rc = supdrvLdrLock(pDevExt);
6346 if (RT_SUCCESS(rc))
6347 {
6348 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6349 {
6350 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
6351 {
6352 rc = supdrvLdrAddUsage(pSession, pImage, false /*fRing3Usage*/);
6353 if (RT_SUCCESS(rc))
6354 {
6355 pImage->cUsage++;
6356 rc = VINF_SUCCESS;
6357 }
6358 }
6359 else
6360 AssertFailedStmt(rc = VERR_TOO_MANY_REFERENCES);
6361 }
6362 else
6363 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6364 supdrvLdrUnlock(pDevExt);
6365 }
6366 return rc;
6367}
6368
6369
6370/**
6371 * Releases a ring-0 module reference retained by SUPR0LdrModByName() or
6372 * SUPR0LdrModRetain().
6373 *
6374 * @returns VBox status code.
6375 * @param pSession The session that the module was retained in.
6376 * @param hMod The module handle. NULL is silently ignored.
6377 */
6378SUPR0DECL(int) SUPR0LdrModRelease(PSUPDRVSESSION pSession, void *hMod)
6379{
6380 PSUPDRVDEVEXT pDevExt;
6381 PSUPDRVLDRIMAGE pImage;
6382 int rc;
6383
6384 /*
6385 * Validate input.
6386 */
6387 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
6388 if (!hMod)
6389 return VINF_SUCCESS;
6390 AssertPtrReturn(hMod, VERR_INVALID_HANDLE);
6391 pImage = (PSUPDRVLDRIMAGE)hMod;
6392 AssertReturn(pImage->uMagic == SUPDRVLDRIMAGE_MAGIC, VERR_INVALID_HANDLE);
6393
6394 /*
6395 * Take the loader lock and revalidate the module:
6396 */
6397 pDevExt = pSession->pDevExt;
6398 rc = supdrvLdrLock(pDevExt);
6399 if (RT_SUCCESS(rc))
6400 {
6401 if (pImage->uMagic == SUPDRVLDRIMAGE_MAGIC)
6402 {
6403 /*
6404 * Find the usage record for the module:
6405 */
6406 PSUPDRVLDRUSAGE pPrevUsage = NULL;
6407 PSUPDRVLDRUSAGE pUsage;
6408
6409 rc = VERR_MODULE_NOT_FOUND;
6410 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6411 {
6412 if (pUsage->pImage == pImage)
6413 {
6414 /*
6415 * Drop a ring-0 reference:
6416 */
6417 Assert(pImage->cUsage >= pUsage->cRing0Usage + pUsage->cRing3Usage);
6418 if (pUsage->cRing0Usage > 0)
6419 {
6420 if (pImage->cUsage > 1)
6421 {
6422 pImage->cUsage -= 1;
6423 pUsage->cRing0Usage -= 1;
6424 rc = VINF_SUCCESS;
6425 }
6426 else
6427 {
6428 supdrvLdrFree(pDevExt, pImage);
6429
6430 if (pPrevUsage)
6431 pPrevUsage->pNext = pUsage->pNext;
6432 else
6433 pSession->pLdrUsage = pUsage->pNext;
6434 pUsage->pNext = NULL;
6435 pUsage->pImage = NULL;
6436 pUsage->cRing0Usage = 0;
6437 pUsage->cRing3Usage = 0;
6438 RTMemFree(pUsage);
6439
6440 rc = VINF_OBJECT_DESTROYED;
6441 }
6442 }
6443 else
6444 AssertFailedStmt(rc = VERR_CALLER_NO_REFERENCE);
6445 break;
6446 }
6447 pPrevUsage = pUsage;
6448 }
6449 }
6450 else
6451 AssertFailedStmt(rc = VERR_INVALID_HANDLE);
6452 supdrvLdrUnlock(pDevExt);
6453 }
6454 return rc;
6455
6456}
6457
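/*
 * Editor's note -- illustrative sketch, not part of the upstream file: one way a
 * ring-0 client that already holds a module handle might pin the image across a
 * deferred operation, using only SUPR0LdrModRetain/SUPR0LdrModRelease as defined
 * above.  The function name exampleDeferredWork and the work done while the
 * reference is held are hypothetical.
 */
#if 0 /* illustration only, excluded from the build */
static int exampleDeferredWork(PSUPDRVSESSION pSession, void *hMod)
{
    /* Take an extra ring-0 reference so the image cannot be unloaded while
       the deferred work is outstanding. */
    int rc = SUPR0LdrModRetain(pSession, hMod);
    if (RT_SUCCESS(rc))
    {
        /* ... perform the work that relies on code/data in the module ... */

        /* Drop the reference again.  VINF_OBJECT_DESTROYED indicates this was
           the last reference and the image has been freed. */
        rc = SUPR0LdrModRelease(pSession, hMod);
    }
    return rc;
}
#endif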
6458
6459/**
6460 * Implements the service call request.
6461 *
6462 * @returns VBox status code.
6463 * @param pDevExt The device extension.
6464 * @param pSession The calling session.
6465 * @param pReq The request packet, valid.
6466 */
6467static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
6468{
6469#if !defined(RT_OS_WINDOWS) || defined(RT_ARCH_AMD64) || defined(DEBUG)
6470 int rc;
6471
6472 /*
6473      * Find the module among the modules already referenced by the calling session.
6474 */
6475 rc = supdrvLdrLock(pDevExt);
6476 if (RT_SUCCESS(rc))
6477 {
6478 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
6479 PSUPDRVLDRUSAGE pUsage;
6480
6481 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
6482 if ( pUsage->pImage->pfnServiceReqHandler
6483 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
6484 {
6485 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
6486 break;
6487 }
6488 supdrvLdrUnlock(pDevExt);
6489
6490 if (pfnServiceReqHandler)
6491 {
6492 /*
6493 * Call it.
6494 */
6495 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
6496 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
6497 else
6498 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
6499 }
6500 else
6501 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
6502 }
6503
6504 /* log it */
6505 if ( RT_FAILURE(rc)
6506 && rc != VERR_INTERRUPTED
6507 && rc != VERR_TIMEOUT)
6508 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6509 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6510 else
6511 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
6512 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
6513 return rc;
6514#else /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6515 RT_NOREF3(pDevExt, pSession, pReq);
6516 return VERR_NOT_IMPLEMENTED;
6517#endif /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
6518}
6519
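/*
 * Editor's note -- illustrative sketch, not part of the upstream file: the shape
 * of a service module's request handler as implied by the two calls above (a
 * session pointer, an operation number, a 64-bit argument and an optional
 * request packet); the driver looks the handler up via the image's
 * pfnServiceReqHandler field.  The handler name, the operation value and the
 * payload handling below are hypothetical.
 */
#if 0 /* illustration only, excluded from the build */
static DECLCALLBACK(int) exampleServiceReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
                                                  uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
{
    RT_NOREF2(pSession, u64Arg);
    switch (uOperation)
    {
        case 0: /* hypothetical "ping" operation that takes no packet */
            return pReqHdr ? VERR_INVALID_PARAMETER : VINF_SUCCESS;

        default:
            return VERR_NOT_SUPPORTED;
    }
}
#endif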
6520
6521/**
6522 * Implements the logger settings request.
6523 *
6524 * @returns VBox status code.
6525 * @param pReq The request.
6526 */
6527static int supdrvIOCtl_LoggerSettings(PSUPLOGGERSETTINGS pReq)
6528{
6529 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
6530 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
6531 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
6532 PRTLOGGER pLogger = NULL;
6533 int rc;
6534
6535 /*
6536 * Some further validation.
6537 */
6538 switch (pReq->u.In.fWhat)
6539 {
6540 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6541 case SUPLOGGERSETTINGS_WHAT_CREATE:
6542 break;
6543
6544 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6545 if (*pszGroup || *pszFlags || *pszDest)
6546 return VERR_INVALID_PARAMETER;
6547 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
6548 return VERR_ACCESS_DENIED;
6549 break;
6550
6551 default:
6552 return VERR_INTERNAL_ERROR;
6553 }
6554
6555 /*
6556 * Get the logger.
6557 */
6558 switch (pReq->u.In.fWhich)
6559 {
6560 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6561 pLogger = RTLogGetDefaultInstance();
6562 break;
6563
6564 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6565 pLogger = RTLogRelGetDefaultInstance();
6566 break;
6567
6568 default:
6569 return VERR_INTERNAL_ERROR;
6570 }
6571
6572 /*
6573 * Do the job.
6574 */
6575 switch (pReq->u.In.fWhat)
6576 {
6577 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
6578 if (pLogger)
6579 {
6580 rc = RTLogFlags(pLogger, pszFlags);
6581 if (RT_SUCCESS(rc))
6582 rc = RTLogGroupSettings(pLogger, pszGroup);
6583 NOREF(pszDest);
6584 }
6585 else
6586 rc = VERR_NOT_FOUND;
6587 break;
6588
6589 case SUPLOGGERSETTINGS_WHAT_CREATE:
6590 {
6591 if (pLogger)
6592 rc = VERR_ALREADY_EXISTS;
6593 else
6594 {
6595 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
6596
6597 rc = RTLogCreate(&pLogger,
6598 0 /* fFlags */,
6599 pszGroup,
6600 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
6601 ? "VBOX_LOG"
6602 : "VBOX_RELEASE_LOG",
6603 RT_ELEMENTS(s_apszGroups),
6604 s_apszGroups,
6605 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
6606 NULL);
6607 if (RT_SUCCESS(rc))
6608 {
6609 rc = RTLogFlags(pLogger, pszFlags);
6610 NOREF(pszDest);
6611 if (RT_SUCCESS(rc))
6612 {
6613 switch (pReq->u.In.fWhich)
6614 {
6615 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6616 pLogger = RTLogSetDefaultInstance(pLogger);
6617 break;
6618 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6619 pLogger = RTLogRelSetDefaultInstance(pLogger);
6620 break;
6621 }
6622 }
6623 RTLogDestroy(pLogger);
6624 }
6625 }
6626 break;
6627 }
6628
6629 case SUPLOGGERSETTINGS_WHAT_DESTROY:
6630 switch (pReq->u.In.fWhich)
6631 {
6632 case SUPLOGGERSETTINGS_WHICH_DEBUG:
6633 pLogger = RTLogSetDefaultInstance(NULL);
6634 break;
6635 case SUPLOGGERSETTINGS_WHICH_RELEASE:
6636 pLogger = RTLogRelSetDefaultInstance(NULL);
6637 break;
6638 }
6639 rc = RTLogDestroy(pLogger);
6640 break;
6641
6642 default:
6643 {
6644 rc = VERR_INTERNAL_ERROR;
6645 break;
6646 }
6647 }
6648
6649 return rc;
6650}
6651
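/*
 * Editor's note -- illustrative sketch, not part of the upstream file: the group,
 * flag and destination strings used above are addressed via offsets into the
 * request's szStrings buffer (offGroups, offFlags, offDestination).  The helper
 * below is a hypothetical ring-3-side illustration of one way to pack them back
 * to back; the concrete values ("all", "enabled") and the assumption that
 * pszStrings is large enough are examples only.
 */
#if 0 /* illustration only, excluded from the build */
static void examplePackLoggerStrings(char *pszStrings, uint32_t *poffGroups,
                                     uint32_t *poffFlags, uint32_t *poffDestination)
{
    static const char s_szGroups[] = "all";     /* enable logging for all groups */
    static const char s_szFlags[]  = "enabled"; /* turn the logger on */
    static const char s_szDest[]   = "";        /* destination left untouched */
    uint32_t off = 0;

    *poffGroups = off;
    memcpy(&pszStrings[off], s_szGroups, sizeof(s_szGroups));
    off += sizeof(s_szGroups);

    *poffFlags = off;
    memcpy(&pszStrings[off], s_szFlags, sizeof(s_szFlags));
    off += sizeof(s_szFlags);

    *poffDestination = off;
    memcpy(&pszStrings[off], s_szDest, sizeof(s_szDest));
}
#endif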
6652
6653/**
6654 * Implements the MSR prober operations.
6655 *
6656 * @returns VBox status code.
6657 * @param pDevExt The device extension.
6658 * @param pReq The request.
6659 */
6660static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq)
6661{
6662#ifdef SUPDRV_WITH_MSR_PROBER
6663 RTCPUID const idCpu = pReq->u.In.idCpu == UINT32_MAX ? NIL_RTCPUID : pReq->u.In.idCpu;
6664 int rc;
6665
6666 switch (pReq->u.In.enmOp)
6667 {
6668 case SUPMSRPROBEROP_READ:
6669 {
6670 uint64_t uValue;
6671 rc = supdrvOSMsrProberRead(pReq->u.In.uMsr, idCpu, &uValue);
6672 if (RT_SUCCESS(rc))
6673 {
6674 pReq->u.Out.uResults.Read.uValue = uValue;
6675 pReq->u.Out.uResults.Read.fGp = false;
6676 }
6677 else if (rc == VERR_ACCESS_DENIED)
6678 {
6679 pReq->u.Out.uResults.Read.uValue = 0;
6680 pReq->u.Out.uResults.Read.fGp = true;
6681 rc = VINF_SUCCESS;
6682 }
6683 break;
6684 }
6685
6686 case SUPMSRPROBEROP_WRITE:
6687 rc = supdrvOSMsrProberWrite(pReq->u.In.uMsr, idCpu, pReq->u.In.uArgs.Write.uToWrite);
6688 if (RT_SUCCESS(rc))
6689 pReq->u.Out.uResults.Write.fGp = false;
6690 else if (rc == VERR_ACCESS_DENIED)
6691 {
6692 pReq->u.Out.uResults.Write.fGp = true;
6693 rc = VINF_SUCCESS;
6694 }
6695 break;
6696
6697 case SUPMSRPROBEROP_MODIFY:
6698 case SUPMSRPROBEROP_MODIFY_FASTER:
6699 rc = supdrvOSMsrProberModify(idCpu, pReq);
6700 break;
6701
6702 default:
6703 return VERR_INVALID_FUNCTION;
6704 }
6705 RT_NOREF1(pDevExt);
6706 return rc;
6707#else
6708 RT_NOREF2(pDevExt, pReq);
6709 return VERR_NOT_IMPLEMENTED;
6710#endif
6711}
6712
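/*
 * Editor's note -- illustrative sketch, not part of the upstream file: a minimal
 * read operation driven through the prober, assuming a SUPDRV_WITH_MSR_PROBER
 * build and that the caller has the device extension at hand.  The function
 * name, the MSR chosen (IA32_TIME_STAMP_COUNTER, 0x10) and the logging are
 * examples only.
 */
#if 0 /* illustration only, excluded from the build */
static void exampleReadTscViaProber(PSUPDRVDEVEXT pDevExt)
{
    SUPMSRPROBER Req;
    int          rc;

    RT_ZERO(Req);
    Req.u.In.enmOp = SUPMSRPROBEROP_READ;
    Req.u.In.uMsr  = 0x10;          /* IA32_TIME_STAMP_COUNTER */
    Req.u.In.idCpu = UINT32_MAX;    /* any CPU (mapped to NIL_RTCPUID above) */

    rc = supdrvIOCtl_MsrProber(pDevExt, &Req);
    if (RT_SUCCESS(rc) && !Req.u.Out.uResults.Read.fGp)
        Log(("MSR %#x = %#RX64\n", Req.u.In.uMsr, Req.u.Out.uResults.Read.uValue));
    else
        Log(("MSR %#x read failed: rc=%Rrc fGp=%d\n", Req.u.In.uMsr, rc, Req.u.Out.uResults.Read.fGp));
}
#endif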
6713
6714/**
6715 * Resumes the built-in keyboard on MacBook Air and MacBook Pro hosts.
6716 * If there is no built-in keyboard device, success is returned anyway.
6717 *
6718 * @returns 0 on the Mac OS X platform, VERR_NOT_IMPLEMENTED on all other platforms.
6719 */
6720static int supdrvIOCtl_ResumeSuspendedKbds(void)
6721{
6722#if defined(RT_OS_DARWIN)
6723 return supdrvDarwinResumeSuspendedKbds();
6724#else
6725 return VERR_NOT_IMPLEMENTED;
6726#endif
6727}
6728