VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp@57735

Last change on this file: r57735, checked in by vboxsync on 2015-09-14

SUPDrv.cpp: Fixed a SUP_IOCTL_LDR_LOAD regression from the szError info return change. When loading very small ELF images, like the 32-bit tstRTR0MemUserKernel.r0, the total input size may end up smaller than the return size and thereby smaller than the total request size. The first cbIn validation then fails because it checks against sizeof(SUPLDRLOAD) instead of a minimal image size; the whole purpose of that test is to check whether cbImageWithTabs can safely be accessed.
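A minimal sketch of the validation idea described above, assuming the driver's internal headers are available; SUPLDRLOAD_MIN_SIZE_IN, the u.In.cbImageWithTabs field path, and the helper name are illustrative assumptions, not the actual driver code:

/* Require only enough input bytes to safely reach cbImageWithTabs, instead of
 * the full sizeof(SUPLDRLOAD); tiny images may legitimately have cbIn < cbOut. */
#define SUPLDRLOAD_MIN_SIZE_IN  (RT_UOFFSETOF(SUPLDRLOAD, u.In.cbImageWithTabs) + sizeof(uint32_t))

static int supldrLoadPreValidateSketch(PSUPREQHDR pReqHdr)
{
    /* Old behaviour: requiring cbIn >= sizeof(SUPLDRLOAD) rejected small images. */
    if (pReqHdr->cbIn < SUPLDRLOAD_MIN_SIZE_IN)
        return VERR_INVALID_PARAMETER;
    return VINF_SUCCESS; /* cbImageWithTabs can now be read and validated further. */
}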

1/* $Id: SUPDrv.cpp 57735 2015-09-14 12:40:08Z vboxsync $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define LOG_GROUP LOG_GROUP_SUP_DRV
32#define SUPDRV_AGNOSTIC
33#include "SUPDrvInternal.h"
34#ifndef PAGE_SHIFT
35# include <iprt/param.h>
36#endif
37#include <iprt/asm.h>
38#include <iprt/asm-amd64-x86.h>
39#include <iprt/asm-math.h>
40#include <iprt/cpuset.h>
41#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
42# include <iprt/dbg.h>
43#endif
44#include <iprt/handletable.h>
45#include <iprt/mem.h>
46#include <iprt/mp.h>
47#include <iprt/power.h>
48#include <iprt/process.h>
49#include <iprt/semaphore.h>
50#include <iprt/spinlock.h>
51#include <iprt/thread.h>
52#include <iprt/uuid.h>
53#include <iprt/net.h>
54#include <iprt/crc.h>
55#include <iprt/string.h>
56#include <iprt/timer.h>
57#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
58# include <iprt/rand.h>
59# include <iprt/path.h>
60#endif
61#include <iprt/uint128.h>
62#include <iprt/x86.h>
63
64#include <VBox/param.h>
65#include <VBox/log.h>
66#include <VBox/err.h>
67#include <VBox/vmm/hm_svm.h>
68#include <VBox/vmm/hm_vmx.h>
69
70#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
71# include "dtrace/SUPDrv.h"
72#else
73# define VBOXDRV_SESSION_CREATE(pvSession, fUser) do { } while (0)
74# define VBOXDRV_SESSION_CLOSE(pvSession) do { } while (0)
75# define VBOXDRV_IOCTL_ENTRY(pvSession, uIOCtl, pvReqHdr) do { } while (0)
76# define VBOXDRV_IOCTL_RETURN(pvSession, uIOCtl, pvReqHdr, rcRet, rcReq) do { } while (0)
77#endif
78
79/*
80 * Logging assignments:
81 * Log - useful stuff, like failures.
82 * LogFlow - program flow, except the really noisy bits.
83 * Log2 - Cleanup.
84 * Log3 - Loader flow noise.
85 * Log4 - Call VMMR0 flow noise.
86 * Log5 - Native yet-to-be-defined noise.
87 * Log6 - Native ioctl flow noise.
88 *
89 * Logging requires BUILD_TYPE=debug and possibly changes to the logger
90 * instantiation in log-vbox.c(pp).
91 */
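/* Editor's illustration, not part of the original file: the assignments above
 * map onto the standard IPRT logging macros, for example:
 *     Log(("supdrvIOCtl: SUP_IOCTL_LDR_OPEN failed, rc=%Rrc\n", rc));           // failures
 *     LogFlow(("supdrvCreateSession: pDevExt=%p fUser=%d\n", pDevExt, fUser));  // program flow
 *     Log3(("supdrvIOCtl_LdrLoad: cbImageWithTabs=%#x\n", cbImageWithTabs));    // loader noise
 * The message texts shown here are made up for illustration only. */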
92
93
94/*********************************************************************************************************************************
95* Defined Constants And Macros *
96*********************************************************************************************************************************/
97/** @def VBOX_SVN_REV
98 * The makefile should define this if it can. */
99#ifndef VBOX_SVN_REV
100# define VBOX_SVN_REV 0
101#endif
102
103/** @def SUPDRV_CHECK_SMAP_SETUP
104 * SMAP check setup. */
105/** @def SUPDRV_CHECK_SMAP_CHECK
106 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it
107 * will be logged and @a a_BadExpr is executed. */
108#if defined(RT_OS_DARWIN) || defined(RT_OS_LINUX)
109# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
110# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) \
111 do { \
112 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
113 { \
114 RTCCUINTREG fEfl = ASMGetFlags(); \
115 if (RT_LIKELY(fEfl & X86_EFL_AC)) \
116 { /* likely */ } \
117 else \
118 { \
119 supdrvBadContext(a_pDevExt, "SUPDrv.cpp", __LINE__, "EFLAGS.AC is 0!"); \
120 a_BadExpr; \
121 } \
122 } \
123 } while (0)
124#else
125# define SUPDRV_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
126# define SUPDRV_CHECK_SMAP_CHECK(a_pDevExt, a_BadExpr) NOREF(fKernelFeatures)
127#endif
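/* Editor's sketch of how these macros are meant to be used, not part of the
 * original file; the function name is hypothetical:
 *
 *     int supExampleWorker(PSUPDRVDEVEXT pDevExt)
 *     {
 *         SUPDRV_CHECK_SMAP_SETUP();
 *         SUPDRV_CHECK_SMAP_CHECK(pDevExt, return VERR_INTERNAL_ERROR);
 *         // ... work that must run with EFLAGS.AC set on SMAP-enabled hosts ...
 *         SUPDRV_CHECK_SMAP_CHECK(pDevExt, return VERR_INTERNAL_ERROR);
 *         return VINF_SUCCESS;
 *     }
 */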
128
129
130/*********************************************************************************************************************************
131* Internal Functions *
132*********************************************************************************************************************************/
133static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
134static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
135static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
136static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
137static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
138static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
139static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
140static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt);
141static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
142static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
143static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
144static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
145static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
146static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
147DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt);
148DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt);
149static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
150static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq);
151static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq);
152static int supdrvIOCtl_ResumeSuspendedKbds(void);
153
154
155/*********************************************************************************************************************************
156* Global Variables *
157*********************************************************************************************************************************/
158/**
159 * Array of the R0 SUP API.
160 *
161 * While making changes to these exports, make sure to update the IOC
162 * minor version (SUPDRV_IOC_VERSION).
163 *
164 * @remarks This array is processed by SUPR0-def-pe.sed and SUPR0-def-lx.sed to
165 * produce definition files from which import libraries are generated.
166 * Take care when commenting things and especially with \#ifdef'ing.
167 */
168static SUPFUNC g_aFunctions[] =
169{
170/* SED: START */
171 /* name function */
172 /* Entries with absolute addresses determined at runtime, fixup
173 code makes ugly ASSUMPTIONS about the order here: */
174 { "SUPR0AbsIs64bit", (void *)0 },
175 { "SUPR0Abs64bitKernelCS", (void *)0 },
176 { "SUPR0Abs64bitKernelSS", (void *)0 },
177 { "SUPR0Abs64bitKernelDS", (void *)0 },
178 { "SUPR0AbsKernelCS", (void *)0 },
179 { "SUPR0AbsKernelSS", (void *)0 },
180 { "SUPR0AbsKernelDS", (void *)0 },
181 { "SUPR0AbsKernelES", (void *)0 },
182 { "SUPR0AbsKernelFS", (void *)0 },
183 { "SUPR0AbsKernelGS", (void *)0 },
184 /* Normal function pointers: */
185 { "g_pSUPGlobalInfoPage", (void *)&g_pSUPGlobalInfoPage }, /* SED: DATA */
186 { "SUPGetGIP", (void *)SUPGetGIP },
187 { "SUPReadTscWithDelta", (void *)SUPReadTscWithDelta },
188 { "SUPGetTscDeltaSlow", (void *)SUPGetTscDeltaSlow },
189 { "SUPGetCpuHzFromGipForAsyncMode", (void *)SUPGetCpuHzFromGipForAsyncMode },
190 { "SUPIsTscFreqCompatible", (void *)SUPIsTscFreqCompatible },
191 { "SUPIsTscFreqCompatibleEx", (void *)SUPIsTscFreqCompatibleEx },
192 { "SUPR0BadContext", (void *)SUPR0BadContext },
193 { "SUPR0ComponentDeregisterFactory", (void *)SUPR0ComponentDeregisterFactory },
194 { "SUPR0ComponentQueryFactory", (void *)SUPR0ComponentQueryFactory },
195 { "SUPR0ComponentRegisterFactory", (void *)SUPR0ComponentRegisterFactory },
196 { "SUPR0ContAlloc", (void *)SUPR0ContAlloc },
197 { "SUPR0ContFree", (void *)SUPR0ContFree },
198 { "SUPR0ChangeCR4", (void *)SUPR0ChangeCR4 },
199 { "SUPR0EnableVTx", (void *)SUPR0EnableVTx },
200 { "SUPR0SuspendVTxOnCpu", (void *)SUPR0SuspendVTxOnCpu },
201 { "SUPR0ResumeVTxOnCpu", (void *)SUPR0ResumeVTxOnCpu },
202 { "SUPR0GetKernelFeatures", (void *)SUPR0GetKernelFeatures },
203 { "SUPR0GetPagingMode", (void *)SUPR0GetPagingMode },
204 { "SUPR0GetSvmUsability", (void *)SUPR0GetSvmUsability },
205 { "SUPR0GetVmxUsability", (void *)SUPR0GetVmxUsability },
206 { "SUPR0LockMem", (void *)SUPR0LockMem },
207 { "SUPR0LowAlloc", (void *)SUPR0LowAlloc },
208 { "SUPR0LowFree", (void *)SUPR0LowFree },
209 { "SUPR0MemAlloc", (void *)SUPR0MemAlloc },
210 { "SUPR0MemFree", (void *)SUPR0MemFree },
211 { "SUPR0MemGetPhys", (void *)SUPR0MemGetPhys },
212 { "SUPR0ObjAddRef", (void *)SUPR0ObjAddRef },
213 { "SUPR0ObjAddRefEx", (void *)SUPR0ObjAddRefEx },
214 { "SUPR0ObjRegister", (void *)SUPR0ObjRegister },
215 { "SUPR0ObjRelease", (void *)SUPR0ObjRelease },
216 { "SUPR0ObjVerifyAccess", (void *)SUPR0ObjVerifyAccess },
217 { "SUPR0PageAllocEx", (void *)SUPR0PageAllocEx },
218 { "SUPR0PageFree", (void *)SUPR0PageFree },
219 { "SUPR0Printf", (void *)SUPR0Printf },
220 { "SUPR0TscDeltaMeasureBySetIndex", (void *)SUPR0TscDeltaMeasureBySetIndex },
221 { "SUPR0TracerDeregisterDrv", (void *)SUPR0TracerDeregisterDrv },
222 { "SUPR0TracerDeregisterImpl", (void *)SUPR0TracerDeregisterImpl },
223 { "SUPR0TracerFireProbe", (void *)SUPR0TracerFireProbe },
224 { "SUPR0TracerRegisterDrv", (void *)SUPR0TracerRegisterDrv },
225 { "SUPR0TracerRegisterImpl", (void *)SUPR0TracerRegisterImpl },
226 { "SUPR0TracerRegisterModule", (void *)SUPR0TracerRegisterModule },
227 { "SUPR0TracerUmodProbeFire", (void *)SUPR0TracerUmodProbeFire },
228 { "SUPR0UnlockMem", (void *)SUPR0UnlockMem },
229 { "SUPSemEventClose", (void *)SUPSemEventClose },
230 { "SUPSemEventCreate", (void *)SUPSemEventCreate },
231 { "SUPSemEventGetResolution", (void *)SUPSemEventGetResolution },
232 { "SUPSemEventMultiClose", (void *)SUPSemEventMultiClose },
233 { "SUPSemEventMultiCreate", (void *)SUPSemEventMultiCreate },
234 { "SUPSemEventMultiGetResolution", (void *)SUPSemEventMultiGetResolution },
235 { "SUPSemEventMultiReset", (void *)SUPSemEventMultiReset },
236 { "SUPSemEventMultiSignal", (void *)SUPSemEventMultiSignal },
237 { "SUPSemEventMultiWait", (void *)SUPSemEventMultiWait },
238 { "SUPSemEventMultiWaitNoResume", (void *)SUPSemEventMultiWaitNoResume },
239 { "SUPSemEventMultiWaitNsAbsIntr", (void *)SUPSemEventMultiWaitNsAbsIntr },
240 { "SUPSemEventMultiWaitNsRelIntr", (void *)SUPSemEventMultiWaitNsRelIntr },
241 { "SUPSemEventSignal", (void *)SUPSemEventSignal },
242 { "SUPSemEventWait", (void *)SUPSemEventWait },
243 { "SUPSemEventWaitNoResume", (void *)SUPSemEventWaitNoResume },
244 { "SUPSemEventWaitNsAbsIntr", (void *)SUPSemEventWaitNsAbsIntr },
245 { "SUPSemEventWaitNsRelIntr", (void *)SUPSemEventWaitNsRelIntr },
246
247 { "RTAssertAreQuiet", (void *)RTAssertAreQuiet },
248 { "RTAssertMayPanic", (void *)RTAssertMayPanic },
249 { "RTAssertMsg1", (void *)RTAssertMsg1 },
250 { "RTAssertMsg2AddV", (void *)RTAssertMsg2AddV },
251 { "RTAssertMsg2V", (void *)RTAssertMsg2V },
252 { "RTAssertSetMayPanic", (void *)RTAssertSetMayPanic },
253 { "RTAssertSetQuiet", (void *)RTAssertSetQuiet },
254 { "RTCrc32", (void *)RTCrc32 },
255 { "RTCrc32Finish", (void *)RTCrc32Finish },
256 { "RTCrc32Process", (void *)RTCrc32Process },
257 { "RTCrc32Start", (void *)RTCrc32Start },
258 { "RTErrConvertFromErrno", (void *)RTErrConvertFromErrno },
259 { "RTErrConvertToErrno", (void *)RTErrConvertToErrno },
260 { "RTHandleTableAllocWithCtx", (void *)RTHandleTableAllocWithCtx },
261 { "RTHandleTableCreate", (void *)RTHandleTableCreate },
262 { "RTHandleTableCreateEx", (void *)RTHandleTableCreateEx },
263 { "RTHandleTableDestroy", (void *)RTHandleTableDestroy },
264 { "RTHandleTableFreeWithCtx", (void *)RTHandleTableFreeWithCtx },
265 { "RTHandleTableLookupWithCtx", (void *)RTHandleTableLookupWithCtx },
266 { "RTLogDefaultInstance", (void *)RTLogDefaultInstance },
267 { "RTLogDefaultInstanceEx", (void *)RTLogDefaultInstanceEx },
268 { "RTLogGetDefaultInstance", (void *)RTLogGetDefaultInstance },
269 { "RTLogGetDefaultInstanceEx", (void *)RTLogGetDefaultInstanceEx },
270 { "RTLogLoggerExV", (void *)RTLogLoggerExV },
271 { "RTLogPrintfV", (void *)RTLogPrintfV },
272 { "RTLogRelGetDefaultInstance", (void *)RTLogRelGetDefaultInstance },
273 { "RTLogRelGetDefaultInstanceEx", (void *)RTLogRelGetDefaultInstanceEx },
274 { "RTLogSetDefaultInstanceThread", (void *)RTLogSetDefaultInstanceThread },
275 { "RTMemAllocExTag", (void *)RTMemAllocExTag },
276 { "RTMemAllocTag", (void *)RTMemAllocTag },
277 { "RTMemAllocVarTag", (void *)RTMemAllocVarTag },
278 { "RTMemAllocZTag", (void *)RTMemAllocZTag },
279 { "RTMemAllocZVarTag", (void *)RTMemAllocZVarTag },
280 { "RTMemDupExTag", (void *)RTMemDupExTag },
281 { "RTMemDupTag", (void *)RTMemDupTag },
282 { "RTMemFree", (void *)RTMemFree },
283 { "RTMemFreeEx", (void *)RTMemFreeEx },
284 { "RTMemReallocTag", (void *)RTMemReallocTag },
285 { "RTMpCpuId", (void *)RTMpCpuId },
286 { "RTMpCpuIdFromSetIndex", (void *)RTMpCpuIdFromSetIndex },
287 { "RTMpCpuIdToSetIndex", (void *)RTMpCpuIdToSetIndex },
288 { "RTMpCurSetIndex", (void *)RTMpCurSetIndex },
289 { "RTMpCurSetIndexAndId", (void *)RTMpCurSetIndexAndId },
290 { "RTMpGetArraySize", (void *)RTMpGetArraySize },
291 { "RTMpGetCount", (void *)RTMpGetCount },
292 { "RTMpGetMaxCpuId", (void *)RTMpGetMaxCpuId },
293 { "RTMpGetOnlineCount", (void *)RTMpGetOnlineCount },
294 { "RTMpGetOnlineSet", (void *)RTMpGetOnlineSet },
295 { "RTMpGetSet", (void *)RTMpGetSet },
296 { "RTMpIsCpuOnline", (void *)RTMpIsCpuOnline },
297 { "RTMpIsCpuPossible", (void *)RTMpIsCpuPossible },
298 { "RTMpIsCpuWorkPending", (void *)RTMpIsCpuWorkPending },
299 { "RTMpNotificationDeregister", (void *)RTMpNotificationDeregister },
300 { "RTMpNotificationRegister", (void *)RTMpNotificationRegister },
301 { "RTMpOnAll", (void *)RTMpOnAll },
302 { "RTMpOnOthers", (void *)RTMpOnOthers },
303 { "RTMpOnSpecific", (void *)RTMpOnSpecific },
304 { "RTMpPokeCpu", (void *)RTMpPokeCpu },
305 { "RTNetIPv4AddDataChecksum", (void *)RTNetIPv4AddDataChecksum },
306 { "RTNetIPv4AddTCPChecksum", (void *)RTNetIPv4AddTCPChecksum },
307 { "RTNetIPv4AddUDPChecksum", (void *)RTNetIPv4AddUDPChecksum },
308 { "RTNetIPv4FinalizeChecksum", (void *)RTNetIPv4FinalizeChecksum },
309 { "RTNetIPv4HdrChecksum", (void *)RTNetIPv4HdrChecksum },
310 { "RTNetIPv4IsDHCPValid", (void *)RTNetIPv4IsDHCPValid },
311 { "RTNetIPv4IsHdrValid", (void *)RTNetIPv4IsHdrValid },
312 { "RTNetIPv4IsTCPSizeValid", (void *)RTNetIPv4IsTCPSizeValid },
313 { "RTNetIPv4IsTCPValid", (void *)RTNetIPv4IsTCPValid },
314 { "RTNetIPv4IsUDPSizeValid", (void *)RTNetIPv4IsUDPSizeValid },
315 { "RTNetIPv4IsUDPValid", (void *)RTNetIPv4IsUDPValid },
316 { "RTNetIPv4PseudoChecksum", (void *)RTNetIPv4PseudoChecksum },
317 { "RTNetIPv4PseudoChecksumBits", (void *)RTNetIPv4PseudoChecksumBits },
318 { "RTNetIPv4TCPChecksum", (void *)RTNetIPv4TCPChecksum },
319 { "RTNetIPv4UDPChecksum", (void *)RTNetIPv4UDPChecksum },
320 { "RTNetIPv6PseudoChecksum", (void *)RTNetIPv6PseudoChecksum },
321 { "RTNetIPv6PseudoChecksumBits", (void *)RTNetIPv6PseudoChecksumBits },
322 { "RTNetIPv6PseudoChecksumEx", (void *)RTNetIPv6PseudoChecksumEx },
323 { "RTNetTCPChecksum", (void *)RTNetTCPChecksum },
324 { "RTNetUDPChecksum", (void *)RTNetUDPChecksum },
325 { "RTPowerNotificationDeregister", (void *)RTPowerNotificationDeregister },
326 { "RTPowerNotificationRegister", (void *)RTPowerNotificationRegister },
327 { "RTProcSelf", (void *)RTProcSelf },
328 { "RTR0AssertPanicSystem", (void *)RTR0AssertPanicSystem },
329#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
330 { "RTR0DbgKrnlInfoOpen", (void *)RTR0DbgKrnlInfoOpen }, /* only-darwin, only-solaris */
331 { "RTR0DbgKrnlInfoQueryMember", (void *)RTR0DbgKrnlInfoQueryMember }, /* only-darwin, only-solaris */
332 { "RTR0DbgKrnlInfoQuerySymbol", (void *)RTR0DbgKrnlInfoQuerySymbol }, /* only-darwin, only-solaris */
333 { "RTR0DbgKrnlInfoRelease", (void *)RTR0DbgKrnlInfoRelease }, /* only-darwin, only-solaris */
334 { "RTR0DbgKrnlInfoRetain", (void *)RTR0DbgKrnlInfoRetain }, /* only-darwin, only-solaris */
335#endif
336 { "RTR0MemAreKrnlAndUsrDifferent", (void *)RTR0MemAreKrnlAndUsrDifferent },
337 { "RTR0MemKernelIsValidAddr", (void *)RTR0MemKernelIsValidAddr },
338 { "RTR0MemKernelCopyFrom", (void *)RTR0MemKernelCopyFrom },
339 { "RTR0MemKernelCopyTo", (void *)RTR0MemKernelCopyTo },
340 { "RTR0MemObjAddress", (void *)RTR0MemObjAddress },
341 { "RTR0MemObjAddressR3", (void *)RTR0MemObjAddressR3 },
342 { "RTR0MemObjAllocContTag", (void *)RTR0MemObjAllocContTag },
343 { "RTR0MemObjAllocLowTag", (void *)RTR0MemObjAllocLowTag },
344 { "RTR0MemObjAllocPageTag", (void *)RTR0MemObjAllocPageTag },
345 { "RTR0MemObjAllocPhysExTag", (void *)RTR0MemObjAllocPhysExTag },
346 { "RTR0MemObjAllocPhysNCTag", (void *)RTR0MemObjAllocPhysNCTag },
347 { "RTR0MemObjAllocPhysTag", (void *)RTR0MemObjAllocPhysTag },
348 { "RTR0MemObjEnterPhysTag", (void *)RTR0MemObjEnterPhysTag },
349 { "RTR0MemObjFree", (void *)RTR0MemObjFree },
350 { "RTR0MemObjGetPagePhysAddr", (void *)RTR0MemObjGetPagePhysAddr },
351 { "RTR0MemObjIsMapping", (void *)RTR0MemObjIsMapping },
352 { "RTR0MemObjLockUserTag", (void *)RTR0MemObjLockUserTag },
353 { "RTR0MemObjMapKernelExTag", (void *)RTR0MemObjMapKernelExTag },
354 { "RTR0MemObjMapKernelTag", (void *)RTR0MemObjMapKernelTag },
355 { "RTR0MemObjMapUserTag", (void *)RTR0MemObjMapUserTag },
356 { "RTR0MemObjProtect", (void *)RTR0MemObjProtect },
357 { "RTR0MemObjSize", (void *)RTR0MemObjSize },
358 { "RTR0MemUserCopyFrom", (void *)RTR0MemUserCopyFrom },
359 { "RTR0MemUserCopyTo", (void *)RTR0MemUserCopyTo },
360 { "RTR0MemUserIsValidAddr", (void *)RTR0MemUserIsValidAddr },
361 { "RTR0ProcHandleSelf", (void *)RTR0ProcHandleSelf },
362 { "RTSemEventCreate", (void *)RTSemEventCreate },
363 { "RTSemEventDestroy", (void *)RTSemEventDestroy },
364 { "RTSemEventGetResolution", (void *)RTSemEventGetResolution },
365 { "RTSemEventMultiCreate", (void *)RTSemEventMultiCreate },
366 { "RTSemEventMultiDestroy", (void *)RTSemEventMultiDestroy },
367 { "RTSemEventMultiGetResolution", (void *)RTSemEventMultiGetResolution },
368 { "RTSemEventMultiReset", (void *)RTSemEventMultiReset },
369 { "RTSemEventMultiSignal", (void *)RTSemEventMultiSignal },
370 { "RTSemEventMultiWait", (void *)RTSemEventMultiWait },
371 { "RTSemEventMultiWaitEx", (void *)RTSemEventMultiWaitEx },
372 { "RTSemEventMultiWaitExDebug", (void *)RTSemEventMultiWaitExDebug },
373 { "RTSemEventMultiWaitNoResume", (void *)RTSemEventMultiWaitNoResume },
374 { "RTSemEventSignal", (void *)RTSemEventSignal },
375 { "RTSemEventWait", (void *)RTSemEventWait },
376 { "RTSemEventWaitEx", (void *)RTSemEventWaitEx },
377 { "RTSemEventWaitExDebug", (void *)RTSemEventWaitExDebug },
378 { "RTSemEventWaitNoResume", (void *)RTSemEventWaitNoResume },
379 { "RTSemFastMutexCreate", (void *)RTSemFastMutexCreate },
380 { "RTSemFastMutexDestroy", (void *)RTSemFastMutexDestroy },
381 { "RTSemFastMutexRelease", (void *)RTSemFastMutexRelease },
382 { "RTSemFastMutexRequest", (void *)RTSemFastMutexRequest },
383 { "RTSemMutexCreate", (void *)RTSemMutexCreate },
384 { "RTSemMutexDestroy", (void *)RTSemMutexDestroy },
385 { "RTSemMutexRelease", (void *)RTSemMutexRelease },
386 { "RTSemMutexRequest", (void *)RTSemMutexRequest },
387 { "RTSemMutexRequestDebug", (void *)RTSemMutexRequestDebug },
388 { "RTSemMutexRequestNoResume", (void *)RTSemMutexRequestNoResume },
389 { "RTSemMutexRequestNoResumeDebug", (void *)RTSemMutexRequestNoResumeDebug },
390 { "RTSpinlockAcquire", (void *)RTSpinlockAcquire },
391 { "RTSpinlockCreate", (void *)RTSpinlockCreate },
392 { "RTSpinlockDestroy", (void *)RTSpinlockDestroy },
393 { "RTSpinlockRelease", (void *)RTSpinlockRelease },
394 { "RTStrCopy", (void *)RTStrCopy },
395 { "RTStrDupTag", (void *)RTStrDupTag },
396 { "RTStrFormat", (void *)RTStrFormat },
397 { "RTStrFormatNumber", (void *)RTStrFormatNumber },
398 { "RTStrFormatTypeDeregister", (void *)RTStrFormatTypeDeregister },
399 { "RTStrFormatTypeRegister", (void *)RTStrFormatTypeRegister },
400 { "RTStrFormatTypeSetUser", (void *)RTStrFormatTypeSetUser },
401 { "RTStrFormatV", (void *)RTStrFormatV },
402 { "RTStrFree", (void *)RTStrFree },
403 { "RTStrNCmp", (void *)RTStrNCmp },
404 { "RTStrPrintf", (void *)RTStrPrintf },
405 { "RTStrPrintfEx", (void *)RTStrPrintfEx },
406 { "RTStrPrintfExV", (void *)RTStrPrintfExV },
407 { "RTStrPrintfV", (void *)RTStrPrintfV },
408 { "RTThreadCreate", (void *)RTThreadCreate },
409 { "RTThreadCtxHookIsEnabled", (void *)RTThreadCtxHookIsEnabled },
410 { "RTThreadCtxHookCreate", (void *)RTThreadCtxHookCreate },
411 { "RTThreadCtxHookDestroy", (void *)RTThreadCtxHookDestroy },
412 { "RTThreadCtxHookDisable", (void *)RTThreadCtxHookDisable },
413 { "RTThreadCtxHookEnable", (void *)RTThreadCtxHookEnable },
414 { "RTThreadGetName", (void *)RTThreadGetName },
415 { "RTThreadGetNative", (void *)RTThreadGetNative },
416 { "RTThreadGetType", (void *)RTThreadGetType },
417 { "RTThreadIsInInterrupt", (void *)RTThreadIsInInterrupt },
418 { "RTThreadNativeSelf", (void *)RTThreadNativeSelf },
419 { "RTThreadPreemptDisable", (void *)RTThreadPreemptDisable },
420 { "RTThreadPreemptIsEnabled", (void *)RTThreadPreemptIsEnabled },
421 { "RTThreadPreemptIsPending", (void *)RTThreadPreemptIsPending },
422 { "RTThreadPreemptIsPendingTrusty", (void *)RTThreadPreemptIsPendingTrusty },
423 { "RTThreadPreemptIsPossible", (void *)RTThreadPreemptIsPossible },
424 { "RTThreadPreemptRestore", (void *)RTThreadPreemptRestore },
425 { "RTThreadSelf", (void *)RTThreadSelf },
426 { "RTThreadSelfName", (void *)RTThreadSelfName },
427 { "RTThreadSleep", (void *)RTThreadSleep },
428 { "RTThreadUserReset", (void *)RTThreadUserReset },
429 { "RTThreadUserSignal", (void *)RTThreadUserSignal },
430 { "RTThreadUserWait", (void *)RTThreadUserWait },
431 { "RTThreadUserWaitNoResume", (void *)RTThreadUserWaitNoResume },
432 { "RTThreadWait", (void *)RTThreadWait },
433 { "RTThreadWaitNoResume", (void *)RTThreadWaitNoResume },
434 { "RTThreadYield", (void *)RTThreadYield },
435 { "RTTimeMilliTS", (void *)RTTimeMilliTS },
436 { "RTTimeNanoTS", (void *)RTTimeNanoTS },
437 { "RTTimeNow", (void *)RTTimeNow },
438 { "RTTimerCanDoHighResolution", (void *)RTTimerCanDoHighResolution },
439 { "RTTimerChangeInterval", (void *)RTTimerChangeInterval },
440 { "RTTimerCreate", (void *)RTTimerCreate },
441 { "RTTimerCreateEx", (void *)RTTimerCreateEx },
442 { "RTTimerDestroy", (void *)RTTimerDestroy },
443 { "RTTimerGetSystemGranularity", (void *)RTTimerGetSystemGranularity },
444 { "RTTimerReleaseSystemGranularity", (void *)RTTimerReleaseSystemGranularity },
445 { "RTTimerRequestSystemGranularity", (void *)RTTimerRequestSystemGranularity },
446 { "RTTimerStart", (void *)RTTimerStart },
447 { "RTTimerStop", (void *)RTTimerStop },
448 { "RTTimeSystemMilliTS", (void *)RTTimeSystemMilliTS },
449 { "RTTimeSystemNanoTS", (void *)RTTimeSystemNanoTS },
450 { "RTUuidCompare", (void *)RTUuidCompare },
451 { "RTUuidCompareStr", (void *)RTUuidCompareStr },
452 { "RTUuidFromStr", (void *)RTUuidFromStr },
453/* SED: END */
454};
455
456#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
457/**
458 * Drag in the rest of IPRT since we share it with the
459 * rest of the kernel modules on darwin.
460 */
461PFNRT g_apfnVBoxDrvIPRTDeps[] =
462{
463 /* VBoxNetAdp */
464 (PFNRT)RTRandBytes,
465 /* VBoxUSB */
466 (PFNRT)RTPathStripFilename,
467 NULL
468};
469#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
470
471
472/**
473 * Initializes the device extension structure.
474 *
475 * @returns IPRT status code.
476 * @param pDevExt The device extension to initialize.
477 * @param cbSession The size of the session structure. The size of
478 * SUPDRVSESSION may be smaller when SUPDRV_AGNOSTIC is
479 * defined because we're skipping the OS specific members
480 * then.
481 */
482int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt, size_t cbSession)
483{
484 int rc;
485
486#ifdef SUPDRV_WITH_RELEASE_LOGGER
487 /*
488 * Create the release log.
489 */
490 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
491 PRTLOGGER pRelLogger;
492 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
493 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
494 if (RT_SUCCESS(rc))
495 RTLogRelSetDefaultInstance(pRelLogger);
496 /** @todo Add native hook for getting logger config parameters and setting
497 * them. On linux we should use the module parameter stuff... */
498#endif
499
500 /*
501 * Initialize it.
502 */
503 memset(pDevExt, 0, sizeof(*pDevExt)); /* Does not wipe OS specific tail section of the structure. */
504 pDevExt->Spinlock = NIL_RTSPINLOCK;
505 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
506 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
507#ifdef SUPDRV_USE_MUTEX_FOR_LDR
508 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
509#else
510 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
511#endif
512#ifdef SUPDRV_USE_MUTEX_FOR_GIP
513 pDevExt->mtxGip = NIL_RTSEMMUTEX;
514 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
515#else
516 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
517 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
518#endif
519
520 rc = RTSpinlockCreate(&pDevExt->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvDevExt");
521 if (RT_SUCCESS(rc))
522 rc = RTSpinlockCreate(&pDevExt->hGipSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvGip");
523 if (RT_SUCCESS(rc))
524 rc = RTSpinlockCreate(&pDevExt->hSessionHashTabSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "SUPDrvSession");
525
526 if (RT_SUCCESS(rc))
527#ifdef SUPDRV_USE_MUTEX_FOR_LDR
528 rc = RTSemMutexCreate(&pDevExt->mtxLdr);
529#else
530 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
531#endif
532 if (RT_SUCCESS(rc))
533#ifdef SUPDRV_USE_MUTEX_FOR_GIP
534 rc = RTSemMutexCreate(&pDevExt->mtxTscDelta);
535#else
536 rc = RTSemFastMutexCreate(&pDevExt->mtxTscDelta);
537#endif
538 if (RT_SUCCESS(rc))
539 {
540 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
541 if (RT_SUCCESS(rc))
542 {
543#ifdef SUPDRV_USE_MUTEX_FOR_GIP
544 rc = RTSemMutexCreate(&pDevExt->mtxGip);
545#else
546 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
547#endif
548 if (RT_SUCCESS(rc))
549 {
550 rc = supdrvGipCreate(pDevExt);
551 if (RT_SUCCESS(rc))
552 {
553 rc = supdrvTracerInit(pDevExt);
554 if (RT_SUCCESS(rc))
555 {
556 pDevExt->pLdrInitImage = NULL;
557 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
558 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
559 pDevExt->cbSession = (uint32_t)cbSession;
560
561 /*
562 * Fixup the absolute symbols.
563 *
564 * Because of the table indexing assumptions we'll have a little #ifdef orgy
565 * here rather than distributing this to OS specific files. At least for now.
566 */
567#ifdef RT_OS_DARWIN
568# if ARCH_BITS == 32
569 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
570 {
571 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
572 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
573 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
574 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
575 }
576 else
577 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[4].pfn = (void *)0;
578 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
579 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
580 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
581 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
582 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
583 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
584# else /* 64-bit darwin: */
585 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
586 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
587 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
588 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
589 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
590 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
591 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
592 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
593 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
594 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
595
596# endif
597#else /* !RT_OS_DARWIN */
598# if ARCH_BITS == 64
599 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
600 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
601 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
602 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
603# else
604 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[4].pfn = (void *)0;
605# endif
606 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
607 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
608 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
609 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
610 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
611 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
612#endif /* !RT_OS_DARWIN */
613 return VINF_SUCCESS;
614 }
615
616 supdrvGipDestroy(pDevExt);
617 }
618
619#ifdef SUPDRV_USE_MUTEX_FOR_GIP
620 RTSemMutexDestroy(pDevExt->mtxGip);
621 pDevExt->mtxGip = NIL_RTSEMMUTEX;
622#else
623 RTSemFastMutexDestroy(pDevExt->mtxGip);
624 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
625#endif
626 }
627 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
628 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
629 }
630 }
631
632#ifdef SUPDRV_USE_MUTEX_FOR_GIP
633 RTSemMutexDestroy(pDevExt->mtxTscDelta);
634 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
635#else
636 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
637 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
638#endif
639#ifdef SUPDRV_USE_MUTEX_FOR_LDR
640 RTSemMutexDestroy(pDevExt->mtxLdr);
641 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
642#else
643 RTSemFastMutexDestroy(pDevExt->mtxLdr);
644 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
645#endif
646 RTSpinlockDestroy(pDevExt->Spinlock);
647 pDevExt->Spinlock = NIL_RTSPINLOCK;
648 RTSpinlockDestroy(pDevExt->hGipSpinlock);
649 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
650 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
651 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
652
653#ifdef SUPDRV_WITH_RELEASE_LOGGER
654 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
655 RTLogDestroy(RTLogSetDefaultInstance(NULL));
656#endif
657
658 return rc;
659}
660
661
662/**
663 * Delete the device extension (e.g. cleanup members).
664 *
665 * @param pDevExt The device extension to delete.
666 */
667void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
668{
669 PSUPDRVOBJ pObj;
670 PSUPDRVUSAGE pUsage;
671
672 /*
673 * Kill mutexes and spinlocks.
674 */
675#ifdef SUPDRV_USE_MUTEX_FOR_GIP
676 RTSemMutexDestroy(pDevExt->mtxGip);
677 pDevExt->mtxGip = NIL_RTSEMMUTEX;
678 RTSemMutexDestroy(pDevExt->mtxTscDelta);
679 pDevExt->mtxTscDelta = NIL_RTSEMMUTEX;
680#else
681 RTSemFastMutexDestroy(pDevExt->mtxGip);
682 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
683 RTSemFastMutexDestroy(pDevExt->mtxTscDelta);
684 pDevExt->mtxTscDelta = NIL_RTSEMFASTMUTEX;
685#endif
686#ifdef SUPDRV_USE_MUTEX_FOR_LDR
687 RTSemMutexDestroy(pDevExt->mtxLdr);
688 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
689#else
690 RTSemFastMutexDestroy(pDevExt->mtxLdr);
691 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
692#endif
693 RTSpinlockDestroy(pDevExt->Spinlock);
694 pDevExt->Spinlock = NIL_RTSPINLOCK;
695 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
696 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
697 RTSpinlockDestroy(pDevExt->hSessionHashTabSpinlock);
698 pDevExt->hSessionHashTabSpinlock = NIL_RTSPINLOCK;
699
700 /*
701 * Free lists.
702 */
703 /* objects. */
704 pObj = pDevExt->pObjs;
705 Assert(!pObj); /* (can trigger on forced unloads) */
706 pDevExt->pObjs = NULL;
707 while (pObj)
708 {
709 void *pvFree = pObj;
710 pObj = pObj->pNext;
711 RTMemFree(pvFree);
712 }
713
714 /* usage records. */
715 pUsage = pDevExt->pUsageFree;
716 pDevExt->pUsageFree = NULL;
717 while (pUsage)
718 {
719 void *pvFree = pUsage;
720 pUsage = pUsage->pNext;
721 RTMemFree(pvFree);
722 }
723
724 /* kill the GIP. */
725 supdrvGipDestroy(pDevExt);
726 RTSpinlockDestroy(pDevExt->hGipSpinlock);
727 pDevExt->hGipSpinlock = NIL_RTSPINLOCK;
728
729 supdrvTracerTerm(pDevExt);
730
731#ifdef SUPDRV_WITH_RELEASE_LOGGER
732 /* destroy the loggers. */
733 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
734 RTLogDestroy(RTLogSetDefaultInstance(NULL));
735#endif
736}
737
738
739/**
740 * Create session.
741 *
742 * @returns IPRT status code.
743 * @param pDevExt Device extension.
744 * @param fUser Flag indicating whether this is a user or kernel
745 * session.
746 * @param fUnrestricted Unrestricted access (system) or restricted access
747 * (user)?
748 * @param ppSession Where to store the pointer to the session data.
749 */
750int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, bool fUnrestricted, PSUPDRVSESSION *ppSession)
751{
752 int rc;
753 PSUPDRVSESSION pSession;
754
755 if (!SUP_IS_DEVEXT_VALID(pDevExt))
756 return VERR_INVALID_PARAMETER;
757
758 /*
759 * Allocate memory for the session data.
760 */
761 pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(pDevExt->cbSession);
762 if (pSession)
763 {
764 /* Initialize session data. */
765 rc = RTSpinlockCreate(&pSession->Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "SUPDrvSession");
766 if (!rc)
767 {
768 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
769 RTHANDLETABLE_FLAGS_LOCKED_IRQ_SAFE | RTHANDLETABLE_FLAGS_CONTEXT,
770 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
771 if (RT_SUCCESS(rc))
772 {
773 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
774 pSession->pDevExt = pDevExt;
775 pSession->u32Cookie = BIRD_INV;
776 pSession->fUnrestricted = fUnrestricted;
777 /*pSession->fInHashTable = false; */
778 pSession->cRefs = 1;
779 /*pSession->pCommonNextHash = NULL;
780 pSession->ppOsSessionPtr = NULL; */
781 if (fUser)
782 {
783 pSession->Process = RTProcSelf();
784 pSession->R0Process = RTR0ProcHandleSelf();
785 }
786 else
787 {
788 pSession->Process = NIL_RTPROCESS;
789 pSession->R0Process = NIL_RTR0PROCESS;
790 }
791 /*pSession->pLdrUsage = NULL;
792 pSession->pVM = NULL;
793 pSession->pUsage = NULL;
794 pSession->pGip = NULL;
795 pSession->fGipReferenced = false;
796 pSession->Bundle.cUsed = 0; */
797 pSession->Uid = NIL_RTUID;
798 pSession->Gid = NIL_RTGID;
799 /*pSession->uTracerData = 0;*/
800 pSession->hTracerCaller = NIL_RTNATIVETHREAD;
801 RTListInit(&pSession->TpProviders);
802 /*pSession->cTpProviders = 0;*/
803 /*pSession->cTpProbesFiring = 0;*/
804 RTListInit(&pSession->TpUmods);
805 /*RT_ZERO(pSession->apTpLookupTable);*/
806
807 VBOXDRV_SESSION_CREATE(pSession, fUser);
808 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
809 return VINF_SUCCESS;
810 }
811
812 RTSpinlockDestroy(pSession->Spinlock);
813 }
814 RTMemFree(pSession);
815 *ppSession = NULL;
816 Log(("Failed to create spinlock, rc=%d!\n", rc));
817 }
818 else
819 rc = VERR_NO_MEMORY;
820
821 return rc;
822}
823
824
825/**
826 * Cleans up the session in the context of the process to which it belongs, the
827 * caller will free the session and the session spinlock.
828 *
829 * This should normally occur when the session is closed or as the process
830 * exits. Careful reference counting in the OS specific code makes sure that
831 * there cannot be any races between process/handle cleanup callbacks and
832 * threads doing I/O control calls.
833 *
834 * @param pDevExt The device extension.
835 * @param pSession Session data.
836 */
837static void supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
838{
839 int rc;
840 PSUPDRVBUNDLE pBundle;
841 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
842
843 Assert(!pSession->fInHashTable);
844 Assert(!pSession->ppOsSessionPtr);
845 AssertReleaseMsg(pSession->R0Process == RTR0ProcHandleSelf() || pSession->R0Process == NIL_RTR0PROCESS,
846 ("R0Process=%p cur=%p; Process=%u curpid=%u\n", RTR0ProcHandleSelf(), RTProcSelf()));
847
848 /*
849 * Remove logger instances related to this session.
850 */
851 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
852
853 /*
854 * Destroy the handle table.
855 */
856 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
857 AssertRC(rc);
858 pSession->hHandleTable = NIL_RTHANDLETABLE;
859
860 /*
861 * Release object references made in this session.
862 * In theory there should be no one racing us in this session.
863 */
864 Log2(("release objects - start\n"));
865 if (pSession->pUsage)
866 {
867 PSUPDRVUSAGE pUsage;
868 RTSpinlockAcquire(pDevExt->Spinlock);
869
870 while ((pUsage = pSession->pUsage) != NULL)
871 {
872 PSUPDRVOBJ pObj = pUsage->pObj;
873 pSession->pUsage = pUsage->pNext;
874
875 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
876 if (pUsage->cUsage < pObj->cUsage)
877 {
878 pObj->cUsage -= pUsage->cUsage;
879 RTSpinlockRelease(pDevExt->Spinlock);
880 }
881 else
882 {
883 /* Destroy the object and free the record. */
884 if (pDevExt->pObjs == pObj)
885 pDevExt->pObjs = pObj->pNext;
886 else
887 {
888 PSUPDRVOBJ pObjPrev;
889 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
890 if (pObjPrev->pNext == pObj)
891 {
892 pObjPrev->pNext = pObj->pNext;
893 break;
894 }
895 Assert(pObjPrev);
896 }
897 RTSpinlockRelease(pDevExt->Spinlock);
898
899 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
900 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
901 if (pObj->pfnDestructor)
902 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
903 RTMemFree(pObj);
904 }
905
906 /* free it and continue. */
907 RTMemFree(pUsage);
908
909 RTSpinlockAcquire(pDevExt->Spinlock);
910 }
911
912 RTSpinlockRelease(pDevExt->Spinlock);
913 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
914 }
915 Log2(("release objects - done\n"));
916
917 /*
918 * Do tracer cleanups related to this session.
919 */
920 Log2(("release tracer stuff - start\n"));
921 supdrvTracerCleanupSession(pDevExt, pSession);
922 Log2(("release tracer stuff - end\n"));
923
924 /*
925 * Release memory allocated in the session.
926 *
927 * We do not serialize this as we assume that the application will
928 * not allocate memory while closing the file handle object.
929 */
930 Log2(("freeing memory:\n"));
931 pBundle = &pSession->Bundle;
932 while (pBundle)
933 {
934 PSUPDRVBUNDLE pToFree;
935 unsigned i;
936
937 /*
938 * Check and unlock all entries in the bundle.
939 */
940 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
941 {
942 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
943 {
944 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
945 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
946 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
947 {
948 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
949 AssertRC(rc); /** @todo figure out how to handle this. */
950 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
951 }
952 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
953 AssertRC(rc); /** @todo figure out how to handle this. */
954 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
955 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
956 }
957 }
958
959 /*
960 * Advance and free previous bundle.
961 */
962 pToFree = pBundle;
963 pBundle = pBundle->pNext;
964
965 pToFree->pNext = NULL;
966 pToFree->cUsed = 0;
967 if (pToFree != &pSession->Bundle)
968 RTMemFree(pToFree);
969 }
970 Log2(("freeing memory - done\n"));
971
972 /*
973 * Deregister component factories.
974 */
975 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
976 Log2(("deregistering component factories:\n"));
977 if (pDevExt->pComponentFactoryHead)
978 {
979 PSUPDRVFACTORYREG pPrev = NULL;
980 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
981 while (pCur)
982 {
983 if (pCur->pSession == pSession)
984 {
985 /* unlink it */
986 PSUPDRVFACTORYREG pNext = pCur->pNext;
987 if (pPrev)
988 pPrev->pNext = pNext;
989 else
990 pDevExt->pComponentFactoryHead = pNext;
991
992 /* free it */
993 pCur->pNext = NULL;
994 pCur->pSession = NULL;
995 pCur->pFactory = NULL;
996 RTMemFree(pCur);
997
998 /* next */
999 pCur = pNext;
1000 }
1001 else
1002 {
1003 /* next */
1004 pPrev = pCur;
1005 pCur = pCur->pNext;
1006 }
1007 }
1008 }
1009 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
1010 Log2(("deregistering component factories - done\n"));
1011
1012 /*
1013 * Loaded images need to be dereferenced and possibly freed up.
1014 */
1015 supdrvLdrLock(pDevExt);
1016 Log2(("freeing images:\n"));
1017 if (pSession->pLdrUsage)
1018 {
1019 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
1020 pSession->pLdrUsage = NULL;
1021 while (pUsage)
1022 {
1023 void *pvFree = pUsage;
1024 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
1025 if (pImage->cUsage > pUsage->cUsage)
1026 pImage->cUsage -= pUsage->cUsage;
1027 else
1028 supdrvLdrFree(pDevExt, pImage);
1029 pUsage->pImage = NULL;
1030 pUsage = pUsage->pNext;
1031 RTMemFree(pvFree);
1032 }
1033 }
1034 supdrvLdrUnlock(pDevExt);
1035 Log2(("freeing images - done\n"));
1036
1037 /*
1038 * Unmap the GIP.
1039 */
1040 Log2(("umapping GIP:\n"));
1041 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
1042 {
1043 SUPR0GipUnmap(pSession);
1044 pSession->fGipReferenced = 0;
1045 }
1046 Log2(("umapping GIP - done\n"));
1047}
1048
1049
1050/**
1051 * Common code for freeing a session when the reference count reaches zero.
1052 *
1053 * @param pDevExt Device extension.
1054 * @param pSession Session data.
1055 * This data will be freed by this routine.
1056 */
1057static void supdrvDestroySession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1058{
1059 VBOXDRV_SESSION_CLOSE(pSession);
1060
1061 /*
1062 * Cleanup the session first.
1063 */
1064 supdrvCleanupSession(pDevExt, pSession);
1065 supdrvOSCleanupSession(pDevExt, pSession);
1066
1067 /*
1068 * Free the rest of the session stuff.
1069 */
1070 RTSpinlockDestroy(pSession->Spinlock);
1071 pSession->Spinlock = NIL_RTSPINLOCK;
1072 pSession->pDevExt = NULL;
1073 RTMemFree(pSession);
1074 LogFlow(("supdrvDestroySession: returns\n"));
1075}
1076
1077
1078/**
1079 * Inserts the session into the global hash table.
1080 *
1081 * @retval VINF_SUCCESS on success.
1082 * @retval VERR_WRONG_ORDER if the session was already inserted (asserted).
1083 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1084 * session (asserted).
1085 * @retval VERR_DUPLICATE if there is already a session for that pid.
1086 *
1087 * @param pDevExt The device extension.
1088 * @param pSession The session.
1089 * @param ppOsSessionPtr Pointer to the OS session pointer, if any is
1090 * available and used. This will be set to point to the
1091 * session while under the protection of the session
1092 * hash table spinlock. It will also be kept in
1093 * PSUPDRVSESSION::ppOsSessionPtr for lookup and
1094 * cleanup use.
1095 * @param pvUser Argument for supdrvOSSessionHashTabInserted.
1096 */
1097int VBOXCALL supdrvSessionHashTabInsert(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVSESSION *ppOsSessionPtr,
1098 void *pvUser)
1099{
1100 PSUPDRVSESSION pCur;
1101 unsigned iHash;
1102
1103 /*
1104 * Validate input.
1105 */
1106 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1107 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1108
1109 /*
1110 * Calculate the hash table index and acquire the spinlock.
1111 */
1112 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1113
1114 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1115
1116 /*
1117 * If there is a collision, we need to carefully check if we got a
1118 * duplicate. There can only be one open session per process.
1119 */
1120 pCur = pDevExt->apSessionHashTab[iHash];
1121 if (pCur)
1122 {
1123 while (pCur && pCur->Process != pSession->Process)
1124 pCur = pCur->pCommonNextHash;
1125
1126 if (pCur)
1127 {
1128 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1129 if (pCur == pSession)
1130 {
1131 Assert(pSession->fInHashTable);
1132 AssertFailed();
1133 return VERR_WRONG_ORDER;
1134 }
1135 Assert(!pSession->fInHashTable);
1136 if (pCur->R0Process == pSession->R0Process)
1137 return VERR_RESOURCE_IN_USE;
1138 return VERR_DUPLICATE;
1139 }
1140 }
1141 Assert(!pSession->fInHashTable);
1142 Assert(!pSession->ppOsSessionPtr);
1143
1144 /*
1145 * Insert it, doing a callout to the OS specific code in case it has
1146 * anything it wishes to do while we're holding the spinlock.
1147 */
1148 pSession->pCommonNextHash = pDevExt->apSessionHashTab[iHash];
1149 pDevExt->apSessionHashTab[iHash] = pSession;
1150 pSession->fInHashTable = true;
1151 ASMAtomicIncS32(&pDevExt->cSessions);
1152
1153 pSession->ppOsSessionPtr = ppOsSessionPtr;
1154 if (ppOsSessionPtr)
1155 ASMAtomicWritePtr(ppOsSessionPtr, pSession);
1156
1157 supdrvOSSessionHashTabInserted(pDevExt, pSession, pvUser);
1158
1159 /*
1160 * Retain a reference for the pointer in the session table.
1161 */
1162 ASMAtomicIncU32(&pSession->cRefs);
1163
1164 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1165 return VINF_SUCCESS;
1166}
1167
1168
1169/**
1170 * Removes the session from the global hash table.
1171 *
1172 * @retval VINF_SUCCESS on success.
1173 * @retval VERR_NOT_FOUND if the session was already removed (asserted).
1174 * @retval VERR_INVALID_PARAMETER if the session handle is invalid or a ring-0
1175 * session (asserted).
1176 *
1177 * @param pDevExt The device extension.
1178 * @param pSession The session. The caller is expected to have a reference
1179 * to this so it won't croak on us when we release the hash
1180 * table reference.
1181 * @param pvUser OS specific context value for the
1182 * supdrvOSSessionHashTabRemoved callback.
1183 */
1184int VBOXCALL supdrvSessionHashTabRemove(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, void *pvUser)
1185{
1186 PSUPDRVSESSION pCur;
1187 unsigned iHash;
1188 int32_t cRefs;
1189
1190 /*
1191 * Validate input.
1192 */
1193 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1194 AssertReturn(pSession->R0Process != NIL_RTR0PROCESS, VERR_INVALID_PARAMETER);
1195
1196 /*
1197 * Calculate the hash table index and acquire the spinlock.
1198 */
1199 iHash = SUPDRV_SESSION_HASH(pSession->Process);
1200
1201 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1202
1203 /*
1204 * Unlink it.
1205 */
1206 pCur = pDevExt->apSessionHashTab[iHash];
1207 if (pCur == pSession)
1208 pDevExt->apSessionHashTab[iHash] = pSession->pCommonNextHash;
1209 else
1210 {
1211 PSUPDRVSESSION pPrev = pCur;
1212 while (pCur && pCur != pSession)
1213 {
1214 pPrev = pCur;
1215 pCur = pCur->pCommonNextHash;
1216 }
1217 if (pCur)
1218 pPrev->pCommonNextHash = pCur->pCommonNextHash;
1219 else
1220 {
1221 Assert(!pSession->fInHashTable);
1222 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1223 return VERR_NOT_FOUND;
1224 }
1225 }
1226
1227 pSession->pCommonNextHash = NULL;
1228 pSession->fInHashTable = false;
1229
1230 ASMAtomicDecS32(&pDevExt->cSessions);
1231
1232 /*
1233 * Clear OS specific session pointer if available and do the OS callback.
1234 */
1235 if (pSession->ppOsSessionPtr)
1236 {
1237 ASMAtomicCmpXchgPtr(pSession->ppOsSessionPtr, NULL, pSession);
1238 pSession->ppOsSessionPtr = NULL;
1239 }
1240
1241 supdrvOSSessionHashTabRemoved(pDevExt, pSession, pvUser);
1242
1243 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1244
1245 /*
1246 * Drop the reference the hash table had to the session. This shouldn't
1247 * be the last reference!
1248 */
1249 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1250 Assert(cRefs > 0 && cRefs < _1M);
1251 if (cRefs == 0)
1252 supdrvDestroySession(pDevExt, pSession);
1253
1254 return VINF_SUCCESS;
1255}
1256
1257
1258/**
1259 * Looks up the session for the current process in the global hash table or in
1260 * OS specific pointer.
1261 *
1262 * @returns Pointer to the session with a reference that the caller must
1263 * release. If no valid session was found, NULL is returned.
1264 *
1265 * @param pDevExt The device extension.
1266 * @param Process The process ID.
1267 * @param R0Process The ring-0 process handle.
1268 * @param ppOsSessionPtr The OS session pointer if available. If not NULL,
1269 * this is used instead of the hash table. For
1270 * additional safety it must then be equal to the
1271 * SUPDRVSESSION::ppOsSessionPtr member.
1272 * This can be NULL even if the OS has a session
1273 * pointer.
1274 */
1275PSUPDRVSESSION VBOXCALL supdrvSessionHashTabLookup(PSUPDRVDEVEXT pDevExt, RTPROCESS Process, RTR0PROCESS R0Process,
1276 PSUPDRVSESSION *ppOsSessionPtr)
1277{
1278 PSUPDRVSESSION pCur;
1279 unsigned iHash;
1280
1281 /*
1282 * Validate input.
1283 */
1284 AssertReturn(R0Process != NIL_RTR0PROCESS, NULL);
1285
1286 /*
1287 * Calculate the hash table index and acquire the spinlock.
1288 */
1289 iHash = SUPDRV_SESSION_HASH(Process);
1290
1291 RTSpinlockAcquire(pDevExt->hSessionHashTabSpinlock);
1292
1293 /*
1294 * If an OS session pointer is provided, always use it.
1295 */
1296 if (ppOsSessionPtr)
1297 {
1298 pCur = *ppOsSessionPtr;
1299 if ( pCur
1300 && ( pCur->ppOsSessionPtr != ppOsSessionPtr
1301 || pCur->Process != Process
1302 || pCur->R0Process != R0Process) )
1303 pCur = NULL;
1304 }
1305 else
1306 {
1307 /*
1308 * Otherwise, do the hash table lookup.
1309 */
1310 pCur = pDevExt->apSessionHashTab[iHash];
1311 while ( pCur
1312 && ( pCur->Process != Process
1313 || pCur->R0Process != R0Process) )
1314 pCur = pCur->pCommonNextHash;
1315 }
1316
1317 /*
1318 * Retain the session.
1319 */
1320 if (pCur)
1321 {
1322 uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
1323 NOREF(cRefs);
1324 Assert(cRefs > 1 && cRefs < _1M);
1325 }
1326
1327 RTSpinlockRelease(pDevExt->hSessionHashTabSpinlock);
1328
1329 return pCur;
1330}
1331
1332
1333/**
1334 * Retain a session to make sure it doesn't go away while it is in use.
1335 *
1336 * @returns New reference count on success, UINT32_MAX on failure.
1337 * @param pSession Session data.
1338 */
1339uint32_t VBOXCALL supdrvSessionRetain(PSUPDRVSESSION pSession)
1340{
1341 uint32_t cRefs;
1342 AssertPtrReturn(pSession, UINT32_MAX);
1343 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1344
1345 cRefs = ASMAtomicIncU32(&pSession->cRefs);
1346 AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1347 return cRefs;
1348}
1349
1350
1351/**
1352 * Releases a given session.
1353 *
1354 * @returns New reference count on success (0 if closed), UINT32_MAX on failure.
1355 * @param pSession Session data.
1356 */
1357uint32_t VBOXCALL supdrvSessionRelease(PSUPDRVSESSION pSession)
1358{
1359 uint32_t cRefs;
1360 AssertPtrReturn(pSession, UINT32_MAX);
1361 AssertReturn(SUP_IS_SESSION_VALID(pSession), UINT32_MAX);
1362
1363 cRefs = ASMAtomicDecU32(&pSession->cRefs);
1364 AssertMsg(cRefs < _1M, ("%#x %p\n", cRefs, pSession));
1365 if (cRefs == 0)
1366 supdrvDestroySession(pSession->pDevExt, pSession);
1367 return cRefs;
1368}
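/* Editor's note, not part of the original file: sessions obtained from
 * supdrvSessionHashTabLookup() carry an extra reference that the caller must
 * drop again, roughly like this:
 *
 *     PSUPDRVSESSION pSession = supdrvSessionHashTabLookup(pDevExt, RTProcSelf(),
 *                                                          RTR0ProcHandleSelf(), NULL);
 *     if (pSession)
 *     {
 *         // ... use the session ...
 *         supdrvSessionRelease(pSession);
 *     }
 */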
1369
1370
1371/**
1372 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1373 *
1374 * @returns IPRT status code, see SUPR0ObjAddRef.
1375 * @param hHandleTable The handle table handle. Ignored.
1376 * @param pvObj The object pointer.
1377 * @param pvCtx Context, the handle type. Ignored.
1378 * @param pvUser Session pointer.
1379 */
1380static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
1381{
1382 NOREF(pvCtx);
1383 NOREF(hHandleTable);
1384 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
1385}
1386
1387
1388/**
1389 * RTHandleTableDestroy callback used by supdrvCleanupSession.
1390 *
1391 * @param hHandleTable The handle table handle. Ignored.
1392 * @param h The handle value. Ignored.
1393 * @param pvObj The object pointer.
1394 * @param pvCtx Context, the handle type. Ignored.
1395 * @param pvUser Session pointer.
1396 */
1397static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
1398{
1399 NOREF(pvCtx);
1400 NOREF(h);
1401 NOREF(hHandleTable);
1402 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
1403}
1404
1405
1406/**
1407 * Fast path I/O Control worker.
1408 *
1409 * @returns VBox status code that should be passed down to ring-3 unchanged.
1410 * @param uIOCtl Function number.
1411 * @param idCpu VMCPU id.
1412 * @param pDevExt Device extension.
1413 * @param pSession Session data.
1414 */
1415int VBOXCALL supdrvIOCtlFast(uintptr_t uIOCtl, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
1416{
1417 /*
1418 * We check the two prereqs after doing this only to allow the compiler to optimize things better.
1419 */
1420 if (RT_LIKELY( RT_VALID_PTR(pSession)
1421 && pSession->pVM
1422 && pDevExt->pfnVMMR0EntryFast))
1423 {
1424 switch (uIOCtl)
1425 {
1426 case SUP_IOCTL_FAST_DO_RAW_RUN:
1427 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
1428 break;
1429 case SUP_IOCTL_FAST_DO_HM_RUN:
1430 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_HM_RUN);
1431 break;
1432 case SUP_IOCTL_FAST_DO_NOP:
1433 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
1434 break;
1435 default:
1436 return VERR_INTERNAL_ERROR;
1437 }
1438 return VINF_SUCCESS;
1439 }
1440 return VERR_INTERNAL_ERROR;
1441}
1442
1443
1444/**
1445 * Helper for supdrvIOCtl used to validate module names passed to SUP_IOCTL_LDR_OPEN.
1446 *
1447 * Check if pszStr contains any character of pszChars. We would use strpbrk
1448 * here if that function were included in the RedHat kABI white list, see
1449 * http://www.kerneldrivers.org/RHEL5.
1450 *
1451 * @returns true if fine, false if not.
1452 * @param pszName The module name to check.
1453 */
1454static bool supdrvIsLdrModuleNameValid(const char *pszName)
1455{
1456 int chCur;
1457 while ((chCur = *pszName++) != '\0')
1458 {
1459 static const char s_szInvalidChars[] = ";:()[]{}/\\|&*%#@!~`\"'";
1460 unsigned offInv = RT_ELEMENTS(s_szInvalidChars);
1461 while (offInv-- > 0)
1462 if (s_szInvalidChars[offInv] == chCur)
1463 return false;
1464 }
1465 return true;
1466}
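
/*
 * For illustration, the filter above accepts plain module names and rejects
 * anything containing path or shell metacharacters:
 *
 *     supdrvIsLdrModuleNameValid("VMMR0.r0")   -> true
 *     supdrvIsLdrModuleNameValid("../evil.r0") -> false   (contains '/')
 *     supdrvIsLdrModuleNameValid("a|b")        -> false   (contains '|')
 */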
1467
1468
1469
1470/**
1471 * I/O Control inner worker (tracing reasons).
1472 *
1473 * @returns IPRT status code.
1474 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1475 *
1476 * @param uIOCtl Function number.
1477 * @param pDevExt Device extension.
1478 * @param pSession Session data.
1479 * @param pReqHdr The request header.
1480 */
1481static int supdrvIOCtlInnerUnrestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1482{
1483 /*
1484 * Validation macros
1485 */
1486#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1487 do { \
1488 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1489 { \
1490 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1491 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1492 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1493 } \
1494 } while (0)
1495
1496#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1497
1498#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1499 do { \
1500 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1501 { \
1502 OSDBGPRINT(( #Name ": Invalid input size. cbIn=%ld expected %ld.\n", \
1503 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
1504 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1505 } \
1506 } while (0)
1507
1508#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1509 do { \
1510 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1511 { \
1512 OSDBGPRINT(( #Name ": Invalid output size. cbOut=%ld expected %ld.\n", \
1513 (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
1514 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1515 } \
1516 } while (0)
1517
1518#define REQ_CHECK_EXPR(Name, expr) \
1519 do { \
1520 if (RT_UNLIKELY(!(expr))) \
1521 { \
1522 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1523 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1524 } \
1525 } while (0)
1526
1527#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1528 do { \
1529 if (RT_UNLIKELY(!(expr))) \
1530 { \
1531 OSDBGPRINT( fmt ); \
1532 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1533 } \
1534 } while (0)
1535
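/*
 * For illustration, REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE) expands (ignoring the
 * RT_UNLIKELY hint and the OSDBGPRINT call) to roughly:
 *
 *     if ( pReqHdr->cbIn != SUP_IOCTL_LDR_FREE_SIZE_IN
 *         || pReqHdr->cbOut != SUP_IOCTL_LDR_FREE_SIZE_OUT)
 *         return pReqHdr->rc = VERR_INVALID_PARAMETER;
 */
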
1536 /*
1537 * The switch.
1538 */
1539 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1540 {
1541 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1542 {
1543 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1544 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1545 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1546 {
1547 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1548 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1549 return 0;
1550 }
1551
1552#if 0
1553 /*
1554 * Call out to the OS specific code and let it do permission checks on the
1555 * client process.
1556 */
1557 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1558 {
1559 pReq->u.Out.u32Cookie = 0xffffffff;
1560 pReq->u.Out.u32SessionCookie = 0xffffffff;
1561 pReq->u.Out.u32SessionVersion = 0xffffffff;
1562 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1563 pReq->u.Out.pSession = NULL;
1564 pReq->u.Out.cFunctions = 0;
1565 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1566 return 0;
1567 }
1568#endif
1569
1570 /*
1571 * Match the version.
1572 * The current logic is very simple, match the major interface version.
1573 */
1574 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1575 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1576 {
1577 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1578 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1579 pReq->u.Out.u32Cookie = 0xffffffff;
1580 pReq->u.Out.u32SessionCookie = 0xffffffff;
1581 pReq->u.Out.u32SessionVersion = 0xffffffff;
1582 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1583 pReq->u.Out.pSession = NULL;
1584 pReq->u.Out.cFunctions = 0;
1585 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1586 return 0;
1587 }
1588
1589 /*
1590 * Fill in return data and be gone.
1591 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1592 * u32SessionVersion <= u32ReqVersion!
1593 */
1594 /** @todo Somehow validate the client and negotiate a secure cookie... */
1595 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1596 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1597 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1598 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1599 pReq->u.Out.pSession = pSession;
1600 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1601 pReq->Hdr.rc = VINF_SUCCESS;
1602 return 0;
1603 }
1604
1605 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1606 {
1607 /* validate */
1608 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1609 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1610
1611 /* execute */
1612 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1613 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1614 pReq->Hdr.rc = VINF_SUCCESS;
1615 return 0;
1616 }
1617
1618 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1619 {
1620 /* validate */
1621 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1622 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1623 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1624 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1625 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1626
1627 /* execute */
1628 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1629 if (RT_FAILURE(pReq->Hdr.rc))
1630 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1631 return 0;
1632 }
1633
1634 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1635 {
1636 /* validate */
1637 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1638 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1639
1640 /* execute */
1641 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1642 return 0;
1643 }
1644
1645 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1646 {
1647 /* validate */
1648 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1649 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1650
1651 /* execute */
1652 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1653 if (RT_FAILURE(pReq->Hdr.rc))
1654 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1655 return 0;
1656 }
1657
1658 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1659 {
1660 /* validate */
1661 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1662 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1663
1664 /* execute */
1665 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1666 return 0;
1667 }
1668
1669 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1670 {
1671 /* validate */
1672 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1673 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1674 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithTabs > 0);
1675 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithTabs < 16*_1M);
1676 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1678 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithTabs);
1679 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1680 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
1681 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, supdrvIsLdrModuleNameValid(pReq->u.In.szName));
1682 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, RTStrEnd(pReq->u.In.szFilename, sizeof(pReq->u.In.szFilename)));
1683
1684 /* execute */
1685 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1686 return 0;
1687 }
1688
1689 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1690 {
1691 /* validate */
1692 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1693 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= SUP_IOCTL_LDR_LOAD_SIZE_IN(32));
1694 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithTabs), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1695 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
1696 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1697 || ( pReq->u.In.offSymbols < pReq->u.In.cbImageWithTabs
1698 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithTabs),
1699 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithTabs=%#lx\n", (long)pReq->u.In.offSymbols,
1700 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithTabs));
1701 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1702 || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithTabs
1703 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithTabs
1704 && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithTabs),
1705 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithTabs=%#lx\n", (long)pReq->u.In.offStrTab,
1706 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithTabs));
1707
1708 if (pReq->u.In.cSymbols)
1709 {
1710 uint32_t i;
1711 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.abImage[pReq->u.In.offSymbols];
1712 for (i = 0; i < pReq->u.In.cSymbols; i++)
1713 {
1714 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithTabs,
1715 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithTabs));
1716 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1717 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbStrTab));
1718 REQ_CHECK_EXPR_FMT(RTStrEnd((char const *)&pReq->u.In.abImage[pReq->u.In.offStrTab + paSyms[i].offName],
1719 pReq->u.In.cbStrTab - paSyms[i].offName),
1720 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbStrTab));
1721 }
1722 }
1723
1724 /* execute */
1725 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1726 return 0;
1727 }
1728
1729 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1730 {
1731 /* validate */
1732 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1733 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1734
1735 /* execute */
1736 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1737 return 0;
1738 }
1739
1740 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOCK_DOWN):
1741 {
1742 /* validate */
1743 REQ_CHECK_SIZES(SUP_IOCTL_LDR_LOCK_DOWN);
1744
1745 /* execute */
1746 pReqHdr->rc = supdrvIOCtl_LdrLockDown(pDevExt);
1747 return 0;
1748 }
1749
1750 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1751 {
1752 /* validate */
1753 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1754 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1755 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, RTStrEnd(pReq->u.In.szSymbol, sizeof(pReq->u.In.szSymbol)));
1756
1757 /* execute */
1758 pReq->Hdr.rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
1759 return 0;
1760 }
1761
1762 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_NO_SIZE()):
1763 {
1764 /* validate */
1765 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1766 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1767 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1768
1769 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1770 {
1771 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1772
1773 /* execute */
1774 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1775 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1776 else
1777 pReq->Hdr.rc = VERR_WRONG_ORDER;
1778 }
1779 else
1780 {
1781 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1782 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1783 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1784 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1785 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1786
1787 /* execute */
1788 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1789 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1790 else
1791 pReq->Hdr.rc = VERR_WRONG_ORDER;
1792 }
1793
1794 if ( RT_FAILURE(pReq->Hdr.rc)
1795 && pReq->Hdr.rc != VERR_INTERRUPTED
1796 && pReq->Hdr.rc != VERR_TIMEOUT)
1797 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1798 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1799 else
1800 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1801 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1802 return 0;
1803 }
1804
1805 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0_BIG):
1806 {
1807 /* validate */
1808 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1809 PSUPVMMR0REQHDR pVMMReq;
1810 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1811 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1812
1813 pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1814 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR)),
1815 ("SUP_IOCTL_CALL_VMMR0_BIG: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_BIG_SIZE(sizeof(SUPVMMR0REQHDR))));
1816 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0_BIG, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1817 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0_BIG, SUP_IOCTL_CALL_VMMR0_BIG_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_BIG_SIZE_OUT(pVMMReq->cbReq));
1818
1819 /* execute */
1820 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1821 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1822 else
1823 pReq->Hdr.rc = VERR_WRONG_ORDER;
1824
1825 if ( RT_FAILURE(pReq->Hdr.rc)
1826 && pReq->Hdr.rc != VERR_INTERRUPTED
1827 && pReq->Hdr.rc != VERR_TIMEOUT)
1828 Log(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1829 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1830 else
1831 Log4(("SUP_IOCTL_CALL_VMMR0_BIG: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1832 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1833 return 0;
1834 }
1835
1836 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1837 {
1838 /* validate */
1839 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1840 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1841
1842 /* execute */
1843 pReq->Hdr.rc = VINF_SUCCESS;
1844 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1845 return 0;
1846 }
1847
1848 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1849 {
1850 /* validate */
1851 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1852 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1853 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1854
1855 /* execute */
1856 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1857 if (RT_FAILURE(pReq->Hdr.rc))
1858 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1859 return 0;
1860 }
1861
1862 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1863 {
1864 /* validate */
1865 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1866 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1867
1868 /* execute */
1869 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1870 return 0;
1871 }
1872
1873 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1874 {
1875 /* validate */
1876 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1877 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1878
1879 /* execute */
1880 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1881 if (RT_SUCCESS(pReq->Hdr.rc))
1882 pReq->u.Out.pGipR0 = pDevExt->pGip;
1883 return 0;
1884 }
1885
1886 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
1887 {
1888 /* validate */
1889 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
1890 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
1891
1892 /* execute */
1893 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
1894 return 0;
1895 }
1896
1897 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
1898 {
1899 /* validate */
1900 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
1901 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
1902 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
1903 || ( VALID_PTR(pReq->u.In.pVMR0)
1904 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
1905 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
1906 /* execute */
1907 pSession->pVM = pReq->u.In.pVMR0;
1908 pReq->Hdr.rc = VINF_SUCCESS;
1909 return 0;
1910 }
1911
1912 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
1913 {
1914 /* validate */
1915 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
1916 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
1917 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
1918 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
1919 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
1920 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
1921 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
1922 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
1923 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
1924
1925 /* execute */
1926 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
1927 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
1928 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
1929 &pReq->u.Out.aPages[0]);
1930 if (RT_FAILURE(pReq->Hdr.rc))
1931 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1932 return 0;
1933 }
1934
1935 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
1936 {
1937 /* validate */
1938 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
1939 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
1940 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
1941 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
1942 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1943 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
1944
1945 /* execute */
1946 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
1947 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
1948 if (RT_FAILURE(pReq->Hdr.rc))
1949 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1950 return 0;
1951 }
1952
1953 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
1954 {
1955 /* validate */
1956 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
1957 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
1958 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
1959 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
1960 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
1961 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1962 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
1963
1964 /* execute */
1965 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
1966 return 0;
1967 }
1968
1969 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
1970 {
1971 /* validate */
1972 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
1973 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
1974
1975 /* execute */
1976 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
1977 return 0;
1978 }
1979
1980 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE_NO_SIZE()):
1981 {
1982 /* validate */
1983 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
1984 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1985 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1986
1987 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
1988 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
1989 else
1990 {
1991 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
1992 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
1993 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
1994 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
1995 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
1996 }
1997 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)));
1998
1999 /* execute */
2000 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
2001 return 0;
2002 }
2003
2004 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS_NO_SIZE()):
2005 {
2006 /* validate */
2007 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
2008 size_t cbStrTab;
2009 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
2010 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
2011 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
2012 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
2013 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
2014 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
2015 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
2016 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
2017 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
2018 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
2019 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
2020
2021 /* execute */
2022 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pDevExt, pSession, pReq);
2023 return 0;
2024 }
2025
2026 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP2):
2027 {
2028 /* validate */
2029 PSUPSEMOP2 pReq = (PSUPSEMOP2)pReqHdr;
2030 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP2, SUP_IOCTL_SEM_OP2_SIZE_IN, SUP_IOCTL_SEM_OP2_SIZE_OUT);
2031 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP2, pReq->u.In.uReserved == 0);
2032
2033 /* execute */
2034 switch (pReq->u.In.uType)
2035 {
2036 case SUP_SEM_TYPE_EVENT:
2037 {
2038 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2039 switch (pReq->u.In.uOp)
2040 {
2041 case SUPSEMOP2_WAIT_MS_REL:
2042 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.uArg.cRelMsTimeout);
2043 break;
2044 case SUPSEMOP2_WAIT_NS_ABS:
2045 pReq->Hdr.rc = SUPSemEventWaitNsAbsIntr(pSession, hEvent, pReq->u.In.uArg.uAbsNsTimeout);
2046 break;
2047 case SUPSEMOP2_WAIT_NS_REL:
2048 pReq->Hdr.rc = SUPSemEventWaitNsRelIntr(pSession, hEvent, pReq->u.In.uArg.cRelNsTimeout);
2049 break;
2050 case SUPSEMOP2_SIGNAL:
2051 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
2052 break;
2053 case SUPSEMOP2_CLOSE:
2054 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
2055 break;
2056 case SUPSEMOP2_RESET:
2057 default:
2058 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2059 break;
2060 }
2061 break;
2062 }
2063
2064 case SUP_SEM_TYPE_EVENT_MULTI:
2065 {
2066 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2067 switch (pReq->u.In.uOp)
2068 {
2069 case SUPSEMOP2_WAIT_MS_REL:
2070 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.uArg.cRelMsTimeout);
2071 break;
2072 case SUPSEMOP2_WAIT_NS_ABS:
2073 pReq->Hdr.rc = SUPSemEventMultiWaitNsAbsIntr(pSession, hEventMulti, pReq->u.In.uArg.uAbsNsTimeout);
2074 break;
2075 case SUPSEMOP2_WAIT_NS_REL:
2076 pReq->Hdr.rc = SUPSemEventMultiWaitNsRelIntr(pSession, hEventMulti, pReq->u.In.uArg.cRelNsTimeout);
2077 break;
2078 case SUPSEMOP2_SIGNAL:
2079 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
2080 break;
2081 case SUPSEMOP2_CLOSE:
2082 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
2083 break;
2084 case SUPSEMOP2_RESET:
2085 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
2086 break;
2087 default:
2088 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2089 break;
2090 }
2091 break;
2092 }
2093
2094 default:
2095 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2096 break;
2097 }
2098 return 0;
2099 }
2100
2101 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP3):
2102 {
2103 /* validate */
2104 PSUPSEMOP3 pReq = (PSUPSEMOP3)pReqHdr;
2105 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP3, SUP_IOCTL_SEM_OP3_SIZE_IN, SUP_IOCTL_SEM_OP3_SIZE_OUT);
2106 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, pReq->u.In.u32Reserved == 0 && pReq->u.In.u64Reserved == 0);
2107
2108 /* execute */
2109 switch (pReq->u.In.uType)
2110 {
2111 case SUP_SEM_TYPE_EVENT:
2112 {
2113 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
2114 switch (pReq->u.In.uOp)
2115 {
2116 case SUPSEMOP3_CREATE:
2117 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2118 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
2119 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
2120 break;
2121 case SUPSEMOP3_GET_RESOLUTION:
2122 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEvent == NIL_SUPSEMEVENT);
2123 pReq->Hdr.rc = VINF_SUCCESS;
2124 pReq->Hdr.cbOut = sizeof(*pReq);
2125 pReq->u.Out.cNsResolution = SUPSemEventGetResolution(pSession);
2126 break;
2127 default:
2128 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2129 break;
2130 }
2131 break;
2132 }
2133
2134 case SUP_SEM_TYPE_EVENT_MULTI:
2135 {
2136 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
2137 switch (pReq->u.In.uOp)
2138 {
2139 case SUPSEMOP3_CREATE:
2140 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2141 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
2142 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
2143 break;
2144 case SUPSEMOP3_GET_RESOLUTION:
2145 REQ_CHECK_EXPR(SUP_IOCTL_SEM_OP3, hEventMulti == NIL_SUPSEMEVENTMULTI);
2146 pReq->Hdr.rc = VINF_SUCCESS;
2147 pReq->u.Out.cNsResolution = SUPSemEventMultiGetResolution(pSession);
2148 break;
2149 default:
2150 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
2151 break;
2152 }
2153 break;
2154 }
2155
2156 default:
2157 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
2158 break;
2159 }
2160 return 0;
2161 }
2162
2163 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2164 {
2165 /* validate */
2166 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2167 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2168
2169 /* execute */
2170 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.Caps);
2171 if (RT_FAILURE(pReq->Hdr.rc))
2172 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2173 return 0;
2174 }
2175
2176 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_OPEN):
2177 {
2178 /* validate */
2179 PSUPTRACEROPEN pReq = (PSUPTRACEROPEN)pReqHdr;
2180 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_OPEN);
2181
2182 /* execute */
2183 pReq->Hdr.rc = supdrvIOCtl_TracerOpen(pDevExt, pSession, pReq->u.In.uCookie, pReq->u.In.uArg);
2184 return 0;
2185 }
2186
2187 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_CLOSE):
2188 {
2189 /* validate */
2190 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_CLOSE);
2191
2192 /* execute */
2193 pReqHdr->rc = supdrvIOCtl_TracerClose(pDevExt, pSession);
2194 return 0;
2195 }
2196
2197 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_IOCTL):
2198 {
2199 /* validate */
2200 PSUPTRACERIOCTL pReq = (PSUPTRACERIOCTL)pReqHdr;
2201 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_IOCTL);
2202
2203 /* execute */
2204 pReqHdr->rc = supdrvIOCtl_TracerIOCtl(pDevExt, pSession, pReq->u.In.uCmd, pReq->u.In.uArg, &pReq->u.Out.iRetVal);
2205 return 0;
2206 }
2207
2208 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_REG):
2209 {
2210 /* validate */
2211 PSUPTRACERUMODREG pReq = (PSUPTRACERUMODREG)pReqHdr;
2212 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_REG);
2213 if (!RTStrEnd(pReq->u.In.szName, sizeof(pReq->u.In.szName)))
2214 return VERR_INVALID_PARAMETER;
2215
2216 /* execute */
2217 pReqHdr->rc = supdrvIOCtl_TracerUmodRegister(pDevExt, pSession,
2218 pReq->u.In.R3PtrVtgHdr, pReq->u.In.uVtgHdrAddr,
2219 pReq->u.In.R3PtrStrTab, pReq->u.In.cbStrTab,
2220 pReq->u.In.szName, pReq->u.In.fFlags);
2221 return 0;
2222 }
2223
2224 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_DEREG):
2225 {
2226 /* validate */
2227 PSUPTRACERUMODDEREG pReq = (PSUPTRACERUMODDEREG)pReqHdr;
2228 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_DEREG);
2229
2230 /* execute */
2231 pReqHdr->rc = supdrvIOCtl_TracerUmodDeregister(pDevExt, pSession, pReq->u.In.pVtgHdr);
2232 return 0;
2233 }
2234
2235 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE):
2236 {
2237 /* validate */
2238 PSUPTRACERUMODFIREPROBE pReq = (PSUPTRACERUMODFIREPROBE)pReqHdr;
2239 REQ_CHECK_SIZES(SUP_IOCTL_TRACER_UMOD_FIRE_PROBE);
2240
2241 supdrvIOCtl_TracerUmodProbeFire(pDevExt, pSession, &pReq->u.In);
2242 pReqHdr->rc = VINF_SUCCESS;
2243 return 0;
2244 }
2245
2246 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_MSR_PROBER):
2247 {
2248 /* validate */
2249 PSUPMSRPROBER pReq = (PSUPMSRPROBER)pReqHdr;
2250 REQ_CHECK_SIZES(SUP_IOCTL_MSR_PROBER);
2251 REQ_CHECK_EXPR(SUP_IOCTL_MSR_PROBER,
2252 pReq->u.In.enmOp > SUPMSRPROBEROP_INVALID && pReq->u.In.enmOp < SUPMSRPROBEROP_END);
2253
2254 pReqHdr->rc = supdrvIOCtl_MsrProber(pDevExt, pReq);
2255 return 0;
2256 }
2257
2258 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_RESUME_SUSPENDED_KBDS):
2259 {
2260 /* validate */
2261 REQ_CHECK_SIZES(SUP_IOCTL_RESUME_SUSPENDED_KBDS);
2262
2263 pReqHdr->rc = supdrvIOCtl_ResumeSuspendedKbds();
2264 return 0;
2265 }
2266
2267 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_DELTA_MEASURE):
2268 {
2269 /* validate */
2270 PSUPTSCDELTAMEASURE pReq = (PSUPTSCDELTAMEASURE)pReqHdr;
2271 REQ_CHECK_SIZES(SUP_IOCTL_TSC_DELTA_MEASURE);
2272
2273 pReqHdr->rc = supdrvIOCtl_TscDeltaMeasure(pDevExt, pSession, pReq);
2274 return 0;
2275 }
2276
2277 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_TSC_READ):
2278 {
2279 /* validate */
2280 PSUPTSCREAD pReq = (PSUPTSCREAD)pReqHdr;
2281 REQ_CHECK_SIZES(SUP_IOCTL_TSC_READ);
2282
2283 pReqHdr->rc = supdrvIOCtl_TscRead(pDevExt, pSession, pReq);
2284 return 0;
2285 }
2286
2287 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_SET_FLAGS):
2288 {
2289 /* validate */
2290 PSUPGIPSETFLAGS pReq = (PSUPGIPSETFLAGS)pReqHdr;
2291 REQ_CHECK_SIZES(SUP_IOCTL_GIP_SET_FLAGS);
2292
2293 pReqHdr->rc = supdrvIOCtl_GipSetFlags(pDevExt, pSession, pReq->u.In.fOrMask, pReq->u.In.fAndMask);
2294 return 0;
2295 }
2296
2297 default:
2298 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2299 break;
2300 }
2301 return VERR_GENERAL_FAILURE;
2302}
2303
2304
2305/**
2306 * I/O Control inner worker for the restricted operations.
2307 *
2308 * @returns IPRT status code.
2309 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2310 *
2311 * @param uIOCtl Function number.
2312 * @param pDevExt Device extension.
2313 * @param pSession Session data.
2314 * @param pReqHdr The request header.
2315 */
2316static int supdrvIOCtlInnerRestricted(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
2317{
2318 /*
2319 * The switch.
2320 */
2321 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
2322 {
2323 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
2324 {
2325 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
2326 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
2327 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
2328 {
2329 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
2330 pReq->Hdr.rc = VERR_INVALID_MAGIC;
2331 return 0;
2332 }
2333
2334 /*
2335 * Match the version.
2336 * The current logic is very simple, match the major interface version.
2337 */
2338 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
2339 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
2340 {
2341 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2342 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
2343 pReq->u.Out.u32Cookie = 0xffffffff;
2344 pReq->u.Out.u32SessionCookie = 0xffffffff;
2345 pReq->u.Out.u32SessionVersion = 0xffffffff;
2346 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2347 pReq->u.Out.pSession = NULL;
2348 pReq->u.Out.cFunctions = 0;
2349 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2350 return 0;
2351 }
2352
2353 /*
2354 * Fill in return data and be gone.
2355 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
2356 * u32SessionVersion <= u32ReqVersion!
2357 */
2358 /** @todo Somehow validate the client and negotiate a secure cookie... */
2359 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
2360 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
2361 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
2362 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
2363 pReq->u.Out.pSession = pSession;
2364 pReq->u.Out.cFunctions = 0;
2365 pReq->Hdr.rc = VINF_SUCCESS;
2366 return 0;
2367 }
2368
2369 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
2370 {
2371 /* validate */
2372 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
2373 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
2374
2375 /* execute */
2376 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.Caps);
2377 if (RT_FAILURE(pReq->Hdr.rc))
2378 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
2379 return 0;
2380 }
2381
2382 default:
2383 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
2384 break;
2385 }
2386 return VERR_GENERAL_FAILURE;
2387}
2388
2389
2390/**
2391 * I/O Control worker.
2392 *
2393 * @returns IPRT status code.
2394 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2395 *
2396 * @param uIOCtl Function number.
2397 * @param pDevExt Device extension.
2398 * @param pSession Session data.
2399 * @param pReqHdr The request header.
2400 */
2401int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr, size_t cbReq)
2402{
2403 int rc;
2404 VBOXDRV_IOCTL_ENTRY(pSession, uIOCtl, pReqHdr);
2405
2406 /*
2407 * Validate the request.
2408 */
2409 if (RT_UNLIKELY(cbReq < sizeof(*pReqHdr)))
2410 {
2411 OSDBGPRINT(("vboxdrv: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
2412 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2413 return VERR_INVALID_PARAMETER;
2414 }
2415 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
2416 || pReqHdr->cbIn < sizeof(*pReqHdr)
2417 || pReqHdr->cbIn > cbReq
2418 || pReqHdr->cbOut < sizeof(*pReqHdr)
2419 || pReqHdr->cbOut > cbReq))
2420 {
2421 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
2422 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
2423 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2424 return VERR_INVALID_PARAMETER;
2425 }
2426 if (RT_UNLIKELY(!RT_VALID_PTR(pSession)))
2427 {
2428 OSDBGPRINT(("vboxdrv: Invalid pSession value %p (ioctl=%p)\n", pSession, (void *)uIOCtl));
2429 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2430 return VERR_INVALID_PARAMETER;
2431 }
2432 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
2433 {
2434 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
2435 {
2436 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
2437 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2438 return VERR_INVALID_PARAMETER;
2439 }
2440 }
2441 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
2442 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
2443 {
2444 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
2445 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, VERR_INVALID_PARAMETER, VINF_SUCCESS);
2446 return VERR_INVALID_PARAMETER;
2447 }
2448
2449 /*
2450 * Hand it to an inner function to avoid lots of unnecessary return tracepoints.
2451 */
2452 if (pSession->fUnrestricted)
2453 rc = supdrvIOCtlInnerUnrestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2454 else
2455 rc = supdrvIOCtlInnerRestricted(uIOCtl, pDevExt, pSession, pReqHdr);
2456
2457 VBOXDRV_IOCTL_RETURN(pSession, uIOCtl, pReqHdr, pReqHdr->rc, rc);
2458 return rc;
2459}
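
/*
 * Minimal sketch (illustration only) of the request header contract enforced
 * above, using the cookie negotiation request as the example. The real ring-3
 * side lives in SUPLib and may set additional header flag bits.
 *
 *     SUPCOOKIE Req;
 *     RT_ZERO(Req);
 *     Req.Hdr.u32Cookie        = SUPCOOKIE_INITIAL_COOKIE;
 *     Req.Hdr.u32SessionCookie = 0;
 *     Req.Hdr.cbIn             = SUP_IOCTL_COOKIE_SIZE_IN;
 *     Req.Hdr.cbOut            = SUP_IOCTL_COOKIE_SIZE_OUT;
 *     Req.Hdr.fFlags           = SUPREQHDR_FLAGS_MAGIC;   // the magic bits are what supdrvIOCtl checks
 *     Req.Hdr.rc               = VERR_INTERNAL_ERROR;
 *     strncpy(Req.u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(Req.u.In.szMagic));
 *     Req.u.In.u32ReqVersion   = SUPDRV_IOC_VERSION;
 *     Req.u.In.u32MinVersion   = SUPDRV_IOC_VERSION & UINT32_C(0xffff0000);
 *     // ... hand &Req.Hdr to the OS specific ioctl path, which ends up in supdrvIOCtl() ...
 */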
2460
2461
2462/**
2463 * Inter-Driver Communication (IDC) worker.
2464 *
2465 * @returns VBox status code.
2466 * @retval VINF_SUCCESS on success.
2467 * @retval VERR_INVALID_PARAMETER if the request is invalid.
2468 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
2469 *
2470 * @param uReq The request (function) code.
2471 * @param pDevExt Device extension.
2472 * @param pSession Session data.
2473 * @param pReqHdr The request header.
2474 */
2475int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
2476{
2477 /*
2478 * The OS specific code has already validated the pSession
2479 * pointer and checked that the request size is greater than or
2480 * equal to the size of the header.
2481 *
2482 * So, just check that pSession is a kernel context session.
2483 */
2484 if (RT_UNLIKELY( pSession
2485 && pSession->R0Process != NIL_RTR0PROCESS))
2486 return VERR_INVALID_PARAMETER;
2487
2488/*
2489 * Validation macro.
2490 */
2491#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
2492 do { \
2493 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
2494 { \
2495 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
2496 (long)pReqHdr->cb, (long)(cbExpect))); \
2497 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
2498 } \
2499 } while (0)
2500
2501 switch (uReq)
2502 {
2503 case SUPDRV_IDC_REQ_CONNECT:
2504 {
2505 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
2506 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
2507
2508 /*
2509 * Validate the cookie and other input.
2510 */
2511 if (pReq->Hdr.pSession != NULL)
2512 {
2513 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Hdr.pSession=%p expected NULL!\n", pReq->Hdr.pSession));
2514 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2515 }
2516 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
2517 {
2518 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
2519 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
2520 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2521 }
2522 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
2523 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
2524 {
2525 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uReqVersion=%#x don't match!\n",
2526 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
2527 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2528 }
2529 if (pSession != NULL)
2530 {
2531 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pSession));
2532 return pReqHdr->rc = VERR_INVALID_PARAMETER;
2533 }
2534
2535 /*
2536 * Match the version.
2537 * The current logic is very simple, match the major interface version.
2538 */
2539 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
2540 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
2541 {
2542 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
2543 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
2544 pReq->u.Out.pSession = NULL;
2545 pReq->u.Out.uSessionVersion = 0xffffffff;
2546 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2547 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2548 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
2549 return VINF_SUCCESS;
2550 }
2551
2552 pReq->u.Out.pSession = NULL;
2553 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
2554 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
2555 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2556
2557 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, true /*fUnrestricted*/, &pSession);
2558 if (RT_FAILURE(pReq->Hdr.rc))
2559 {
2560 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
2561 return VINF_SUCCESS;
2562 }
2563
2564 pReq->u.Out.pSession = pSession;
2565 pReq->Hdr.pSession = pSession;
2566
2567 return VINF_SUCCESS;
2568 }
2569
2570 case SUPDRV_IDC_REQ_DISCONNECT:
2571 {
2572 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
2573
2574 supdrvSessionRelease(pSession);
2575 return pReqHdr->rc = VINF_SUCCESS;
2576 }
2577
2578 case SUPDRV_IDC_REQ_GET_SYMBOL:
2579 {
2580 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
2581 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
2582
2583 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
2584 return VINF_SUCCESS;
2585 }
2586
2587 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
2588 {
2589 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
2590 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
2591
2592 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
2593 return VINF_SUCCESS;
2594 }
2595
2596 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
2597 {
2598 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
2599 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
2600
2601 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
2602 return VINF_SUCCESS;
2603 }
2604
2605 default:
2606 Log(("Unknown IDC %#lx\n", (long)uReq));
2607 break;
2608 }
2609
2610#undef REQ_CHECK_IDC_SIZE
2611 return VERR_NOT_SUPPORTED;
2612}
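
/*
 * Minimal sketch (illustration only) of the connect handshake handled above,
 * as it would be issued by the OS specific IDC entry point. Real kernel
 * clients normally use the SUPR0Idc client helpers instead of calling
 * supdrvIDC directly.
 *
 *     SUPDRVIDCREQCONNECT Req;
 *     RT_ZERO(Req);
 *     Req.Hdr.cb              = sizeof(Req);
 *     Req.Hdr.rc              = VERR_INTERNAL_ERROR;
 *     Req.Hdr.pSession        = NULL;                 // must be NULL for connect
 *     Req.u.In.u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE;
 *     Req.u.In.uMinVersion    = SUPDRV_IDC_VERSION & UINT32_C(0xffff0000);
 *     Req.u.In.uReqVersion    = SUPDRV_IDC_VERSION;
 *     int rc = supdrvIDC(SUPDRV_IDC_REQ_CONNECT, pDevExt, NULL /*pSession*/, &Req.Hdr);
 *     // On success Req.Hdr.pSession / Req.u.Out.pSession hold the new kernel session.
 */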
2613
2614
2615/**
2616 * Register an object for reference counting.
2617 * The object is registered with one reference in the specified session.
2618 *
2619 * @returns Unique identifier on success (pointer).
2620 * All future references must use this identifier.
2621 * @returns NULL on failure.
2622 * @param pfnDestructor The destructor function which will be called when the reference count reaches 0.
2623 * @param pvUser1 The first user argument.
2624 * @param pvUser2 The second user argument.
2625 */
2626SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
2627{
2628 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2629 PSUPDRVOBJ pObj;
2630 PSUPDRVUSAGE pUsage;
2631
2632 /*
2633 * Validate the input.
2634 */
2635 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
2636 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
2637 AssertPtrReturn(pfnDestructor, NULL);
2638
2639 /*
2640 * Allocate and initialize the object.
2641 */
2642 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
2643 if (!pObj)
2644 return NULL;
2645 pObj->u32Magic = SUPDRVOBJ_MAGIC;
2646 pObj->enmType = enmType;
2647 pObj->pNext = NULL;
2648 pObj->cUsage = 1;
2649 pObj->pfnDestructor = pfnDestructor;
2650 pObj->pvUser1 = pvUser1;
2651 pObj->pvUser2 = pvUser2;
2652 pObj->CreatorUid = pSession->Uid;
2653 pObj->CreatorGid = pSession->Gid;
2654 pObj->CreatorProcess= pSession->Process;
2655 supdrvOSObjInitCreator(pObj, pSession);
2656
2657 /*
2658 * Allocate the usage record.
2659 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
2660 */
2661 RTSpinlockAcquire(pDevExt->Spinlock);
2662
2663 pUsage = pDevExt->pUsageFree;
2664 if (pUsage)
2665 pDevExt->pUsageFree = pUsage->pNext;
2666 else
2667 {
2668 RTSpinlockRelease(pDevExt->Spinlock);
2669 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
2670 if (!pUsage)
2671 {
2672 RTMemFree(pObj);
2673 return NULL;
2674 }
2675 RTSpinlockAcquire(pDevExt->Spinlock);
2676 }
2677
2678 /*
2679 * Insert the object and create the session usage record.
2680 */
2681 /* The object. */
2682 pObj->pNext = pDevExt->pObjs;
2683 pDevExt->pObjs = pObj;
2684
2685 /* The session record. */
2686 pUsage->cUsage = 1;
2687 pUsage->pObj = pObj;
2688 pUsage->pNext = pSession->pUsage;
2689 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
2690 pSession->pUsage = pUsage;
2691
2692 RTSpinlockRelease(pDevExt->Spinlock);
2693
2694 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser2=%p)\n", pObj, pvUser1, pvUser2));
2695 return pObj;
2696}
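
/*
 * Minimal object life cycle sketch (illustration only); the destructor and
 * user data below are made up for the example.
 *
 *     static DECLCALLBACK(void) myObjDestructor(void *pvObj, void *pvUser1, void *pvUser2)
 *     {
 *         RTMemFree(pvUser1);   // free whatever payload the object carried
 *     }
 *     ...
 *     void *pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, myObjDestructor, pvMyData, NULL);
 *     if (pvObj)
 *     {
 *         // other sessions reference it via SUPR0ObjAddRef(pvObj, pOtherSession) ...
 *         SUPR0ObjRelease(pvObj, pSession);   // the last release invokes myObjDestructor
 *     }
 */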
2697
2698
2699/**
2700 * Increment the reference counter for the object associating the reference
2701 * with the specified session.
2702 *
2703 * @returns IPRT status code.
2704 * @param pvObj The identifier returned by SUPR0ObjRegister().
2705 * @param pSession The session which is referencing the object.
2706 *
2707 * @remarks The caller should not own any spinlocks and must carefully protect
2708 * itself against potential race with the destructor so freed memory
2709 * isn't accessed here.
2710 */
2711SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
2712{
2713 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
2714}
2715
2716
2717/**
2718 * Increment the reference counter for the object associating the reference
2719 * with the specified session.
2720 *
2721 * @returns IPRT status code.
2722 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
2723 * couldn't be allocated. (If you see this you're not doing the right
2724 * thing and it won't ever work reliably.)
2725 *
2726 * @param pvObj The identifier returned by SUPR0ObjRegister().
2727 * @param pSession The session which is referencing the object.
2728 * @param fNoBlocking Set if it's not OK to block. Never try to make the
2729 * first reference to an object in a session with this
2730 * argument set.
2731 *
2732 * @remarks The caller should not own any spinlocks and must carefully protect
2733 * itself against potential race with the destructor so freed memory
2734 * isn't accessed here.
2735 */
2736SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
2737{
2738 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2739 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2740 int rc = VINF_SUCCESS;
2741 PSUPDRVUSAGE pUsagePre;
2742 PSUPDRVUSAGE pUsage;
2743
2744 /*
2745 * Validate the input.
2746 * Be ready for the destruction race (someone might be stuck in the
2747 * destructor waiting a lock we own).
2748 */
2749 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2750 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
2751 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
2752 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
2753 VERR_INVALID_PARAMETER);
2754
2755 RTSpinlockAcquire(pDevExt->Spinlock);
2756
2757 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2758 {
2759 RTSpinlockRelease(pDevExt->Spinlock);
2760
2761 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2762 return VERR_WRONG_ORDER;
2763 }
2764
2765 /*
2766 * Preallocate the usage record if we can.
2767 */
2768 pUsagePre = pDevExt->pUsageFree;
2769 if (pUsagePre)
2770 pDevExt->pUsageFree = pUsagePre->pNext;
2771 else if (!fNoBlocking)
2772 {
2773 RTSpinlockRelease(pDevExt->Spinlock);
2774 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2775 if (!pUsagePre)
2776 return VERR_NO_MEMORY;
2777
2778 RTSpinlockAcquire(pDevExt->Spinlock);
2779 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2780 {
2781 RTSpinlockRelease(pDevExt->Spinlock);
2782
2783 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2784 return VERR_WRONG_ORDER;
2785 }
2786 }
2787
2788 /*
2789 * Reference the object.
2790 */
2791 pObj->cUsage++;
2792
2793 /*
2794 * Look for the session record.
2795 */
2796 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
2797 {
2798 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
2799 if (pUsage->pObj == pObj)
2800 break;
2801 }
2802 if (pUsage)
2803 pUsage->cUsage++;
2804 else if (pUsagePre)
2805 {
2806 /* create a new session record. */
2807 pUsagePre->cUsage = 1;
2808 pUsagePre->pObj = pObj;
2809 pUsagePre->pNext = pSession->pUsage;
2810 pSession->pUsage = pUsagePre;
2811 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
2812
2813 pUsagePre = NULL;
2814 }
2815 else
2816 {
2817 pObj->cUsage--;
2818 rc = VERR_TRY_AGAIN;
2819 }
2820
2821 /*
2822 * Put any unused usage record into the free list.
2823 */
2824 if (pUsagePre)
2825 {
2826 pUsagePre->pNext = pDevExt->pUsageFree;
2827 pDevExt->pUsageFree = pUsagePre;
2828 }
2829
2830 RTSpinlockRelease(pDevExt->Spinlock);
2831
2832 return rc;
2833}
2834
2835
2836/**
2837 * Decrement / destroy a reference counter record for an object.
2838 *
2839 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
2840 *
2841 * @returns IPRT status code.
2842 * @retval VINF_SUCCESS if not destroyed.
2843 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
2844 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
2845 * strict builds.
2846 *
2847 * @param pvObj The identifier returned by SUPR0ObjRegister().
2848 * @param pSession The session which is referencing the object.
2849 */
2850SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
2851{
2852 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2853 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2854 int rc = VERR_INVALID_PARAMETER;
2855 PSUPDRVUSAGE pUsage;
2856 PSUPDRVUSAGE pUsagePrev;
2857
2858 /*
2859 * Validate the input.
2860 */
2861 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2862 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2863 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2864 VERR_INVALID_PARAMETER);
2865
2866 /*
2867 * Acquire the spinlock and look for the usage record.
2868 */
2869 RTSpinlockAcquire(pDevExt->Spinlock);
2870
2871 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
2872 pUsage;
2873 pUsagePrev = pUsage, pUsage = pUsage->pNext)
2874 {
2875 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
2876 if (pUsage->pObj == pObj)
2877 {
2878 rc = VINF_SUCCESS;
2879 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
2880 if (pUsage->cUsage > 1)
2881 {
2882 pObj->cUsage--;
2883 pUsage->cUsage--;
2884 }
2885 else
2886 {
2887 /*
2888 * Free the session record.
2889 */
2890 if (pUsagePrev)
2891 pUsagePrev->pNext = pUsage->pNext;
2892 else
2893 pSession->pUsage = pUsage->pNext;
2894 pUsage->pNext = pDevExt->pUsageFree;
2895 pDevExt->pUsageFree = pUsage;
2896
2897 /* What about the object? */
2898 if (pObj->cUsage > 1)
2899 pObj->cUsage--;
2900 else
2901 {
2902 /*
2903 * Object is to be destroyed, unlink it.
2904 */
2905 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
2906 rc = VINF_OBJECT_DESTROYED;
2907 if (pDevExt->pObjs == pObj)
2908 pDevExt->pObjs = pObj->pNext;
2909 else
2910 {
2911 PSUPDRVOBJ pObjPrev;
2912 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
2913 if (pObjPrev->pNext == pObj)
2914 {
2915 pObjPrev->pNext = pObj->pNext;
2916 break;
2917 }
2918 Assert(pObjPrev);
2919 }
2920 }
2921 }
2922 break;
2923 }
2924 }
2925
2926 RTSpinlockRelease(pDevExt->Spinlock);
2927
2928 /*
2929 * Call the destructor and free the object if required.
2930 */
2931 if (rc == VINF_OBJECT_DESTROYED)
2932 {
2933 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
2934 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
2935 if (pObj->pfnDestructor)
2936 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
2937 RTMemFree(pObj);
2938 }
2939
2940 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
2941 return rc;
2942}
2943
2944
2945/**
2946 * Verifies that the current process can access the specified object.
2947 *
2948 * @returns The following IPRT status code:
2949 * @retval VINF_SUCCESS if access was granted.
2950 * @retval VERR_PERMISSION_DENIED if denied access.
2951 * @retval VERR_INVALID_PARAMETER if invalid parameter.
2952 *
2953 * @param pvObj The identifier returned by SUPR0ObjRegister().
2954 * @param pSession The session which wishes to access the object.
2955 * @param pszObjName Object string name. This is optional and depends on the object type.
2956 *
2957 * @remark The caller is responsible for making sure the object isn't removed while
2958 * we're inside this function. If uncertain about this, just call AddRef before calling us.
2959 */
2960SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
2961{
2962 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2963 int rc;
2964
2965 /*
2966 * Validate the input.
2967 */
2968 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2969 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2970 ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2971 VERR_INVALID_PARAMETER);
2972
2973 /*
2974 * Check access. (returns true if a decision has been made.)
2975 */
2976 rc = VERR_INTERNAL_ERROR;
2977 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
2978 return rc;
2979
2980 /*
2981 * Default policy is to allow the user to access his own
2982 * stuff but nothing else.
2983 */
2984 if (pObj->CreatorUid == pSession->Uid)
2985 return VINF_SUCCESS;
2986 return VERR_PERMISSION_DENIED;
2987}
2988
2989
2990/**
2991 * Lock pages.
2992 *
2993 * @returns IPRT status code.
2994 * @param pSession Session to which the locked memory should be associated.
2995 * @param pvR3 Start of the memory range to lock.
2996 * This must be page aligned.
2997 * @param cPages Number of pages to lock.
2998 * @param paPages Where to put the physical addresses of locked memory.
2999 */
3000SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
3001{
3002 int rc;
3003 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3004 const size_t cb = (size_t)cPages << PAGE_SHIFT;
3005 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
3006
3007 /*
3008 * Verify input.
3009 */
3010 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3011 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
3012 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
3013 || !pvR3)
3014 {
3015 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
3016 return VERR_INVALID_PARAMETER;
3017 }
3018
3019 /*
3020 * Let IPRT do the job.
3021 */
3022 Mem.eType = MEMREF_TYPE_LOCKED;
3023 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
3024 if (RT_SUCCESS(rc))
3025 {
3026 uint32_t iPage = cPages;
3027 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
3028 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
3029
3030 while (iPage-- > 0)
3031 {
3032 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3033 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
3034 {
3035 AssertMsgFailed(("iPage=%d\n", iPage));
3036 rc = VERR_INTERNAL_ERROR;
3037 break;
3038 }
3039 }
3040 if (RT_SUCCESS(rc))
3041 rc = supdrvMemAdd(&Mem, pSession);
3042 if (RT_FAILURE(rc))
3043 {
3044 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
3045 AssertRC(rc2);
3046 }
3047 }
3048
3049 return rc;
3050}
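
/*
 * Minimal usage sketch (illustration only): locking two pages of a page
 * aligned ring-3 buffer (R3PtrBuf is hypothetical) and releasing them again.
 *
 *     RTHCPHYS aPhysPages[2];
 *     int rc = SUPR0LockMem(pSession, R3PtrBuf, 2 /*cPages*/, &aPhysPages[0]);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // aPhysPages[0..1] now hold the physical addresses of the locked pages.
 *         SUPR0UnlockMem(pSession, R3PtrBuf);
 *     }
 */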
3051
3052
3053/**
3054 * Unlocks the memory pointed to by pv.
3055 *
3056 * @returns IPRT status code.
3057 * @param pSession Session to which the memory was locked.
3058 * @param pvR3 Memory to unlock.
3059 */
3060SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3061{
3062 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3063 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3064 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
3065}
3066
3067
3068/**
3069 * Allocates a chunk of page aligned memory with contiguous and fixed physical
3070 * backing.
3071 *
3072 * @returns IPRT status code.
3073 * @param pSession Session data.
3074 * @param cPages Number of pages to allocate.
3075 * @param   ppvR0       Where to put the address of the Ring-0 mapping of the allocated memory.
3076 * @param   ppvR3       Where to put the address of the Ring-3 mapping of the allocated memory.
3077 * @param pHCPhys Where to put the physical address of allocated memory.
3078 */
3079SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
3080{
3081 int rc;
3082 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3083 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
3084
3085 /*
3086 * Validate input.
3087 */
3088 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3089 if (!ppvR3 || !ppvR0 || !pHCPhys)
3090 {
3091 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
3092 pSession, ppvR0, ppvR3, pHCPhys));
3093 return VERR_INVALID_PARAMETER;
3094
3095 }
3096 if (cPages < 1 || cPages >= 256)
3097 {
3098 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3099 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3100 }
3101
3102 /*
3103 * Let IPRT do the job.
3104 */
3105 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
3106 if (RT_SUCCESS(rc))
3107 {
3108 int rc2;
3109 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3110 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
3111 if (RT_SUCCESS(rc))
3112 {
3113 Mem.eType = MEMREF_TYPE_CONT;
3114 rc = supdrvMemAdd(&Mem, pSession);
3115 if (!rc)
3116 {
3117 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3118 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3119 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
3120 return 0;
3121 }
3122
3123 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3124 AssertRC(rc2);
3125 }
3126 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3127 AssertRC(rc2);
3128 }
3129
3130 return rc;
3131}
3132
3133
3134/**
3135 * Frees memory allocated using SUPR0ContAlloc().
3136 *
3137 * @returns IPRT status code.
3138 * @param pSession The session to which the memory was allocated.
3139 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3140 */
3141SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3142{
3143 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3144 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3145 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
3146}
3147
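/*
 * A minimal sketch for a physically contiguous allocation (illustrative
 * only); the four page size is an arbitrary example and the chunk may be
 * freed through either the ring-0 or the ring-3 address:
 *
 *      RTR0PTR  pvR0;
 *      RTR3PTR  pvR3;
 *      RTHCPHYS HCPhys;
 *      int rc = SUPR0ContAlloc(pSession, 4, &pvR0, &pvR3, &HCPhys);
 *      if (RT_SUCCESS(rc))
 *      {
 *          ...program hardware with HCPhys, access the memory via pvR0...
 *          SUPR0ContFree(pSession, (RTHCUINTPTR)pvR0);
 *      }
 */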
3148
3149/**
3150 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
3151 *
3152 * The memory isn't zeroed.
3153 *
3154 * @returns IPRT status code.
3155 * @param pSession Session data.
3156 * @param cPages Number of pages to allocate.
3157 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
3158 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
3159 * @param paPages Where to put the physical addresses of allocated memory.
3160 */
3161SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
3162{
3163 unsigned iPage;
3164 int rc;
3165 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3166 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
3167
3168 /*
3169 * Validate input.
3170 */
3171 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3172 if (!ppvR3 || !ppvR0 || !paPages)
3173 {
3174 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
3175 pSession, ppvR3, ppvR0, paPages));
3176 return VERR_INVALID_PARAMETER;
3177
3178 }
3179 if (cPages < 1 || cPages >= 256)
3180 {
3181 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
3182 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3183 }
3184
3185 /*
3186 * Let IPRT do the work.
3187 */
3188 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
3189 if (RT_SUCCESS(rc))
3190 {
3191 int rc2;
3192 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3193 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
3194 if (RT_SUCCESS(rc))
3195 {
3196 Mem.eType = MEMREF_TYPE_LOW;
3197 rc = supdrvMemAdd(&Mem, pSession);
3198 if (!rc)
3199 {
3200 for (iPage = 0; iPage < cPages; iPage++)
3201 {
3202 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
3203                    AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
3204 }
3205 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3206 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3207 return 0;
3208 }
3209
3210 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3211 AssertRC(rc2);
3212 }
3213
3214 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3215 AssertRC(rc2);
3216 }
3217
3218 return rc;
3219}
3220
3221
3222/**
3223 * Frees memory allocated using SUPR0LowAlloc().
3224 *
3225 * @returns IPRT status code.
3226 * @param pSession The session to which the memory was allocated.
3227 * @param uPtr Pointer to the memory (ring-3 or ring-0).
3228 */
3229SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3230{
3231 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3232 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3233 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
3234}
3235
3236
3237
3238/**
3239 * Allocates a chunk of memory with both R0 and R3 mappings.
3240 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
3241 *
3242 * @returns IPRT status code.
3243 * @param   pSession    The session to associate the allocation with.
3244 * @param cb Number of bytes to allocate.
3245 * @param ppvR0 Where to store the address of the Ring-0 mapping.
3246 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3247 */
3248SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
3249{
3250 int rc;
3251 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3252 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
3253
3254 /*
3255 * Validate input.
3256 */
3257 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3258 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
3259 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3260 if (cb < 1 || cb >= _4M)
3261 {
3262 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
3263 return VERR_INVALID_PARAMETER;
3264 }
3265
3266 /*
3267 * Let IPRT do the work.
3268 */
3269 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
3270 if (RT_SUCCESS(rc))
3271 {
3272 int rc2;
3273 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3274 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
3275 if (RT_SUCCESS(rc))
3276 {
3277 Mem.eType = MEMREF_TYPE_MEM;
3278 rc = supdrvMemAdd(&Mem, pSession);
3279 if (!rc)
3280 {
3281 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3282 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3283 return VINF_SUCCESS;
3284 }
3285
3286 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3287 AssertRC(rc2);
3288 }
3289
3290 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3291 AssertRC(rc2);
3292 }
3293
3294 return rc;
3295}
3296
3297
3298/**
3299 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
3300 *
3301 * @returns IPRT status code.
3302 * @param pSession The session to which the memory was allocated.
3303 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3304 * @param paPages Where to store the physical addresses.
3305 */
3306SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
3307{
3308 PSUPDRVBUNDLE pBundle;
3309 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
3310
3311 /*
3312 * Validate input.
3313 */
3314 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3315 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
3316 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
3317
3318 /*
3319 * Search for the address.
3320 */
3321 RTSpinlockAcquire(pSession->Spinlock);
3322 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3323 {
3324 if (pBundle->cUsed > 0)
3325 {
3326 unsigned i;
3327 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3328 {
3329 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
3330 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3331 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3332 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3333 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
3334 )
3335 )
3336 {
3337 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
3338 size_t iPage;
3339 for (iPage = 0; iPage < cPages; iPage++)
3340 {
3341 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
3342 paPages[iPage].uReserved = 0;
3343 }
3344 RTSpinlockRelease(pSession->Spinlock);
3345 return VINF_SUCCESS;
3346 }
3347 }
3348 }
3349 }
3350 RTSpinlockRelease(pSession->Spinlock);
3351 Log(("Failed to find %p!!!\n", (void *)uPtr));
3352 return VERR_INVALID_PARAMETER;
3353}
3354
3355
3356/**
3357 * Free memory allocated by SUPR0MemAlloc().
3358 *
3359 * @returns IPRT status code.
3360 * @param pSession The session owning the allocation.
3361 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
3362 */
3363SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
3364{
3365 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
3366 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3367 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
3368}
3369
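/*
 * A minimal sketch combining SUPR0MemAlloc(), SUPR0MemGetPhys() and
 * SUPR0MemFree() (illustrative only); the two page size and the SUPPAGE
 * array dimension are assumptions for the example:
 *
 *      RTR0PTR pvR0;
 *      RTR3PTR pvR3;
 *      SUPPAGE aPages[2];
 *      int rc = SUPR0MemAlloc(pSession, 2 * PAGE_SIZE, &pvR0, &pvR3);
 *      if (RT_SUCCESS(rc))
 *      {
 *          rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
 *          ...use aPages[i].Phys...
 *          SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
 *      }
 */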
3370
3371/**
3372 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
3373 *
3374 * The memory is fixed and it's possible to query the physical addresses using
3375 * SUPR0MemGetPhys().
3376 *
3377 * @returns IPRT status code.
3378 * @param   pSession    The session to associate the allocation with.
3379 * @param cPages The number of pages to allocate.
3380 * @param fFlags Flags, reserved for the future. Must be zero.
3381 * @param ppvR3 Where to store the address of the Ring-3 mapping.
3382 * NULL if no ring-3 mapping.
3383 * @param   ppvR0       Where to store the address of the Ring-0 mapping.
3384 * NULL if no ring-0 mapping.
3385 * @param paPages Where to store the addresses of the pages. Optional.
3386 */
3387SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
3388{
3389 int rc;
3390 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
3391    LogFlow(("SUPR0PageAllocEx: pSession=%p cPages=%d ppvR3=%p\n", pSession, cPages, ppvR3));
3392
3393 /*
3394 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
3395 */
3396 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3397 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
3398 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3399 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
3400 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3401 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
3402 {
3403        Log(("SUPR0PageAllocEx: Illegal request cPages=%u; must be greater than 0 and smaller than %uMB (VBOX_MAX_ALLOC_PAGE_COUNT pages).\n", cPages, VBOX_MAX_ALLOC_PAGE_COUNT * (_1M / _4K)));
3404 return VERR_PAGE_COUNT_OUT_OF_RANGE;
3405 }
3406
3407 /*
3408 * Let IPRT do the work.
3409 */
3410 if (ppvR0)
3411 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
3412 else
3413 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
3414 if (RT_SUCCESS(rc))
3415 {
3416 int rc2;
3417 if (ppvR3)
3418 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
3419 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
3420 else
3421 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
3422 if (RT_SUCCESS(rc))
3423 {
3424 Mem.eType = MEMREF_TYPE_PAGE;
3425 rc = supdrvMemAdd(&Mem, pSession);
3426 if (!rc)
3427 {
3428 if (ppvR3)
3429 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
3430 if (ppvR0)
3431 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
3432 if (paPages)
3433 {
3434 uint32_t iPage = cPages;
3435 while (iPage-- > 0)
3436 {
3437 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
3438 Assert(paPages[iPage] != NIL_RTHCPHYS);
3439 }
3440 }
3441 return VINF_SUCCESS;
3442 }
3443
3444 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
3445 AssertRC(rc2);
3446 }
3447
3448 rc2 = RTR0MemObjFree(Mem.MemObj, false);
3449 AssertRC(rc2);
3450 }
3451 return rc;
3452}
3453
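/*
 * A minimal SUPR0PageAllocEx() sketch (illustrative only) requesting both
 * mappings and the per-page physical addresses of an eight page allocation;
 * fFlags must be zero as checked above:
 *
 *      RTR3PTR  pvR3;
 *      RTR0PTR  pvR0;
 *      RTHCPHYS aPhys[8];
 *      int rc = SUPR0PageAllocEx(pSession, 8, 0, &pvR3, &pvR0, &aPhys[0]);
 *      if (RT_SUCCESS(rc))
 *      {
 *          ...share pvR3 with ring-3, use pvR0 and aPhys[] in ring-0...
 *          SUPR0PageFree(pSession, pvR3);
 *      }
 */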
3454
3455/**
3456 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
3457 * space.
3458 *
3459 * @returns IPRT status code.
3460 * @param   pSession    The session to associate the allocation with.
3461 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3462 * @param offSub Where to start mapping. Must be page aligned.
3463 * @param cbSub How much to map. Must be page aligned.
3464 * @param fFlags Flags, MBZ.
3465 * @param ppvR0 Where to return the address of the ring-0 mapping on
3466 * success.
3467 */
3468SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
3469 uint32_t fFlags, PRTR0PTR ppvR0)
3470{
3471 int rc;
3472 PSUPDRVBUNDLE pBundle;
3473 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
3474 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
3475
3476 /*
3477     * Validate input.
3478 */
3479 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3480 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
3481 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3482 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3483 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3484 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3485
3486 /*
3487 * Find the memory object.
3488 */
3489 RTSpinlockAcquire(pSession->Spinlock);
3490 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3491 {
3492 if (pBundle->cUsed > 0)
3493 {
3494 unsigned i;
3495 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3496 {
3497 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3498 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3499 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3500 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
3501 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
3502 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3503 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
3504 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
3505 {
3506 hMemObj = pBundle->aMem[i].MemObj;
3507 break;
3508 }
3509 }
3510 }
3511 }
3512 RTSpinlockRelease(pSession->Spinlock);
3513
3514 rc = VERR_INVALID_PARAMETER;
3515 if (hMemObj != NIL_RTR0MEMOBJ)
3516 {
3517 /*
3518 * Do some further input validations before calling IPRT.
3519 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
3520 */
3521 size_t cbMemObj = RTR0MemObjSize(hMemObj);
3522 if ( offSub < cbMemObj
3523 && cbSub <= cbMemObj
3524 && offSub + cbSub <= cbMemObj)
3525 {
3526 RTR0MEMOBJ hMapObj;
3527 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
3528 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
3529 if (RT_SUCCESS(rc))
3530 *ppvR0 = RTR0MemObjAddress(hMapObj);
3531 }
3532 else
3533 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3534
3535 }
3536 return rc;
3537}
3538
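/*
 * A minimal SUPR0PageMapKernel() sketch (illustrative only), mapping the
 * first two pages of a SUPR0PageAllocEx() allocation into kernel space;
 * pvR3 is assumed to be the ring-3 address returned by that call:
 *
 *      RTR0PTR pvR0Map;
 *      int rc = SUPR0PageMapKernel(pSession, pvR3, 0, 2 * PAGE_SIZE, 0, &pvR0Map);
 *      if (RT_SUCCESS(rc))
 *          ...access the pages through pvR0Map; the mapping is torn down
 *             together with the backing allocation (see above)...
 */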
3539
3540/**
3541 * Changes the page level protection of one or more pages previously allocated
3542 * by SUPR0PageAllocEx.
3543 *
3544 * @returns IPRT status code.
3545 * @param   pSession    The session to associate the allocation with.
3546 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
3547 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
3548 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
3549 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
3550 * @param offSub Where to start changing. Must be page aligned.
3551 * @param cbSub How much to change. Must be page aligned.
3552 * @param fProt The new page level protection, see RTMEM_PROT_*.
3553 */
3554SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
3555{
3556 int rc;
3557 PSUPDRVBUNDLE pBundle;
3558 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
3559 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
3560    LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt=%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
3561
3562 /*
3563     * Validate input.
3564 */
3565 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3566 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
3567 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3568 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3569 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
3570
3571 /*
3572 * Find the memory object.
3573 */
3574 RTSpinlockAcquire(pSession->Spinlock);
3575 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3576 {
3577 if (pBundle->cUsed > 0)
3578 {
3579 unsigned i;
3580 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3581 {
3582 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
3583 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3584 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3585 || pvR3 == NIL_RTR3PTR)
3586 && ( pvR0 == NIL_RTR0PTR
3587 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
3588 && ( pvR3 == NIL_RTR3PTR
3589 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
3590 {
3591 if (pvR0 != NIL_RTR0PTR)
3592 hMemObjR0 = pBundle->aMem[i].MemObj;
3593 if (pvR3 != NIL_RTR3PTR)
3594 hMemObjR3 = pBundle->aMem[i].MapObjR3;
3595 break;
3596 }
3597 }
3598 }
3599 }
3600 RTSpinlockRelease(pSession->Spinlock);
3601
3602 rc = VERR_INVALID_PARAMETER;
3603 if ( hMemObjR0 != NIL_RTR0MEMOBJ
3604 || hMemObjR3 != NIL_RTR0MEMOBJ)
3605 {
3606 /*
3607 * Do some further input validations before calling IPRT.
3608 */
3609        size_t cbMemObj = hMemObjR0 != NIL_RTR0MEMOBJ ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
3610 if ( offSub < cbMemObj
3611 && cbSub <= cbMemObj
3612 && offSub + cbSub <= cbMemObj)
3613 {
3614 rc = VINF_SUCCESS;
3615            if (hMemObjR3 != NIL_RTR0MEMOBJ)
3616 rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
3617            if (hMemObjR0 != NIL_RTR0MEMOBJ && RT_SUCCESS(rc))
3618 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
3619 }
3620 else
3621            SUPR0Printf("SUPR0PageProtect: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
3622
3623 }
3624 return rc;
3625
3626}
3627
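/*
 * A minimal SUPR0PageProtect() sketch (illustrative only), making the first
 * page of an allocation read-only in both mappings and restoring it again;
 * pvR3/pvR0 are assumed to come from SUPR0PageAllocEx():
 *
 *      int rc = SUPR0PageProtect(pSession, pvR3, pvR0, 0, PAGE_SIZE, RTMEM_PROT_READ);
 *      ...
 *      rc = SUPR0PageProtect(pSession, pvR3, pvR0, 0, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
 */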
3628
3629/**
3630 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
3631 *
3632 * @returns IPRT status code.
3633 * @param pSession The session owning the allocation.
3634 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
3635 * SUPR0PageAllocEx().
3636 */
3637SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
3638{
3639 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
3640 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3641 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
3642}
3643
3644
3645/**
3646 * Reports a bad context, currently meaning that EFLAGS.AC is 0 instead of 1.
3647 *
3648 * @param   pDevExt     The device extension.
3649 * @param pszFile The source file where the caller detected the bad
3650 * context.
3651 * @param uLine The line number in @a pszFile.
3652 * @param pszExtra Optional additional message to give further hints.
3653 */
3654void VBOXCALL supdrvBadContext(PSUPDRVDEVEXT pDevExt, const char *pszFile, uint32_t uLine, const char *pszExtra)
3655{
3656 uint32_t cCalls;
3657
3658 /*
3659 * Shorten the filename before displaying the message.
3660 */
3661 for (;;)
3662 {
3663 const char *pszTmp = strchr(pszFile, '/');
3664 if (!pszTmp)
3665 pszTmp = strchr(pszFile, '\\');
3666 if (!pszTmp)
3667 break;
3668 pszFile = pszTmp + 1;
3669 }
3670 if (RT_VALID_PTR(pszExtra) && *pszExtra)
3671 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s: %s\n", uLine, pszFile, pszExtra);
3672 else
3673 SUPR0Printf("vboxdrv: Bad CPU context error at line %u in %s!\n", uLine, pszFile);
3674
3675 /*
3676 * Record the incident so that we stand a chance of blocking I/O controls
3677     * before panicking the system.
3678 */
3679 cCalls = ASMAtomicIncU32(&pDevExt->cBadContextCalls);
3680 if (cCalls > UINT32_MAX - _1K)
3681 ASMAtomicWriteU32(&pDevExt->cBadContextCalls, UINT32_MAX - _1K);
3682}
3683
3684
3685/**
3686 * Reports a bad context, currently meaning that EFLAGS.AC is 0 instead of 1.
3687 *
3688 * @param pSession The session of the caller.
3689 * @param pszFile The source file where the caller detected the bad
3690 * context.
3691 * @param uLine The line number in @a pszFile.
3692 * @param pszExtra Optional additional message to give further hints.
3693 */
3694SUPR0DECL(void) SUPR0BadContext(PSUPDRVSESSION pSession, const char *pszFile, uint32_t uLine, const char *pszExtra)
3695{
3696 PSUPDRVDEVEXT pDevExt;
3697
3698 AssertReturnVoid(SUP_IS_SESSION_VALID(pSession));
3699 pDevExt = pSession->pDevExt;
3700
3701 supdrvBadContext(pDevExt, pszFile, uLine, pszExtra);
3702}
3703
3704
3705/**
3706 * Gets the paging mode of the current CPU.
3707 *
3708 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
3709 */
3710SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
3711{
3712 SUPPAGINGMODE enmMode;
3713
3714 RTR0UINTREG cr0 = ASMGetCR0();
3715 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3716 enmMode = SUPPAGINGMODE_INVALID;
3717 else
3718 {
3719 RTR0UINTREG cr4 = ASMGetCR4();
3720 uint32_t fNXEPlusLMA = 0;
3721 if (cr4 & X86_CR4_PAE)
3722 {
3723 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
3724 if (fExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
3725 {
3726 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
3727 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
3728 fNXEPlusLMA |= RT_BIT(0);
3729 if ((fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
3730 fNXEPlusLMA |= RT_BIT(1);
3731 }
3732 }
3733
3734 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
3735 {
3736 case 0:
3737 enmMode = SUPPAGINGMODE_32_BIT;
3738 break;
3739
3740 case X86_CR4_PGE:
3741 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
3742 break;
3743
3744 case X86_CR4_PAE:
3745 enmMode = SUPPAGINGMODE_PAE;
3746 break;
3747
3748 case X86_CR4_PAE | RT_BIT(0):
3749 enmMode = SUPPAGINGMODE_PAE_NX;
3750 break;
3751
3752 case X86_CR4_PAE | X86_CR4_PGE:
3753 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
3754 break;
3755
3756 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
3757                enmMode = SUPPAGINGMODE_PAE_GLOBAL_NX;
3758 break;
3759
3760 case RT_BIT(1) | X86_CR4_PAE:
3761 enmMode = SUPPAGINGMODE_AMD64;
3762 break;
3763
3764 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
3765 enmMode = SUPPAGINGMODE_AMD64_NX;
3766 break;
3767
3768 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
3769 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
3770 break;
3771
3772 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
3773 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
3774 break;
3775
3776 default:
3777 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
3778 enmMode = SUPPAGINGMODE_INVALID;
3779 break;
3780 }
3781 }
3782 return enmMode;
3783}
3784
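/*
 * A minimal sketch of how a caller might branch on the reported paging mode
 * (the grouping below is just an example):
 *
 *      switch (SUPR0GetPagingMode())
 *      {
 *          case SUPPAGINGMODE_AMD64:
 *          case SUPPAGINGMODE_AMD64_NX:
 *          case SUPPAGINGMODE_AMD64_GLOBAL:
 *          case SUPPAGINGMODE_AMD64_GLOBAL_NX:
 *              ...long mode paging...
 *              break;
 *          case SUPPAGINGMODE_INVALID:
 *              ...not in paged protected mode; bail out...
 *              break;
 *          default:
 *              ...32-bit or PAE paging...
 *              break;
 *      }
 */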
3785
3786/**
3787 * Change CR4 and take care of the kernel CR4 shadow if applicable.
3788 *
3789 * CR4 shadow handling is required for Linux >= 4.0. Calling this function
3790 * instead of ASMSetCR4() is only necessary for semi-permanent CR4 changes
3791 * for code with interrupts enabled.
3792 *
3793 * @returns the old CR4 value.
3794 *
3795 * @param fOrMask bits to be set in CR4.
3796 * @param fAndMask mask to AND with CR4; bits cleared here are cleared in CR4 (unless also set in @a fOrMask).
3797 *
3798 * @remarks Must be called with preemption/interrupts disabled.
3799 */
3800SUPR0DECL(RTCCUINTREG) SUPR0ChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
3801{
3802#ifdef RT_OS_LINUX
3803 return supdrvOSChangeCR4(fOrMask, fAndMask);
3804#else
3805 RTCCUINTREG uOld = ASMGetCR4();
3806 RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
3807 if (uNew != uOld)
3808 ASMSetCR4(uNew);
3809 return uOld;
3810#endif
3811}
3812
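/*
 * A minimal sketch (illustrative only) of a semi-permanent CR4 change using
 * SUPR0ChangeCR4(), here setting CR4.VMXE and restoring the original state
 * afterwards; as noted above, preemption/interrupts must be disabled around
 * each call:
 *
 *      RTCCUINTREG const uOldCr4 = SUPR0ChangeCR4(X86_CR4_VMXE, ~(RTCCUINTREG)0);
 *      ...
 *      if (!(uOldCr4 & X86_CR4_VMXE))
 *          SUPR0ChangeCR4(0, ~(RTCCUINTREG)X86_CR4_VMXE);
 */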
3813
3814/**
3815 * Enables or disables hardware virtualization extensions using native OS APIs.
3816 *
3817 * @returns VBox status code.
3818 * @retval VINF_SUCCESS on success.
3819 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
3820 *
3821 * @param fEnable Whether to enable or disable.
3822 */
3823SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
3824{
3825#ifdef RT_OS_DARWIN
3826 return supdrvOSEnableVTx(fEnable);
3827#else
3828 return VERR_NOT_SUPPORTED;
3829#endif
3830}
3831
3832
3833/**
3834 * Suspends hardware virtualization extensions using the native OS API.
3835 *
3836 * This is called prior to entering raw-mode context.
3837 *
3838 * @returns @c true if suspended, @c false if not.
3839 */
3840SUPR0DECL(bool) SUPR0SuspendVTxOnCpu(void)
3841{
3842#ifdef RT_OS_DARWIN
3843 return supdrvOSSuspendVTxOnCpu();
3844#else
3845 return false;
3846#endif
3847}
3848
3849
3850/**
3851 * Resumes hardware virtualization extensions using the native OS API.
3852 *
3853 * This is called after leaving raw-mode context.
3854 *
3855 * @param fSuspended The return value of SUPR0SuspendVTxOnCpu.
3856 */
3857SUPR0DECL(void) SUPR0ResumeVTxOnCpu(bool fSuspended)
3858{
3859#ifdef RT_OS_DARWIN
3860 supdrvOSResumeVTxOnCpu(fSuspended);
3861#else
3862 Assert(!fSuspended);
3863#endif
3864}
3865
3866
3867/**
3868 * Checks if Intel VT-x feature is usable on this CPU.
3869 *
3870 * @returns VBox status code.
3871 * @param   pfIsSmxModeAmbiguous  Where to write whether the SMX mode causes
3872 * ambiguity that makes us unsure whether we
3873 * really can use VT-x or not.
3874 *
3875 * @remarks Must be called with preemption disabled.
3876 * The caller is also expected to check that the CPU is an Intel (or
3877 * VIA) CPU -and- that it supports VT-x. Otherwise, this function
3878 * might throw a #GP fault as it tries to read/write MSRs that may not
3879 * be present!
3880 */
3881SUPR0DECL(int) SUPR0GetVmxUsability(bool *pfIsSmxModeAmbiguous)
3882{
3883 uint64_t u64FeatMsr;
3884 bool fMaybeSmxMode;
3885 bool fMsrLocked;
3886 bool fSmxVmxAllowed;
3887 bool fVmxAllowed;
3888 bool fIsSmxModeAmbiguous;
3889 int rc;
3890
3891 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
3892
3893 u64FeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
3894 fMaybeSmxMode = RT_BOOL(ASMGetCR4() & X86_CR4_SMXE);
3895 fMsrLocked = RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
3896 fSmxVmxAllowed = RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
3897 fVmxAllowed = RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
3898 fIsSmxModeAmbiguous = false;
3899 rc = VERR_INTERNAL_ERROR_5;
3900
3901    /* Check if the LOCK bit is set but the required VMXON bit(s) are clear. */
3902 if (fMsrLocked)
3903 {
3904 if (fVmxAllowed && fSmxVmxAllowed)
3905 rc = VINF_SUCCESS;
3906 else if (!fVmxAllowed && !fSmxVmxAllowed)
3907 rc = VERR_VMX_MSR_ALL_VMX_DISABLED;
3908 else if (!fMaybeSmxMode)
3909 {
3910 if (fVmxAllowed)
3911 rc = VINF_SUCCESS;
3912 else
3913 rc = VERR_VMX_MSR_VMX_DISABLED;
3914 }
3915 else
3916 {
3917 /*
3918 * CR4.SMXE is set but this doesn't mean the CPU is necessarily in SMX mode. We shall assume
3919 * that it is -not- and that it is a stupid BIOS/OS setting CR4.SMXE for no good reason.
3920 * See @bugref{6873}.
3921 */
3922 Assert(fMaybeSmxMode == true);
3923 fIsSmxModeAmbiguous = true;
3924 rc = VINF_SUCCESS;
3925 }
3926 }
3927 else
3928 {
3929 /*
3930 * MSR is not yet locked; we can change it ourselves here. Once the lock bit is set,
3931 * this MSR can no longer be modified.
3932 *
3933 * Set both the VMX and SMX_VMX bits (if supported) as we can't determine SMX mode
3934 * accurately. See @bugref{6873}.
3935 *
3936 * We need to check for SMX hardware support here, before writing the MSR as
3937 * otherwise we will #GP fault on CPUs that do not support it. Callers do not check
3938 * for it.
3939 */
3940 uint32_t fFeaturesECX, uDummy;
3941#ifdef VBOX_STRICT
3942 /* Callers should have verified these at some point. */
3943 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
3944 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
3945 Assert(ASMIsValidStdRange(uMaxId));
3946 Assert( ASMIsIntelCpuEx( uVendorEBX, uVendorECX, uVendorEDX)
3947 || ASMIsViaCentaurCpuEx(uVendorEBX, uVendorECX, uVendorEDX));
3948#endif
3949 ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &uDummy);
3950 bool fSmxVmxHwSupport = false;
3951 if ( (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
3952 && (fFeaturesECX & X86_CPUID_FEATURE_ECX_SMX))
3953 fSmxVmxHwSupport = true;
3954
3955 u64FeatMsr |= MSR_IA32_FEATURE_CONTROL_LOCK
3956 | MSR_IA32_FEATURE_CONTROL_VMXON;
3957 if (fSmxVmxHwSupport)
3958 u64FeatMsr |= MSR_IA32_FEATURE_CONTROL_SMX_VMXON;
3959
3960 /*
3961 * Commit.
3962 */
3963 ASMWrMsr(MSR_IA32_FEATURE_CONTROL, u64FeatMsr);
3964
3965 /*
3966 * Verify.
3967 */
3968 u64FeatMsr = ASMRdMsr(MSR_IA32_FEATURE_CONTROL);
3969 fMsrLocked = RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_LOCK);
3970 if (fMsrLocked)
3971 {
3972 fSmxVmxAllowed = RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_SMX_VMXON);
3973 fVmxAllowed = RT_BOOL(u64FeatMsr & MSR_IA32_FEATURE_CONTROL_VMXON);
3974 if ( fVmxAllowed
3975 && ( !fSmxVmxHwSupport
3976 || fSmxVmxAllowed))
3977 {
3978 rc = VINF_SUCCESS;
3979 }
3980 else
3981 rc = !fSmxVmxHwSupport ? VERR_VMX_MSR_VMX_ENABLE_FAILED : VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED;
3982 }
3983 else
3984 rc = VERR_VMX_MSR_LOCKING_FAILED;
3985 }
3986
3987 if (pfIsSmxModeAmbiguous)
3988 *pfIsSmxModeAmbiguous = fIsSmxModeAmbiguous;
3989
3990 return rc;
3991}
3992
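/*
 * A minimal caller sketch (illustrative only), assuming the CPU has already
 * been identified as an Intel/VIA part with the VMX CPUID feature bit set,
 * as required by the remarks above:
 *
 *      RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
 *      bool fSmxAmbiguous = false;
 *      RTThreadPreemptDisable(&PreemptState);
 *      int rc = SUPR0GetVmxUsability(&fSmxAmbiguous);
 *      RTThreadPreemptRestore(&PreemptState);
 *      ...VINF_SUCCESS means VMXON is permitted by MSR_IA32_FEATURE_CONTROL...
 */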
3993
3994/**
3995 * Checks if AMD-V SVM feature is usable on this CPU.
3996 *
3997 * @returns VBox status code.
3998 * @param fInitSvm If usable, try to initialize SVM on this CPU.
3999 *
4000 * @remarks Must be called with preemption disabled.
4001 */
4002SUPR0DECL(int) SUPR0GetSvmUsability(bool fInitSvm)
4003{
4004 int rc;
4005 uint64_t fVmCr;
4006 uint64_t fEfer;
4007
4008 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
4009 fVmCr = ASMRdMsr(MSR_K8_VM_CR);
4010 if (!(fVmCr & MSR_K8_VM_CR_SVM_DISABLE))
4011 {
4012 rc = VINF_SUCCESS;
4013 if (fInitSvm)
4014 {
4015 /* Turn on SVM in the EFER MSR. */
4016 fEfer = ASMRdMsr(MSR_K6_EFER);
4017 if (fEfer & MSR_K6_EFER_SVME)
4018 rc = VERR_SVM_IN_USE;
4019 else
4020 {
4021 ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
4022
4023 /* Paranoia. */
4024 fEfer = ASMRdMsr(MSR_K6_EFER);
4025 if (fEfer & MSR_K6_EFER_SVME)
4026 {
4027 /* Restore previous value. */
4028 ASMWrMsr(MSR_K6_EFER, fEfer & ~MSR_K6_EFER_SVME);
4029 }
4030 else
4031 rc = VERR_SVM_ILLEGAL_EFER_MSR;
4032 }
4033 }
4034 }
4035 else
4036 rc = VERR_SVM_DISABLED;
4037 return rc;
4038}
4039
4040
4041/**
4042 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4043 *
4044 * @returns VBox status code.
4045 * @retval VERR_VMX_NO_VMX
4046 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4047 * @retval VERR_VMX_MSR_VMX_DISABLED
4048 * @retval VERR_VMX_MSR_LOCKING_FAILED
4049 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4050 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4051 * @retval VERR_SVM_NO_SVM
4052 * @retval VERR_SVM_DISABLED
4053 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4054 * (centaur) CPU.
4055 *
4056 * @param pfCaps Where to store the capabilities.
4057 */
4058int VBOXCALL supdrvQueryVTCapsInternal(uint32_t *pfCaps)
4059{
4060 int rc = VERR_UNSUPPORTED_CPU;
4061 bool fIsSmxModeAmbiguous = false;
4062 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
4063
4064 /*
4065 * Input validation.
4066 */
4067 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4068
4069 *pfCaps = 0;
4070 /* We may modify MSRs and re-read them, disable preemption so we make sure we don't migrate CPUs. */
4071 RTThreadPreemptDisable(&PreemptState);
4072 if (ASMHasCpuId())
4073 {
4074 uint32_t fFeaturesECX, fFeaturesEDX, uDummy;
4075 uint32_t uMaxId, uVendorEBX, uVendorECX, uVendorEDX;
4076
4077 ASMCpuId(0, &uMaxId, &uVendorEBX, &uVendorECX, &uVendorEDX);
4078 ASMCpuId(1, &uDummy, &uDummy, &fFeaturesECX, &fFeaturesEDX);
4079
4080 if ( ASMIsValidStdRange(uMaxId)
4081 && ( ASMIsIntelCpuEx( uVendorEBX, uVendorECX, uVendorEDX)
4082 || ASMIsViaCentaurCpuEx(uVendorEBX, uVendorECX, uVendorEDX) )
4083 )
4084 {
4085 if ( (fFeaturesECX & X86_CPUID_FEATURE_ECX_VMX)
4086 && (fFeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
4087 && (fFeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
4088 )
4089 {
4090 rc = SUPR0GetVmxUsability(&fIsSmxModeAmbiguous);
4091 if (rc == VINF_SUCCESS)
4092 {
4093 VMXCAPABILITY vtCaps;
4094
4095 *pfCaps |= SUPVTCAPS_VT_X;
4096
4097 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS);
4098 if (vtCaps.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
4099 {
4100 vtCaps.u = ASMRdMsr(MSR_IA32_VMX_PROCBASED_CTLS2);
4101 if (vtCaps.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
4102 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4103 if (vtCaps.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST)
4104 *pfCaps |= SUPVTCAPS_VTX_UNRESTRICTED_GUEST;
4105 }
4106 }
4107 }
4108 else
4109 rc = VERR_VMX_NO_VMX;
4110 }
4111 else if ( ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX)
4112 && ASMIsValidStdRange(uMaxId))
4113 {
4114 uint32_t fExtFeaturesEcx, uExtMaxId;
4115 ASMCpuId(0x80000000, &uExtMaxId, &uDummy, &uDummy, &uDummy);
4116 ASMCpuId(0x80000001, &uDummy, &uDummy, &fExtFeaturesEcx, &uDummy);
4117
4118 /* Check if SVM is available. */
4119 if ( ASMIsValidExtRange(uExtMaxId)
4120 && uExtMaxId >= 0x8000000a
4121 && (fExtFeaturesEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
4122 && (fFeaturesEDX & X86_CPUID_FEATURE_EDX_MSR)
4123 && (fFeaturesEDX & X86_CPUID_FEATURE_EDX_FXSR)
4124 )
4125 {
4126 rc = SUPR0GetSvmUsability(false /* fInitSvm */);
4127 if (RT_SUCCESS(rc))
4128 {
4129 uint32_t fSvmFeatures;
4130 *pfCaps |= SUPVTCAPS_AMD_V;
4131
4132 /* Query AMD-V features. */
4133 ASMCpuId(0x8000000a, &uDummy, &uDummy, &uDummy, &fSvmFeatures);
4134 if (fSvmFeatures & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
4135 *pfCaps |= SUPVTCAPS_NESTED_PAGING;
4136 }
4137 }
4138 else
4139 rc = VERR_SVM_NO_SVM;
4140 }
4141 }
4142
4143 RTThreadPreemptRestore(&PreemptState);
4144 if (fIsSmxModeAmbiguous)
4145        SUPR0Printf("WARNING! CR4 hints SMX mode but your CPU is too secretive. Proceeding anyway... We wish you good luck!\n");
4146 return rc;
4147}
4148
4149/**
4150 * Queries the AMD-V and VT-x capabilities of the calling CPU.
4151 *
4152 * @returns VBox status code.
4153 * @retval VERR_VMX_NO_VMX
4154 * @retval VERR_VMX_MSR_ALL_VMX_DISABLED
4155 * @retval VERR_VMX_MSR_VMX_DISABLED
4156 * @retval VERR_VMX_MSR_LOCKING_FAILED
4157 * @retval VERR_VMX_MSR_VMX_ENABLE_FAILED
4158 * @retval VERR_VMX_MSR_SMX_VMX_ENABLE_FAILED
4159 * @retval VERR_SVM_NO_SVM
4160 * @retval VERR_SVM_DISABLED
4161 * @retval VERR_UNSUPPORTED_CPU if not identifiable as an AMD, Intel or VIA
4162 * (centaur) CPU.
4163 *
4164 * @param pSession The session handle.
4165 * @param pfCaps Where to store the capabilities.
4166 */
4167SUPR0DECL(int) SUPR0QueryVTCaps(PSUPDRVSESSION pSession, uint32_t *pfCaps)
4168{
4169 /*
4170 * Input validation.
4171 */
4172 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4173 AssertPtrReturn(pfCaps, VERR_INVALID_POINTER);
4174
4175 /*
4176 * Call common worker.
4177 */
4178 return supdrvQueryVTCapsInternal(pfCaps);
4179}
4180
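/*
 * A minimal sketch (illustrative only) for interpreting the capability mask
 * returned by SUPR0QueryVTCaps(); only the bits set by the worker above are
 * checked:
 *
 *      uint32_t fCaps = 0;
 *      int rc = SUPR0QueryVTCaps(pSession, &fCaps);
 *      if (RT_SUCCESS(rc))
 *      {
 *          if (fCaps & SUPVTCAPS_VT_X)
 *              ...use VT-x, optionally with SUPVTCAPS_NESTED_PAGING and
 *                 SUPVTCAPS_VTX_UNRESTRICTED_GUEST...
 *          else if (fCaps & SUPVTCAPS_AMD_V)
 *              ...use AMD-V, optionally with SUPVTCAPS_NESTED_PAGING...
 *      }
 */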
4181
4182/**
4183 * Register a component factory with the support driver.
4184 *
4185 * This is currently restricted to kernel sessions only.
4186 *
4187 * @returns VBox status code.
4188 * @retval VINF_SUCCESS on success.
4189 * @retval VERR_NO_MEMORY if we're out of memory.
4190 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
4191 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4192 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4193 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4194 *
4195 * @param pSession The SUPDRV session (must be a ring-0 session).
4196 * @param pFactory Pointer to the component factory registration structure.
4197 *
4198 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
4199 */
4200SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4201{
4202 PSUPDRVFACTORYREG pNewReg;
4203 const char *psz;
4204 int rc;
4205
4206 /*
4207 * Validate parameters.
4208 */
4209 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4210 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4211 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4212 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
4213 psz = RTStrEnd(pFactory->szName, sizeof(pFactory->szName));
4214 AssertReturn(psz, VERR_INVALID_PARAMETER);
4215
4216 /*
4217 * Allocate and initialize a new registration structure.
4218 */
4219 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
4220 if (pNewReg)
4221 {
4222 pNewReg->pNext = NULL;
4223 pNewReg->pFactory = pFactory;
4224 pNewReg->pSession = pSession;
4225 pNewReg->cchName = psz - &pFactory->szName[0];
4226
4227 /*
4228 * Add it to the tail of the list after checking for prior registration.
4229 */
4230 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4231 if (RT_SUCCESS(rc))
4232 {
4233 PSUPDRVFACTORYREG pPrev = NULL;
4234 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4235 while (pCur && pCur->pFactory != pFactory)
4236 {
4237 pPrev = pCur;
4238 pCur = pCur->pNext;
4239 }
4240 if (!pCur)
4241 {
4242 if (pPrev)
4243 pPrev->pNext = pNewReg;
4244 else
4245 pSession->pDevExt->pComponentFactoryHead = pNewReg;
4246 rc = VINF_SUCCESS;
4247 }
4248 else
4249 rc = VERR_ALREADY_EXISTS;
4250
4251 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4252 }
4253
4254 if (RT_FAILURE(rc))
4255 RTMemFree(pNewReg);
4256 }
4257 else
4258 rc = VERR_NO_MEMORY;
4259 return rc;
4260}
4261
4262
4263/**
4264 * Deregister a component factory.
4265 *
4266 * @returns VBox status code.
4267 * @retval VINF_SUCCESS on success.
4268 * @retval VERR_NOT_FOUND if the factory wasn't registered.
4269 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
4270 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4271 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4272 *
4273 * @param pSession The SUPDRV session (must be a ring-0 session).
4274 * @param pFactory Pointer to the component factory registration structure
4275 *                      previously passed to SUPR0ComponentRegisterFactory().
4276 *
4277 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
4278 */
4279SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
4280{
4281 int rc;
4282
4283 /*
4284 * Validate parameters.
4285 */
4286 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4287 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
4288 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
4289
4290 /*
4291 * Take the lock and look for the registration record.
4292 */
4293 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4294 if (RT_SUCCESS(rc))
4295 {
4296 PSUPDRVFACTORYREG pPrev = NULL;
4297 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4298 while (pCur && pCur->pFactory != pFactory)
4299 {
4300 pPrev = pCur;
4301 pCur = pCur->pNext;
4302 }
4303 if (pCur)
4304 {
4305 if (!pPrev)
4306 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
4307 else
4308 pPrev->pNext = pCur->pNext;
4309
4310 pCur->pNext = NULL;
4311 pCur->pFactory = NULL;
4312 pCur->pSession = NULL;
4313 rc = VINF_SUCCESS;
4314 }
4315 else
4316 rc = VERR_NOT_FOUND;
4317
4318 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4319
4320 RTMemFree(pCur);
4321 }
4322 return rc;
4323}
4324
4325
4326/**
4327 * Queries a component factory.
4328 *
4329 * @returns VBox status code.
4330 * @retval VERR_INVALID_PARAMETER on invalid parameter.
4331 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
4332 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
4333 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
4334 *
4335 * @param pSession The SUPDRV session.
4336 * @param pszName The name of the component factory.
4337 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
4338 * @param ppvFactoryIf Where to store the factory interface.
4339 */
4340SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
4341{
4342 const char *pszEnd;
4343 size_t cchName;
4344 int rc;
4345
4346 /*
4347 * Validate parameters.
4348 */
4349 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
4350
4351 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
4352 pszEnd = RTStrEnd(pszName, RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
4353 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4354 cchName = pszEnd - pszName;
4355
4356 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
4357 pszEnd = RTStrEnd(pszInterfaceUuid, RTUUID_STR_LENGTH);
4358 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
4359
4360 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
4361 *ppvFactoryIf = NULL;
4362
4363 /*
4364 * Take the lock and try all factories by this name.
4365 */
4366 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
4367 if (RT_SUCCESS(rc))
4368 {
4369 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
4370 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
4371 while (pCur)
4372 {
4373 if ( pCur->cchName == cchName
4374 && !memcmp(pCur->pFactory->szName, pszName, cchName))
4375 {
4376 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
4377 if (pvFactory)
4378 {
4379 *ppvFactoryIf = pvFactory;
4380 rc = VINF_SUCCESS;
4381 break;
4382 }
4383 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
4384 }
4385
4386 /* next */
4387 pCur = pCur->pNext;
4388 }
4389
4390 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
4391 }
4392 return rc;
4393}
4394
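/*
 * A minimal component factory sketch for a kernel (ring-0) session; the
 * factory name, the UUID placeholder and the callback details are
 * illustrative assumptions, not a definitive implementation:
 *
 *      static DECLCALLBACK(void *) myQueryFactoryIf(PCSUPDRVFACTORY pFactory, PSUPDRVSESSION pSession,
 *                                                   const char *pszInterfaceUuid)
 *      {
 *          ...return the interface matching pszInterfaceUuid, or NULL...
 *      }
 *
 *      static SUPDRVFACTORY g_MyFactory;
 *      RTStrCopy(g_MyFactory.szName, sizeof(g_MyFactory.szName), "my-component");
 *      g_MyFactory.pfnQueryFactoryInterface = myQueryFactoryIf;
 *      int rc = SUPR0ComponentRegisterFactory(pSession, &g_MyFactory);
 *      ...
 *      void *pvIf;
 *      rc = SUPR0ComponentQueryFactory(pSession, "my-component",
 *                                      "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", &pvIf);
 *      ...
 *      rc = SUPR0ComponentDeregisterFactory(pSession, &g_MyFactory);
 */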
4395
4396/**
4397 * Adds a memory object to the session.
4398 *
4399 * @returns IPRT status code.
4400 * @param pMem Memory tracking structure containing the
4401 * information to track.
4402 * @param pSession The session.
4403 */
4404static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
4405{
4406 PSUPDRVBUNDLE pBundle;
4407
4408 /*
4409 * Find free entry and record the allocation.
4410 */
4411 RTSpinlockAcquire(pSession->Spinlock);
4412 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4413 {
4414 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
4415 {
4416 unsigned i;
4417 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
4418 {
4419 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
4420 {
4421 pBundle->cUsed++;
4422 pBundle->aMem[i] = *pMem;
4423 RTSpinlockRelease(pSession->Spinlock);
4424 return VINF_SUCCESS;
4425 }
4426 }
4427 AssertFailed(); /* !!this can't be happening!!! */
4428 }
4429 }
4430 RTSpinlockRelease(pSession->Spinlock);
4431
4432 /*
4433 * Need to allocate a new bundle.
4434 * Insert into the last entry in the bundle.
4435 */
4436 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
4437 if (!pBundle)
4438 return VERR_NO_MEMORY;
4439
4440 /* take last entry. */
4441 pBundle->cUsed++;
4442 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
4443
4444 /* insert into list. */
4445 RTSpinlockAcquire(pSession->Spinlock);
4446 pBundle->pNext = pSession->Bundle.pNext;
4447 pSession->Bundle.pNext = pBundle;
4448 RTSpinlockRelease(pSession->Spinlock);
4449
4450 return VINF_SUCCESS;
4451}
4452
4453
4454/**
4455 * Releases a memory object referenced by pointer and type.
4456 *
4457 * @returns IPRT status code.
4458 * @param pSession Session data.
4459 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
4460 * @param eType Memory type.
4461 */
4462static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
4463{
4464 PSUPDRVBUNDLE pBundle;
4465
4466 /*
4467 * Validate input.
4468 */
4469 if (!uPtr)
4470 {
4471 Log(("Illegal address %p\n", (void *)uPtr));
4472 return VERR_INVALID_PARAMETER;
4473 }
4474
4475 /*
4476 * Search for the address.
4477 */
4478 RTSpinlockAcquire(pSession->Spinlock);
4479 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
4480 {
4481 if (pBundle->cUsed > 0)
4482 {
4483 unsigned i;
4484 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
4485 {
4486 if ( pBundle->aMem[i].eType == eType
4487 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
4488 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
4489 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
4490 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
4491 )
4492 {
4493 /* Make a copy of it and release it outside the spinlock. */
4494 SUPDRVMEMREF Mem = pBundle->aMem[i];
4495 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
4496 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
4497 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
4498 RTSpinlockRelease(pSession->Spinlock);
4499
4500 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
4501 {
4502 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
4503 AssertRC(rc); /** @todo figure out how to handle this. */
4504 }
4505 if (Mem.MemObj != NIL_RTR0MEMOBJ)
4506 {
4507 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
4508 AssertRC(rc); /** @todo figure out how to handle this. */
4509 }
4510 return VINF_SUCCESS;
4511 }
4512 }
4513 }
4514 }
4515 RTSpinlockRelease(pSession->Spinlock);
4516 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
4517 return VERR_INVALID_PARAMETER;
4518}
4519
4520
4521/**
4522 * Opens an image. If it's the first time it's opened, the caller must upload
4523 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
4524 *
4525 * This is the 1st step of the loading.
4526 *
4527 * @returns IPRT status code.
4528 * @param pDevExt Device globals.
4529 * @param pSession Session data.
4530 * @param pReq The open request.
4531 */
4532static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
4533{
4534 int rc;
4535 PSUPDRVLDRIMAGE pImage;
4536 void *pv;
4537 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
4538 SUPDRV_CHECK_SMAP_SETUP();
4539 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4540 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithTabs=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithTabs));
4541
4542 /*
4543 * Check if we got an instance of the image already.
4544 */
4545 supdrvLdrLock(pDevExt);
4546 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4547 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
4548 {
4549 if ( pImage->szName[cchName] == '\0'
4550 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
4551 {
4552 if (RT_LIKELY(pImage->cUsage < UINT32_MAX / 2U))
4553 {
4554                /** @todo check cbImageBits and cbImageWithTabs here, if they differ that indicates that the images are different. */
4555 pImage->cUsage++;
4556 pReq->u.Out.pvImageBase = pImage->pvImage;
4557 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
4558 pReq->u.Out.fNativeLoader = pImage->fNative;
4559 supdrvLdrAddUsage(pSession, pImage);
4560 supdrvLdrUnlock(pDevExt);
4561 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4562 return VINF_SUCCESS;
4563 }
4564 supdrvLdrUnlock(pDevExt);
4565            Log(("supdrvIOCtl_LdrOpen: Too many existing references to '%s'!\n", pReq->u.In.szName));
4566 return VERR_INTERNAL_ERROR_3; /** @todo add VERR_TOO_MANY_REFERENCES */
4567 }
4568 }
4569 /* (not found - add it!) */
4570
4571 /* If the loader interface is locked down, make userland fail early */
4572 if (pDevExt->fLdrLockedDown)
4573 {
4574 supdrvLdrUnlock(pDevExt);
4575 Log(("supdrvIOCtl_LdrOpen: Not adding '%s' to image list, loader interface is locked down!\n", pReq->u.In.szName));
4576 return VERR_PERMISSION_DENIED;
4577 }
4578
4579 /*
4580 * Allocate memory.
4581 */
4582 Assert(cchName < sizeof(pImage->szName));
4583 pv = RTMemAlloc(sizeof(SUPDRVLDRIMAGE));
4584 if (!pv)
4585 {
4586 supdrvLdrUnlock(pDevExt);
4587 Log(("supdrvIOCtl_LdrOpen: RTMemAlloc() failed\n"));
4588 return /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_2;
4589 }
4590 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4591
4592 /*
4593 * Setup and link in the LDR stuff.
4594 */
4595 pImage = (PSUPDRVLDRIMAGE)pv;
4596 pImage->pvImage = NULL;
4597 pImage->pvImageAlloc = NULL;
4598 pImage->cbImageWithTabs = pReq->u.In.cbImageWithTabs;
4599 pImage->cbImageBits = pReq->u.In.cbImageBits;
4600 pImage->cSymbols = 0;
4601 pImage->paSymbols = NULL;
4602 pImage->pachStrTab = NULL;
4603 pImage->cbStrTab = 0;
4604 pImage->pfnModuleInit = NULL;
4605 pImage->pfnModuleTerm = NULL;
4606 pImage->pfnServiceReqHandler = NULL;
4607 pImage->uState = SUP_IOCTL_LDR_OPEN;
4608 pImage->cUsage = 1;
4609 pImage->pDevExt = pDevExt;
4610 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
4611
4612 /*
4613 * Try load it using the native loader, if that isn't supported, fall back
4614 * on the older method.
4615 */
4616 pImage->fNative = true;
4617 rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
4618 if (rc == VERR_NOT_SUPPORTED)
4619 {
4620 pImage->pvImageAlloc = RTMemExecAlloc(pImage->cbImageBits + 31);
4621 pImage->pvImage = RT_ALIGN_P(pImage->pvImageAlloc, 32);
4622 pImage->fNative = false;
4623 rc = pImage->pvImageAlloc ? VINF_SUCCESS : VERR_NO_EXEC_MEMORY;
4624 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4625 }
4626 if (RT_FAILURE(rc))
4627 {
4628 supdrvLdrUnlock(pDevExt);
4629 RTMemFree(pImage);
4630 Log(("supdrvIOCtl_LdrOpen(%s): failed - %Rrc\n", pReq->u.In.szName, rc));
4631 return rc;
4632 }
4633 Assert(VALID_PTR(pImage->pvImage) || RT_FAILURE(rc));
4634
4635 /*
4636 * Link it.
4637 */
4638 pImage->pNext = pDevExt->pLdrImages;
4639 pDevExt->pLdrImages = pImage;
4640
4641 supdrvLdrAddUsage(pSession, pImage);
4642
4643 pReq->u.Out.pvImageBase = pImage->pvImage;
4644 pReq->u.Out.fNeedsLoading = true;
4645 pReq->u.Out.fNativeLoader = pImage->fNative;
4646 supdrvOSLdrNotifyOpened(pDevExt, pImage);
4647
4648 supdrvLdrUnlock(pDevExt);
4649 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4650 return VINF_SUCCESS;
4651}
4652
4653
4654/**
4655 * Worker that validates a pointer to an image entrypoint.
4656 *
4657 * @returns IPRT status code.
4658 * @param pDevExt The device globals.
4659 * @param pImage The loader image.
4660 * @param pv The pointer into the image.
4661 * @param fMayBeNull Whether it may be NULL.
4662 * @param pszWhat What is this entrypoint? (for logging)
4663 * @param pbImageBits The image bits prepared by ring-3.
4664 *
4665 * @remarks Will release the loader lock on failure.
4666 */
4667static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv,
4668 bool fMayBeNull, const uint8_t *pbImageBits, const char *pszWhat)
4669{
4670 if (!fMayBeNull || pv)
4671 {
4672 if ((uintptr_t)pv - (uintptr_t)pImage->pvImage >= pImage->cbImageBits)
4673 {
4674 supdrvLdrUnlock(pDevExt);
4675 Log(("Out of range (%p LB %#x): %s=%p\n", pImage->pvImage, pImage->cbImageBits, pszWhat, pv));
4676 return VERR_INVALID_PARAMETER;
4677 }
4678
4679 if (pImage->fNative)
4680 {
4681 int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits);
4682 if (RT_FAILURE(rc))
4683 {
4684 supdrvLdrUnlock(pDevExt);
4685 Log(("Bad entry point address: %s=%p (rc=%Rrc)\n", pszWhat, pv, rc));
4686 return rc;
4687 }
4688 }
4689 }
4690 return VINF_SUCCESS;
4691}
4692
4693
4694/**
4695 * Formats a load error message.
4696 *
4697 * @returns @a rc
4698 * @param rc Return code.
4699 * @param pReq The request.
4700 * @param pszFormat The error message format string.
4701 * @param ... Arguments to the format string.
4702 */
4703int VBOXCALL supdrvLdrLoadError(int rc, PSUPLDRLOAD pReq, const char *pszFormat, ...)
4704{
4705 va_list va;
4706 va_start(va, pszFormat);
4707 pReq->u.Out.uErrorMagic = SUPLDRLOAD_ERROR_MAGIC;
4708 RTStrPrintfV(pReq->u.Out.szError, sizeof(pReq->u.Out.szError), pszFormat, va);
4709 va_end(va);
4710 Log(("SUP_IOCTL_LDR_LOAD: %s [rc=%Rrc]\n", pReq->u.Out.szError, rc));
4711 return rc;
4712}
4713
4714
4715/**
4716 * Loads the image bits.
4717 *
4718 * This is the 2nd step of the loading.
4719 *
4720 * @returns IPRT status code.
4721 * @param pDevExt Device globals.
4722 * @param pSession Session data.
4723 * @param pReq The request.
4724 */
4725static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
4726{
4727 PSUPDRVLDRUSAGE pUsage;
4728 PSUPDRVLDRIMAGE pImage;
4729 int rc;
4730 SUPDRV_CHECK_SMAP_SETUP();
4731    LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithTabs=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithTabs));
4732 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4733
4734 /*
4735 * Find the ldr image.
4736 */
4737 supdrvLdrLock(pDevExt);
4738 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4739
4740 pUsage = pSession->pLdrUsage;
4741 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
4742 pUsage = pUsage->pNext;
4743 if (!pUsage)
4744 {
4745 supdrvLdrUnlock(pDevExt);
4746 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image not found");
4747 }
4748 pImage = pUsage->pImage;
4749
4750 /*
4751 * Validate input.
4752 */
4753 if ( pImage->cbImageWithTabs != pReq->u.In.cbImageWithTabs
4754 || pImage->cbImageBits != pReq->u.In.cbImageBits)
4755 {
4756 supdrvLdrUnlock(pDevExt);
4757 return supdrvLdrLoadError(VERR_INVALID_HANDLE, pReq, "Image size mismatch found: %d(prep) != %d(load) or %d != %d",
4758 pImage->cbImageWithTabs, pReq->u.In.cbImageWithTabs, pImage->cbImageBits, pReq->u.In.cbImageBits);
4759 }
4760
4761 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
4762 {
4763 unsigned uState = pImage->uState;
4764 supdrvLdrUnlock(pDevExt);
4765 if (uState != SUP_IOCTL_LDR_LOAD)
4766 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
4767 pReq->u.Out.uErrorMagic = 0;
4768 return VERR_ALREADY_LOADED;
4769 }
4770
4771 /* If the loader interface is locked down, don't load new images */
4772 if (pDevExt->fLdrLockedDown)
4773 {
4774 supdrvLdrUnlock(pDevExt);
4775 return supdrvLdrLoadError(VERR_PERMISSION_DENIED, pReq, "Loader is locked down");
4776 }
4777
4778 switch (pReq->u.In.eEPType)
4779 {
4780 case SUPLDRLOADEP_NOTHING:
4781 break;
4782
4783 case SUPLDRLOADEP_VMMR0:
4784 rc = supdrvLdrValidatePointer( pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0, false, pReq->u.In.abImage, "pvVMMR0");
4785 if (RT_SUCCESS(rc))
4786 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, pReq->u.In.abImage, "pvVMMR0EntryFast");
4787 if (RT_SUCCESS(rc))
4788 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, pReq->u.In.abImage, "pvVMMR0EntryEx");
4789 if (RT_FAILURE(rc))
4790 return supdrvLdrLoadError(rc, pReq, "Invalid VMMR0 pointer");
4791 break;
4792
4793 case SUPLDRLOADEP_SERVICE:
4794 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, pReq->u.In.abImage, "pfnServiceReq");
4795 if (RT_FAILURE(rc))
4796 return supdrvLdrLoadError(rc, pReq, "Invalid pfnServiceReq pointer: %p", pReq->u.In.EP.Service.pfnServiceReq);
4797 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
4798 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
4799 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
4800 {
4801 supdrvLdrUnlock(pDevExt);
4802 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq,
4803 "Out of range (%p LB %#x): apvReserved={%p,%p,%p} MBZ!",
4804 pImage->pvImage, pReq->u.In.cbImageWithTabs,
4805 pReq->u.In.EP.Service.apvReserved[0],
4806 pReq->u.In.EP.Service.apvReserved[1],
4807 pReq->u.In.EP.Service.apvReserved[2]);
4808 }
4809 break;
4810
4811 default:
4812 supdrvLdrUnlock(pDevExt);
4813 return supdrvLdrLoadError(VERR_INVALID_PARAMETER, pReq, "Invalid eEPType=%d", pReq->u.In.eEPType);
4814 }
4815
4816 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, pReq->u.In.abImage, "pfnModuleInit");
4817 if (RT_FAILURE(rc))
4818 return supdrvLdrLoadError(rc, pReq, "Invalid pfnModuleInit pointer: %p", pReq->u.In.pfnModuleInit);
4819 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, pReq->u.In.abImage, "pfnModuleTerm");
4820 if (RT_FAILURE(rc))
4821 return supdrvLdrLoadError(rc, pReq, "Invalid pfnModuleTerm pointer: %p", pReq->u.In.pfnModuleTerm);
4822 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4823
4824 /*
4825 * Allocate and copy the tables.
4826 * (No need to do try/except as this is a buffered request.)
4827 */
4828 pImage->cbStrTab = pReq->u.In.cbStrTab;
4829 if (pImage->cbStrTab)
4830 {
4831 pImage->pachStrTab = (char *)RTMemAlloc(pImage->cbStrTab);
4832 if (pImage->pachStrTab)
4833 memcpy(pImage->pachStrTab, &pReq->u.In.abImage[pReq->u.In.offStrTab], pImage->cbStrTab);
4834 else
4835 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for string table: %#x", pImage->cbStrTab);
4836 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4837 }
4838
4839 pImage->cSymbols = pReq->u.In.cSymbols;
4840 if (RT_SUCCESS(rc) && pImage->cSymbols)
4841 {
4842 size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
4843 pImage->paSymbols = (PSUPLDRSYM)RTMemAlloc(cbSymbols);
4844 if (pImage->paSymbols)
4845 memcpy(pImage->paSymbols, &pReq->u.In.abImage[pReq->u.In.offSymbols], cbSymbols);
4846 else
4847 rc = supdrvLdrLoadError(VERR_NO_MEMORY, pReq, "Out of memory for symbol table: %#x", cbSymbols);
4848 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4849 }
4850
4851 /*
4852 * Copy the bits / complete native loading.
4853 */
4854 if (RT_SUCCESS(rc))
4855 {
4856 pImage->uState = SUP_IOCTL_LDR_LOAD;
4857 pImage->pfnModuleInit = (PFNR0MODULEINIT)pReq->u.In.pfnModuleInit;
4858 pImage->pfnModuleTerm = (PFNR0MODULETERM)pReq->u.In.pfnModuleTerm;
4859
4860 if (pImage->fNative)
4861 rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.abImage, pReq);
4862 else
4863 {
4864 memcpy(pImage->pvImage, &pReq->u.In.abImage[0], pImage->cbImageBits);
4865 Log(("vboxdrv: Loaded '%s' at %p\n", pImage->szName, pImage->pvImage));
4866 }
4867 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4868 }
4869
4870 /*
4871 * Update any entry points.
4872 */
4873 if (RT_SUCCESS(rc))
4874 {
4875 switch (pReq->u.In.eEPType)
4876 {
4877 default:
4878 case SUPLDRLOADEP_NOTHING:
4879 rc = VINF_SUCCESS;
4880 break;
4881 case SUPLDRLOADEP_VMMR0:
4882 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0,
4883 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
4884 break;
4885 case SUPLDRLOADEP_SERVICE:
4886 pImage->pfnServiceReqHandler = (PFNSUPR0SERVICEREQHANDLER)pReq->u.In.EP.Service.pfnServiceReq;
4887 rc = VINF_SUCCESS;
4888 break;
4889 }
4890 }
4891
4892 /*
4893 * On success call the module initialization.
4894 */
4895 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
4896 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
4897 {
4898 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
4899 pDevExt->pLdrInitImage = pImage;
4900 pDevExt->hLdrInitThread = RTThreadNativeSelf();
4901 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4902 rc = pImage->pfnModuleInit(pImage);
4903 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4904 pDevExt->pLdrInitImage = NULL;
4905 pDevExt->hLdrInitThread = NIL_RTNATIVETHREAD;
4906 if (RT_FAILURE(rc))
4907 {
4908 if (pDevExt->pvVMMR0 == pImage->pvImage)
4909 supdrvLdrUnsetVMMR0EPs(pDevExt);
4910 supdrvLdrLoadError(rc, pReq, "ModuleInit failed: %Rrc", rc);
4911 }
4912 }
4913 if (RT_SUCCESS(rc))
4914 {
4915 SUPR0Printf("vboxdrv: %p %s\n", pImage->pvImage, pImage->szName);
4916 pReq->u.Out.uErrorMagic = 0;
4917 pReq->u.Out.szError[0] = '\0';
4918 }
4919 else
4920 {
4921 /* Inform the tracing component in case ModuleInit registered TPs. */
4922 supdrvTracerModuleUnloading(pDevExt, pImage);
4923
4924 pImage->uState = SUP_IOCTL_LDR_OPEN;
4925 pImage->pfnModuleInit = NULL;
4926 pImage->pfnModuleTerm = NULL;
4927 pImage->pfnServiceReqHandler= NULL;
4928 pImage->cbStrTab = 0;
4929 RTMemFree(pImage->pachStrTab);
4930 pImage->pachStrTab = NULL;
4931 RTMemFree(pImage->paSymbols);
4932 pImage->paSymbols = NULL;
4933 pImage->cSymbols = 0;
4934 }
4935
4936 supdrvLdrUnlock(pDevExt);
4937 SUPDRV_CHECK_SMAP_CHECK(pDevExt, RT_NOTHING);
4938 return rc;
4939}
4940
4941
4942/**
4943 * Frees a previously loaded (prep'ed) image.
4944 *
4945 * @returns IPRT status code.
4946 * @param pDevExt Device globals.
4947 * @param pSession Session data.
4948 * @param pReq The request.
4949 */
4950static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
4951{
4952 int rc;
4953 PSUPDRVLDRUSAGE pUsagePrev;
4954 PSUPDRVLDRUSAGE pUsage;
4955 PSUPDRVLDRIMAGE pImage;
4956 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
4957
4958 /*
4959 * Find the ldr image.
4960 */
4961 supdrvLdrLock(pDevExt);
4962 pUsagePrev = NULL;
4963 pUsage = pSession->pLdrUsage;
4964 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
4965 {
4966 pUsagePrev = pUsage;
4967 pUsage = pUsage->pNext;
4968 }
4969 if (!pUsage)
4970 {
4971 supdrvLdrUnlock(pDevExt);
4972 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
4973 return VERR_INVALID_HANDLE;
4974 }
4975
4976 /*
4977 * Check if we can remove anything.
4978 */
4979 rc = VINF_SUCCESS;
4980 pImage = pUsage->pImage;
4981 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
4982 {
4983 /*
4984 * Check if there are any objects with destructors in the image; if so,
4985 * leave it for the session cleanup routine so we get a chance to clean
4986 * things up in the right order and not leave them all dangling.
4987 */
4988 RTSpinlockAcquire(pDevExt->Spinlock);
4989 if (pImage->cUsage <= 1)
4990 {
4991 PSUPDRVOBJ pObj;
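            /* The unsigned subtraction doubles as a range check: only destructors inside [pvImage, pvImage + cbImageBits) match. */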
4992 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
4993 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
4994 {
4995 rc = VERR_DANGLING_OBJECTS;
4996 break;
4997 }
4998 }
4999 else
5000 {
5001 PSUPDRVUSAGE pGenUsage;
5002 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
5003 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5004 {
5005 rc = VERR_DANGLING_OBJECTS;
5006 break;
5007 }
5008 }
5009 RTSpinlockRelease(pDevExt->Spinlock);
5010 if (rc == VINF_SUCCESS)
5011 {
5012 /* unlink it */
5013 if (pUsagePrev)
5014 pUsagePrev->pNext = pUsage->pNext;
5015 else
5016 pSession->pLdrUsage = pUsage->pNext;
5017
5018 /* free it */
5019 pUsage->pImage = NULL;
5020 pUsage->pNext = NULL;
5021 RTMemFree(pUsage);
5022
5023 /*
5024 * Dereference the image.
5025 */
5026 if (pImage->cUsage <= 1)
5027 supdrvLdrFree(pDevExt, pImage);
5028 else
5029 pImage->cUsage--;
5030 }
5031 else
5032 {
5033 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
5034 rc = VINF_SUCCESS; /** @todo BRANCH-2.1: remove this after branching. */
5035 }
5036 }
5037 else
5038 {
5039 /*
5040 * Dereference both image and usage.
5041 */
5042 pImage->cUsage--;
5043 pUsage->cUsage--;
5044 }
5045
5046 supdrvLdrUnlock(pDevExt);
5047 return rc;
5048}
5049
5050
5051/**
5052 * Locks down the image loader interface.
5053 *
5054 * @returns IPRT status code.
5055 * @param pDevExt Device globals.
5056 */
5057static int supdrvIOCtl_LdrLockDown(PSUPDRVDEVEXT pDevExt)
5058{
5059 LogFlow(("supdrvIOCtl_LdrLockDown:\n"));
5060
5061 supdrvLdrLock(pDevExt);
5062 if (!pDevExt->fLdrLockedDown)
5063 {
5064 pDevExt->fLdrLockedDown = true;
5065 Log(("supdrvIOCtl_LdrLockDown: Image loader interface locked down\n"));
5066 }
5067 supdrvLdrUnlock(pDevExt);
5068
5069 return VINF_SUCCESS;
5070}
5071
5072
5073/**
5074 * Gets the address of a symbol in an open image.
5075 *
5076 * @returns IPRT status code.
5077 * @param pDevExt Device globals.
5078 * @param pSession Session data.
5079 * @param pReq The request buffer.
5080 */
5081static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
5082{
5083 PSUPDRVLDRIMAGE pImage;
5084 PSUPDRVLDRUSAGE pUsage;
5085 uint32_t i;
5086 PSUPLDRSYM paSyms;
5087 const char *pchStrings;
5088 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
5089 void *pvSymbol = NULL;
5090 int rc = VERR_GENERAL_FAILURE;
5091 Log3(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
5092
5093 /*
5094 * Find the ldr image.
5095 */
5096 supdrvLdrLock(pDevExt);
5097 pUsage = pSession->pLdrUsage;
5098 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
5099 pUsage = pUsage->pNext;
5100 if (!pUsage)
5101 {
5102 supdrvLdrUnlock(pDevExt);
5103 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
5104 return VERR_INVALID_HANDLE;
5105 }
5106 pImage = pUsage->pImage;
5107 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
5108 {
5109 unsigned uState = pImage->uState;
5110 supdrvLdrUnlock(pDevExt);
5111 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
5112 return VERR_ALREADY_LOADED;
5113 }
5114
5115 /*
5116 * Search the symbol strings.
5117 *
5118 * Note! The int32_t is for native loading on solaris where the data
5119 * and text segments are in very different places.
5120 */
5121 pchStrings = pImage->pachStrTab;
5122 paSyms = pImage->paSymbols;
5123 for (i = 0; i < pImage->cSymbols; i++)
5124 {
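        /* Only compare when the terminated name fits entirely inside the string table. */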
5125 if ( paSyms[i].offName + cbSymbol <= pImage->cbStrTab
5126 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
5127 {
5128 pvSymbol = (uint8_t *)pImage->pvImage + (int32_t)paSyms[i].offSymbol;
5129 rc = VINF_SUCCESS;
5130 break;
5131 }
5132 }
5133 supdrvLdrUnlock(pDevExt);
5134 pReq->u.Out.pvSymbol = pvSymbol;
5135 return rc;
5136}
5137
5138
5139/**
5140 * Gets the address of a symbol in an open image or the support driver.
5141 *
5142 * @returns VINF_SUCCESS on success.
5143 * @returns VBox error status code on failure.
5144 * @param pDevExt Device globals.
5145 * @param pSession Session data.
5146 * @param pReq The request buffer.
5147 */
5148static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
5149{
5150 int rc = VINF_SUCCESS;
5151 const char *pszSymbol = pReq->u.In.pszSymbol;
5152 const char *pszModule = pReq->u.In.pszModule;
5153 size_t cbSymbol;
5154 char const *pszEnd;
5155 uint32_t i;
5156
5157 /*
5158 * Input validation.
5159 */
5160 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
5161 pszEnd = RTStrEnd(pszSymbol, 512);
5162 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
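    /* cbSymbol includes the terminator so the memcmp below only matches whole names. */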
5163 cbSymbol = pszEnd - pszSymbol + 1;
5164
5165 if (pszModule)
5166 {
5167 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
5168 pszEnd = RTStrEnd(pszModule, 64);
5169 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
5170 }
5171 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
5172
5173
5174 if ( !pszModule
5175 || !strcmp(pszModule, "SupDrv"))
5176 {
5177 /*
5178 * Search the support driver export table.
5179 */
5180 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
5181 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
5182 {
5183 pReq->u.Out.pfnSymbol = (PFNRT)g_aFunctions[i].pfn;
5184 break;
5185 }
5186 }
5187 else
5188 {
5189 /*
5190 * Find the loader image.
5191 */
5192 PSUPDRVLDRIMAGE pImage;
5193
5194 supdrvLdrLock(pDevExt);
5195
5196 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
5197 if (!strcmp(pImage->szName, pszModule))
5198 break;
5199 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
5200 {
5201 /*
5202 * Search the symbol strings.
5203 */
5204 const char *pchStrings = pImage->pachStrTab;
5205 PCSUPLDRSYM paSyms = pImage->paSymbols;
5206 for (i = 0; i < pImage->cSymbols; i++)
5207 {
5208 if ( paSyms[i].offName + cbSymbol <= pImage->cbStrTab
5209 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
5210 {
5211 /*
5212 * Found it! Calc the symbol address and add a reference to the module.
5213 */
5214 pReq->u.Out.pfnSymbol = (PFNRT)((uint8_t *)pImage->pvImage + (int32_t)paSyms[i].offSymbol);
5215 rc = supdrvLdrAddUsage(pSession, pImage);
5216 break;
5217 }
5218 }
5219 }
5220 else
5221 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
5222
5223 supdrvLdrUnlock(pDevExt);
5224 }
5225 return rc;
5226}
5227
5228
5229/**
5230 * Updates the VMMR0 entry point pointers.
5231 *
5232 * @returns IPRT status code.
5233 * @param pDevExt Device globals.
5234 * @param pvVMMR0 VMMR0 image handle.
5236 * @param pvVMMR0EntryFast VMMR0EntryFast address.
5237 * @param pvVMMR0EntryEx VMMR0EntryEx address.
5238 * @remark Caller must own the loader mutex.
5239 */
5240static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
5241{
5242 int rc = VINF_SUCCESS;
5243 LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryFast=%p\n", pvVMMR0, pvVMMR0EntryFast));
5244
5245
5246 /*
5247 * Check if not yet set.
5248 */
5249 if (!pDevExt->pvVMMR0)
5250 {
5251 pDevExt->pvVMMR0 = pvVMMR0;
5252 *(void **)&pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
5253 *(void **)&pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
5254 ASMCompilerBarrier(); /* the above isn't nice, so be careful... */
5255 }
5256 else
5257 {
5258 /*
5259 * Return failure or success depending on whether the values match or not.
5260 */
5261 if ( pDevExt->pvVMMR0 != pvVMMR0
5262 || (void *)pDevExt->pfnVMMR0EntryFast != pvVMMR0EntryFast
5263 || (void *)pDevExt->pfnVMMR0EntryEx != pvVMMR0EntryEx)
5264 {
5265 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
5266 rc = VERR_INVALID_PARAMETER;
5267 }
5268 }
5269 return rc;
5270}
5271
5272
5273/**
5274 * Unsets the VMMR0 entry point pointers installed by supdrvLdrSetVMMR0EPs.
5275 *
5276 * @param pDevExt Device globals.
5277 */
5278static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
5279{
5280 pDevExt->pvVMMR0 = NULL;
5281 pDevExt->pfnVMMR0EntryFast = NULL;
5282 pDevExt->pfnVMMR0EntryEx = NULL;
5283}
5284
5285
5286/**
5287 * Adds a usage reference in the specified session of an image.
5288 *
5289 * Called while owning the loader semaphore.
5290 *
5291 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
5292 * @param pSession Session in question.
5293 * @param pImage Image which the session is using.
5294 */
5295static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
5296{
5297 PSUPDRVLDRUSAGE pUsage;
5298 LogFlow(("supdrvLdrAddUsage: pImage=%p\n", pImage));
5299
5300 /*
5301 * Referenced it already?
5302 */
5303 pUsage = pSession->pLdrUsage;
5304 while (pUsage)
5305 {
5306 if (pUsage->pImage == pImage)
5307 {
5308 pUsage->cUsage++;
5309 return VINF_SUCCESS;
5310 }
5311 pUsage = pUsage->pNext;
5312 }
5313
5314 /*
5315 * Allocate new usage record.
5316 */
5317 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
5318 AssertReturn(pUsage, /*VERR_NO_MEMORY*/ VERR_INTERNAL_ERROR_5);
5319 pUsage->cUsage = 1;
5320 pUsage->pImage = pImage;
5321 pUsage->pNext = pSession->pLdrUsage;
5322 pSession->pLdrUsage = pUsage;
5323 return VINF_SUCCESS;
5324}
5325
5326
5327/**
5328 * Frees a load image.
5329 *
5330 * @param pDevExt Pointer to device extension.
5331 * @param pImage Pointer to the image we're gonna free.
5332 * This image must exist!
5333 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
5334 */
5335static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
5336{
5337 PSUPDRVLDRIMAGE pImagePrev;
5338 LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));
5339
5340 /*
5341 * Warn if we're releasing images while the image loader interface is
5342 * locked down -- we won't be able to reload them!
5343 */
5344 if (pDevExt->fLdrLockedDown)
5345 Log(("supdrvLdrFree: Warning: unloading '%s' image, while loader interface is locked down!\n", pImage->szName));
5346
5347 /* find it - arg. should've used doubly linked list. */
5348 Assert(pDevExt->pLdrImages);
5349 pImagePrev = NULL;
5350 if (pDevExt->pLdrImages != pImage)
5351 {
5352 pImagePrev = pDevExt->pLdrImages;
5353 while (pImagePrev->pNext != pImage)
5354 pImagePrev = pImagePrev->pNext;
5355 Assert(pImagePrev->pNext == pImage);
5356 }
5357
5358 /* unlink */
5359 if (pImagePrev)
5360 pImagePrev->pNext = pImage->pNext;
5361 else
5362 pDevExt->pLdrImages = pImage->pNext;
5363
5364 /* check if this is VMMR0.r0 and unset its entry point pointers. */
5365 if (pDevExt->pvVMMR0 == pImage->pvImage)
5366 supdrvLdrUnsetVMMR0EPs(pDevExt);
5367
5368 /* check for objects with destructors in this image. (Shouldn't happen.) */
5369 if (pDevExt->pObjs)
5370 {
5371 unsigned cObjs = 0;
5372 PSUPDRVOBJ pObj;
5373 RTSpinlockAcquire(pDevExt->Spinlock);
5374 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
5375 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
5376 {
5377 pObj->pfnDestructor = NULL;
5378 cObjs++;
5379 }
5380 RTSpinlockRelease(pDevExt->Spinlock);
5381 if (cObjs)
5382 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
5383 }
5384
5385 /* call termination function if fully loaded. */
5386 if ( pImage->pfnModuleTerm
5387 && pImage->uState == SUP_IOCTL_LDR_LOAD)
5388 {
5389 LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
5390 pImage->pfnModuleTerm(pImage);
5391 }
5392
5393 /* Inform the tracing component. */
5394 supdrvTracerModuleUnloading(pDevExt, pImage);
5395
5396 /* do native unload if appropriate. */
5397 if (pImage->fNative)
5398 supdrvOSLdrUnload(pDevExt, pImage);
5399
5400 /* free the image */
5401 pImage->cUsage = 0;
5402 pImage->pDevExt = NULL;
5403 pImage->pNext = NULL;
5404 pImage->uState = SUP_IOCTL_LDR_FREE;
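    /* Note: the +31 matches the alignment slack added when the image memory was allocated (see supdrvIOCtl_LdrOpen). */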
5405 RTMemExecFree(pImage->pvImageAlloc, pImage->cbImageBits + 31);
5406 pImage->pvImageAlloc = NULL;
5407 RTMemFree(pImage->pachStrTab);
5408 pImage->pachStrTab = NULL;
5409 RTMemFree(pImage->paSymbols);
5410 pImage->paSymbols = NULL;
5411 RTMemFree(pImage);
5412}
5413
5414
5415/**
5416 * Acquires the loader lock.
5417 *
5418 * @returns IPRT status code.
5419 * @param pDevExt The device extension.
5420 */
5421DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt)
5422{
5423#ifdef SUPDRV_USE_MUTEX_FOR_LDR
5424 int rc = RTSemMutexRequest(pDevExt->mtxLdr, RT_INDEFINITE_WAIT);
5425#else
5426 int rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
5427#endif
5428 AssertRC(rc);
5429 return rc;
5430}
5431
5432
5433/**
5434 * Releases the loader lock.
5435 *
5436 * @returns IPRT status code.
5437 * @param pDevExt The device extension.
5438 */
5439DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt)
5440{
5441#ifdef SUPDRV_USE_MUTEX_FOR_LDR
5442 return RTSemMutexRelease(pDevExt->mtxLdr);
5443#else
5444 return RTSemFastMutexRelease(pDevExt->mtxLdr);
5445#endif
5446}
5447
5448
5449/**
5450 * Implements the service call request.
5451 *
5452 * @returns VBox status code.
5453 * @param pDevExt The device extension.
5454 * @param pSession The calling session.
5455 * @param pReq The request packet, valid.
5456 */
5457static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
5458{
5459#if !defined(RT_OS_WINDOWS) || defined(RT_ARCH_AMD64) || defined(DEBUG)
5460 int rc;
5461
5462 /*
5463 * Find the module among the modules referenced by the calling session.
5464 */
5465 rc = supdrvLdrLock(pDevExt);
5466 if (RT_SUCCESS(rc))
5467 {
5468 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
5469 PSUPDRVLDRUSAGE pUsage;
5470
5471 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
5472 if ( pUsage->pImage->pfnServiceReqHandler
5473 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
5474 {
5475 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
5476 break;
5477 }
5478 supdrvLdrUnlock(pDevExt);
5479
5480 if (pfnServiceReqHandler)
5481 {
5482 /*
5483 * Call it.
5484 */
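            /* An input size of SUP_IOCTL_CALL_SERVICE_SIZE(0) means no request packet was passed along. */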
5485 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
5486 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
5487 else
5488 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
5489 }
5490 else
5491 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
5492 }
5493
5494 /* log it */
5495 if ( RT_FAILURE(rc)
5496 && rc != VERR_INTERRUPTED
5497 && rc != VERR_TIMEOUT)
5498 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
5499 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
5500 else
5501 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
5502 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
5503 return rc;
5504#else /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
5505 return VERR_NOT_IMPLEMENTED;
5506#endif /* RT_OS_WINDOWS && !RT_ARCH_AMD64 && !DEBUG */
5507}
5508
5509
5510/**
5511 * Implements the logger settings request.
5512 *
5513 * @returns VBox status code.
5514 * @param pDevExt The device extension.
5515 * @param pSession The caller's session.
5516 * @param pReq The request.
5517 */
5518static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq)
5519{
5520 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
5521 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
5522 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
5523 PRTLOGGER pLogger = NULL;
5524 int rc;
5525
5526 /*
5527 * Some further validation.
5528 */
5529 switch (pReq->u.In.fWhat)
5530 {
5531 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
5532 case SUPLOGGERSETTINGS_WHAT_CREATE:
5533 break;
5534
5535 case SUPLOGGERSETTINGS_WHAT_DESTROY:
5536 if (*pszGroup || *pszFlags || *pszDest)
5537 return VERR_INVALID_PARAMETER;
5538 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
5539 return VERR_ACCESS_DENIED;
5540 break;
5541
5542 default:
5543 return VERR_INTERNAL_ERROR;
5544 }
5545
5546 /*
5547 * Get the logger.
5548 */
5549 switch (pReq->u.In.fWhich)
5550 {
5551 case SUPLOGGERSETTINGS_WHICH_DEBUG:
5552 pLogger = RTLogGetDefaultInstance();
5553 break;
5554
5555 case SUPLOGGERSETTINGS_WHICH_RELEASE:
5556 pLogger = RTLogRelGetDefaultInstance();
5557 break;
5558
5559 default:
5560 return VERR_INTERNAL_ERROR;
5561 }
5562
5563 /*
5564 * Do the job.
5565 */
5566 switch (pReq->u.In.fWhat)
5567 {
5568 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
5569 if (pLogger)
5570 {
5571 rc = RTLogFlags(pLogger, pszFlags);
5572 if (RT_SUCCESS(rc))
5573 rc = RTLogGroupSettings(pLogger, pszGroup);
5574 NOREF(pszDest);
5575 }
5576 else
5577 rc = VERR_NOT_FOUND;
5578 break;
5579
5580 case SUPLOGGERSETTINGS_WHAT_CREATE:
5581 {
5582 if (pLogger)
5583 rc = VERR_ALREADY_EXISTS;
5584 else
5585 {
5586 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
5587
5588 rc = RTLogCreate(&pLogger,
5589 0 /* fFlags */,
5590 pszGroup,
5591 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
5592 ? "VBOX_LOG"
5593 : "VBOX_RELEASE_LOG",
5594 RT_ELEMENTS(s_apszGroups),
5595 s_apszGroups,
5596 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
5597 NULL);
5598 if (RT_SUCCESS(rc))
5599 {
5600 rc = RTLogFlags(pLogger, pszFlags);
5601 NOREF(pszDest);
5602 if (RT_SUCCESS(rc))
5603 {
5604 switch (pReq->u.In.fWhich)
5605 {
5606 case SUPLOGGERSETTINGS_WHICH_DEBUG:
5607 pLogger = RTLogSetDefaultInstance(pLogger);
5608 break;
5609 case SUPLOGGERSETTINGS_WHICH_RELEASE:
5610 pLogger = RTLogRelSetDefaultInstance(pLogger);
5611 break;
5612 }
5613 }
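                /* Destroys the previous default instance returned by the set call, or the newly created logger if applying the flags failed. */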
5614 RTLogDestroy(pLogger);
5615 }
5616 }
5617 break;
5618 }
5619
5620 case SUPLOGGERSETTINGS_WHAT_DESTROY:
5621 switch (pReq->u.In.fWhich)
5622 {
5623 case SUPLOGGERSETTINGS_WHICH_DEBUG:
5624 pLogger = RTLogSetDefaultInstance(NULL);
5625 break;
5626 case SUPLOGGERSETTINGS_WHICH_RELEASE:
5627 pLogger = RTLogRelSetDefaultInstance(NULL);
5628 break;
5629 }
5630 rc = RTLogDestroy(pLogger);
5631 break;
5632
5633 default:
5634 {
5635 rc = VERR_INTERNAL_ERROR;
5636 break;
5637 }
5638 }
5639
5640 return rc;
5641}
5642
5643
5644/**
5645 * Implements the MSR prober operations.
5646 *
5647 * @returns VBox status code.
5648 * @param pDevExt The device extension.
5649 * @param pReq The request.
5650 */
5651static int supdrvIOCtl_MsrProber(PSUPDRVDEVEXT pDevExt, PSUPMSRPROBER pReq)
5652{
5653#ifdef SUPDRV_WITH_MSR_PROBER
5654 RTCPUID const idCpu = pReq->u.In.idCpu == UINT32_MAX ? NIL_RTCPUID : pReq->u.In.idCpu;
5655 int rc;
5656
5657 switch (pReq->u.In.enmOp)
5658 {
5659 case SUPMSRPROBEROP_READ:
5660 {
5661 uint64_t uValue;
5662 rc = supdrvOSMsrProberRead(pReq->u.In.uMsr, idCpu, &uValue);
5663 if (RT_SUCCESS(rc))
5664 {
5665 pReq->u.Out.uResults.Read.uValue = uValue;
5666 pReq->u.Out.uResults.Read.fGp = false;
5667 }
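            /* VERR_ACCESS_DENIED means the read trapped (#GP); report it via fGp instead of failing the request. */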
5668 else if (rc == VERR_ACCESS_DENIED)
5669 {
5670 pReq->u.Out.uResults.Read.uValue = 0;
5671 pReq->u.Out.uResults.Read.fGp = true;
5672 rc = VINF_SUCCESS;
5673 }
5674 break;
5675 }
5676
5677 case SUPMSRPROBEROP_WRITE:
5678 rc = supdrvOSMsrProberWrite(pReq->u.In.uMsr, idCpu, pReq->u.In.uArgs.Write.uToWrite);
5679 if (RT_SUCCESS(rc))
5680 pReq->u.Out.uResults.Write.fGp = false;
5681 else if (rc == VERR_ACCESS_DENIED)
5682 {
5683 pReq->u.Out.uResults.Write.fGp = true;
5684 rc = VINF_SUCCESS;
5685 }
5686 break;
5687
5688 case SUPMSRPROBEROP_MODIFY:
5689 case SUPMSRPROBEROP_MODIFY_FASTER:
5690 rc = supdrvOSMsrProberModify(idCpu, pReq);
5691 break;
5692
5693 default:
5694 return VERR_INVALID_FUNCTION;
5695 }
5696 return rc;
5697#else
5698 return VERR_NOT_IMPLEMENTED;
5699#endif
5700}
5701
5702
5703/**
5704 * Resume built-in keyboard on MacBook Air and Pro hosts.
5705 * If there is no built-in keyboard device, return success anyway.
5706 *
5707 * @returns 0 on Mac OS X hosts, VERR_NOT_IMPLEMENTED on all other platforms.
5708 */
5709static int supdrvIOCtl_ResumeSuspendedKbds(void)
5710{
5711#if defined(RT_OS_DARWIN)
5712 return supdrvDarwinResumeSuspendedKbds();
5713#else
5714 return VERR_NOT_IMPLEMENTED;
5715#endif
5716}
5717