VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.c@25360

Last change on this file since 25360 was 25336, checked in by vboxsync, 15 years ago

HostDrivers,SrvIntNetR0,iprt/ntwrap.mac: Removed all the RT_WITH_W64_UNWIND_HACK fun.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 180.3 KB
1/* $Revision: 25336 $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#define LOG_GROUP LOG_GROUP_SUP_DRV
35#include "SUPDrvInternal.h"
36#ifndef PAGE_SHIFT
37# include <iprt/param.h>
38#endif
39#include <iprt/alloc.h>
40#include <iprt/cpuset.h>
41#include <iprt/handletable.h>
42#include <iprt/mp.h>
43#include <iprt/power.h>
44#include <iprt/process.h>
45#include <iprt/semaphore.h>
46#include <iprt/spinlock.h>
47#include <iprt/thread.h>
48#include <iprt/uuid.h>
49#include <VBox/param.h>
50#include <VBox/log.h>
51#include <VBox/err.h>
52#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
53# include <iprt/crc32.h>
54# include <iprt/net.h>
55# include <iprt/string.h>
56# include <iprt/rand.h>
57# include <iprt/path.h>
58#endif
59
60/*
61 * Logging assignments:
62 * Log - useful stuff, like failures.
63 * LogFlow - program flow, except the really noisy bits.
64 * Log2 - Cleanup.
65 * Log3 - Loader flow noise.
66 * Log4 - Call VMMR0 flow noise.
67 * Log5 - Native yet-to-be-defined noise.
68 * Log6 - Native ioctl flow noise.
69 *
70 * Logging requires BUILD_TYPE=debug and possibly changes to the logger
71 * instantiation in log-vbox.c(pp).
72 */
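/*
 * Informal sketch, assuming the usual VBox build setup: build the driver with
 * BUILD_TYPE=debug and then push group/flag/destination strings to the ring-0
 * loggers, e.g. through the SUP_IOCTL_LOGGER_SETTINGS request handled further
 * down in this file; the precise defaults live in log-vbox.c(pp).
 */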
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78/** The frequency by which we recalculate the u32UpdateHz and
79 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
80#define GIP_UPDATEHZ_RECALC_FREQ 0x800
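/* Worked example with a purely hypothetical update rate: at 1000 GIP updates
 * per second, 0x800 (2048) updates means the recalculation happens roughly
 * every 2 seconds; at 100 updates per second, roughly every 20 seconds. */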
81
82/** @def VBOX_SVN_REV
83 * The makefile should define this if it can. */
84#ifndef VBOX_SVN_REV
85# define VBOX_SVN_REV 0
86#endif
87
88
89/*******************************************************************************
90* Internal Functions *
91*******************************************************************************/
92static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
93static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
94static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
95static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
96static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
97static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
98static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
99static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
100static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
101static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
102static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
103static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
104static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
105DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt);
106DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt);
107static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
108static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq);
109static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
110static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
111static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
112static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
113static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
114
115
116/*******************************************************************************
117* Global Variables *
118*******************************************************************************/
119DECLEXPORT(PSUPGLOBALINFOPAGE) g_pSUPGlobalInfoPage = NULL;
120
121/**
122 * Array of the R0 SUP API.
123 */
124static SUPFUNC g_aFunctions[] =
125{
126 /* name function */
127 /* Entries with absolute addresses determined at runtime, fixup
128 code makes ugly ASSUMPTIONS about the order here: */
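 /* (Concretely: supdrvInitDevExt() below patches g_aFunctions[0]..[9].pfn
 with runtime values such as the kernel segment selectors, so these ten
 entries must stay first and in this exact order.) */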
129 { "SUPR0AbsIs64bit", (void *)0 },
130 { "SUPR0Abs64bitKernelCS", (void *)0 },
131 { "SUPR0Abs64bitKernelSS", (void *)0 },
132 { "SUPR0Abs64bitKernelDS", (void *)0 },
133 { "SUPR0AbsKernelCS", (void *)0 },
134 { "SUPR0AbsKernelSS", (void *)0 },
135 { "SUPR0AbsKernelDS", (void *)0 },
136 { "SUPR0AbsKernelES", (void *)0 },
137 { "SUPR0AbsKernelFS", (void *)0 },
138 { "SUPR0AbsKernelGS", (void *)0 },
139 /* Normal function pointers: */
140 { "SUPR0ComponentRegisterFactory", (void *)SUPR0ComponentRegisterFactory },
141 { "SUPR0ComponentDeregisterFactory", (void *)SUPR0ComponentDeregisterFactory },
142 { "SUPR0ComponentQueryFactory", (void *)SUPR0ComponentQueryFactory },
143 { "SUPR0ObjRegister", (void *)SUPR0ObjRegister },
144 { "SUPR0ObjAddRef", (void *)SUPR0ObjAddRef },
145 { "SUPR0ObjAddRefEx", (void *)SUPR0ObjAddRefEx },
146 { "SUPR0ObjRelease", (void *)SUPR0ObjRelease },
147 { "SUPR0ObjVerifyAccess", (void *)SUPR0ObjVerifyAccess },
148 { "SUPR0LockMem", (void *)SUPR0LockMem },
149 { "SUPR0UnlockMem", (void *)SUPR0UnlockMem },
150 { "SUPR0ContAlloc", (void *)SUPR0ContAlloc },
151 { "SUPR0ContFree", (void *)SUPR0ContFree },
152 { "SUPR0LowAlloc", (void *)SUPR0LowAlloc },
153 { "SUPR0LowFree", (void *)SUPR0LowFree },
154 { "SUPR0MemAlloc", (void *)SUPR0MemAlloc },
155 { "SUPR0MemGetPhys", (void *)SUPR0MemGetPhys },
156 { "SUPR0MemFree", (void *)SUPR0MemFree },
157 { "SUPR0PageAllocEx", (void *)SUPR0PageAllocEx },
158 { "SUPR0PageFree", (void *)SUPR0PageFree },
159 { "SUPR0Printf", (void *)SUPR0Printf }, /** @todo needs wrapping? */
160 { "SUPSemEventCreate", (void *)SUPSemEventCreate },
161 { "SUPSemEventClose", (void *)SUPSemEventClose },
162 { "SUPSemEventSignal", (void *)SUPSemEventSignal },
163 { "SUPSemEventWait", (void *)SUPSemEventWait },
164 { "SUPSemEventWaitNoResume", (void *)SUPSemEventWaitNoResume },
165 { "SUPSemEventMultiCreate", (void *)SUPSemEventMultiCreate },
166 { "SUPSemEventMultiClose", (void *)SUPSemEventMultiClose },
167 { "SUPSemEventMultiSignal", (void *)SUPSemEventMultiSignal },
168 { "SUPSemEventMultiReset", (void *)SUPSemEventMultiReset },
169 { "SUPSemEventMultiWait", (void *)SUPSemEventMultiWait },
170 { "SUPSemEventMultiWaitNoResume", (void *)SUPSemEventMultiWaitNoResume },
171 { "SUPR0GetPagingMode", (void *)SUPR0GetPagingMode },
172 { "SUPR0EnableVTx", (void *)SUPR0EnableVTx },
173 { "SUPGetGIP", (void *)SUPGetGIP },
174 { "g_pSUPGlobalInfoPage", (void *)&g_pSUPGlobalInfoPage },
175 { "RTMemAlloc", (void *)RTMemAlloc },
176 { "RTMemAllocZ", (void *)RTMemAllocZ },
177 { "RTMemFree", (void *)RTMemFree },
178 /*{ "RTMemDup", (void *)RTMemDup },
179 { "RTMemDupEx", (void *)RTMemDupEx },*/
180 { "RTMemRealloc", (void *)RTMemRealloc },
181 { "RTR0MemObjAllocLow", (void *)RTR0MemObjAllocLow },
182 { "RTR0MemObjAllocPage", (void *)RTR0MemObjAllocPage },
183 { "RTR0MemObjAllocPhys", (void *)RTR0MemObjAllocPhys },
184 { "RTR0MemObjAllocPhysNC", (void *)RTR0MemObjAllocPhysNC },
185 { "RTR0MemObjAllocCont", (void *)RTR0MemObjAllocCont },
186 { "RTR0MemObjEnterPhys", (void *)RTR0MemObjEnterPhys },
187 { "RTR0MemObjLockUser", (void *)RTR0MemObjLockUser },
188 { "RTR0MemObjMapKernel", (void *)RTR0MemObjMapKernel },
189 { "RTR0MemObjMapKernelEx", (void *)RTR0MemObjMapKernelEx },
190 { "RTR0MemObjMapUser", (void *)RTR0MemObjMapUser },
191 { "RTR0MemObjProtect", (void *)RTR0MemObjProtect },
192 { "RTR0MemObjAddress", (void *)RTR0MemObjAddress },
193 { "RTR0MemObjAddressR3", (void *)RTR0MemObjAddressR3 },
194 { "RTR0MemObjSize", (void *)RTR0MemObjSize },
195 { "RTR0MemObjIsMapping", (void *)RTR0MemObjIsMapping },
196 { "RTR0MemObjGetPagePhysAddr", (void *)RTR0MemObjGetPagePhysAddr },
197 { "RTR0MemObjFree", (void *)RTR0MemObjFree },
198 { "RTR0MemUserCopyFrom", (void *)RTR0MemUserCopyFrom },
199 { "RTR0MemUserCopyTo", (void *)RTR0MemUserCopyTo },
200 { "RTR0MemUserIsValidAddr", (void *)RTR0MemUserIsValidAddr },
201 { "RTR0MemKernelIsValidAddr", (void *)RTR0MemKernelIsValidAddr },
202 { "RTR0MemAreKrnlAndUsrDifferent", (void *)RTR0MemAreKrnlAndUsrDifferent },
203/* These don't work yet on linux - use fast mutexes!
204 { "RTSemMutexCreate", (void *)RTSemMutexCreate },
205 { "RTSemMutexRequest", (void *)RTSemMutexRequest },
206 { "RTSemMutexRelease", (void *)RTSemMutexRelease },
207 { "RTSemMutexDestroy", (void *)RTSemMutexDestroy },
208*/
209 { "RTProcSelf", (void *)RTProcSelf },
210 { "RTR0ProcHandleSelf", (void *)RTR0ProcHandleSelf },
211 { "RTSemFastMutexCreate", (void *)RTSemFastMutexCreate },
212 { "RTSemFastMutexDestroy", (void *)RTSemFastMutexDestroy },
213 { "RTSemFastMutexRequest", (void *)RTSemFastMutexRequest },
214 { "RTSemFastMutexRelease", (void *)RTSemFastMutexRelease },
215 { "RTSemEventCreate", (void *)RTSemEventCreate },
216 { "RTSemEventSignal", (void *)RTSemEventSignal },
217 { "RTSemEventWait", (void *)RTSemEventWait },
218 { "RTSemEventWaitNoResume", (void *)RTSemEventWaitNoResume },
219 { "RTSemEventDestroy", (void *)RTSemEventDestroy },
220 { "RTSemEventMultiCreate", (void *)RTSemEventMultiCreate },
221 { "RTSemEventMultiSignal", (void *)RTSemEventMultiSignal },
222 { "RTSemEventMultiReset", (void *)RTSemEventMultiReset },
223 { "RTSemEventMultiWait", (void *)RTSemEventMultiWait },
224 { "RTSemEventMultiWaitNoResume", (void *)RTSemEventMultiWaitNoResume },
225 { "RTSemEventMultiDestroy", (void *)RTSemEventMultiDestroy },
226 { "RTSpinlockCreate", (void *)RTSpinlockCreate },
227 { "RTSpinlockDestroy", (void *)RTSpinlockDestroy },
228 { "RTSpinlockAcquire", (void *)RTSpinlockAcquire },
229 { "RTSpinlockRelease", (void *)RTSpinlockRelease },
230 { "RTSpinlockAcquireNoInts", (void *)RTSpinlockAcquireNoInts },
231 { "RTSpinlockReleaseNoInts", (void *)RTSpinlockReleaseNoInts },
232 { "RTTimeNanoTS", (void *)RTTimeNanoTS },
233 { "RTTimeMilliTS", (void *)RTTimeMilliTS },
234 { "RTTimeSystemNanoTS", (void *)RTTimeSystemNanoTS },
235 { "RTTimeSystemMilliTS", (void *)RTTimeSystemMilliTS },
236 { "RTThreadNativeSelf", (void *)RTThreadNativeSelf },
237 { "RTThreadSleep", (void *)RTThreadSleep },
238 { "RTThreadYield", (void *)RTThreadYield },
239#if 0 /* Thread APIs, Part 2. */
240 { "RTThreadSelf", (void *)RTThreadSelf },
241 { "RTThreadCreate", (void *)RTThreadCreate }, /** @todo need to wrap the callback */
242 { "RTThreadGetNative", (void *)RTThreadGetNative },
243 { "RTThreadWait", (void *)RTThreadWait },
244 { "RTThreadWaitNoResume", (void *)RTThreadWaitNoResume },
245 { "RTThreadGetName", (void *)RTThreadGetName },
246 { "RTThreadSelfName", (void *)RTThreadSelfName },
247 { "RTThreadGetType", (void *)RTThreadGetType },
248 { "RTThreadUserSignal", (void *)RTThreadUserSignal },
249 { "RTThreadUserReset", (void *)RTThreadUserReset },
250 { "RTThreadUserWait", (void *)RTThreadUserWait },
251 { "RTThreadUserWaitNoResume", (void *)RTThreadUserWaitNoResume },
252#endif
253 { "RTThreadPreemptIsEnabled", (void *)RTThreadPreemptIsEnabled },
254 { "RTThreadPreemptIsPending", (void *)RTThreadPreemptIsPending },
255 { "RTThreadPreemptIsPendingTrusty", (void *)RTThreadPreemptIsPendingTrusty },
256 { "RTThreadPreemptIsPossible", (void *)RTThreadPreemptIsPossible },
257 { "RTThreadPreemptDisable", (void *)RTThreadPreemptDisable },
258 { "RTThreadPreemptRestore", (void *)RTThreadPreemptRestore },
259 { "RTThreadIsInInterrupt", (void *)RTThreadIsInInterrupt },
260
261 { "RTLogDefaultInstance", (void *)RTLogDefaultInstance },
262 { "RTMpCpuId", (void *)RTMpCpuId },
263 { "RTMpCpuIdFromSetIndex", (void *)RTMpCpuIdFromSetIndex },
264 { "RTMpCpuIdToSetIndex", (void *)RTMpCpuIdToSetIndex },
265 { "RTMpIsCpuPossible", (void *)RTMpIsCpuPossible },
266 { "RTMpGetCount", (void *)RTMpGetCount },
267 { "RTMpGetMaxCpuId", (void *)RTMpGetMaxCpuId },
268 { "RTMpGetOnlineCount", (void *)RTMpGetOnlineCount },
269 { "RTMpGetOnlineSet", (void *)RTMpGetOnlineSet },
270 { "RTMpGetSet", (void *)RTMpGetSet },
271 { "RTMpIsCpuOnline", (void *)RTMpIsCpuOnline },
272 { "RTMpIsCpuWorkPending", (void *)RTMpIsCpuWorkPending },
273 { "RTMpOnAll", (void *)RTMpOnAll },
274 { "RTMpOnOthers", (void *)RTMpOnOthers },
275 { "RTMpOnSpecific", (void *)RTMpOnSpecific },
276 { "RTMpPokeCpu", (void *)RTMpPokeCpu },
277 { "RTPowerNotificationRegister", (void *)RTPowerNotificationRegister },
278 { "RTPowerNotificationDeregister", (void *)RTPowerNotificationDeregister },
279 { "RTLogRelDefaultInstance", (void *)RTLogRelDefaultInstance },
280 { "RTLogSetDefaultInstanceThread", (void *)RTLogSetDefaultInstanceThread },
281 { "RTLogLogger", (void *)RTLogLogger }, /** @todo remove this */
282 { "RTLogLoggerEx", (void *)RTLogLoggerEx }, /** @todo remove this */
283 { "RTLogLoggerExV", (void *)RTLogLoggerExV },
284 { "RTLogPrintf", (void *)RTLogPrintf }, /** @todo remove this */
285 { "RTLogPrintfV", (void *)RTLogPrintfV },
286 { "AssertMsg1", (void *)AssertMsg1 },
287 { "AssertMsg2", (void *)AssertMsg2 }, /** @todo replace this by RTAssertMsg2V */
288#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
289 { "RTR0AssertPanicSystem", (void *)RTR0AssertPanicSystem },
290#endif
291#if defined(RT_OS_DARWIN)
292 { "RTAssertMsg1", (void *)RTAssertMsg1 },
293 { "RTAssertMsg2", (void *)RTAssertMsg2 },
294 { "RTAssertMsg2V", (void *)RTAssertMsg2V },
295#endif
296};
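/* (Informal note: SUP_IOCTL_QUERY_FUNCS below copies g_aFunctions out to
 * ring-3 verbatim and SUP_IOCTL_COOKIE reports its element count as
 * cFunctions; ring-3 presumably resolves VMMR0 imports against this table.) */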
297
298#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
299/**
300 * Drag in the rest of IPRT since we share it with the
301 * rest of the kernel modules on darwin.
302 */
303PFNRT g_apfnVBoxDrvIPRTDeps[] =
304{
305 /* VBoxNetFlt */
306 (PFNRT)RTCrc32,
307 (PFNRT)RTErrConvertFromErrno,
308 (PFNRT)RTNetIPv4IsHdrValid,
309 (PFNRT)RTNetIPv4TCPChecksum,
310 (PFNRT)RTNetIPv4UDPChecksum,
311 (PFNRT)RTUuidCompare,
312 (PFNRT)RTUuidCompareStr,
313 (PFNRT)RTUuidFromStr,
314 (PFNRT)RTStrDup,
315 (PFNRT)RTStrFree,
316 /* VBoxNetAdp */
317 (PFNRT)RTRandBytes,
318 /* VBoxUSB */
319 (PFNRT)RTPathStripFilename,
320 NULL
321};
322#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
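/* (Informal note: nothing ever calls through g_apfnVBoxDrvIPRTDeps; the table
 * presumably just forces the listed IPRT symbols to be linked into this module
 * so that VBoxNetFlt, VBoxNetAdp and VBoxUSB can resolve them on the hosts
 * covered by the #if above.) */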
323
324
325/**
326 * Initializes the device extension structure.
327 *
328 * @returns IPRT status code.
329 * @param pDevExt The device extension to initialize.
330 */
331int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
332{
333 int rc;
334
335#ifdef SUPDRV_WITH_RELEASE_LOGGER
336 /*
337 * Create the release log.
338 */
339 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
340 PRTLOGGER pRelLogger;
341 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
342 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
343 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
344 if (RT_SUCCESS(rc))
345 RTLogRelSetDefaultInstance(pRelLogger);
346 /** @todo Add native hook for getting logger config parameters and setting
347 * them. On linux we should use the module parameter stuff... */
348#endif
349
350 /*
351 * Initialize it.
352 */
353 memset(pDevExt, 0, sizeof(*pDevExt));
354 rc = RTSpinlockCreate(&pDevExt->Spinlock);
355 if (!rc)
356 {
357#ifdef SUPDRV_USE_MUTEX_FOR_LDR
358 rc = RTSemMutexCreate(&pDevExt->mtxLdr);
359#else
360 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
361#endif
362 if (!rc)
363 {
364 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
365 if (!rc)
366 {
367 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
368 if (!rc)
369 {
370 rc = supdrvGipCreate(pDevExt);
371 if (RT_SUCCESS(rc))
372 {
373 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
374
375 /*
376 * Fixup the absolute symbols.
377 *
378 * Because of the table indexing assumptions we'll have a little #ifdef orgy
379 * here rather than distributing this to OS specific files. At least for now.
380 */
381#ifdef RT_OS_DARWIN
382# if ARCH_BITS == 32
383 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
384 {
385 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
386 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
387 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
388 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
389 }
390 else
391 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
392 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
393 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
394 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
395 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
396 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
397 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
398# else /* 64-bit darwin: */
399 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
400 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
401 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
402 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
403 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
404 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
405 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
406 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
407 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
408 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
409
410# endif
411#else /* !RT_OS_DARWIN */
412# if ARCH_BITS == 64
413 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
414 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
415 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
416 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
417# else
418 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
419# endif
420 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
421 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
422 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
423 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
424 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
425 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
426#endif /* !RT_OS_DARWIN */
427 return VINF_SUCCESS;
428 }
429
430 RTSemFastMutexDestroy(pDevExt->mtxGip);
431 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
432 }
433 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
434 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
435 }
436#ifdef SUPDRV_USE_MUTEX_FOR_LDR
437 RTSemMutexDestroy(pDevExt->mtxLdr);
438 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
439#else
440 RTSemFastMutexDestroy(pDevExt->mtxLdr);
441 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
442#endif
443 }
444 RTSpinlockDestroy(pDevExt->Spinlock);
445 pDevExt->Spinlock = NIL_RTSPINLOCK;
446 }
447#ifdef SUPDRV_WITH_RELEASE_LOGGER
448 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
449 RTLogDestroy(RTLogSetDefaultInstance(NULL));
450#endif
451
452 return rc;
453}
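/*
 * Informal usage sketch (the real call sites are in the OS specific glue,
 * e.g. SUPDrv-linux.c, not here):
 *
 *     rc = supdrvInitDevExt(&g_DevExt);
 *     if (RT_SUCCESS(rc))
 *     {
 *         ... driver is operational ...
 *         supdrvDeleteDevExt(&g_DevExt);
 *     }
 */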
454
455
456/**
457 * Delete the device extension (i.e. clean up its members).
458 *
459 * @param pDevExt The device extension to delete.
460 */
461void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
462{
463 PSUPDRVOBJ pObj;
464 PSUPDRVUSAGE pUsage;
465
466 /*
467 * Kill mutexes and spinlocks.
468 */
469 RTSemFastMutexDestroy(pDevExt->mtxGip);
470 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
471#ifdef SUPDRV_USE_MUTEX_FOR_LDR
472 RTSemMutexDestroy(pDevExt->mtxLdr);
473 pDevExt->mtxLdr = NIL_RTSEMMUTEX;
474#else
475 RTSemFastMutexDestroy(pDevExt->mtxLdr);
476 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
477#endif
478 RTSpinlockDestroy(pDevExt->Spinlock);
479 pDevExt->Spinlock = NIL_RTSPINLOCK;
480 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
481 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
482
483 /*
484 * Free lists.
485 */
486 /* objects. */
487 pObj = pDevExt->pObjs;
488#if !defined(DEBUG_bird) || !defined(RT_OS_LINUX) /* breaks unloading, temporary, remove me! */
489 Assert(!pObj); /* (can trigger on forced unloads) */
490#endif
491 pDevExt->pObjs = NULL;
492 while (pObj)
493 {
494 void *pvFree = pObj;
495 pObj = pObj->pNext;
496 RTMemFree(pvFree);
497 }
498
499 /* usage records. */
500 pUsage = pDevExt->pUsageFree;
501 pDevExt->pUsageFree = NULL;
502 while (pUsage)
503 {
504 void *pvFree = pUsage;
505 pUsage = pUsage->pNext;
506 RTMemFree(pvFree);
507 }
508
509 /* kill the GIP. */
510 supdrvGipDestroy(pDevExt);
511
512#ifdef SUPDRV_WITH_RELEASE_LOGGER
513 /* destroy the loggers. */
514 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
515 RTLogDestroy(RTLogSetDefaultInstance(NULL));
516#endif
517}
518
519
520/**
521 * Create session.
522 *
523 * @returns IPRT status code.
524 * @param pDevExt Device extension.
525 * @param fUser Flag indicating whether this is a user or kernel session.
526 * @param ppSession Where to store the pointer to the session data.
527 */
528int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, PSUPDRVSESSION *ppSession)
529{
530 /*
531 * Allocate memory for the session data.
532 */
533 int rc = VERR_NO_MEMORY;
534 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
535 if (pSession)
536 {
537 /* Initialize session data. */
538 rc = RTSpinlockCreate(&pSession->Spinlock);
539 if (!rc)
540 {
541 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
542 RTHANDLETABLE_FLAGS_LOCKED | RTHANDLETABLE_FLAGS_CONTEXT,
543 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
544 if (RT_SUCCESS(rc))
545 {
546 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
547 pSession->pDevExt = pDevExt;
548 pSession->u32Cookie = BIRD_INV;
549 /*pSession->pLdrUsage = NULL;
550 pSession->pVM = NULL;
551 pSession->pUsage = NULL;
552 pSession->pGip = NULL;
553 pSession->fGipReferenced = false;
554 pSession->Bundle.cUsed = 0; */
555 pSession->Uid = NIL_RTUID;
556 pSession->Gid = NIL_RTGID;
557 if (fUser)
558 {
559 pSession->Process = RTProcSelf();
560 pSession->R0Process = RTR0ProcHandleSelf();
561 }
562 else
563 {
564 pSession->Process = NIL_RTPROCESS;
565 pSession->R0Process = NIL_RTR0PROCESS;
566 }
567
568 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
569 return VINF_SUCCESS;
570 }
571
572 RTSpinlockDestroy(pSession->Spinlock);
573 }
574 RTMemFree(pSession);
575 *ppSession = NULL;
576 Log(("Failed to create spinlock, rc=%d!\n", rc));
577 }
578
579 return rc;
580}
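/* (Informal lifecycle note: the OS specific code is expected to call
 * supdrvCreateSession() when a client opens the device and
 * supdrvCloseSession() when it closes it; on Mac OS X the cleanup half,
 * supdrvCleanupSession(), is additionally called before the file handle
 * goes away, as its doc comment below explains.) */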
581
582
583/**
584 * Shared code for cleaning up a session.
585 *
586 * @param pDevExt Device extension.
587 * @param pSession Session data.
588 * This data will be freed by this routine.
589 */
590void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
591{
592 /*
593 * Cleanup the session first.
594 */
595 supdrvCleanupSession(pDevExt, pSession);
596
597 /*
598 * Free the rest of the session stuff.
599 */
600 RTSpinlockDestroy(pSession->Spinlock);
601 pSession->Spinlock = NIL_RTSPINLOCK;
602 pSession->pDevExt = NULL;
603 RTMemFree(pSession);
604 LogFlow(("supdrvCloseSession: returns\n"));
605}
606
607
608/**
609 * Shared code for cleaning up a session (but not quite freeing it).
610 *
611 * This is primarily intended for Mac OS X, where we have to clean up the memory
612 * allocations before the file handle is closed.
613 *
614 * @param pDevExt Device extension.
615 * @param pSession Session data.
616 * This data will be freed by this routine.
617 */
618void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
619{
620 int rc;
621 PSUPDRVBUNDLE pBundle;
622 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
623
624 /*
625 * Remove logger instances related to this session.
626 */
627 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
628
629 /*
630 * Destroy the handle table.
631 */
632 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
633 AssertRC(rc);
634 pSession->hHandleTable = NIL_RTHANDLETABLE;
635
636 /*
637 * Release object references made in this session.
638 * In theory there should be no one racing us in this session.
639 */
640 Log2(("release objects - start\n"));
641 if (pSession->pUsage)
642 {
643 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
644 PSUPDRVUSAGE pUsage;
645 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
646
647 while ((pUsage = pSession->pUsage) != NULL)
648 {
649 PSUPDRVOBJ pObj = pUsage->pObj;
650 pSession->pUsage = pUsage->pNext;
651
652 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
653 if (pUsage->cUsage < pObj->cUsage)
654 {
655 pObj->cUsage -= pUsage->cUsage;
656 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
657 }
658 else
659 {
660 /* Destroy the object and free the record. */
661 if (pDevExt->pObjs == pObj)
662 pDevExt->pObjs = pObj->pNext;
663 else
664 {
665 PSUPDRVOBJ pObjPrev;
666 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
667 if (pObjPrev->pNext == pObj)
668 {
669 pObjPrev->pNext = pObj->pNext;
670 break;
671 }
672 Assert(pObjPrev);
673 }
674 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
675
676 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
677 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
678 if (pObj->pfnDestructor)
679 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
680 RTMemFree(pObj);
681 }
682
683 /* free it and continue. */
684 RTMemFree(pUsage);
685
686 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
687 }
688
689 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
690 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
691 }
692 Log2(("release objects - done\n"));
693
694 /*
695 * Release memory allocated in the session.
696 *
697 * We do not serialize this as we assume that the application will
698 * not allocate memory while closing the file handle object.
699 */
700 Log2(("freeing memory:\n"));
701 pBundle = &pSession->Bundle;
702 while (pBundle)
703 {
704 PSUPDRVBUNDLE pToFree;
705 unsigned i;
706
707 /*
708 * Check and unlock all entries in the bundle.
709 */
710 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
711 {
712 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
713 {
714 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
715 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
716 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
717 {
718 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
719 AssertRC(rc); /** @todo figure out how to handle this. */
720 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
721 }
722 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
723 AssertRC(rc); /** @todo figure out how to handle this. */
724 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
725 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
726 }
727 }
728
729 /*
730 * Advance and free previous bundle.
731 */
732 pToFree = pBundle;
733 pBundle = pBundle->pNext;
734
735 pToFree->pNext = NULL;
736 pToFree->cUsed = 0;
737 if (pToFree != &pSession->Bundle)
738 RTMemFree(pToFree);
739 }
740 Log2(("freeing memory - done\n"));
741
742 /*
743 * Deregister component factories.
744 */
745 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
746 Log2(("deregistering component factories:\n"));
747 if (pDevExt->pComponentFactoryHead)
748 {
749 PSUPDRVFACTORYREG pPrev = NULL;
750 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
751 while (pCur)
752 {
753 if (pCur->pSession == pSession)
754 {
755 /* unlink it */
756 PSUPDRVFACTORYREG pNext = pCur->pNext;
757 if (pPrev)
758 pPrev->pNext = pNext;
759 else
760 pDevExt->pComponentFactoryHead = pNext;
761
762 /* free it */
763 pCur->pNext = NULL;
764 pCur->pSession = NULL;
765 pCur->pFactory = NULL;
766 RTMemFree(pCur);
767
768 /* next */
769 pCur = pNext;
770 }
771 else
772 {
773 /* next */
774 pPrev = pCur;
775 pCur = pCur->pNext;
776 }
777 }
778 }
779 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
780 Log2(("deregistering component factories - done\n"));
781
782 /*
783 * Loaded images need to be dereferenced and possibly freed up.
784 */
785 supdrvLdrLock(pDevExt);
786 Log2(("freeing images:\n"));
787 if (pSession->pLdrUsage)
788 {
789 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
790 pSession->pLdrUsage = NULL;
791 while (pUsage)
792 {
793 void *pvFree = pUsage;
794 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
795 if (pImage->cUsage > pUsage->cUsage)
796 pImage->cUsage -= pUsage->cUsage;
797 else
798 supdrvLdrFree(pDevExt, pImage);
799 pUsage->pImage = NULL;
800 pUsage = pUsage->pNext;
801 RTMemFree(pvFree);
802 }
803 }
804 supdrvLdrUnlock(pDevExt);
805 Log2(("freeing images - done\n"));
806
807 /*
808 * Unmap the GIP.
809 */
810 Log2(("umapping GIP:\n"));
811 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
812 {
813 SUPR0GipUnmap(pSession);
814 pSession->fGipReferenced = 0;
815 }
816 Log2(("umapping GIP - done\n"));
817}
818
819
820/**
821 * RTHandleTableDestroy callback used by supdrvCleanupSession.
822 *
823 * @returns IPRT status code, see SUPR0ObjAddRef.
824 * @param hHandleTable The handle table handle. Ignored.
825 * @param pvObj The object pointer.
826 * @param pvCtx Context, the handle type. Ignored.
827 * @param pvUser Session pointer.
828 */
829static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
830{
831 NOREF(pvCtx);
832 NOREF(hHandleTable);
833 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
834}
835
836
837/**
838 * RTHandleTableDestroy callback used by supdrvCleanupSession.
839 *
840 * @param hHandleTable The handle table handle. Ignored.
841 * @param h The handle value. Ignored.
842 * @param pvObj The object pointer.
843 * @param pvCtx Context, the handle type. Ignored.
844 * @param pvUser Session pointer.
845 */
846static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
847{
848 NOREF(pvCtx);
849 NOREF(h);
850 NOREF(hHandleTable);
851 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
852}
853
854
855/**
856 * Fast path I/O Control worker.
857 *
858 * @returns VBox status code that should be passed down to ring-3 unchanged.
859 * @param uIOCtl Function number.
860 * @param idCpu VMCPU id.
861 * @param pDevExt Device extension.
862 * @param pSession Session data.
863 */
864int VBOXCALL supdrvIOCtlFast(uintptr_t uIOCtl, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
865{
866 /*
867 * We check the two prereqs after doing this only to allow the compiler to optimize things better.
868 */
869 if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
870 {
871 switch (uIOCtl)
872 {
873 case SUP_IOCTL_FAST_DO_RAW_RUN:
874 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
875 break;
876 case SUP_IOCTL_FAST_DO_HWACC_RUN:
877 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
878 break;
879 case SUP_IOCTL_FAST_DO_NOP:
880 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
881 break;
882 default:
883 return VERR_INTERNAL_ERROR;
884 }
885 return VINF_SUCCESS;
886 }
887 return VERR_INTERNAL_ERROR;
888}
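/* (Informal note: the fast-path ioctls above come without a SUPREQHDR packet,
 * only the function number and the virtual CPU id reach the driver, so apart
 * from the session/VM prerequisites there is nothing to validate here; the
 * full header validation happens in supdrvIOCtl() below.) */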
889
890
891/**
892 * Helper for supdrvIOCtl. Check if pszStr contains any character of pszChars.
893 * We would use strpbrk here if that function were on the RedHat kABI whitelist;
894 * see http://www.kerneldrivers.org/RHEL5.
895 *
896 * @return 1 if pszStr does contain any character of pszChars, 0 otherwise.
897 * @param pszStr String to check
898 * @param pszChars Character set
899 */
900static int supdrvCheckInvalidChar(const char *pszStr, const char *pszChars)
901{
902 int chCur;
903 while ((chCur = *pszStr++) != '\0')
904 {
905 int ch;
906 const char *psz = pszChars;
907 while ((ch = *psz++) != '\0')
908 if (ch == chCur)
909 return 1;
910
911 }
912 return 0;
913}
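/* Example with hypothetical inputs: with the character set used for
 * SUP_IOCTL_LDR_OPEN below (";:()[]{}/\\|&*%#@!~`\"'"), a module name like
 * "VMMR0" yields 0, while "VMM/R0" yields 1 because of the '/'. */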
914
915
916/**
917 * I/O Control worker.
918 *
919 * @returns 0 on success.
920 * @returns VERR_INVALID_PARAMETER if the request is invalid.
921 *
922 * @param uIOCtl Function number.
923 * @param pDevExt Device extension.
924 * @param pSession Session data.
925 * @param pReqHdr The request header.
926 */
927int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
928{
929 /*
930 * Validate the request.
931 */
932 /* this first check could probably be omitted as it's also done by the OS specific code... */
933 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
934 || pReqHdr->cbIn < sizeof(*pReqHdr)
935 || pReqHdr->cbOut < sizeof(*pReqHdr)))
936 {
937 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
938 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
939 return VERR_INVALID_PARAMETER;
940 }
941 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
942 {
943 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
944 {
945 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
946 return VERR_INVALID_PARAMETER;
947 }
948 }
949 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
950 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
951 {
952 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
953 return VERR_INVALID_PARAMETER;
954 }
955
956/*
957 * Validation macros
958 */
959#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
960 do { \
961 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
962 { \
963 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
964 (long)pReq->Hdr.cbIn, (long)(cbInExpect), (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
965 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
966 } \
967 } while (0)
968
969#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
970
971#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
972 do { \
973 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
974 { \
975 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
976 (long)pReq->Hdr.cbIn, (long)(cbInExpect))); \
977 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
978 } \
979 } while (0)
980
981#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
982 do { \
983 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
984 { \
985 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
986 (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
987 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
988 } \
989 } while (0)
990
991#define REQ_CHECK_EXPR(Name, expr) \
992 do { \
993 if (RT_UNLIKELY(!(expr))) \
994 { \
995 OSDBGPRINT(( #Name ": %s\n", #expr)); \
996 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
997 } \
998 } while (0)
999
1000#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1001 do { \
1002 if (RT_UNLIKELY(!(expr))) \
1003 { \
1004 OSDBGPRINT( fmt ); \
1005 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1006 } \
1007 } while (0)
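/* Informal expansion example: REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE) below
 * becomes REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_FREE, SUP_IOCTL_LDR_FREE_SIZE_IN,
 * SUP_IOCTL_LDR_FREE_SIZE_OUT), i.e. an exact cbIn/cbOut check that fails the
 * request with VERR_INVALID_PARAMETER on any mismatch. */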
1008
1009
1010 /*
1011 * The switch.
1012 */
1013 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1014 {
1015 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1016 {
1017 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1018 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1019 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1020 {
1021 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1022 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1023 return 0;
1024 }
1025
1026#if 0
1027 /*
1028 * Call out to the OS specific code and let it do permission checks on the
1029 * client process.
1030 */
1031 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1032 {
1033 pReq->u.Out.u32Cookie = 0xffffffff;
1034 pReq->u.Out.u32SessionCookie = 0xffffffff;
1035 pReq->u.Out.u32SessionVersion = 0xffffffff;
1036 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1037 pReq->u.Out.pSession = NULL;
1038 pReq->u.Out.cFunctions = 0;
1039 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1040 return 0;
1041 }
1042#endif
1043
1044 /*
1045 * Match the version.
1046 * The current logic is very simple: match the major interface version.
1047 */
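 /* (Hypothetical example: if SUPDRV_IOC_VERSION were 0x00120005, a client
 asking for u32MinVersion 0x00120000 would pass (same 0x0012 major),
 while 0x00130000 or anything above 0x00120005 would be rejected.) */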
1048 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1049 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1050 {
1051 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1052 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1053 pReq->u.Out.u32Cookie = 0xffffffff;
1054 pReq->u.Out.u32SessionCookie = 0xffffffff;
1055 pReq->u.Out.u32SessionVersion = 0xffffffff;
1056 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1057 pReq->u.Out.pSession = NULL;
1058 pReq->u.Out.cFunctions = 0;
1059 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1060 return 0;
1061 }
1062
1063 /*
1064 * Fill in return data and be gone.
1065 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1066 * u32SessionVersion <= u32ReqVersion!
1067 */
1068 /** @todo Somehow validate the client and negotiate a secure cookie... */
1069 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1070 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1071 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1072 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1073 pReq->u.Out.pSession = pSession;
1074 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1075 pReq->Hdr.rc = VINF_SUCCESS;
1076 return 0;
1077 }
1078
1079 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1080 {
1081 /* validate */
1082 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1083 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1084
1085 /* execute */
1086 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1087 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1088 pReq->Hdr.rc = VINF_SUCCESS;
1089 return 0;
1090 }
1091
1092 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1093 {
1094 /* validate */
1095 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1096 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1097 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1098 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1099 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1100
1101 /* execute */
1102 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1103 if (RT_FAILURE(pReq->Hdr.rc))
1104 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1105 return 0;
1106 }
1107
1108 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1109 {
1110 /* validate */
1111 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1112 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1113
1114 /* execute */
1115 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1116 return 0;
1117 }
1118
1119 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1120 {
1121 /* validate */
1122 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1123 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1124
1125 /* execute */
1126 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1127 if (RT_FAILURE(pReq->Hdr.rc))
1128 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1129 return 0;
1130 }
1131
1132 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1133 {
1134 /* validate */
1135 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1136 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1137
1138 /* execute */
1139 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1140 return 0;
1141 }
1142
1143 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1144 {
1145 /* validate */
1146 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1147 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1148 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithTabs > 0);
1149 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageWithTabs < 16*_1M);
1150 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1151 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits > 0);
1152 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImageBits < pReq->u.In.cbImageWithTabs);
1153 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1154 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1155 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, !supdrvCheckInvalidChar(pReq->u.In.szName, ";:()[]{}/\\|&*%#@!~`\"'"));
1156 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, memchr(pReq->u.In.szFilename, '\0', sizeof(pReq->u.In.szFilename)));
1157
1158 /* execute */
1159 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1160 return 0;
1161 }
1162
1163 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1164 {
1165 /* validate */
1166 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1167 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= sizeof(*pReq));
1168 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImageWithTabs), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1169 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
1170 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1171 || ( pReq->u.In.offSymbols < pReq->u.In.cbImageWithTabs
1172 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImageWithTabs),
1173 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImageWithTabs=%#lx\n", (long)pReq->u.In.offSymbols,
1174 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImageWithTabs));
1175 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1176 || ( pReq->u.In.offStrTab < pReq->u.In.cbImageWithTabs
1177 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithTabs
1178 && pReq->u.In.cbStrTab <= pReq->u.In.cbImageWithTabs),
1179 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImageWithTabs=%#lx\n", (long)pReq->u.In.offStrTab,
1180 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImageWithTabs));
1181
1182 if (pReq->u.In.cSymbols)
1183 {
1184 uint32_t i;
1185 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.achImage[pReq->u.In.offSymbols];
1186 for (i = 0; i < pReq->u.In.cSymbols; i++)
1187 {
1188 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImageWithTabs,
1189 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImageWithTabs));
1190 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1191 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithTabs));
1192 REQ_CHECK_EXPR_FMT(memchr(&pReq->u.In.achImage[pReq->u.In.offStrTab + paSyms[i].offName], '\0', pReq->u.In.cbStrTab - paSyms[i].offName),
1193 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImageWithTabs));
1194 }
1195 }
1196
1197 /* execute */
1198 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1199 return 0;
1200 }
1201
1202 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1203 {
1204 /* validate */
1205 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1206 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1207
1208 /* execute */
1209 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1210 return 0;
1211 }
1212
1213 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1214 {
1215 /* validate */
1216 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1217 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1218 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, memchr(pReq->u.In.szSymbol, '\0', sizeof(pReq->u.In.szSymbol)));
1219
1220 /* execute */
1221 pReq->Hdr.rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
1222 return 0;
1223 }
1224
1225 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0(0)):
1226 {
1227 /* validate */
1228 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1229 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1230 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1231
1232 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1233 {
1234 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1235
1236 /* execute */
1237 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1238 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1239 else
1240 pReq->Hdr.rc = VERR_WRONG_ORDER;
1241 }
1242 else
1243 {
1244 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1245 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1246 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1247 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1248 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1249
1250 /* execute */
1251 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1252 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1253 else
1254 pReq->Hdr.rc = VERR_WRONG_ORDER;
1255 }
1256
1257 if ( RT_FAILURE(pReq->Hdr.rc)
1258 && pReq->Hdr.rc != VERR_INTERRUPTED
1259 && pReq->Hdr.rc != VERR_TIMEOUT)
1260 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1261 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1262 else
1263 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1264 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1265 return 0;
1266 }
1267
1268 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1269 {
1270 /* validate */
1271 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1272 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1273
1274 /* execute */
1275 pReq->Hdr.rc = VINF_SUCCESS;
1276 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1277 return 0;
1278 }
1279
1280 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1281 {
1282 /* validate */
1283 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1284 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1285 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1286
1287 /* execute */
1288 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1289 if (RT_FAILURE(pReq->Hdr.rc))
1290 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1291 return 0;
1292 }
1293
1294 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1295 {
1296 /* validate */
1297 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1298 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1299
1300 /* execute */
1301 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1302 return 0;
1303 }
1304
1305 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1306 {
1307 /* validate */
1308 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1309 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1310
1311 /* execute */
1312 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1313 if (RT_SUCCESS(pReq->Hdr.rc))
1314 pReq->u.Out.pGipR0 = pDevExt->pGip;
1315 return 0;
1316 }
1317
1318 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
1319 {
1320 /* validate */
1321 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
1322 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
1323
1324 /* execute */
1325 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
1326 return 0;
1327 }
1328
1329 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
1330 {
1331 /* validate */
1332 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
1333 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
1334 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
1335 || ( VALID_PTR(pReq->u.In.pVMR0)
1336 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
1337 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
1338 /* execute */
1339 pSession->pVM = pReq->u.In.pVMR0;
1340 pReq->Hdr.rc = VINF_SUCCESS;
1341 return 0;
1342 }
1343
1344 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
1345 {
1346 /* validate */
1347 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
1348 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
1349 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
1350 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
1351 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
1352 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
1353 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
1354 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
1355 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
1356
1357 /* execute */
1358 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
1359 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
1360 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
1361 &pReq->u.Out.aPages[0]);
1362 if (RT_FAILURE(pReq->Hdr.rc))
1363 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1364 return 0;
1365 }
1366
1367 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
1368 {
1369 /* validate */
1370 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
1371 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
1372 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
1373 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
1374 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1375 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
1376
1377 /* execute */
1378 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
1379 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
1380 if (RT_FAILURE(pReq->Hdr.rc))
1381 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1382 return 0;
1383 }
1384
1385 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
1386 {
1387 /* validate */
1388 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
1389 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
1390 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
1391 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
1392 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
1393 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1394 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
1395
1396 /* execute */
1397 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
1398 return 0;
1399 }
1400
1401 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
1402 {
1403 /* validate */
1404 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
1405 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
1406
1407 /* execute */
1408 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
1409 return 0;
1410 }
1411
1412 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE(0)):
1413 {
1414 /* validate */
1415 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
1416 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1417 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1418
1419 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
1420 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
1421 else
1422 {
1423 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
1424 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
1425 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
1426 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
1427 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
1428 }
1429 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1430
1431 /* execute */
1432 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
1433 return 0;
1434 }
1435
1436 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS(0)):
1437 {
1438 /* validate */
1439 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
1440 size_t cbStrTab;
1441 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
1442 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
1443 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
1444 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
1445 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
1446 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
1447 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
1448 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
1449 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
1450 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
1451 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
1452
1453 /* execute */
1454 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pDevExt, pSession, pReq);
1455 return 0;
1456 }
1457
1458 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_CREATE):
1459 {
1460 /* validate */
1461 PSUPSEMCREATE pReq = (PSUPSEMCREATE)pReqHdr;
1462 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_CREATE, SUP_IOCTL_SEM_CREATE_SIZE_IN, SUP_IOCTL_SEM_CREATE_SIZE_OUT);
1463
1464 /* execute */
1465 switch (pReq->u.In.uType)
1466 {
1467 case SUP_SEM_TYPE_EVENT:
1468 {
1469 SUPSEMEVENT hEvent;
1470 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
1471 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
1472 break;
1473 }
1474
1475 case SUP_SEM_TYPE_EVENT_MULTI:
1476 {
1477 SUPSEMEVENTMULTI hEventMulti;
1478 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
1479 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
1480 break;
1481 }
1482
1483 default:
1484 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
1485 break;
1486 }
1487 return 0;
1488 }
1489
1490 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP):
1491 {
1492 /* validate */
1493 PSUPSEMOP pReq = (PSUPSEMOP)pReqHdr;
1494 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP, SUP_IOCTL_SEM_OP_SIZE_IN, SUP_IOCTL_SEM_OP_SIZE_OUT);
1495
1496 /* execute */
1497 switch (pReq->u.In.uType)
1498 {
1499 case SUP_SEM_TYPE_EVENT:
1500 {
1501 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
1502 switch (pReq->u.In.uOp)
1503 {
1504 case SUPSEMOP_WAIT:
1505 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.cMillies);
1506 break;
1507 case SUPSEMOP_SIGNAL:
1508 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
1509 break;
1510 case SUPSEMOP_CLOSE:
1511 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
1512 break;
1513 case SUPSEMOP_RESET:
1514 default:
1515 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
1516 break;
1517 }
1518 break;
1519 }
1520
1521 case SUP_SEM_TYPE_EVENT_MULTI:
1522 {
1523 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
1524 switch (pReq->u.In.uOp)
1525 {
1526 case SUPSEMOP_WAIT:
1527 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.cMillies);
1528 break;
1529 case SUPSEMOP_SIGNAL:
1530 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
1531 break;
1532 case SUPSEMOP_CLOSE:
1533 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
1534 break;
1535 case SUPSEMOP_RESET:
1536 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
1537 break;
1538 default:
1539 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
1540 break;
1541 }
1542 break;
1543 }
1544
1545 default:
1546 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
1547 break;
1548 }
1549 return 0;
1550 }
1551
1552 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
1553 {
1554 /* validate */
1555 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
1556 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
1557 REQ_CHECK_EXPR(SUP_IOCTL_VT_CAPS, pReq->Hdr.cbIn <= SUP_IOCTL_VT_CAPS_SIZE_IN);
1558
1559 /* execute */
1560 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.Caps);
1561 if (RT_FAILURE(pReq->Hdr.rc))
1562 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1563 return 0;
1564 }
1565
1566 default:
1567 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
1568 break;
1569 }
1570 return SUPDRV_ERR_GENERAL_FAILURE;
1571}
1572
1573
1574/**
1575 * Inter-Driver Communication (IDC) worker.
1576 *
1577 * @returns VBox status code.
1578 * @retval VINF_SUCCESS on success.
1579 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1580 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
1581 *
1582 * @param uReq The request (function) code.
1583 * @param   pDevExt     Device extension.
1584 * @param pSession Session data.
1585 * @param pReqHdr The request header.
1586 */
1587int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
1588{
1589 /*
1590 * The OS specific code has already validated the pSession
1591     * pointer, and the request size being greater than or equal to
1592     * the size of the header.
1593 *
1594 * So, just check that pSession is a kernel context session.
1595 */
1596 if (RT_UNLIKELY( pSession
1597 && pSession->R0Process != NIL_RTR0PROCESS))
1598 return VERR_INVALID_PARAMETER;
1599
1600/*
1601 * Validation macro.
1602 */
1603#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
1604 do { \
1605 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
1606 { \
1607 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
1608 (long)pReqHdr->cb, (long)(cbExpect))); \
1609 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1610 } \
1611 } while (0)
1612
1613 switch (uReq)
1614 {
1615 case SUPDRV_IDC_REQ_CONNECT:
1616 {
1617 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
1618 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
1619
1620 /*
1621 * Validate the cookie and other input.
1622 */
1623 if (pReq->Hdr.pSession != NULL)
1624 {
1625 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pReq->Hdr.pSession));
1626 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1627 }
1628 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
1629 {
1630 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1631 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
1632 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1633 }
1634 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
1635 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
1636 {
1637 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1638 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1639 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1640 }
1641
1642 /*
1643 * Match the version.
1644 * The current logic is very simple, match the major interface version.
1645 */
1646 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
1647 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
1648 {
1649 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1650 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
1651 pReq->u.Out.pSession = NULL;
1652 pReq->u.Out.uSessionVersion = 0xffffffff;
1653 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1654 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1655 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1656 return VINF_SUCCESS;
1657 }
1658
1659 pReq->u.Out.pSession = NULL;
1660 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
1661 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1662 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1663
1664 /*
1665 * On NT we will already have a session associated with the
1666 * client, just like with the SUP_IOCTL_COOKIE request, while
1667             * the other platforms don't.
1668 */
1669#ifdef RT_OS_WINDOWS
1670 pReq->Hdr.rc = VINF_SUCCESS;
1671#else
1672 AssertReturn(!pSession, VERR_INTERNAL_ERROR);
1673 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, &pSession);
1674 if (RT_FAILURE(pReq->Hdr.rc))
1675 {
1676 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
1677 return VINF_SUCCESS;
1678 }
1679#endif
1680
1681 pReq->u.Out.pSession = pSession;
1682 pReq->Hdr.pSession = pSession;
1683
1684 return VINF_SUCCESS;
1685 }
1686
1687 case SUPDRV_IDC_REQ_DISCONNECT:
1688 {
1689 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
1690
1691#ifdef RT_OS_WINDOWS
1692 /* Windows will destroy the session when the file object is destroyed. */
1693#else
1694 supdrvCloseSession(pDevExt, pSession);
1695#endif
1696 return pReqHdr->rc = VINF_SUCCESS;
1697 }
1698
1699 case SUPDRV_IDC_REQ_GET_SYMBOL:
1700 {
1701 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
1702 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
1703
1704 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
1705 return VINF_SUCCESS;
1706 }
1707
1708 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
1709 {
1710 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
1711 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
1712
1713 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
1714 return VINF_SUCCESS;
1715 }
1716
1717 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
1718 {
1719 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
1720 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
1721
1722 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
1723 return VINF_SUCCESS;
1724 }
1725
1726 default:
1727 Log(("Unknown IDC %#lx\n", (long)uReq));
1728 break;
1729 }
1730
1731#undef REQ_CHECK_IDC_SIZE
1732 return VERR_NOT_SUPPORTED;
1733}
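
/*
 * Illustrative sketch (not part of this file) of how the OS specific IDC
 * entry point might drive the connect request handled above.  The field
 * names are taken from the checks in supdrvIDC; how pDevExt and the request
 * block actually reach this function is platform specific.
 *
 *     SUPDRVIDCREQCONNECT Req;
 *     Req.Hdr.cb              = sizeof(Req);
 *     Req.Hdr.pSession        = NULL;                 // must be NULL on input
 *     Req.u.In.u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE;
 *     Req.u.In.uMinVersion    = SUPDRV_IDC_VERSION & UINT32_C(0xffff0000);
 *     Req.u.In.uReqVersion    = SUPDRV_IDC_VERSION;
 *     int rc = supdrvIDC(SUPDRV_IDC_REQ_CONNECT, pDevExt, NULL, &Req.Hdr);
 *     if (RT_SUCCESS(rc) && RT_SUCCESS(Req.Hdr.rc))
 *         pSession = Req.u.Out.pSession;              // kernel session for later requests
 */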
1734
1735
1736/**
1737 * Register an object for reference counting.
1738 * The object is registered with one reference in the specified session.
1739 *
1740 * @returns Unique identifier on success (pointer).
1741 *          All future references must use this identifier.
1742 * @returns NULL on failure.
1743 * @param   pfnDestructor   The destructor function which will be called when the reference count reaches 0.
1744 * @param pvUser1 The first user argument.
1745 * @param pvUser2 The second user argument.
1746 */
1747SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1748{
1749 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1750 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1751 PSUPDRVOBJ pObj;
1752 PSUPDRVUSAGE pUsage;
1753
1754 /*
1755 * Validate the input.
1756 */
1757 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
1758 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
1759 AssertPtrReturn(pfnDestructor, NULL);
1760
1761 /*
1762 * Allocate and initialize the object.
1763 */
1764 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1765 if (!pObj)
1766 return NULL;
1767 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1768 pObj->enmType = enmType;
1769 pObj->pNext = NULL;
1770 pObj->cUsage = 1;
1771 pObj->pfnDestructor = pfnDestructor;
1772 pObj->pvUser1 = pvUser1;
1773 pObj->pvUser2 = pvUser2;
1774 pObj->CreatorUid = pSession->Uid;
1775 pObj->CreatorGid = pSession->Gid;
1776 pObj->CreatorProcess= pSession->Process;
1777 supdrvOSObjInitCreator(pObj, pSession);
1778
1779 /*
1780 * Allocate the usage record.
1781 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
1782 */
1783 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1784
1785 pUsage = pDevExt->pUsageFree;
1786 if (pUsage)
1787 pDevExt->pUsageFree = pUsage->pNext;
1788 else
1789 {
1790 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1791 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1792 if (!pUsage)
1793 {
1794 RTMemFree(pObj);
1795 return NULL;
1796 }
1797 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1798 }
1799
1800 /*
1801 * Insert the object and create the session usage record.
1802 */
1803 /* The object. */
1804 pObj->pNext = pDevExt->pObjs;
1805 pDevExt->pObjs = pObj;
1806
1807 /* The session record. */
1808 pUsage->cUsage = 1;
1809 pUsage->pObj = pObj;
1810 pUsage->pNext = pSession->pUsage;
1811 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
1812 pSession->pUsage = pUsage;
1813
1814 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1815
1816    Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser2=%p)\n", pObj, pvUser1, pvUser2));
1817 return pObj;
1818}
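
/*
 * Minimal usage sketch for the object registration API above (illustrative
 * only, not part of the driver).  SUPDRVOBJTYPE_VM stands in for any valid
 * type between SUPDRVOBJTYPE_INVALID and SUPDRVOBJTYPE_END, pvMyData is a
 * hypothetical payload, and the destructor signature mirrors the invocation
 * in SUPR0ObjRelease further down.
 *
 *     static DECLCALLBACK(void) myObjDestructor(void *pvObj, void *pvUser1, void *pvUser2)
 *     {
 *         RTMemFree(pvUser1);             // free whatever the object owned
 *         NOREF(pvObj); NOREF(pvUser2);
 *     }
 *
 *     void *pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM,
 *                                    myObjDestructor, pvMyData, NULL);
 *     if (pvObj)
 *     {
 *         // other sessions reference it via SUPR0ObjAddRef(pvObj, pTheirSession)
 *         SUPR0ObjRelease(pvObj, pSession);   // drops the initial reference
 *     }
 */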
1819
1820
1821/**
1822 * Increment the reference counter for the object associating the reference
1823 * with the specified session.
1824 *
1825 * @returns IPRT status code.
1826 * @param pvObj The identifier returned by SUPR0ObjRegister().
1827 * @param pSession The session which is referencing the object.
1828 *
1829 * @remarks The caller should not own any spinlocks and must carefully protect
1830 * itself against potential race with the destructor so freed memory
1831 * isn't accessed here.
1832 */
1833SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1834{
1835 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
1836}
1837
1838
1839/**
1840 * Increment the reference counter for the object associating the reference
1841 * with the specified session.
1842 *
1843 * @returns IPRT status code.
1844 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
1845 * couldn't be allocated. (If you see this you're not doing the right
1846 * thing and it won't ever work reliably.)
1847 *
1848 * @param pvObj The identifier returned by SUPR0ObjRegister().
1849 * @param pSession The session which is referencing the object.
1850 * @param fNoBlocking Set if it's not OK to block. Never try to make the
1851 * first reference to an object in a session with this
1852 * argument set.
1853 *
1854 * @remarks The caller should not own any spinlocks and must carefully protect
1855 * itself against potential race with the destructor so freed memory
1856 * isn't accessed here.
1857 */
1858SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
1859{
1860 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1861 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1862 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1863 int rc = VINF_SUCCESS;
1864 PSUPDRVUSAGE pUsagePre;
1865 PSUPDRVUSAGE pUsage;
1866
1867 /*
1868 * Validate the input.
1869 * Be ready for the destruction race (someone might be stuck in the
1870 * destructor waiting a lock we own).
1871 */
1872 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1873 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
1874 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
1875 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
1876 VERR_INVALID_PARAMETER);
1877
1878 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1879
1880 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1881 {
1882 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1883
1884 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1885 return VERR_WRONG_ORDER;
1886 }
1887
1888 /*
1889 * Preallocate the usage record if we can.
1890 */
1891 pUsagePre = pDevExt->pUsageFree;
1892 if (pUsagePre)
1893 pDevExt->pUsageFree = pUsagePre->pNext;
1894 else if (!fNoBlocking)
1895 {
1896 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1897 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
1898 if (!pUsagePre)
1899 return VERR_NO_MEMORY;
1900
1901 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1902 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1903 {
1904 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1905
1906 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1907 return VERR_WRONG_ORDER;
1908 }
1909 }
1910
1911 /*
1912 * Reference the object.
1913 */
1914 pObj->cUsage++;
1915
1916 /*
1917 * Look for the session record.
1918 */
1919 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
1920 {
1921 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1922 if (pUsage->pObj == pObj)
1923 break;
1924 }
1925 if (pUsage)
1926 pUsage->cUsage++;
1927 else if (pUsagePre)
1928 {
1929 /* create a new session record. */
1930 pUsagePre->cUsage = 1;
1931 pUsagePre->pObj = pObj;
1932 pUsagePre->pNext = pSession->pUsage;
1933 pSession->pUsage = pUsagePre;
1934 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
1935
1936 pUsagePre = NULL;
1937 }
1938 else
1939 {
1940 pObj->cUsage--;
1941 rc = VERR_TRY_AGAIN;
1942 }
1943
1944 /*
1945     * Put any unused usage record into the free list.
1946 */
1947 if (pUsagePre)
1948 {
1949 pUsagePre->pNext = pDevExt->pUsageFree;
1950 pDevExt->pUsageFree = pUsagePre;
1951 }
1952
1953 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1954
1955 return rc;
1956}
1957
1958
1959/**
1960 * Decrement / destroy a reference counter record for an object.
1961 *
1962 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
1963 *
1964 * @returns IPRT status code.
1965 * @retval VINF_SUCCESS if not destroyed.
1966 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
1967 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
1968 *          strict builds.
1969 *
1970 * @param pvObj The identifier returned by SUPR0ObjRegister().
1971 * @param pSession The session which is referencing the object.
1972 */
1973SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
1974{
1975 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1976 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1977 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1978 int rc = VERR_INVALID_PARAMETER;
1979 PSUPDRVUSAGE pUsage;
1980 PSUPDRVUSAGE pUsagePrev;
1981
1982 /*
1983 * Validate the input.
1984 */
1985 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1986 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
1987                    ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
1988 VERR_INVALID_PARAMETER);
1989
1990 /*
1991 * Acquire the spinlock and look for the usage record.
1992 */
1993 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1994
1995 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
1996 pUsage;
1997 pUsagePrev = pUsage, pUsage = pUsage->pNext)
1998 {
1999 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
2000 if (pUsage->pObj == pObj)
2001 {
2002 rc = VINF_SUCCESS;
2003 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
2004 if (pUsage->cUsage > 1)
2005 {
2006 pObj->cUsage--;
2007 pUsage->cUsage--;
2008 }
2009 else
2010 {
2011 /*
2012 * Free the session record.
2013 */
2014 if (pUsagePrev)
2015 pUsagePrev->pNext = pUsage->pNext;
2016 else
2017 pSession->pUsage = pUsage->pNext;
2018 pUsage->pNext = pDevExt->pUsageFree;
2019 pDevExt->pUsageFree = pUsage;
2020
2021 /* What about the object? */
2022 if (pObj->cUsage > 1)
2023 pObj->cUsage--;
2024 else
2025 {
2026 /*
2027 * Object is to be destroyed, unlink it.
2028 */
2029 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
2030 rc = VINF_OBJECT_DESTROYED;
2031 if (pDevExt->pObjs == pObj)
2032 pDevExt->pObjs = pObj->pNext;
2033 else
2034 {
2035 PSUPDRVOBJ pObjPrev;
2036 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
2037 if (pObjPrev->pNext == pObj)
2038 {
2039 pObjPrev->pNext = pObj->pNext;
2040 break;
2041 }
2042 Assert(pObjPrev);
2043 }
2044 }
2045 }
2046 break;
2047 }
2048 }
2049
2050 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2051
2052 /*
2053 * Call the destructor and free the object if required.
2054 */
2055 if (rc == VINF_OBJECT_DESTROYED)
2056 {
2057 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
2058 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
2059 if (pObj->pfnDestructor)
2060 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
2061 RTMemFree(pObj);
2062 }
2063
2064 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
2065 return rc;
2066}
2067
2068
2069/**
2070 * Verifies that the current process can access the specified object.
2071 *
2072 * @returns The following IPRT status code:
2073 * @retval VINF_SUCCESS if access was granted.
2074 * @retval VERR_PERMISSION_DENIED if denied access.
2075 * @retval VERR_INVALID_PARAMETER if invalid parameter.
2076 *
2077 * @param pvObj The identifier returned by SUPR0ObjRegister().
2078 * @param pSession The session which wishes to access the object.
2079 * @param pszObjName Object string name. This is optional and depends on the object type.
2080 *
2081 * @remark The caller is responsible for making sure the object isn't removed while
2082 * we're inside this function. If uncertain about this, just call AddRef before calling us.
2083 */
2084SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
2085{
2086 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2087 int rc;
2088
2089 /*
2090 * Validate the input.
2091 */
2092 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2093 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2094                    ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2095 VERR_INVALID_PARAMETER);
2096
2097 /*
2098 * Check access. (returns true if a decision has been made.)
2099 */
2100 rc = VERR_INTERNAL_ERROR;
2101 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
2102 return rc;
2103
2104 /*
2105 * Default policy is to allow the user to access his own
2106 * stuff but nothing else.
2107 */
2108 if (pObj->CreatorUid == pSession->Uid)
2109 return VINF_SUCCESS;
2110 return VERR_PERMISSION_DENIED;
2111}
2112
2113
2114/**
2115 * Lock pages.
2116 *
2117 * @returns IPRT status code.
2118 * @param pSession Session to which the locked memory should be associated.
2119 * @param pvR3 Start of the memory range to lock.
2120 * This must be page aligned.
2121 * @param cPages Number of pages to lock.
2122 * @param paPages Where to put the physical addresses of locked memory.
2123 */
2124SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
2125{
2126 int rc;
2127 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2128 const size_t cb = (size_t)cPages << PAGE_SHIFT;
2129 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
2130
2131 /*
2132 * Verify input.
2133 */
2134 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2135 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2136 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
2137 || !pvR3)
2138 {
2139 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
2140 return VERR_INVALID_PARAMETER;
2141 }
2142
2143 /*
2144 * Let IPRT do the job.
2145 */
2146 Mem.eType = MEMREF_TYPE_LOCKED;
2147 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
2148 if (RT_SUCCESS(rc))
2149 {
2150 uint32_t iPage = cPages;
2151 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
2152 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
2153
2154 while (iPage-- > 0)
2155 {
2156 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2157 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
2158 {
2159 AssertMsgFailed(("iPage=%d\n", iPage));
2160 rc = VERR_INTERNAL_ERROR;
2161 break;
2162 }
2163 }
2164 if (RT_SUCCESS(rc))
2165 rc = supdrvMemAdd(&Mem, pSession);
2166 if (RT_FAILURE(rc))
2167 {
2168 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
2169 AssertRC(rc2);
2170 }
2171 }
2172
2173 return rc;
2174}
2175
2176
2177/**
2178 * Unlocks the memory pointed to by pv.
2179 *
2180 * @returns IPRT status code.
2181 * @param pSession Session to which the memory was locked.
2182 * @param pvR3 Memory to unlock.
2183 */
2184SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2185{
2186 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2187 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2188 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
2189}
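
/*
 * Illustrative sketch of the lock/unlock pair above (not part of the driver;
 * assumes pvR3 is a page aligned ring-3 address covering at least four pages
 * of the calling process, e.g. handed in through an IOCtl request).
 *
 *     RTHCPHYS aPhys[4];
 *     int rc = SUPR0LockMem(pSession, pvR3, RT_ELEMENTS(aPhys), &aPhys[0]);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // aPhys[i] now holds the physical address of the i-th locked page.
 *         ...
 *         rc = SUPR0UnlockMem(pSession, pvR3);
 *     }
 */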
2190
2191
2192/**
2193 * Allocates a chunk of page aligned memory with contiguous and fixed physical
2194 * backing.
2195 *
2196 * @returns IPRT status code.
2197 * @param pSession Session data.
2198 * @param cPages Number of pages to allocate.
2199 * @param   ppvR0       Where to put the address of the Ring-0 mapping of the allocated memory.
2200 * @param   ppvR3       Where to put the address of the Ring-3 mapping of the allocated memory.
2201 * @param pHCPhys Where to put the physical address of allocated memory.
2202 */
2203SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
2204{
2205 int rc;
2206 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2207 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
2208
2209 /*
2210 * Validate input.
2211 */
2212 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2213 if (!ppvR3 || !ppvR0 || !pHCPhys)
2214 {
2215 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
2216 pSession, ppvR0, ppvR3, pHCPhys));
2217 return VERR_INVALID_PARAMETER;
2218
2219 }
2220 if (cPages < 1 || cPages >= 256)
2221 {
2222 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2223 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2224 }
2225
2226 /*
2227 * Let IPRT do the job.
2228 */
2229 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
2230 if (RT_SUCCESS(rc))
2231 {
2232 int rc2;
2233 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2234 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2235 if (RT_SUCCESS(rc))
2236 {
2237 Mem.eType = MEMREF_TYPE_CONT;
2238 rc = supdrvMemAdd(&Mem, pSession);
2239 if (!rc)
2240 {
2241 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2242 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2243 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
2244 return 0;
2245 }
2246
2247 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2248 AssertRC(rc2);
2249 }
2250 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2251 AssertRC(rc2);
2252 }
2253
2254 return rc;
2255}
2256
2257
2258/**
2259 * Frees memory allocated using SUPR0ContAlloc().
2260 *
2261 * @returns IPRT status code.
2262 * @param pSession The session to which the memory was allocated.
2263 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2264 */
2265SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2266{
2267 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2268 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2269 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
2270}
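
/*
 * Illustrative sketch of a contiguous allocation (not part of the driver).
 * cPages must be in the 1..255 range checked above; the allocation can be
 * freed through either the ring-0 or the ring-3 address.
 *
 *     RTR0PTR  pvR0;
 *     RTR3PTR  pvR3;
 *     RTHCPHYS HCPhys;
 *     int rc = SUPR0ContAlloc(pSession, 4, &pvR0, &pvR3, &HCPhys);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // HCPhys is the physical address of the first page; the rest follow contiguously.
 *         ...
 *         SUPR0ContFree(pSession, (RTHCUINTPTR)pvR0);
 *     }
 */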
2271
2272
2273/**
2274 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
2275 *
2276 * The memory isn't zeroed.
2277 *
2278 * @returns IPRT status code.
2279 * @param pSession Session data.
2280 * @param cPages Number of pages to allocate.
2281 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
2282 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
2283 * @param paPages Where to put the physical addresses of allocated memory.
2284 */
2285SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2286{
2287 unsigned iPage;
2288 int rc;
2289 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2290 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
2291
2292 /*
2293 * Validate input.
2294 */
2295 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2296 if (!ppvR3 || !ppvR0 || !paPages)
2297 {
2298 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
2299 pSession, ppvR3, ppvR0, paPages));
2300 return VERR_INVALID_PARAMETER;
2301
2302 }
2303 if (cPages < 1 || cPages >= 256)
2304 {
2305 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2306 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2307 }
2308
2309 /*
2310 * Let IPRT do the work.
2311 */
2312 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
2313 if (RT_SUCCESS(rc))
2314 {
2315 int rc2;
2316 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2317 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2318 if (RT_SUCCESS(rc))
2319 {
2320 Mem.eType = MEMREF_TYPE_LOW;
2321 rc = supdrvMemAdd(&Mem, pSession);
2322 if (!rc)
2323 {
2324 for (iPage = 0; iPage < cPages; iPage++)
2325 {
2326 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2327                    AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
2328 }
2329 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2330 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2331 return 0;
2332 }
2333
2334 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2335 AssertRC(rc2);
2336 }
2337
2338 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2339 AssertRC(rc2);
2340 }
2341
2342 return rc;
2343}
2344
2345
2346/**
2347 * Frees memory allocated using SUPR0LowAlloc().
2348 *
2349 * @returns IPRT status code.
2350 * @param pSession The session to which the memory was allocated.
2351 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2352 */
2353SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2354{
2355 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2356 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2357 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
2358}
2359
2360
2361
2362/**
2363 * Allocates a chunk of memory with both R0 and R3 mappings.
2364 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
2365 *
2366 * @returns IPRT status code.
2367 * @param   pSession    The session to associate the allocation with.
2368 * @param cb Number of bytes to allocate.
2369 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2370 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2371 */
2372SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
2373{
2374 int rc;
2375 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2376 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
2377
2378 /*
2379 * Validate input.
2380 */
2381 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2382 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
2383 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2384 if (cb < 1 || cb >= _4M)
2385 {
2386 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
2387 return VERR_INVALID_PARAMETER;
2388 }
2389
2390 /*
2391 * Let IPRT do the work.
2392 */
2393 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
2394 if (RT_SUCCESS(rc))
2395 {
2396 int rc2;
2397 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2398 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2399 if (RT_SUCCESS(rc))
2400 {
2401 Mem.eType = MEMREF_TYPE_MEM;
2402 rc = supdrvMemAdd(&Mem, pSession);
2403 if (!rc)
2404 {
2405 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2406 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2407 return VINF_SUCCESS;
2408 }
2409
2410 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2411 AssertRC(rc2);
2412 }
2413
2414 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2415 AssertRC(rc2);
2416 }
2417
2418 return rc;
2419}
2420
2421
2422/**
2423 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
2424 *
2425 * @returns IPRT status code.
2426 * @param pSession The session to which the memory was allocated.
2427 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2428 * @param paPages Where to store the physical addresses.
2429 */
2430SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
2431{
2432 PSUPDRVBUNDLE pBundle;
2433 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2434 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
2435
2436 /*
2437 * Validate input.
2438 */
2439 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2440 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
2441 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
2442
2443 /*
2444 * Search for the address.
2445 */
2446 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2447 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2448 {
2449 if (pBundle->cUsed > 0)
2450 {
2451 unsigned i;
2452 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2453 {
2454 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2455 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2456 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2457 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2458 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
2459 )
2460 )
2461 {
2462 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2463 size_t iPage;
2464 for (iPage = 0; iPage < cPages; iPage++)
2465 {
2466 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2467 paPages[iPage].uReserved = 0;
2468 }
2469 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2470 return VINF_SUCCESS;
2471 }
2472 }
2473 }
2474 }
2475 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2476 Log(("Failed to find %p!!!\n", (void *)uPtr));
2477 return VERR_INVALID_PARAMETER;
2478}
2479
2480
2481/**
2482 * Free memory allocated by SUPR0MemAlloc().
2483 *
2484 * @returns IPRT status code.
2485 * @param pSession The session owning the allocation.
2486 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2487 */
2488SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2489{
2490 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2491 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2492 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
2493}
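
/*
 * Illustrative sketch tying SUPR0MemAlloc, SUPR0MemGetPhys and SUPR0MemFree
 * together (not part of the driver).  The SUPPAGE array needs one entry per
 * allocated page because SUPR0MemGetPhys fills in all of them.
 *
 *     RTR0PTR pvR0;
 *     RTR3PTR pvR3;
 *     int rc = SUPR0MemAlloc(pSession, 2 * PAGE_SIZE, &pvR0, &pvR3);
 *     if (RT_SUCCESS(rc))
 *     {
 *         SUPPAGE aPages[2];
 *         rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
 *         // on success aPages[i].Phys holds the physical address of page i
 *         SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
 *     }
 */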
2494
2495
2496/**
2497 * Allocates a chunk of memory with a kernel and/or a user mode mapping.
2498 *
2499 * The memory is fixed and it's possible to query the physical addresses using
2500 * SUPR0MemGetPhys().
2501 *
2502 * @returns IPRT status code.
2503 * @param   pSession    The session to associate the allocation with.
2504 * @param cPages The number of pages to allocate.
2505 * @param fFlags Flags, reserved for the future. Must be zero.
2506 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2507 * NULL if no ring-3 mapping.
2508 * @param   ppvR0       Where to store the address of the Ring-0 mapping.
2509 * NULL if no ring-0 mapping.
2510 * @param paPages Where to store the addresses of the pages. Optional.
2511 */
2512SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
2513{
2514 int rc;
2515 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2516    LogFlow(("SUPR0PageAllocEx: pSession=%p cPages=%d ppvR3=%p\n", pSession, cPages, ppvR3));
2517
2518 /*
2519 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
2520 */
2521 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2522 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
2523 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2524 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
2525 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2526 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
2527 {
2528        Log(("SUPR0PageAllocEx: Illegal request cPages=%u; must be greater than 0 and smaller than 128MB.\n", cPages));
2529 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2530 }
2531
2532 /*
2533 * Let IPRT do the work.
2534 */
2535 if (ppvR0)
2536 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
2537 else
2538 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
2539 if (RT_SUCCESS(rc))
2540 {
2541 int rc2;
2542 if (ppvR3)
2543 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2544 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2545 else
2546 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
2547 if (RT_SUCCESS(rc))
2548 {
2549 Mem.eType = MEMREF_TYPE_PAGE;
2550 rc = supdrvMemAdd(&Mem, pSession);
2551 if (!rc)
2552 {
2553 if (ppvR3)
2554 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2555 if (ppvR0)
2556 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2557 if (paPages)
2558 {
2559 uint32_t iPage = cPages;
2560 while (iPage-- > 0)
2561 {
2562 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
2563 Assert(paPages[iPage] != NIL_RTHCPHYS);
2564 }
2565 }
2566 return VINF_SUCCESS;
2567 }
2568
2569 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2570 AssertRC(rc2);
2571 }
2572
2573 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2574 AssertRC(rc2);
2575 }
2576 return rc;
2577}
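
/*
 * Illustrative sketch of SUPR0PageAllocEx with both mappings and the optional
 * physical address array (not part of the driver).  fFlags must be zero and
 * the allocation is later released with SUPR0PageFree, keyed on the ring-3
 * address.
 *
 *     RTR3PTR  pvR3;
 *     RTR0PTR  pvR0;
 *     RTHCPHYS aPhys[8];
 *     int rc = SUPR0PageAllocEx(pSession, 8, 0, &pvR3, &pvR0, &aPhys[0]);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // both mappings are valid here and aPhys[] holds the page addresses
 *         ...
 *         SUPR0PageFree(pSession, pvR3);
 *     }
 */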
2578
2579
2580/**
2581 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
2582 * space.
2583 *
2584 * @returns IPRT status code.
2585 * @param   pSession    The session to associate the allocation with.
2586 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
2587 * @param offSub Where to start mapping. Must be page aligned.
2588 * @param cbSub How much to map. Must be page aligned.
2589 * @param fFlags Flags, MBZ.
2590 * @param   ppvR0       Where to return the address of the ring-0 mapping on
2591 * success.
2592 */
2593SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
2594 uint32_t fFlags, PRTR0PTR ppvR0)
2595{
2596 int rc;
2597 PSUPDRVBUNDLE pBundle;
2598 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2599 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
2600 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
2601
2602 /*
2603     * Validate input.
2604 */
2605 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2606 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2607 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2608 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2609 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2610 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
2611
2612 /*
2613 * Find the memory object.
2614 */
2615 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2616 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2617 {
2618 if (pBundle->cUsed > 0)
2619 {
2620 unsigned i;
2621 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2622 {
2623 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2624 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2625 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2626 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2627 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
2628 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2629 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
2630 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
2631 {
2632 hMemObj = pBundle->aMem[i].MemObj;
2633 break;
2634 }
2635 }
2636 }
2637 }
2638 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2639
2640 rc = VERR_INVALID_PARAMETER;
2641 if (hMemObj != NIL_RTR0MEMOBJ)
2642 {
2643 /*
2644         * Do some further input validations before calling IPRT.
2645 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
2646 */
2647 size_t cbMemObj = RTR0MemObjSize(hMemObj);
2648 if ( offSub < cbMemObj
2649 && cbSub <= cbMemObj
2650 && offSub + cbSub <= cbMemObj)
2651 {
2652 RTR0MEMOBJ hMapObj;
2653 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
2654 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
2655 if (RT_SUCCESS(rc))
2656 *ppvR0 = RTR0MemObjAddress(hMapObj);
2657 }
2658 else
2659 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
2660
2661 }
2662 return rc;
2663}
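
/*
 * Illustrative sketch (not part of the driver): map one page of a ring-3 only
 * SUPR0PageAllocEx allocation into kernel space.  offSub and cbSub must be
 * page aligned, as asserted above.
 *
 *     RTR3PTR pvR3;
 *     int rc = SUPR0PageAllocEx(pSession, 8, 0, &pvR3, NULL, NULL);
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTR0PTR pvR0;
 *         rc = SUPR0PageMapKernel(pSession, pvR3, PAGE_SIZE, PAGE_SIZE, 0, &pvR0);
 *         // on success pvR0 addresses the second page of the allocation
 *     }
 */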
2664
2665
2666/**
2667 * Changes the page level protection of one or more pages previously allocated
2668 * by SUPR0PageAllocEx.
2669 *
2670 * @returns IPRT status code.
2671 * @param   pSession    The session to associate the allocation with.
2672 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
2673 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
2674 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
2675 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
2676 * @param offSub Where to start changing. Must be page aligned.
2677 * @param cbSub How much to change. Must be page aligned.
2678 * @param fProt The new page level protection, see RTMEM_PROT_*.
2679 */
2680SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
2681{
2682 int rc;
2683 PSUPDRVBUNDLE pBundle;
2684 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2685 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
2686 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
2687 LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt-%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
2688
2689 /*
2690 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
2691     * Validate input.
2692 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2693 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
2694 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2695 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2696 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
2697
2698 /*
2699 * Find the memory object.
2700 */
2701 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2702 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2703 {
2704 if (pBundle->cUsed > 0)
2705 {
2706 unsigned i;
2707 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2708 {
2709 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2710 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2711 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2712 || pvR3 == NIL_RTR3PTR)
2713 && ( pvR0 == NIL_RTR0PTR
2714 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
2715 && ( pvR3 == NIL_RTR3PTR
2716 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
2717 {
2718 if (pvR0 != NIL_RTR0PTR)
2719 hMemObjR0 = pBundle->aMem[i].MemObj;
2720 if (pvR3 != NIL_RTR3PTR)
2721 hMemObjR3 = pBundle->aMem[i].MapObjR3;
2722 break;
2723 }
2724 }
2725 }
2726 }
2727 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2728
2729 rc = VERR_INVALID_PARAMETER;
2730 if ( hMemObjR0 != NIL_RTR0MEMOBJ
2731 || hMemObjR3 != NIL_RTR0MEMOBJ)
2732 {
2733 /*
2734         * Do some further input validations before calling IPRT.
2735 */
2736        size_t cbMemObj = hMemObjR0 != NIL_RTR0MEMOBJ ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
2737 if ( offSub < cbMemObj
2738 && cbSub <= cbMemObj
2739 && offSub + cbSub <= cbMemObj)
2740 {
2741 rc = VINF_SUCCESS;
2742            if (hMemObjR3 != NIL_RTR0MEMOBJ)
2743 rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
2744            if (hMemObjR0 != NIL_RTR0MEMOBJ && RT_SUCCESS(rc))
2745 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
2746 }
2747 else
2748            SUPR0Printf("SUPR0PageProtect: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
2749
2750 }
2751 return rc;
2752
2753}
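
/*
 * Illustrative sketch (not part of the driver), continuing the
 * SUPR0PageAllocEx example above: make the first page read-only in both
 * mappings.  Pass NIL_RTR3PTR or NIL_RTR0PTR to leave one of the mappings
 * untouched.
 *
 *     rc = SUPR0PageProtect(pSession, pvR3, pvR0, 0, PAGE_SIZE, RTMEM_PROT_READ);
 */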
2754
2755
2756/**
2757 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
2758 *
2759 * @returns IPRT status code.
2760 * @param pSession The session owning the allocation.
2761 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
2762 * SUPR0PageAllocEx().
2763 */
2764SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2765{
2766 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2767 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2768 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
2769}
2770
2771
2772/**
2773 * Maps the GIP into userspace and/or gets the physical address of the GIP.
2774 *
2775 * @returns IPRT status code.
2776 * @param pSession Session to which the GIP mapping should belong.
2777 * @param ppGipR3 Where to store the address of the ring-3 mapping. (optional)
2778 * @param pHCPhysGip Where to store the physical address. (optional)
2779 *
2780 * @remark There is no reference counting on the mapping, so one call to this function
2781 *          counts globally as one reference. One call to SUPR0GipUnmap() will unmap the GIP
2782 * and remove the session as a GIP user.
2783 */
2784SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGip)
2785{
2786 int rc;
2787 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2788 RTR3PTR pGipR3 = NIL_RTR3PTR;
2789 RTHCPHYS HCPhys = NIL_RTHCPHYS;
2790 LogFlow(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGip=%p\n", pSession, ppGipR3, pHCPhysGip));
2791
2792 /*
2793 * Validate
2794 */
2795 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2796 AssertPtrNullReturn(ppGipR3, VERR_INVALID_POINTER);
2797 AssertPtrNullReturn(pHCPhysGip, VERR_INVALID_POINTER);
2798
2799 RTSemFastMutexRequest(pDevExt->mtxGip);
2800 if (pDevExt->pGip)
2801 {
2802 /*
2803 * Map it?
2804 */
2805 rc = VINF_SUCCESS;
2806 if (ppGipR3)
2807 {
2808 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
2809 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (RTR3PTR)-1, 0,
2810 RTMEM_PROT_READ, RTR0ProcHandleSelf());
2811 if (RT_SUCCESS(rc))
2812 pGipR3 = RTR0MemObjAddressR3(pSession->GipMapObjR3);
2813 }
2814
2815 /*
2816 * Get physical address.
2817 */
2818 if (pHCPhysGip && RT_SUCCESS(rc))
2819 HCPhys = pDevExt->HCPhysGip;
2820
2821 /*
2822 * Reference globally.
2823 */
2824 if (!pSession->fGipReferenced && RT_SUCCESS(rc))
2825 {
2826 pSession->fGipReferenced = 1;
2827 pDevExt->cGipUsers++;
2828 if (pDevExt->cGipUsers == 1)
2829 {
2830 PSUPGLOBALINFOPAGE pGipR0 = pDevExt->pGip;
2831 unsigned i;
2832
2833 LogFlow(("SUPR0GipMap: Resumes GIP updating\n"));
2834
2835 for (i = 0; i < RT_ELEMENTS(pGipR0->aCPUs); i++)
2836 ASMAtomicXchgU32(&pGipR0->aCPUs[i].u32TransactionId, pGipR0->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
2837 ASMAtomicXchgU64(&pGipR0->u64NanoTSLastUpdateHz, 0);
2838
2839 rc = RTTimerStart(pDevExt->pGipTimer, 0);
2840 AssertRC(rc); rc = VINF_SUCCESS;
2841 }
2842 }
2843 }
2844 else
2845 {
2846 rc = SUPDRV_ERR_GENERAL_FAILURE;
2847 Log(("SUPR0GipMap: GIP is not available!\n"));
2848 }
2849 RTSemFastMutexRelease(pDevExt->mtxGip);
2850
2851 /*
2852 * Write returns.
2853 */
2854 if (pHCPhysGip)
2855 *pHCPhysGip = HCPhys;
2856 if (ppGipR3)
2857 *ppGipR3 = pGipR3;
2858
2859#ifdef DEBUG_DARWIN_GIP
2860 OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGip=%lx pGipR3=%p\n", rc, (unsigned long)HCPhys, (void *)pGipR3));
2861#else
2862 LogFlow(( "SUPR0GipMap: returns %d *pHCPhysGip=%lx pGipR3=%p\n", rc, (unsigned long)HCPhys, (void *)pGipR3));
2863#endif
2864 return rc;
2865}
2866
2867
2868/**
2869 * Unmaps any user mapping of the GIP and terminates all GIP access
2870 * from this session.
2871 *
2872 * @returns IPRT status code.
2873 * @param pSession Session to which the GIP mapping should belong.
2874 */
2875SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
2876{
2877 int rc = VINF_SUCCESS;
2878 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2879#ifdef DEBUG_DARWIN_GIP
2880 OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
2881 pSession,
2882 pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
2883 pSession->GipMapObjR3));
2884#else
2885 LogFlow(("SUPR0GipUnmap: pSession=%p\n", pSession));
2886#endif
2887 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2888
2889 RTSemFastMutexRequest(pDevExt->mtxGip);
2890
2891 /*
2892 * Unmap anything?
2893 */
2894 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
2895 {
2896 rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
2897 AssertRC(rc);
2898 if (RT_SUCCESS(rc))
2899 pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
2900 }
2901
2902 /*
2903 * Dereference global GIP.
2904 */
2905 if (pSession->fGipReferenced && !rc)
2906 {
2907 pSession->fGipReferenced = 0;
2908 if ( pDevExt->cGipUsers > 0
2909 && !--pDevExt->cGipUsers)
2910 {
2911 LogFlow(("SUPR0GipUnmap: Suspends GIP updating\n"));
2912 rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = VINF_SUCCESS;
2913 }
2914 }
2915
2916 RTSemFastMutexRelease(pDevExt->mtxGip);
2917
2918 return rc;
2919}
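
/*
 * Illustrative sketch of the GIP map/unmap pair (not part of the driver).
 * Both output parameters are optional; a single SUPR0GipUnmap call undoes the
 * mapping and removes the session as a GIP user.
 *
 *     RTR3PTR  pGipR3;
 *     RTHCPHYS HCPhysGip;
 *     int rc = SUPR0GipMap(pSession, &pGipR3, &HCPhysGip);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ring-3 code of the session can now read the GIP at pGipR3
 *         ...
 *         SUPR0GipUnmap(pSession);
 *     }
 */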
2920
2921
2922/**
2923 * Gets the GIP pointer.
2924 *
2925 * @returns Pointer to the GIP or NULL.
2926 */
2927SUPDECL(PSUPGLOBALINFOPAGE) SUPGetGIP(void)
2928{
2929 return g_pSUPGlobalInfoPage;
2930}
2931
2932
2933/**
2934 * Register a component factory with the support driver.
2935 *
2936 * This is currently restricted to kernel sessions only.
2937 *
2938 * @returns VBox status code.
2939 * @retval VINF_SUCCESS on success.
2940 * @retval VERR_NO_MEMORY if we're out of memory.
2941 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
2942 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
2943 * @retval VERR_INVALID_PARAMETER on invalid parameter.
2944 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
2945 *
2946 * @param pSession The SUPDRV session (must be a ring-0 session).
2947 * @param pFactory Pointer to the component factory registration structure.
2948 *
2949 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
2950 */
2951SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
2952{
2953 PSUPDRVFACTORYREG pNewReg;
2954 const char *psz;
2955 int rc;
2956
2957 /*
2958 * Validate parameters.
2959 */
2960 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2961 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
2962 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
2963 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
2964 psz = (const char *)memchr(pFactory->szName, '\0', sizeof(pFactory->szName));
2965 AssertReturn(psz, VERR_INVALID_PARAMETER);
2966
2967 /*
2968 * Allocate and initialize a new registration structure.
2969 */
2970 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
2971 if (pNewReg)
2972 {
2973 pNewReg->pNext = NULL;
2974 pNewReg->pFactory = pFactory;
2975 pNewReg->pSession = pSession;
2976 pNewReg->cchName = psz - &pFactory->szName[0];
2977
2978 /*
2979 * Add it to the tail of the list after checking for prior registration.
2980 */
2981 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
2982 if (RT_SUCCESS(rc))
2983 {
2984 PSUPDRVFACTORYREG pPrev = NULL;
2985 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
2986 while (pCur && pCur->pFactory != pFactory)
2987 {
2988 pPrev = pCur;
2989 pCur = pCur->pNext;
2990 }
2991 if (!pCur)
2992 {
2993 if (pPrev)
2994 pPrev->pNext = pNewReg;
2995 else
2996 pSession->pDevExt->pComponentFactoryHead = pNewReg;
2997 rc = VINF_SUCCESS;
2998 }
2999 else
3000 rc = VERR_ALREADY_EXISTS;
3001
3002 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3003 }
3004
3005 if (RT_FAILURE(rc))
3006 RTMemFree(pNewReg);
3007 }
3008 else
3009 rc = VERR_NO_MEMORY;
3010 return rc;
3011}
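
/*
 * Illustrative sketch of registering a component factory from a kernel
 * session (not part of the driver).  The query callback signature mirrors
 * the invocation in SUPR0ComponentQueryFactory below; "MyComponent",
 * MYCOMP_INTERFACE_UUID_STR and g_MyInterface are hypothetical names.
 *
 *     static DECLCALLBACK(void *) myQueryFactoryIf(PCSUPDRVFACTORY pFactory,
 *                                                  PSUPDRVSESSION pSession,
 *                                                  const char *pszInterfaceUuid)
 *     {
 *         if (!strcmp(pszInterfaceUuid, MYCOMP_INTERFACE_UUID_STR))
 *             return (void *)&g_MyInterface;
 *         return NULL;                                // unknown interface
 *     }
 *
 *     static SUPDRVFACTORY g_MyFactory;
 *     strcpy(g_MyFactory.szName, "MyComponent");      // must fit and be terminated within szName
 *     g_MyFactory.pfnQueryFactoryInterface = myQueryFactoryIf;
 *     rc = SUPR0ComponentRegisterFactory(pKernelSession, &g_MyFactory);
 */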
3012
3013
3014/**
3015 * Deregister a component factory.
3016 *
3017 * @returns VBox status code.
3018 * @retval VINF_SUCCESS on success.
3019 * @retval VERR_NOT_FOUND if the factory wasn't registered.
3020 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
3021 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3022 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3023 *
3024 * @param pSession The SUPDRV session (must be a ring-0 session).
3025 * @param pFactory Pointer to the component factory registration structure
3026 *                      previously passed to SUPR0ComponentRegisterFactory().
3027 *
3028 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
3029 */
3030SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
3031{
3032 int rc;
3033
3034 /*
3035 * Validate parameters.
3036 */
3037 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3038 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
3039 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
3040
3041 /*
3042 * Take the lock and look for the registration record.
3043 */
3044 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3045 if (RT_SUCCESS(rc))
3046 {
3047 PSUPDRVFACTORYREG pPrev = NULL;
3048 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3049 while (pCur && pCur->pFactory != pFactory)
3050 {
3051 pPrev = pCur;
3052 pCur = pCur->pNext;
3053 }
3054 if (pCur)
3055 {
3056 if (!pPrev)
3057 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
3058 else
3059 pPrev->pNext = pCur->pNext;
3060
3061 pCur->pNext = NULL;
3062 pCur->pFactory = NULL;
3063 pCur->pSession = NULL;
3064 rc = VINF_SUCCESS;
3065 }
3066 else
3067 rc = VERR_NOT_FOUND;
3068
3069 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3070
3071 RTMemFree(pCur);
3072 }
3073 return rc;
3074}
3075
3076
3077/**
3078 * Queries a component factory.
3079 *
3080 * @returns VBox status code.
3081 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3082 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3083 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
3084 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
3085 *
3086 * @param pSession The SUPDRV session.
3087 * @param pszName The name of the component factory.
3088 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
3089 * @param ppvFactoryIf Where to store the factory interface.
3090 */
3091SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
3092{
3093 const char *pszEnd;
3094 size_t cchName;
3095 int rc;
3096
3097 /*
3098 * Validate parameters.
3099 */
3100 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3101
3102 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
3103 pszEnd = memchr(pszName, '\0', RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
3104 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3105 cchName = pszEnd - pszName;
3106
3107 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
3108 pszEnd = memchr(pszInterfaceUuid, '\0', RTUUID_STR_LENGTH);
3109 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3110
3111 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
3112 *ppvFactoryIf = NULL;
3113
3114 /*
3115 * Take the lock and try all factories by this name.
3116 */
3117 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3118 if (RT_SUCCESS(rc))
3119 {
3120 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3121 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
3122 while (pCur)
3123 {
3124 if ( pCur->cchName == cchName
3125 && !memcmp(pCur->pFactory->szName, pszName, cchName))
3126 {
3127 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
3128 if (pvFactory)
3129 {
3130 *ppvFactoryIf = pvFactory;
3131 rc = VINF_SUCCESS;
3132 break;
3133 }
3134 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
3135 }
3136
3137 /* next */
3138 pCur = pCur->pNext;
3139 }
3140
3141 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3142 }
3143 return rc;
3144}
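
/*
 * Illustrative sketch of looking up the factory registered in the previous
 * example and asking it for an interface (not part of the driver; the name,
 * UUID string and interface type are hypothetical).
 *
 *     void *pvIf = NULL;
 *     rc = SUPR0ComponentQueryFactory(pSession, "MyComponent",
 *                                     MYCOMP_INTERFACE_UUID_STR, &pvIf);
 *     if (RT_SUCCESS(rc))
 *     {
 *         PMYCOMPINTERFACE pIf = (PMYCOMPINTERFACE)pvIf;
 *         ...
 *     }
 */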
3145
3146
3147/**
3148 * Adds a memory object to the session.
3149 *
3150 * @returns IPRT status code.
3151 * @param pMem Memory tracking structure containing the
3152 * information to track.
3153 * @param pSession The session.
3154 */
3155static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
3156{
3157 PSUPDRVBUNDLE pBundle;
3158 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3159
3160 /*
3161 * Find free entry and record the allocation.
3162 */
3163 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3164 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3165 {
3166 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
3167 {
3168 unsigned i;
3169 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3170 {
3171 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
3172 {
3173 pBundle->cUsed++;
3174 pBundle->aMem[i] = *pMem;
3175 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3176 return VINF_SUCCESS;
3177 }
3178 }
3179 AssertFailed(); /* !!this can't be happening!!! */
3180 }
3181 }
3182 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3183
3184 /*
3185 * Need to allocate a new bundle.
3186 * Insert into the last entry in the bundle.
3187 */
3188 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
3189 if (!pBundle)
3190 return VERR_NO_MEMORY;
3191
3192 /* take last entry. */
3193 pBundle->cUsed++;
3194 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
3195
3196 /* insert into list. */
3197 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3198 pBundle->pNext = pSession->Bundle.pNext;
3199 pSession->Bundle.pNext = pBundle;
3200 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3201
3202 return VINF_SUCCESS;
3203}
3204
3205
3206/**
3207 * Releases a memory object referenced by pointer and type.
3208 *
3209 * @returns IPRT status code.
3210 * @param pSession Session data.
3211 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
3212 * @param eType Memory type.
3213 */
3214static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
3215{
3216 PSUPDRVBUNDLE pBundle;
3217 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3218
3219 /*
3220 * Validate input.
3221 */
3222 if (!uPtr)
3223 {
3224 Log(("Illegal address %p\n", (void *)uPtr));
3225 return VERR_INVALID_PARAMETER;
3226 }
3227
3228 /*
3229 * Search for the address.
3230 */
3231 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3232 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3233 {
3234 if (pBundle->cUsed > 0)
3235 {
3236 unsigned i;
3237 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3238 {
3239 if ( pBundle->aMem[i].eType == eType
3240 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3241 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3242 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3243 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
3244 )
3245 {
3246 /* Make a copy of it and release it outside the spinlock. */
3247 SUPDRVMEMREF Mem = pBundle->aMem[i];
3248 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
3249 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
3250 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
3251 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3252
3253 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
3254 {
3255 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
3256 AssertRC(rc); /** @todo figure out how to handle this. */
3257 }
3258 if (Mem.MemObj != NIL_RTR0MEMOBJ)
3259 {
3260 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
3261 AssertRC(rc); /** @todo figure out how to handle this. */
3262 }
3263 return VINF_SUCCESS;
3264 }
3265 }
3266 }
3267 }
3268 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3269 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
3270 return VERR_INVALID_PARAMETER;
3271}
3272
3273
3274/**
3275 * Opens an image. If it's the first time it's opened, the caller must upload
3276 * the bits using the supdrvIOCtl_LdrLoad() / SUP_IOCTL_LDR_LOAD function.
3277 *
3278 * This is the 1st step of the loading.
3279 *
3280 * @returns IPRT status code.
3281 * @param pDevExt Device globals.
3282 * @param pSession Session data.
3283 * @param pReq The open request.
3284 */
3285static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
3286{
3287 int rc;
3288 PSUPDRVLDRIMAGE pImage;
3289 void *pv;
3290 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
3291 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImageWithTabs=%d\n", pReq->u.In.szName, pReq->u.In.cbImageWithTabs));
3292
3293 /*
3294 * Check if we got an instance of the image already.
3295 */
3296 supdrvLdrLock(pDevExt);
3297 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3298 {
3299 if ( pImage->szName[cchName] == '\0'
3300 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
3301 {
3302            /** @todo Check cbImageBits and cbImageWithTabs here; if they differ, that indicates that the images are different. */
3303 pImage->cUsage++;
3304 pReq->u.Out.pvImageBase = pImage->pvImage;
3305 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
3306 pReq->u.Out.fNativeLoader = pImage->fNative;
3307 supdrvLdrAddUsage(pSession, pImage);
3308 supdrvLdrUnlock(pDevExt);
3309 return VINF_SUCCESS;
3310 }
3311 }
3312 /* (not found - add it!) */
3313
3314 /*
3315 * Allocate memory.
3316 */
3317 pv = RTMemAlloc(RT_OFFSETOF(SUPDRVLDRIMAGE, szName[cchName + 1]));
3318 if (!pv)
3319 {
3320 supdrvLdrUnlock(pDevExt);
3321 Log(("supdrvIOCtl_LdrOpen: RTMemAlloc() failed\n"));
3322 return VERR_NO_MEMORY;
3323 }
3324
3325 /*
3326 * Setup and link in the LDR stuff.
3327 */
3328 pImage = (PSUPDRVLDRIMAGE)pv;
3329 pImage->pvImage = NULL;
3330 pImage->pvImageAlloc = NULL;
3331 pImage->cbImageWithTabs = pReq->u.In.cbImageWithTabs;
3332 pImage->cbImageBits = pReq->u.In.cbImageBits;
3333 pImage->cSymbols = 0;
3334 pImage->paSymbols = NULL;
3335 pImage->pachStrTab = NULL;
3336 pImage->cbStrTab = 0;
3337 pImage->pfnModuleInit = NULL;
3338 pImage->pfnModuleTerm = NULL;
3339 pImage->pfnServiceReqHandler = NULL;
3340 pImage->uState = SUP_IOCTL_LDR_OPEN;
3341 pImage->cUsage = 1;
3342 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
3343
3344 /*
3345 * Try load it using the native loader, if that isn't supported, fall back
3346 * on the older method.
3347 */
3348 pImage->fNative = true;
3349 rc = supdrvOSLdrOpen(pDevExt, pImage, pReq->u.In.szFilename);
3350 if (rc == VERR_NOT_SUPPORTED)
3351 {
3352 pImage->pvImageAlloc = RTMemExecAlloc(pImage->cbImageBits + 31);
3353 pImage->pvImage = RT_ALIGN_P(pImage->pvImageAlloc, 32);
3354 pImage->fNative = false;
3355 rc = pImage->pvImageAlloc ? VINF_SUCCESS : VERR_NO_MEMORY;
3356 }
3357 if (RT_FAILURE(rc))
3358 {
3359 supdrvLdrUnlock(pDevExt);
3360 RTMemFree(pImage);
3361 Log(("supdrvIOCtl_LdrOpen(%s): failed - %Rrc\n", pReq->u.In.szName, rc));
3362 return rc;
3363 }
3364 Assert(VALID_PTR(pImage->pvImage) || RT_FAILURE(rc));
3365
3366 /*
3367 * Link it.
3368 */
3369 pImage->pNext = pDevExt->pLdrImages;
3370 pDevExt->pLdrImages = pImage;
3371
3372 supdrvLdrAddUsage(pSession, pImage);
3373
3374 pReq->u.Out.pvImageBase = pImage->pvImage;
3375 pReq->u.Out.fNeedsLoading = true;
3376 pReq->u.Out.fNativeLoader = pImage->fNative;
3377 supdrvLdrUnlock(pDevExt);
3378
3379#if defined(RT_OS_WINDOWS) && defined(DEBUG)
3380 SUPR0Printf("VBoxDrv: windbg> .reload /f %s=%#p\n", pImage->szName, pImage->pvImage);
3381#endif
3382 return VINF_SUCCESS;
3383}
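/*
 * Rough sketch of the two-step loading protocol implemented above and in
 * supdrvIOCtl_LdrLoad() below (illustrative, not normative): ring-3 first
 * issues SUP_IOCTL_LDR_OPEN with the image name and sizes; if fNeedsLoading
 * comes back true it prepares the image bits, symbol table and string table
 * in userland and submits them with SUP_IOCTL_LDR_LOAD, after which the image
 * state changes from SUP_IOCTL_LDR_OPEN to SUP_IOCTL_LDR_LOAD and
 * pfnModuleInit (if any) is called.
 */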
3384
3385
3386/**
3387 * Worker that validates a pointer to an image entrypoint.
3388 *
3389 * @returns IPRT status code.
3390 * @param pDevExt The device globals.
3391 * @param pImage The loader image.
3392 * @param pv The pointer into the image.
3393 * @param fMayBeNull Whether it may be NULL.
3394 * @param pszWhat What is this entrypoint? (for logging)
3395 * @param pbImageBits The image bits prepared by ring-3.
3396 *
3397 * @remarks Will release the loader lock on failure.
3398 */
3399static int supdrvLdrValidatePointer(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage, void *pv,
3400 bool fMayBeNull, const uint8_t *pbImageBits, const char *pszWhat)
3401{
3402 if (!fMayBeNull || pv)
3403 {
3404 if ((uintptr_t)pv - (uintptr_t)pImage->pvImage >= pImage->cbImageBits)
3405 {
3406 supdrvLdrUnlock(pDevExt);
3407 Log(("Out of range (%p LB %#x): %s=%p\n", pImage->pvImage, pImage->cbImageBits, pszWhat, pv));
3408 return VERR_INVALID_PARAMETER;
3409 }
3410
3411 if (pImage->fNative)
3412 {
3413 int rc = supdrvOSLdrValidatePointer(pDevExt, pImage, pv, pbImageBits);
3414 if (RT_FAILURE(rc))
3415 {
3416 supdrvLdrUnlock(pDevExt);
3417 Log(("Bad entry point address: %s=%p (rc=%Rrc)\n", pszWhat, pv, rc));
3418 return rc;
3419 }
3420 }
3421 }
3422 return VINF_SUCCESS;
3423}
3424
3425
3426/**
3427 * Loads the image bits.
3428 *
3429 * This is the 2nd step of the loading.
3430 *
3431 * @returns IPRT status code.
3432 * @param pDevExt Device globals.
3433 * @param pSession Session data.
3434 * @param pReq The request.
3435 */
3436static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
3437{
3438 PSUPDRVLDRUSAGE pUsage;
3439 PSUPDRVLDRIMAGE pImage;
3440 int rc;
3441 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImageWithBits=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImageWithTabs));
3442
3443 /*
3444 * Find the ldr image.
3445 */
3446 supdrvLdrLock(pDevExt);
3447 pUsage = pSession->pLdrUsage;
3448 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3449 pUsage = pUsage->pNext;
3450 if (!pUsage)
3451 {
3452 supdrvLdrUnlock(pDevExt);
3453 Log(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
3454 return VERR_INVALID_HANDLE;
3455 }
3456 pImage = pUsage->pImage;
3457
3458 /*
3459 * Validate input.
3460 */
3461 if ( pImage->cbImageWithTabs != pReq->u.In.cbImageWithTabs
3462 || pImage->cbImageBits != pReq->u.In.cbImageBits)
3463 {
3464 supdrvLdrUnlock(pDevExt);
3465 Log(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load) or %d != %d\n",
3466 pImage->cbImageWithTabs, pReq->u.In.cbImageWithTabs, pImage->cbImageBits, pReq->u.In.cbImageBits));
3467 return VERR_INVALID_HANDLE;
3468 }
3469
3470 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
3471 {
3472 unsigned uState = pImage->uState;
3473 supdrvLdrUnlock(pDevExt);
3474 if (uState != SUP_IOCTL_LDR_LOAD)
3475 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
3476 return SUPDRV_ERR_ALREADY_LOADED;
3477 }
3478
3479 switch (pReq->u.In.eEPType)
3480 {
3481 case SUPLDRLOADEP_NOTHING:
3482 break;
3483
3484 case SUPLDRLOADEP_VMMR0:
3485 rc = supdrvLdrValidatePointer( pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0, false, pReq->u.In.achImage, "pvVMMR0");
3486 if (RT_SUCCESS(rc))
3487 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt, false, pReq->u.In.achImage, "pvVMMR0EntryInt");
3488 if (RT_SUCCESS(rc))
3489 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, false, pReq->u.In.achImage, "pvVMMR0EntryFast");
3490 if (RT_SUCCESS(rc))
3491 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx, false, pReq->u.In.achImage, "pvVMMR0EntryEx");
3492 if (RT_FAILURE(rc))
3493 return rc;
3494 break;
3495
3496 case SUPLDRLOADEP_SERVICE:
3497 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.EP.Service.pfnServiceReq, false, pReq->u.In.achImage, "pfnServiceReq");
3498 if (RT_FAILURE(rc))
3499 return rc;
3500 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
3501 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
3502 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
3503 {
3504 supdrvLdrUnlock(pDevExt);
3505 Log(("Out of range (%p LB %#x): apvReserved={%p,%p,%p} MBZ!\n",
3506 pImage->pvImage, pReq->u.In.cbImageWithTabs,
3507 pReq->u.In.EP.Service.apvReserved[0],
3508 pReq->u.In.EP.Service.apvReserved[1],
3509 pReq->u.In.EP.Service.apvReserved[2]));
3510 return VERR_INVALID_PARAMETER;
3511 }
3512 break;
3513
3514 default:
3515 supdrvLdrUnlock(pDevExt);
3516 Log(("Invalid eEPType=%d\n", pReq->u.In.eEPType));
3517 return VERR_INVALID_PARAMETER;
3518 }
3519
3520 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleInit, true, pReq->u.In.achImage, "pfnModuleInit");
3521 if (RT_FAILURE(rc))
3522 return rc;
3523 rc = supdrvLdrValidatePointer(pDevExt, pImage, pReq->u.In.pfnModuleTerm, true, pReq->u.In.achImage, "pfnModuleTerm");
3524 if (RT_FAILURE(rc))
3525 return rc;
3526
3527 /*
3528 * Allocate and copy the tables.
3529 * (No need to do try/except as this is a buffered request.)
3530 */
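    /*
     * Layout of the request buffer as consumed below (inferred from the
     * offsets used): achImage holds the image bits together with the symbol
     * table at offSymbols (cSymbols entries of SUPLDRSYM) and the string
     * table at offStrTab (cbStrTab bytes); the copies below lift the two
     * tables out into separately allocated buffers.
     */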
3531 pImage->cbStrTab = pReq->u.In.cbStrTab;
3532 if (pImage->cbStrTab)
3533 {
3534 pImage->pachStrTab = (char *)RTMemAlloc(pImage->cbStrTab);
3535 if (pImage->pachStrTab)
3536 memcpy(pImage->pachStrTab, &pReq->u.In.achImage[pReq->u.In.offStrTab], pImage->cbStrTab);
3537 else
3538 rc = VERR_NO_MEMORY;
3539 }
3540
3541 pImage->cSymbols = pReq->u.In.cSymbols;
3542 if (RT_SUCCESS(rc) && pImage->cSymbols)
3543 {
3544 size_t cbSymbols = pImage->cSymbols * sizeof(SUPLDRSYM);
3545 pImage->paSymbols = (PSUPLDRSYM)RTMemAlloc(cbSymbols);
3546 if (pImage->paSymbols)
3547 memcpy(pImage->paSymbols, &pReq->u.In.achImage[pReq->u.In.offSymbols], cbSymbols);
3548 else
3549 rc = VERR_NO_MEMORY;
3550 }
3551
3552 /*
3553 * Copy the bits / complete native loading.
3554 */
3555 if (RT_SUCCESS(rc))
3556 {
3557 pImage->uState = SUP_IOCTL_LDR_LOAD;
3558 pImage->pfnModuleInit = pReq->u.In.pfnModuleInit;
3559 pImage->pfnModuleTerm = pReq->u.In.pfnModuleTerm;
3560
3561 if (pImage->fNative)
3562 rc = supdrvOSLdrLoad(pDevExt, pImage, pReq->u.In.achImage);
3563 else
3564 memcpy(pImage->pvImage, &pReq->u.In.achImage[0], pImage->cbImageBits);
3565 }
3566
3567 /*
3568 * Update any entry points.
3569 */
3570 if (RT_SUCCESS(rc))
3571 {
3572 switch (pReq->u.In.eEPType)
3573 {
3574 default:
3575 case SUPLDRLOADEP_NOTHING:
3576 rc = VINF_SUCCESS;
3577 break;
3578 case SUPLDRLOADEP_VMMR0:
3579 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3580 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
3581 break;
3582 case SUPLDRLOADEP_SERVICE:
3583 pImage->pfnServiceReqHandler = pReq->u.In.EP.Service.pfnServiceReq;
3584 rc = VINF_SUCCESS;
3585 break;
3586 }
3587 }
3588
3589 /*
3590 * On success call the module initialization.
3591 */
3592 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
3593 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
3594 {
3595 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
3596 rc = pImage->pfnModuleInit();
3597 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
3598 supdrvLdrUnsetVMMR0EPs(pDevExt);
3599 }
3600
3601 if (RT_FAILURE(rc))
3602 {
3603 pImage->uState = SUP_IOCTL_LDR_OPEN;
3604 pImage->pfnModuleInit = NULL;
3605 pImage->pfnModuleTerm = NULL;
3606        pImage->pfnServiceReqHandler = NULL;
3607 pImage->cbStrTab = 0;
3608 RTMemFree(pImage->pachStrTab);
3609 pImage->pachStrTab = NULL;
3610 RTMemFree(pImage->paSymbols);
3611 pImage->paSymbols = NULL;
3612 pImage->cSymbols = 0;
3613 }
3614
3615 supdrvLdrUnlock(pDevExt);
3616 return rc;
3617}
3618
3619
3620/**
3621 * Frees a previously loaded (prep'ed) image.
3622 *
3623 * @returns IPRT status code.
3624 * @param pDevExt Device globals.
3625 * @param pSession Session data.
3626 * @param pReq The request.
3627 */
3628static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
3629{
3630 int rc;
3631 PSUPDRVLDRUSAGE pUsagePrev;
3632 PSUPDRVLDRUSAGE pUsage;
3633 PSUPDRVLDRIMAGE pImage;
3634 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
3635
3636 /*
3637 * Find the ldr image.
3638 */
3639 supdrvLdrLock(pDevExt);
3640 pUsagePrev = NULL;
3641 pUsage = pSession->pLdrUsage;
3642 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3643 {
3644 pUsagePrev = pUsage;
3645 pUsage = pUsage->pNext;
3646 }
3647 if (!pUsage)
3648 {
3649 supdrvLdrUnlock(pDevExt);
3650 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
3651 return VERR_INVALID_HANDLE;
3652 }
3653
3654 /*
3655 * Check if we can remove anything.
3656 */
3657 rc = VINF_SUCCESS;
3658 pImage = pUsage->pImage;
3659 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
3660 {
3661 /*
3662 * Check if there are any objects with destructors in the image, if
3663 * so leave it for the session cleanup routine so we get a chance to
3664 * clean things up in the right order and not leave them all dangling.
3665 */
3666 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3667 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
3668 if (pImage->cUsage <= 1)
3669 {
3670 PSUPDRVOBJ pObj;
3671 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
3672 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
3673 {
3674 rc = VERR_DANGLING_OBJECTS;
3675 break;
3676 }
3677 }
3678 else
3679 {
3680 PSUPDRVUSAGE pGenUsage;
3681 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
3682 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
3683 {
3684 rc = VERR_DANGLING_OBJECTS;
3685 break;
3686 }
3687 }
3688 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
3689 if (rc == VINF_SUCCESS)
3690 {
3691 /* unlink it */
3692 if (pUsagePrev)
3693 pUsagePrev->pNext = pUsage->pNext;
3694 else
3695 pSession->pLdrUsage = pUsage->pNext;
3696
3697 /* free it */
3698 pUsage->pImage = NULL;
3699 pUsage->pNext = NULL;
3700 RTMemFree(pUsage);
3701
3702 /*
3703 * Dereference the image.
3704 */
3705 if (pImage->cUsage <= 1)
3706 supdrvLdrFree(pDevExt, pImage);
3707 else
3708 pImage->cUsage--;
3709 }
3710 else
3711 {
3712 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
3713 rc = VINF_SUCCESS; /** @todo BRANCH-2.1: remove this after branching. */
3714 }
3715 }
3716 else
3717 {
3718 /*
3719 * Dereference both image and usage.
3720 */
3721 pImage->cUsage--;
3722 pUsage->cUsage--;
3723 }
3724
3725 supdrvLdrUnlock(pDevExt);
3726 return rc;
3727}
3728
3729
3730/**
3731 * Gets the address of a symbol in an open image.
3732 *
3733 * @returns VINF_SUCCESS on success.
3734 * @returns VERR_* on failure.
3735 * @param pDevExt Device globals.
3736 * @param pSession Session data.
3737 * @param pReq The request buffer.
3738 */
3739static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
3740{
3741 PSUPDRVLDRIMAGE pImage;
3742 PSUPDRVLDRUSAGE pUsage;
3743 uint32_t i;
3744 PSUPLDRSYM paSyms;
3745 const char *pchStrings;
3746 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
3747 void *pvSymbol = NULL;
3748 int rc = VERR_GENERAL_FAILURE;
3749 Log3(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
3750
3751 /*
3752 * Find the ldr image.
3753 */
3754 supdrvLdrLock(pDevExt);
3755 pUsage = pSession->pLdrUsage;
3756 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3757 pUsage = pUsage->pNext;
3758 if (!pUsage)
3759 {
3760 supdrvLdrUnlock(pDevExt);
3761 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3762 return VERR_INVALID_HANDLE;
3763 }
3764 pImage = pUsage->pImage;
3765 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3766 {
3767 unsigned uState = pImage->uState;
3768 supdrvLdrUnlock(pDevExt);
3769 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3770 return VERR_ALREADY_LOADED;
3771 }
3772
3773 /*
3774 * Search the symbol strings.
3775 */
3776 pchStrings = pImage->pachStrTab;
3777 paSyms = pImage->paSymbols;
3778 for (i = 0; i < pImage->cSymbols; i++)
3779 {
3780 if ( paSyms[i].offSymbol < pImage->cbImageBits /* paranoia */
3781 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3782 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
3783 {
3784 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3785 rc = VINF_SUCCESS;
3786 break;
3787 }
3788 }
3789 supdrvLdrUnlock(pDevExt);
3790 pReq->u.Out.pvSymbol = pvSymbol;
3791 return rc;
3792}
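/*
 * Example of the match test above: for szSymbol "VMMR0EntryEx", cbSymbol is 13
 * (terminator included), so the memcmp also compares the trailing '\0' and a
 * longer name sharing the same prefix cannot match; the
 * offName + cbSymbol <= cbStrTab check keeps the comparison inside the string
 * table.
 */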
3793
3794
3795/**
3796 * Gets the address of a symbol in an open image or the support driver.
3797 *
3798 * @returns VINF_SUCCESS on success.
3799 * @returns VERR_* on failure.
3800 * @param pDevExt Device globals.
3801 * @param pSession Session data.
3802 * @param pReq The request buffer.
3803 */
3804static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
3805{
3806 int rc = VINF_SUCCESS;
3807 const char *pszSymbol = pReq->u.In.pszSymbol;
3808 const char *pszModule = pReq->u.In.pszModule;
3809 size_t cbSymbol;
3810 char const *pszEnd;
3811 uint32_t i;
3812
3813 /*
3814 * Input validation.
3815 */
3816 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
3817 pszEnd = (char *)memchr(pszSymbol, '\0', 512);
3818 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3819 cbSymbol = pszEnd - pszSymbol + 1;
3820
3821 if (pszModule)
3822 {
3823 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
3824 pszEnd = (char *)memchr(pszModule, '\0', 64);
3825 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3826 }
3827 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
3828
3829
3830 if ( !pszModule
3831 || !strcmp(pszModule, "SupDrv"))
3832 {
3833 /*
3834 * Search the support driver export table.
3835 */
3836 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
3837 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
3838 {
3839 pReq->u.Out.pfnSymbol = g_aFunctions[i].pfn;
3840 break;
3841 }
3842 }
3843 else
3844 {
3845 /*
3846 * Find the loader image.
3847 */
3848 PSUPDRVLDRIMAGE pImage;
3849
3850 supdrvLdrLock(pDevExt);
3851
3852 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3853 if (!strcmp(pImage->szName, pszModule))
3854 break;
3855 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
3856 {
3857 /*
3858 * Search the symbol strings.
3859 */
3860 const char *pchStrings = pImage->pachStrTab;
3861 PCSUPLDRSYM paSyms = pImage->paSymbols;
3862 for (i = 0; i < pImage->cSymbols; i++)
3863 {
3864 if ( paSyms[i].offSymbol < pImage->cbImageBits /* paranoia */
3865 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3866 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
3867 {
3868 /*
3869 * Found it! Calc the symbol address and add a reference to the module.
3870 */
3871 pReq->u.Out.pfnSymbol = (PFNRT)((uint8_t *)pImage->pvImage + paSyms[i].offSymbol);
3872 rc = supdrvLdrAddUsage(pSession, pImage);
3873 break;
3874 }
3875 }
3876 }
3877 else
3878 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
3879
3880 supdrvLdrUnlock(pDevExt);
3881 }
3882 return rc;
3883}
3884
3885
3886/**
3887 * Updates the VMMR0 entry point pointers.
3888 *
3889 * @returns IPRT status code.
3890 * @param pDevExt Device globals.
3891 * @param pvVMMR0           VMMR0 image handle.
3893 * @param pvVMMR0EntryInt VMMR0EntryInt address.
3894 * @param pvVMMR0EntryFast VMMR0EntryFast address.
3895 * @param pvVMMR0EntryEx VMMR0EntryEx address.
3896 * @remark Caller must own the loader mutex.
3897 */
3898static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
3899{
3900 int rc = VINF_SUCCESS;
3901 LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryInt=%p\n", pvVMMR0, pvVMMR0EntryInt));
3902
3903
3904 /*
3905 * Check if not yet set.
3906 */
3907 if (!pDevExt->pvVMMR0)
3908 {
3909 pDevExt->pvVMMR0 = pvVMMR0;
3910 pDevExt->pfnVMMR0EntryInt = pvVMMR0EntryInt;
3911 pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
3912 pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
3913 }
3914 else
3915 {
3916 /*
3917 * Return failure or success depending on whether the values match or not.
3918 */
3919 if ( pDevExt->pvVMMR0 != pvVMMR0
3920 || (void *)pDevExt->pfnVMMR0EntryInt != pvVMMR0EntryInt
3921 || (void *)pDevExt->pfnVMMR0EntryFast != pvVMMR0EntryFast
3922 || (void *)pDevExt->pfnVMMR0EntryEx != pvVMMR0EntryEx)
3923 {
3924 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
3925 rc = VERR_INVALID_PARAMETER;
3926 }
3927 }
3928 return rc;
3929}
3930
3931
3932/**
3933 * Unsets the VMMR0 entry points installed by supdrvLdrSetVMMR0EPs.
3934 *
3935 * @param pDevExt Device globals.
3936 */
3937static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
3938{
3939 pDevExt->pvVMMR0 = NULL;
3940 pDevExt->pfnVMMR0EntryInt = NULL;
3941 pDevExt->pfnVMMR0EntryFast = NULL;
3942 pDevExt->pfnVMMR0EntryEx = NULL;
3943}
3944
3945
3946/**
3947 * Adds a usage reference in the specified session of an image.
3948 *
3949 * Called while owning the loader semaphore.
3950 *
3951 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
3952 * @param pSession Session in question.
3953 * @param pImage Image which the session is using.
3954 */
3955static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
3956{
3957 PSUPDRVLDRUSAGE pUsage;
3958 LogFlow(("supdrvLdrAddUsage: pImage=%p\n", pImage));
3959
3960 /*
3961 * Referenced it already?
3962 */
3963 pUsage = pSession->pLdrUsage;
3964 while (pUsage)
3965 {
3966 if (pUsage->pImage == pImage)
3967 {
3968 pUsage->cUsage++;
3969 return VINF_SUCCESS;
3970 }
3971 pUsage = pUsage->pNext;
3972 }
3973
3974 /*
3975 * Allocate new usage record.
3976 */
3977 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
3978 AssertReturn(pUsage, VERR_NO_MEMORY);
3979 pUsage->cUsage = 1;
3980 pUsage->pImage = pImage;
3981 pUsage->pNext = pSession->pLdrUsage;
3982 pSession->pLdrUsage = pUsage;
3983 return VINF_SUCCESS;
3984}
3985
3986
3987/**
3988 * Frees a load image.
3989 *
3990 * @param pDevExt Pointer to device extension.
3991 * @param pImage Pointer to the image we're gonna free.
3992 *                  This image must exist!
3993 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
3994 */
3995static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
3996{
3997 PSUPDRVLDRIMAGE pImagePrev;
3998 LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));
3999
4000 /* find it - arg. should've used doubly linked list. */
4001 Assert(pDevExt->pLdrImages);
4002 pImagePrev = NULL;
4003 if (pDevExt->pLdrImages != pImage)
4004 {
4005 pImagePrev = pDevExt->pLdrImages;
4006 while (pImagePrev->pNext != pImage)
4007 pImagePrev = pImagePrev->pNext;
4008 Assert(pImagePrev->pNext == pImage);
4009 }
4010
4011 /* unlink */
4012 if (pImagePrev)
4013 pImagePrev->pNext = pImage->pNext;
4014 else
4015 pDevExt->pLdrImages = pImage->pNext;
4016
4017    /* check if this is VMMR0.r0 and, if so, unset its entry point pointers. */
4018 if (pDevExt->pvVMMR0 == pImage->pvImage)
4019 supdrvLdrUnsetVMMR0EPs(pDevExt);
4020
4021 /* check for objects with destructors in this image. (Shouldn't happen.) */
4022 if (pDevExt->pObjs)
4023 {
4024 unsigned cObjs = 0;
4025 PSUPDRVOBJ pObj;
4026 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
4027 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
4028 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
4029 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImageBits))
4030 {
4031 pObj->pfnDestructor = NULL;
4032 cObjs++;
4033 }
4034 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
4035 if (cObjs)
4036 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
4037 }
4038
4039 /* call termination function if fully loaded. */
4040 if ( pImage->pfnModuleTerm
4041 && pImage->uState == SUP_IOCTL_LDR_LOAD)
4042 {
4043 LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
4044 pImage->pfnModuleTerm();
4045 }
4046
4047 /* do native unload if appropriate. */
4048 if (pImage->fNative)
4049 supdrvOSLdrUnload(pDevExt, pImage);
4050
4051 /* free the image */
4052 pImage->cUsage = 0;
4053 pImage->pNext = 0;
4054 pImage->uState = SUP_IOCTL_LDR_FREE;
4055 RTMemExecFree(pImage->pvImageAlloc);
4056 pImage->pvImageAlloc = NULL;
4057 RTMemFree(pImage->pachStrTab);
4058 pImage->pachStrTab = NULL;
4059 RTMemFree(pImage->paSymbols);
4060 pImage->paSymbols = NULL;
4061 RTMemFree(pImage);
4062}
4063
4064
4065/**
4066 * Acquires the loader lock.
4067 *
4068 * @returns IPRT status code.
4069 * @param pDevExt The device extension.
4070 */
4071DECLINLINE(int) supdrvLdrLock(PSUPDRVDEVEXT pDevExt)
4072{
4073#ifdef SUPDRV_USE_MUTEX_FOR_LDR
4074 return RTSemMutexRequest(pDevExt->mtxLdr, RT_INDEFINITE_WAIT);
4075#else
4076 return RTSemFastMutexRequest(pDevExt->mtxLdr);
4077#endif
4078}
4079
4080
4081/**
4082 * Releases the loader lock.
4083 *
4084 * @returns IPRT status code.
4085 * @param pDevExt The device extension.
4086 */
4087DECLINLINE(int) supdrvLdrUnlock(PSUPDRVDEVEXT pDevExt)
4088{
4089#ifdef SUPDRV_USE_MUTEX_FOR_LDR
4090 return RTSemMutexRelease(pDevExt->mtxLdr);
4091#else
4092 return RTSemFastMutexRelease(pDevExt->mtxLdr);
4093#endif
4094}
4095
4096
4097/**
4098 * Implements the service call request.
4099 *
4100 * @returns VBox status code.
4101 * @param pDevExt The device extension.
4102 * @param pSession The calling session.
4103 * @param pReq The request packet, valid.
4104 */
4105static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
4106{
4107#if !defined(RT_OS_WINDOWS) || defined(DEBUG)
4108 int rc;
4109
4110 /*
4111 * Find the module first in the module referenced by the calling session.
4112 */
4113 rc = supdrvLdrLock(pDevExt);
4114 if (RT_SUCCESS(rc))
4115 {
4116 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
4117 PSUPDRVLDRUSAGE pUsage;
4118
4119 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
4120 if ( pUsage->pImage->pfnServiceReqHandler
4121 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
4122 {
4123 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
4124 break;
4125 }
4126 supdrvLdrUnlock(pDevExt);
4127
4128 if (pfnServiceReqHandler)
4129 {
4130 /*
4131 * Call it.
4132 */
4133 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
4134 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
4135 else
4136 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
4137 }
4138 else
4139 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
4140 }
4141
4142 /* log it */
4143 if ( RT_FAILURE(rc)
4144 && rc != VERR_INTERRUPTED
4145 && rc != VERR_TIMEOUT)
4146 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4147 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4148 else
4149 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4150 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4151 return rc;
4152#else /* RT_OS_WINDOWS && !DEBUG */
4153 return VERR_NOT_IMPLEMENTED;
4154#endif /* RT_OS_WINDOWS && !DEBUG */
4155}
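/*
 * As the calls above show, a service module exports its handler through
 * pfnServiceReqHandler and is invoked as
 * pfnHandler(pSession, uOperation, u64Arg, pReqHdr), where pReqHdr is NULL for
 * the headerless SUP_IOCTL_CALL_SERVICE_SIZE(0) form and otherwise points to
 * the SUPR0SERVICEREQHDR embedded in the request packet.  (Call shape inferred
 * from the call sites in this function; see the SUP headers for the
 * authoritative prototype.)
 */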
4156
4157
4158/**
4159 * Implements the logger settings request.
4160 *
4161 * @returns VBox status code.
4162 * @param pDevExt The device extension.
4163 * @param pSession The caller's session.
4164 * @param pReq The request.
4165 */
4166static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq)
4167{
4168 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
4169 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
4170 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
4171 PRTLOGGER pLogger = NULL;
4172 int rc;
4173
4174 /*
4175 * Some further validation.
4176 */
4177 switch (pReq->u.In.fWhat)
4178 {
4179 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
4180 case SUPLOGGERSETTINGS_WHAT_CREATE:
4181 break;
4182
4183 case SUPLOGGERSETTINGS_WHAT_DESTROY:
4184 if (*pszGroup || *pszFlags || *pszDest)
4185 return VERR_INVALID_PARAMETER;
4186 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
4187 return VERR_ACCESS_DENIED;
4188 break;
4189
4190 default:
4191 return VERR_INTERNAL_ERROR;
4192 }
4193
4194 /*
4195 * Get the logger.
4196 */
4197 switch (pReq->u.In.fWhich)
4198 {
4199 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4200 pLogger = RTLogGetDefaultInstance();
4201 break;
4202
4203 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4204 pLogger = RTLogRelDefaultInstance();
4205 break;
4206
4207 default:
4208 return VERR_INTERNAL_ERROR;
4209 }
4210
4211 /*
4212 * Do the job.
4213 */
4214 switch (pReq->u.In.fWhat)
4215 {
4216 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
4217 if (pLogger)
4218 {
4219 rc = RTLogFlags(pLogger, pszFlags);
4220 if (RT_SUCCESS(rc))
4221 rc = RTLogGroupSettings(pLogger, pszGroup);
4222 NOREF(pszDest);
4223 }
4224 else
4225 rc = VERR_NOT_FOUND;
4226 break;
4227
4228 case SUPLOGGERSETTINGS_WHAT_CREATE:
4229 {
4230 if (pLogger)
4231 rc = VERR_ALREADY_EXISTS;
4232 else
4233 {
4234 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
4235
4236 rc = RTLogCreate(&pLogger,
4237 0 /* fFlags */,
4238 pszGroup,
4239 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
4240 ? "VBOX_LOG"
4241 : "VBOX_RELEASE_LOG",
4242 RT_ELEMENTS(s_apszGroups),
4243 s_apszGroups,
4244 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
4245 NULL);
4246 if (RT_SUCCESS(rc))
4247 {
4248 rc = RTLogFlags(pLogger, pszFlags);
4249 NOREF(pszDest);
4250 if (RT_SUCCESS(rc))
4251 {
4252 switch (pReq->u.In.fWhich)
4253 {
4254 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4255 pLogger = RTLogSetDefaultInstance(pLogger);
4256 break;
4257 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4258 pLogger = RTLogRelSetDefaultInstance(pLogger);
4259 break;
4260 }
4261 }
4262 RTLogDestroy(pLogger);
4263 }
4264 }
4265 break;
4266 }
4267
4268 case SUPLOGGERSETTINGS_WHAT_DESTROY:
4269 switch (pReq->u.In.fWhich)
4270 {
4271 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4272 pLogger = RTLogSetDefaultInstance(NULL);
4273 break;
4274 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4275 pLogger = RTLogRelSetDefaultInstance(NULL);
4276 break;
4277 }
4278 rc = RTLogDestroy(pLogger);
4279 break;
4280
4281 default:
4282 {
4283 rc = VERR_INTERNAL_ERROR;
4284 break;
4285 }
4286 }
4287
4288 return rc;
4289}
4290
4291
4292/**
4293 * Creates the GIP.
4294 *
4295 * @returns VBox status code.
4296 * @param pDevExt Instance data. GIP stuff may be updated.
4297 */
4298static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
4299{
4300 PSUPGLOBALINFOPAGE pGip;
4301 RTHCPHYS HCPhysGip;
4302 uint32_t u32SystemResolution;
4303 uint32_t u32Interval;
4304 int rc;
4305
4306 LogFlow(("supdrvGipCreate:\n"));
4307
4308 /* assert order */
4309 Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
4310 Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
4311 Assert(!pDevExt->pGipTimer);
4312
4313 /*
4314 * Allocate a suitable page with a default kernel mapping.
4315 */
4316 rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
4317 if (RT_FAILURE(rc))
4318 {
4319 OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
4320 return rc;
4321 }
4322 pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
4323 HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);
4324
4325#if 0 /** @todo Disabled this as we didn't use to do it before and it causes unnecessary stress on laptops.
4326 * It only applies to Windows and should probably be revisited later, if possible made part of the
4327 * timer code (return min granularity in RTTimerGetSystemGranularity and set it in RTTimerStart). */
4328 /*
4329 * Try bump up the system timer resolution.
4330 * The more interrupts the better...
4331 */
4332 if ( RT_SUCCESS(RTTimerRequestSystemGranularity( 488281 /* 2048 HZ */, &u32SystemResolution))
4333 || RT_SUCCESS(RTTimerRequestSystemGranularity( 500000 /* 2000 HZ */, &u32SystemResolution))
4334 || RT_SUCCESS(RTTimerRequestSystemGranularity( 976563 /* 1024 HZ */, &u32SystemResolution))
4335 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
4336 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1953125 /* 512 HZ */, &u32SystemResolution))
4337 || RT_SUCCESS(RTTimerRequestSystemGranularity( 2000000 /* 500 HZ */, &u32SystemResolution))
4338 || RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /* 256 HZ */, &u32SystemResolution))
4339 || RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /* 250 HZ */, &u32SystemResolution))
4340 || RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /* 128 HZ */, &u32SystemResolution))
4341 || RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /* 100 HZ */, &u32SystemResolution))
4342 || RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /* 64 HZ */, &u32SystemResolution))
4343 || RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /* 32 HZ */, &u32SystemResolution))
4344 )
4345 {
4346 Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
4347 pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
4348 }
4349#endif
4350
4351 /*
4352 * Find a reasonable update interval and initialize the structure.
4353 */
4354 u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
4355 while (u32Interval < 10000000 /* 10 ms */)
4356 u32Interval += u32SystemResolution;
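    /* Worked example: with a 1 ms (1000000 ns) system granularity the loop above
       settles on exactly 10 ms, i.e. a 100 Hz GIP update rate; with the common
       15.625 ms Windows tick the initial value already exceeds 10 ms, giving 64 Hz. */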
4357
4358 supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);
4359
4360 /*
4361 * Create the timer.
4362 * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
4363 */
4364 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4365 {
4366 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL, supdrvGipAsyncTimer, pDevExt);
4367 if (rc == VERR_NOT_SUPPORTED)
4368 {
4369 OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
4370 pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
4371 }
4372 }
4373 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4374 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipSyncTimer, pDevExt);
4375 if (RT_SUCCESS(rc))
4376 {
4377 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4378 rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
4379 if (RT_SUCCESS(rc))
4380 {
4381 /*
4382 * We're good.
4383 */
4384 dprintf(("supdrvGipCreate: %ld ns interval.\n", (long)u32Interval));
4385 g_pSUPGlobalInfoPage = pGip;
4386 return VINF_SUCCESS;
4387 }
4388
4389        OSDBGPRINT(("supdrvGipCreate: failed to register MP event notification. rc=%d\n", rc));
4390 }
4391 else
4392 {
4393 OSDBGPRINT(("supdrvGipCreate: failed create GIP timer at %ld ns interval. rc=%d\n", (long)u32Interval, rc));
4394 Assert(!pDevExt->pGipTimer);
4395 }
4396 supdrvGipDestroy(pDevExt);
4397 return rc;
4398}
4399
4400
4401/**
4402 * Terminates the GIP.
4403 *
4404 * @param pDevExt Instance data. GIP stuff may be updated.
4405 */
4406static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
4407{
4408 int rc;
4409#ifdef DEBUG_DARWIN_GIP
4410 OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
4411 pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
4412 pDevExt->pGipTimer, pDevExt->GipMemObj));
4413#endif
4414
4415 /*
4416     * Invalidate the GIP data.
4417 */
4418 if (pDevExt->pGip)
4419 {
4420 supdrvGipTerm(pDevExt->pGip);
4421 pDevExt->pGip = NULL;
4422 }
4423 g_pSUPGlobalInfoPage = NULL;
4424
4425 /*
4426 * Destroy the timer and free the GIP memory object.
4427 */
4428 if (pDevExt->pGipTimer)
4429 {
4430 rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
4431 pDevExt->pGipTimer = NULL;
4432 }
4433
4434 if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
4435 {
4436 rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
4437 pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
4438 }
4439
4440 /*
4441 * Finally, release the system timer resolution request if one succeeded.
4442 */
4443 if (pDevExt->u32SystemTimerGranularityGrant)
4444 {
4445 rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
4446 pDevExt->u32SystemTimerGranularityGrant = 0;
4447 }
4448}
4449
4450
4451/**
4452 * Timer callback function for sync GIP mode.
4453 * @param pTimer The timer.
4454 * @param pvUser The device extension.
4455 */
4456static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4457{
4458 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4459 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4460 uint64_t u64TSC = ASMReadTSC();
4461 uint64_t NanoTS = RTTimeSystemNanoTS();
4462
4463 supdrvGipUpdate(pDevExt->pGip, NanoTS, u64TSC);
4464
4465 ASMSetFlags(fOldFlags);
4466}
4467
4468
4469/**
4470 * Timer callback function for async GIP mode.
4471 * @param pTimer The timer.
4472 * @param pvUser The device extension.
4473 */
4474static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4475{
4476 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4477 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4478 RTCPUID idCpu = RTMpCpuId();
4479 uint64_t u64TSC = ASMReadTSC();
4480 uint64_t NanoTS = RTTimeSystemNanoTS();
4481
4482 /** @todo reset the transaction number and whatnot when iTick == 1. */
4483 if (pDevExt->idGipMaster == idCpu)
4484 supdrvGipUpdate(pDevExt->pGip, NanoTS, u64TSC);
4485 else
4486 supdrvGipUpdatePerCpu(pDevExt->pGip, NanoTS, u64TSC, ASMGetApicId());
4487
4488 ASMSetFlags(fOldFlags);
4489}
4490
4491
4492/**
4493 * Multiprocessor event notification callback.
4494 *
4495 * This is used to make sure that the GIP master role gets passed on to
4496 * another CPU.
4497 *
4498 * @param enmEvent The event.
4499 * @param idCpu The cpu it applies to.
4500 * @param pvUser Pointer to the device extension.
4501 */
4502static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
4503{
4504 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4505 if (enmEvent == RTMPEVENT_OFFLINE)
4506 {
4507 RTCPUID idGipMaster;
4508 ASMAtomicReadSize(&pDevExt->idGipMaster, &idGipMaster);
4509 if (idGipMaster == idCpu)
4510 {
4511 /*
4512 * Find a new GIP master.
4513 */
4514 bool fIgnored;
4515 unsigned i;
4516 RTCPUID idNewGipMaster = NIL_RTCPUID;
4517 RTCPUSET OnlineCpus;
4518 RTMpGetOnlineSet(&OnlineCpus);
4519
4520 for (i = 0; i < RTCPUSET_MAX_CPUS; i++)
4521 {
4522 RTCPUID idCurCpu = RTMpCpuIdFromSetIndex(i);
4523 if ( RTCpuSetIsMember(&OnlineCpus, idCurCpu)
4524 && idCurCpu != idGipMaster)
4525 {
4526 idNewGipMaster = idCurCpu;
4527 break;
4528 }
4529 }
4530
4531 dprintf(("supdrvGipMpEvent: Gip master %#lx -> %#lx\n", (long)idGipMaster, (long)idNewGipMaster));
4532 ASMAtomicCmpXchgSize(&pDevExt->idGipMaster, idNewGipMaster, idGipMaster, fIgnored);
4533 NOREF(fIgnored);
4534 }
4535 }
4536}
4537
4538
4539/**
4540 * Initializes the GIP data.
4541 *
4542 * @returns IPRT status code.
4543 * @param pDevExt Pointer to the device instance data.
4544 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4545 * @param HCPhys The physical address of the GIP.
4546 * @param u64NanoTS The current nanosecond timestamp.
4547 * @param uUpdateHz     The update frequency.
4548 */
4549int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
4550{
4551 unsigned i;
4552#ifdef DEBUG_DARWIN_GIP
4553 OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4554#else
4555 LogFlow(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4556#endif
4557
4558 /*
4559 * Initialize the structure.
4560 */
4561 memset(pGip, 0, PAGE_SIZE);
4562 pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
4563 pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
4564 pGip->u32Mode = supdrvGipDeterminTscMode(pDevExt);
4565 pGip->u32UpdateHz = uUpdateHz;
4566 pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
4567 pGip->u64NanoTSLastUpdateHz = u64NanoTS;
4568
4569 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4570 {
4571 pGip->aCPUs[i].u32TransactionId = 2;
4572 pGip->aCPUs[i].u64NanoTS = u64NanoTS;
4573 pGip->aCPUs[i].u64TSC = ASMReadTSC();
4574
4575 /*
4576 * We don't know the following values until we've executed updates.
4577 * So, we'll just insert very high values.
4578 */
4579 pGip->aCPUs[i].u64CpuHz = _4G + 1;
4580 pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
4581 pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
4582 pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
4583 pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
4584 pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
4585 pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
4586 pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
4587 pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
4588 pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
4589 }
4590
4591 /*
4592 * Link it to the device extension.
4593 */
4594 pDevExt->pGip = pGip;
4595 pDevExt->HCPhysGip = HCPhys;
4596 pDevExt->cGipUsers = 0;
4597
4598 return VINF_SUCCESS;
4599}
4600
4601
4602/**
4603 * Callback used by supdrvDetermineAsyncTSC to read the TSC on a CPU.
4604 *
4605 * @param idCpu Ignored.
4606 * @param pvUser1 Where to put the TSC.
4607 * @param pvUser2 Ignored.
4608 */
4609static DECLCALLBACK(void) supdrvDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
4610{
4611#if 1
4612 ASMAtomicWriteU64((uint64_t volatile *)pvUser1, ASMReadTSC());
4613#else
4614 *(uint64_t *)pvUser1 = ASMReadTSC();
4615#endif
4616}
4617
4618
4619/**
4620 * Determine if Async GIP mode is required because of TSC drift.
4621 *
4622 * When using the default/normal timer code it is essential that the time stamp counter
4623 * (TSC) never runs backwards, that is, a read of the counter should return
4624 * a bigger value than any previous read. This is guaranteed by the latest
4625 * AMD CPUs and by newer Intel CPUs which never enter the C2 state (P4). In any other
4626 * case we have to choose the asynchronous timer mode.
4627 *
4628 * @param poffMin   Where to return the smallest TSC delta determined between cores.
4629 * @return false if the time stamp counters appear to be in sync, true otherwise.
4630 */
4631bool VBOXCALL supdrvDetermineAsyncTsc(uint64_t *poffMin)
4632{
4633 /*
4634     * Just iterate over all the CPUs 8 times and make sure that the TSC is
4635     * strictly increasing. We don't bother taking TSC rollover into account.
4636 */
4637 RTCPUSET CpuSet;
4638 int iLastCpu = RTCpuLastIndex(RTMpGetSet(&CpuSet));
4639 int iCpu;
4640 int cLoops = 8;
4641 bool fAsync = false;
4642 int rc = VINF_SUCCESS;
4643 uint64_t offMax = 0;
4644 uint64_t offMin = ~(uint64_t)0;
4645 uint64_t PrevTsc = ASMReadTSC();
4646
4647 while (cLoops-- > 0)
4648 {
4649 for (iCpu = 0; iCpu <= iLastCpu; iCpu++)
4650 {
4651 uint64_t CurTsc;
4652 rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(iCpu), supdrvDetermineAsyncTscWorker, &CurTsc, NULL);
4653 if (RT_SUCCESS(rc))
4654 {
4655 if (CurTsc <= PrevTsc)
4656 {
4657 fAsync = true;
4658 offMin = offMax = PrevTsc - CurTsc;
4659 dprintf(("supdrvDetermineAsyncTsc: iCpu=%d cLoops=%d CurTsc=%llx PrevTsc=%llx\n",
4660 iCpu, cLoops, CurTsc, PrevTsc));
4661 break;
4662 }
4663
4664 /* Gather statistics (except the first time). */
4665 if (iCpu != 0 || cLoops != 7)
4666 {
4667 uint64_t off = CurTsc - PrevTsc;
4668 if (off < offMin)
4669 offMin = off;
4670 if (off > offMax)
4671 offMax = off;
4672 dprintf2(("%d/%d: off=%llx\n", cLoops, iCpu, off));
4673 }
4674
4675 /* Next */
4676 PrevTsc = CurTsc;
4677 }
4678 else if (rc == VERR_NOT_SUPPORTED)
4679 break;
4680 else
4681 AssertMsg(rc == VERR_CPU_NOT_FOUND || rc == VERR_CPU_OFFLINE, ("%d\n", rc));
4682 }
4683
4684 /* broke out of the loop. */
4685 if (iCpu <= iLastCpu)
4686 break;
4687 }
4688
4689 *poffMin = offMin; /* Almost RTMpOnSpecific profiling. */
4690 dprintf(("supdrvDetermineAsyncTsc: returns %d; iLastCpu=%d rc=%d offMin=%llx offMax=%llx\n",
4691 fAsync, iLastCpu, rc, offMin, offMax));
4692#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_OS2) && !defined(RT_OS_WINDOWS)
4693 OSDBGPRINT(("vboxdrv: fAsync=%d offMin=%#lx offMax=%#lx\n", fAsync, (long)offMin, (long)offMax));
4694#endif
4695 return fAsync;
4696}
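/*
 * Illustration of the check above: with the reads pinned to each CPU in turn,
 * a sequence like 1000 (cpu0), 1020 (cpu1), 990 (cpu2) trips the
 * CurTsc <= PrevTsc test on cpu2 and the function reports the TSCs as async;
 * *poffMin is only a rough lower bound on the cross-CPU delta, since it also
 * includes the RTMpOnSpecific round-trip time.
 */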
4697
4698
4699/**
4700 * Invalidates the GIP data upon termination.
4701 *
4702 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4703 */
4704void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
4705{
4706 unsigned i;
4707 pGip->u32Magic = 0;
4708 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4709 {
4710 pGip->aCPUs[i].u64NanoTS = 0;
4711 pGip->aCPUs[i].u64TSC = 0;
4712 pGip->aCPUs[i].iTSCHistoryHead = 0;
4713 }
4714}
4715
4716
4717/**
4718 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
4719 * updates all the per cpu data except the transaction id.
4720 *
4721 * @param pGip The GIP.
4722 * @param pGipCpu Pointer to the per cpu data.
4723 * @param u64NanoTS The current time stamp.
4724 * @param u64TSC The current TSC.
4725 */
4726static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS, uint64_t u64TSC)
4727{
4728 uint64_t u64TSCDelta;
4729 uint32_t u32UpdateIntervalTSC;
4730 uint32_t u32UpdateIntervalTSCSlack;
4731 unsigned iTSCHistoryHead;
4732 uint64_t u64CpuHz;
4733
4734 /* Delta between this and the previous update. */
4735 pGipCpu->u32UpdateIntervalNS = (uint32_t)(u64NanoTS - pGipCpu->u64NanoTS);
4736
4737 /*
4738 * Update the NanoTS.
4739 */
4740 ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);
4741
4742 /*
4743 * Calc TSC delta.
4744 */
4745 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
4746 u64TSCDelta = u64TSC - pGipCpu->u64TSC;
4747 ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);
4748
4749 if (u64TSCDelta >> 32)
4750 {
4751 u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
4752 pGipCpu->cErrors++;
4753 }
4754
4755 /*
4756 * TSC History.
4757 */
4758 Assert(RT_ELEMENTS(pGipCpu->au32TSCHistory) == 8);
4759
4760 iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
4761 ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
4762 ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
4763
4764 /*
4765 * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
4766 */
4767 if (pGip->u32UpdateHz >= 1000)
4768 {
4769 uint32_t u32;
4770 u32 = pGipCpu->au32TSCHistory[0];
4771 u32 += pGipCpu->au32TSCHistory[1];
4772 u32 += pGipCpu->au32TSCHistory[2];
4773 u32 += pGipCpu->au32TSCHistory[3];
4774 u32 >>= 2;
4775 u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
4776 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
4777 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
4778 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
4779 u32UpdateIntervalTSC >>= 2;
4780 u32UpdateIntervalTSC += u32;
4781 u32UpdateIntervalTSC >>= 1;
4782
4783        /* Value chosen for a 2GHz Athlon64 running Linux 2.6.10/11. */
4784 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
4785 }
4786 else if (pGip->u32UpdateHz >= 90)
4787 {
4788 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4789 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
4790 u32UpdateIntervalTSC >>= 1;
4791
4792        /* Value chosen on a 2GHz ThinkPad running Windows. */
4793 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
4794 }
4795 else
4796 {
4797 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4798
4799        /* This value hasn't been checked yet... waiting for OS/2 and 33Hz timers. :-) */
4800 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
4801 }
4802 ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
4803
4804 /*
4805 * CpuHz.
4806 */
4807 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
4808 ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
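    /* Worked example: at a 100 Hz update rate an interval of ~24000000 TSC ticks
       gives u64CpuHz = 24000000 * 100 = 2.4 GHz; note that the slack
       (interval >> 7 for the 90..999 Hz case) is only added to the exported
       u32UpdateIntervalTSC above, not to this frequency estimate. */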
4809}
4810
4811
4812/**
4813 * Updates the GIP.
4814 *
4815 * @param pGip Pointer to the GIP.
4816 * @param u64NanoTS         The current nanosecond timestamp.
4817 * @param u64TSC            The current TSC timestamp.
4818 */
4819void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, uint64_t u64TSC)
4820{
4821 /*
4822     * Determine the relevant CPU data.
4823 */
4824 PSUPGIPCPU pGipCpu;
4825 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4826 pGipCpu = &pGip->aCPUs[0];
4827 else
4828 {
4829 unsigned iCpu = ASMGetApicId();
4830        if (RT_UNLIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
4831 return;
4832 pGipCpu = &pGip->aCPUs[iCpu];
4833 }
4834
4835 /*
4836 * Start update transaction.
4837 */
4838 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4839 {
4840        /* this can happen on win32 if we're taking too long and there are more CPUs around. shouldn't happen though. */
4841 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4842 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4843 pGipCpu->cErrors++;
4844 return;
4845 }
4846
4847 /*
4848 * Recalc the update frequency every 0x800th time.
4849 */
4850 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4851 {
4852 if (pGip->u64NanoTSLastUpdateHz)
4853 {
4854#ifdef RT_ARCH_AMD64 /** @todo fix 64-bit div here to work on x86 linux. */
4855 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4856 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
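            /* Worked example: if the last GIP_UPDATEHZ_RECALC_FREQ (2048) updates
               spanned u64Delta = 20480000000 ns (20.48 s), this yields
               u32UpdateHz = 100, which passes the 30..2000 sanity range below. */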
4857 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4858 {
4859 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4860 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4861 }
4862#endif
4863 }
4864 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4865 }
4866
4867 /*
4868 * Update the data.
4869 */
4870 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS, u64TSC);
4871
4872 /*
4873 * Complete transaction.
4874 */
4875 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4876}
4877
4878
4879/**
4880 * Updates the per cpu GIP data for the calling cpu.
4881 *
4882 * @param pGip Pointer to the GIP.
4883 * @param u64NanoTS         The current nanosecond timestamp.
4884 * @param u64TSC            The current TSC timestamp.
4885 * @param iCpu The CPU index.
4886 */
4887void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, uint64_t u64TSC, unsigned iCpu)
4888{
4889 PSUPGIPCPU pGipCpu;
4890
4891 if (RT_LIKELY(iCpu < RT_ELEMENTS(pGip->aCPUs)))
4892 {
4893 pGipCpu = &pGip->aCPUs[iCpu];
4894
4895 /*
4896 * Start update transaction.
4897 */
4898 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4899 {
4900 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4901 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4902 pGipCpu->cErrors++;
4903 return;
4904 }
4905
4906 /*
4907 * Update the data.
4908 */
4909 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS, u64TSC);
4910
4911 /*
4912 * Complete transaction.
4913 */
4914 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4915 }
4916}
4917