VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDRVShared.c@2321

Last change on this file since 2321 was 2243, checked in by vboxsync, 18 years ago

lower the valid IDT address from 2GB to 1GB

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 146.1 KB
1/** @file
2 *
3 * VBox host drivers - Ring-0 support drivers - Shared code:
4 * Driver code for all host platforms
5 */
6
7/*
8 * Copyright (C) 2006 InnoTek Systemberatung GmbH
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License as published by the Free Software Foundation,
14 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
15 * distribution. VirtualBox OSE is distributed in the hope that it will
16 * be useful, but WITHOUT ANY WARRANTY of any kind.
17 *
18 * If you received this file as part of a commercial VirtualBox
19 * distribution, then only the terms of your commercial VirtualBox
20 * license agreement apply instead of the previous paragraph.
21 */
22
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#include "SUPDRV.h"
28#ifndef PAGE_SHIFT
29# include <iprt/param.h>
30#endif
31#include <iprt/alloc.h>
32#include <iprt/semaphore.h>
33#include <iprt/spinlock.h>
34#include <iprt/thread.h>
35#include <iprt/process.h>
36#include <iprt/log.h>
37#ifdef VBOX_WITHOUT_IDT_PATCHING
38# include <VBox/vmm.h>
39#endif
40
41
42/*******************************************************************************
43* Defined Constants And Macros *
44*******************************************************************************/
45/* from x86.h - clashes with Linux, hence this duplication */
46#undef X86_CR0_PG
47#define X86_CR0_PG BIT(31)
48#undef X86_CR0_PE
49#define X86_CR0_PE BIT(0)
50#undef X86_CPUID_AMD_FEATURE_EDX_NX
51#define X86_CPUID_AMD_FEATURE_EDX_NX BIT(20)
52#undef MSR_K6_EFER
53#define MSR_K6_EFER 0xc0000080
54#undef MSR_K6_EFER_NXE
55#define MSR_K6_EFER_NXE BIT(11)
56#undef MSR_K6_EFER_LMA
57#define MSR_K6_EFER_LMA BIT(10)
58#undef X86_CR4_PGE
59#define X86_CR4_PGE BIT(7)
60#undef X86_CR4_PAE
61#define X86_CR4_PAE BIT(5)
62#undef X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
63#define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE BIT(29)
64
65
66/** The frequency by which we recalculate the u32UpdateHz and
67 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
68#define GIP_UPDATEHZ_RECALC_FREQ 0x800
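/* Worked example (figures are illustrative, not from the source): with a GIP
 * timer firing at 100 Hz, 0x800 = 2048 ticks means the two members above are
 * refreshed roughly every 20 seconds; the power-of-two requirement presumably
 * lets the tick counter be tested with a cheap mask instead of a division. */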
69
70
71/*******************************************************************************
72* Global Variables *
73*******************************************************************************/
74/**
75 * Array of the R0 SUP API.
76 */
77static SUPFUNC g_aFunctions[] =
78{
79 /* name function */
80 { "SUPR0ObjRegister", (void *)SUPR0ObjRegister },
81 { "SUPR0ObjAddRef", (void *)SUPR0ObjAddRef },
82 { "SUPR0ObjRelease", (void *)SUPR0ObjRelease },
83 { "SUPR0ObjVerifyAccess", (void *)SUPR0ObjVerifyAccess },
84 { "SUPR0LockMem", (void *)SUPR0LockMem },
85 { "SUPR0UnlockMem", (void *)SUPR0UnlockMem },
86 { "SUPR0ContAlloc", (void *)SUPR0ContAlloc },
87 { "SUPR0ContFree", (void *)SUPR0ContFree },
88 { "SUPR0MemAlloc", (void *)SUPR0MemAlloc },
89 { "SUPR0MemGetPhys", (void *)SUPR0MemGetPhys },
90 { "SUPR0MemFree", (void *)SUPR0MemFree },
91 { "SUPR0Printf", (void *)SUPR0Printf },
92 { "RTMemAlloc", (void *)RTMemAlloc },
93 { "RTMemAllocZ", (void *)RTMemAllocZ },
94 { "RTMemFree", (void *)RTMemFree },
 95/* These don't work yet on Linux - use fast mutexes!
96 { "RTSemMutexCreate", (void *)RTSemMutexCreate },
97 { "RTSemMutexRequest", (void *)RTSemMutexRequest },
98 { "RTSemMutexRelease", (void *)RTSemMutexRelease },
99 { "RTSemMutexDestroy", (void *)RTSemMutexDestroy },
100*/
101 { "RTSemFastMutexCreate", (void *)RTSemFastMutexCreate },
102 { "RTSemFastMutexDestroy", (void *)RTSemFastMutexDestroy },
103 { "RTSemFastMutexRequest", (void *)RTSemFastMutexRequest },
104 { "RTSemFastMutexRelease", (void *)RTSemFastMutexRelease },
105 { "RTSemEventCreate", (void *)RTSemEventCreate },
106 { "RTSemEventSignal", (void *)RTSemEventSignal },
107 { "RTSemEventWait", (void *)RTSemEventWait },
108 { "RTSemEventDestroy", (void *)RTSemEventDestroy },
109 { "RTSpinlockCreate", (void *)RTSpinlockCreate },
110 { "RTSpinlockDestroy", (void *)RTSpinlockDestroy },
111 { "RTSpinlockAcquire", (void *)RTSpinlockAcquire },
112 { "RTSpinlockRelease", (void *)RTSpinlockRelease },
113 { "RTSpinlockAcquireNoInts", (void *)RTSpinlockAcquireNoInts },
114 { "RTSpinlockReleaseNoInts", (void *)RTSpinlockReleaseNoInts },
115 { "RTThreadNativeSelf", (void *)RTThreadNativeSelf },
116 { "RTThreadSleep", (void *)RTThreadSleep },
117 { "RTThreadYield", (void *)RTThreadYield },
118#if 0 /* Thread APIs, Part 2. */
119 { "RTThreadSelf", (void *)RTThreadSelf },
120 { "RTThreadCreate", (void *)RTThreadCreate },
121 { "RTThreadGetNative", (void *)RTThreadGetNative },
122 { "RTThreadWait", (void *)RTThreadWait },
123 { "RTThreadWaitNoResume", (void *)RTThreadWaitNoResume },
124 { "RTThreadGetName", (void *)RTThreadGetName },
125 { "RTThreadSelfName", (void *)RTThreadSelfName },
126 { "RTThreadGetType", (void *)RTThreadGetType },
127 { "RTThreadUserSignal", (void *)RTThreadUserSignal },
128 { "RTThreadUserReset", (void *)RTThreadUserReset },
129 { "RTThreadUserWait", (void *)RTThreadUserWait },
130 { "RTThreadUserWaitNoResume", (void *)RTThreadUserWaitNoResume },
131#endif
132 { "RTLogDefaultInstance", (void *)RTLogDefaultInstance },
133 { "RTLogRelDefaultInstance", (void *)RTLogRelDefaultInstance },
134 { "RTLogSetDefaultInstanceThread", (void *)RTLogSetDefaultInstanceThread },
135 { "RTLogLogger", (void *)RTLogLogger },
136 { "RTLogLoggerEx", (void *)RTLogLoggerEx },
137 { "RTLogLoggerExV", (void *)RTLogLoggerExV },
138 { "AssertMsg1", (void *)AssertMsg1 },
139 { "AssertMsg2", (void *)AssertMsg2 },
140};
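/*
 * Minimal sketch (not part of the original file) of how a function exported
 * through g_aFunctions can be resolved by name.  The SUPFUNC member names
 * szName and pfn are assumptions, as are the helper's name and the use of
 * strcmp; the real loader code appears further down in this file.
 */
#if 0 /* illustrative sketch */
static void *supdrvExampleLookupFunction(const char *pszSymbol)
{
    unsigned i;
    for (i = 0; i < sizeof(g_aFunctions) / sizeof(g_aFunctions[0]); i++)
        if (!strcmp(g_aFunctions[i].szName, pszSymbol))
            return g_aFunctions[i].pfn;
    return NULL;    /* unknown symbol */
}
#endif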
141
142
143/*******************************************************************************
144* Internal Functions *
145*******************************************************************************/
146__BEGIN_DECLS
147static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
148static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
149#ifndef VBOX_WITHOUT_IDT_PATCHING
150static int supdrvIOCtl_IdtInstall(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPIDTINSTALL_IN pIn, PSUPIDTINSTALL_OUT pOut);
151static PSUPDRVPATCH supdrvIdtPatchOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch);
152static int supdrvIOCtl_IdtRemoveAll(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession);
153static void supdrvIdtRemoveOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch);
154static void supdrvIdtWrite(volatile void *pvIdtEntry, const SUPDRVIDTE *pNewIDTEntry);
155#endif /* !VBOX_WITHOUT_IDT_PATCHING */
156static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN_IN pIn, PSUPLDROPEN_OUT pOut);
157static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD_IN pIn);
158static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE_IN pIn);
159static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL_IN pIn, PSUPLDRGETSYMBOL_OUT pOut);
160static int supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry);
161static void supdrvLdrUnsetR0EP(PSUPDRVDEVEXT pDevExt);
162static void supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
163static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
164static int supdrvIOCtl_GetPagingMode(PSUPGETPAGINGMODE_OUT pOut);
165static SUPGIPMODE supdrvGipDeterminTscMode(void);
166#ifdef USE_NEW_OS_INTERFACE
167static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
168static int supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
169static DECLCALLBACK(void) supdrvGipTimer(PRTTIMER pTimer, void *pvUser);
170#endif
171
172__END_DECLS
173
174
175/**
176 * Initializes the device extension structure.
177 *
178 * @returns 0 on success.
179 * @returns SUPDRV_ERR_* on failure.
180 * @param pDevExt The device extension to initialize.
181 */
182int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
183{
184 /*
185 * Initialize it.
186 */
187 int rc;
188 memset(pDevExt, 0, sizeof(*pDevExt));
189 rc = RTSpinlockCreate(&pDevExt->Spinlock);
190 if (!rc)
191 {
192 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
193 if (!rc)
194 {
195 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
196 if (!rc)
197 {
198#ifdef USE_NEW_OS_INTERFACE
199 rc = supdrvGipCreate(pDevExt);
200 if (RT_SUCCESS(rc))
201 {
202 pDevExt->u32Cookie = BIRD;
203 return 0;
204 }
205#else
206 pDevExt->u32Cookie = BIRD;
207 return 0;
208#endif
209 }
210 RTSemFastMutexDestroy(pDevExt->mtxLdr);
211 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
212 }
213 RTSpinlockDestroy(pDevExt->Spinlock);
214 pDevExt->Spinlock = NIL_RTSPINLOCK;
215 }
216 return rc;
217}
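/*
 * Sketch (not from the original source) of how an OS specific entry point
 * typically pairs supdrvInitDevExt with supdrvDeleteDevExt.  The g_DevExt
 * variable and the VBoxDrvExampleInit/Term names are invented for the example.
 */
#if 0 /* illustrative sketch */
static SUPDRVDEVEXT g_DevExt;

static int VBoxDrvExampleInit(void)
{
    int rc = supdrvInitDevExt(&g_DevExt);   /* 0 on success, SUPDRV_ERR_* otherwise */
    if (rc)
        return rc;
    /* ... register the device node with the host OS here ... */
    return 0;
}

static void VBoxDrvExampleTerm(void)
{
    /* ... deregister the device node first ... */
    supdrvDeleteDevExt(&g_DevExt);
}
#endif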
218
219/**
220 * Delete the device extension (e.g. cleanup members).
221 *
222 * @returns 0.
223 * @param pDevExt The device extension to delete.
224 */
225int VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
226{
227#ifndef VBOX_WITHOUT_IDT_PATCHING
228 PSUPDRVPATCH pPatch;
229#endif
230 PSUPDRVOBJ pObj;
231 PSUPDRVUSAGE pUsage;
232
233 /*
234 * Kill mutexes and spinlocks.
235 */
236 RTSemFastMutexDestroy(pDevExt->mtxGip);
237 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
238 RTSemFastMutexDestroy(pDevExt->mtxLdr);
239 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
240 RTSpinlockDestroy(pDevExt->Spinlock);
241 pDevExt->Spinlock = NIL_RTSPINLOCK;
242
243 /*
244 * Free lists.
245 */
246
247#ifndef VBOX_WITHOUT_IDT_PATCHING
248 /* patches */
249 /** @todo make sure we don't uninstall patches which have been patched by someone else. */
250 pPatch = pDevExt->pIdtPatchesFree;
251 pDevExt->pIdtPatchesFree = NULL;
252 while (pPatch)
253 {
254 void *pvFree = pPatch;
255 pPatch = pPatch->pNext;
256 RTMemExecFree(pvFree);
257 }
258#endif /* !VBOX_WITHOUT_IDT_PATCHING */
259
260 /* objects. */
261 pObj = pDevExt->pObjs;
262#if !defined(DEBUG_bird) || !defined(__LINUX__) /* breaks unloading, temporary, remove me! */
263 Assert(!pObj); /* (can trigger on forced unloads) */
264#endif
265 pDevExt->pObjs = NULL;
266 while (pObj)
267 {
268 void *pvFree = pObj;
269 pObj = pObj->pNext;
270 RTMemFree(pvFree);
271 }
272
273 /* usage records. */
274 pUsage = pDevExt->pUsageFree;
275 pDevExt->pUsageFree = NULL;
276 while (pUsage)
277 {
278 void *pvFree = pUsage;
279 pUsage = pUsage->pNext;
280 RTMemFree(pvFree);
281 }
282
283#ifdef USE_NEW_OS_INTERFACE
284 /* kill the GIP */
285 supdrvGipDestroy(pDevExt);
286#endif
287
288 return 0;
289}
290
291
292/**
293 * Create session.
294 *
295 * @returns 0 on success.
296 * @returns SUPDRV_ERR_* on failure.
297 * @param pDevExt Device extension.
298 * @param ppSession Where to store the pointer to the session data.
299 */
300int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION *ppSession)
301{
302 /*
303 * Allocate memory for the session data.
304 */
305 int rc = SUPDRV_ERR_NO_MEMORY;
306 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
307 if (pSession)
308 {
309 /* Initialize session data. */
310 rc = RTSpinlockCreate(&pSession->Spinlock);
311 if (!rc)
312 {
313 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
314 pSession->pDevExt = pDevExt;
315 pSession->u32Cookie = BIRD_INV;
316 /*pSession->pLdrUsage = NULL;
317 pSession->pPatchUsage = NULL;
318 pSession->pUsage = NULL;
319 pSession->pGip = NULL;
320 pSession->fGipReferenced = false;
321 pSession->Bundle.cUsed = 0 */
322
323 dprintf(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
324 return 0;
325 }
326
327 RTMemFree(pSession);
328 *ppSession = NULL;
329 }
330
331 dprintf(("Failed to create spinlock, rc=%d!\n", rc));
332 return rc;
333}
334
335
336/**
337 * Shared code for cleaning up a session.
338 *
339 * @param pDevExt Device extension.
340 * @param pSession Session data.
341 * This data will be freed by this routine.
342 */
343void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
344{
345 /*
346 * Cleanup the session first.
347 */
348 supdrvCleanupSession(pDevExt, pSession);
349
350 /*
351 * Free the rest of the session stuff.
352 */
353 RTSpinlockDestroy(pSession->Spinlock);
354 pSession->Spinlock = NIL_RTSPINLOCK;
355 pSession->pDevExt = NULL;
356 RTMemFree(pSession);
357 dprintf2(("supdrvCloseSession: returns\n"));
358}
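/*
 * Sketch (not from the original source) of the session life cycle as the OS
 * specific open/close handlers would drive it.  The handler names are
 * invented for the example.
 */
#if 0 /* illustrative sketch */
static int VBoxDrvExampleOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION *ppSession)
{
    /* One session per open file handle. */
    return supdrvCreateSession(pDevExt, ppSession);
}

static void VBoxDrvExampleClose(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    /* Releases objects, memory, loaded images and the GIP mapping, then frees pSession. */
    supdrvCloseSession(pDevExt, pSession);
}
#endif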
359
360
361/**
362 * Shared code for cleaning up a session (but not quite freeing it).
363 *
364 * This is primarily intended for Mac OS X, where we have to clean up the memory
365 * stuff before the file handle is closed.
366 *
367 * @param pDevExt Device extension.
368 * @param pSession Session data.
369 * This data will be freed by this routine.
370 */
371void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
372{
373 PSUPDRVBUNDLE pBundle;
374 dprintf(("supdrvCleanupSession: pSession=%p\n", pSession));
375
376 /*
377 * Remove logger instances related to this session.
378 * (This assumes the dprintf and dprintf2 macros don't use the normal logging.)
379 */
380 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
381
382#ifndef VBOX_WITHOUT_IDT_PATCHING
383 /*
384 * Uninstall any IDT patches installed for this session.
385 */
386 supdrvIOCtl_IdtRemoveAll(pDevExt, pSession);
387#endif
388
389 /*
390 * Release object references made in this session.
391 * In theory there should be no one racing us in this session.
392 */
393 dprintf2(("release objects - start\n"));
394 if (pSession->pUsage)
395 {
396 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
397 PSUPDRVUSAGE pUsage;
398 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
399
400 while ((pUsage = pSession->pUsage) != NULL)
401 {
402 PSUPDRVOBJ pObj = pUsage->pObj;
403 pSession->pUsage = pUsage->pNext;
404
405 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
406 if (pUsage->cUsage < pObj->cUsage)
407 {
408 pObj->cUsage -= pUsage->cUsage;
409 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
410 }
411 else
412 {
413 /* Destroy the object and free the record. */
414 if (pDevExt->pObjs == pObj)
415 pDevExt->pObjs = pObj->pNext;
416 else
417 {
418 PSUPDRVOBJ pObjPrev;
419 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
420 if (pObjPrev->pNext == pObj)
421 {
422 pObjPrev->pNext = pObj->pNext;
423 break;
424 }
425 Assert(pObjPrev);
426 }
427 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
428
429 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
430 RTMemFree(pObj);
431 }
432
433 /* free it and continue. */
434 RTMemFree(pUsage);
435
436 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
437 }
438
439 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
440 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
441 }
442 dprintf2(("release objects - done\n"));
443
444 /*
445 * Release memory allocated in the session.
446 *
447 * We do not serialize this as we assume that the application will
448 * not allocate memory while closing the file handle object.
449 */
450 dprintf2(("freeing memory:\n"));
451 pBundle = &pSession->Bundle;
452 while (pBundle)
453 {
454 PSUPDRVBUNDLE pToFree;
455 unsigned i;
456
457 /*
458 * Check and unlock all entries in the bundle.
459 */
460 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
461 {
462#ifdef USE_NEW_OS_INTERFACE
463 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
464 {
465 int rc;
466 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
467 {
468 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
469 AssertRC(rc); /** @todo figure out how to handle this. */
470 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
471 }
472 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, false);
473 AssertRC(rc); /** @todo figure out how to handle this. */
474 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
475 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
476 }
477
478#else /* !USE_NEW_OS_INTERFACE */
479 if ( pBundle->aMem[i].pvR0
480 || pBundle->aMem[i].pvR3)
481 {
482 dprintf2(("eType=%d pvR0=%p pvR3=%p cb=%d\n", pBundle->aMem[i].eType,
483 pBundle->aMem[i].pvR0, pBundle->aMem[i].pvR3, pBundle->aMem[i].cb));
484 switch (pBundle->aMem[i].eType)
485 {
486 case MEMREF_TYPE_LOCKED:
487 supdrvOSUnlockMemOne(&pBundle->aMem[i]);
488 break;
489 case MEMREF_TYPE_CONT:
490 supdrvOSContFreeOne(&pBundle->aMem[i]);
491 break;
492 case MEMREF_TYPE_LOW:
493 supdrvOSLowFreeOne(&pBundle->aMem[i]);
494 break;
495 case MEMREF_TYPE_MEM:
496 supdrvOSMemFreeOne(&pBundle->aMem[i]);
497 break;
498 default:
499 break;
500 }
501 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
502 }
503#endif /* !USE_NEW_OS_INTERFACE */
504 }
505
506 /*
507 * Advance and free previous bundle.
508 */
509 pToFree = pBundle;
510 pBundle = pBundle->pNext;
511
512 pToFree->pNext = NULL;
513 pToFree->cUsed = 0;
514 if (pToFree != &pSession->Bundle)
515 RTMemFree(pToFree);
516 }
517 dprintf2(("freeing memory - done\n"));
518
519 /*
520 * Loaded images need to be dereferenced and possibly freed up.
521 */
522 RTSemFastMutexRequest(pDevExt->mtxLdr);
523 dprintf2(("freeing images:\n"));
524 if (pSession->pLdrUsage)
525 {
526 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
527 pSession->pLdrUsage = NULL;
528 while (pUsage)
529 {
530 void *pvFree = pUsage;
531 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
532 if (pImage->cUsage > pUsage->cUsage)
533 pImage->cUsage -= pUsage->cUsage;
534 else
535 supdrvLdrFree(pDevExt, pImage);
536 pUsage->pImage = NULL;
537 pUsage = pUsage->pNext;
538 RTMemFree(pvFree);
539 }
540 }
541 RTSemFastMutexRelease(pDevExt->mtxLdr);
542 dprintf2(("freeing images - done\n"));
543
544 /*
545 * Unmap the GIP.
546 */
547 dprintf2(("unmapping GIP:\n"));
548#ifdef USE_NEW_OS_INTERFACE
549 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
550#else
551 if (pSession->pGip)
552#endif
553 {
554 SUPR0GipUnmap(pSession);
555#ifndef USE_NEW_OS_INTERFACE
556 pSession->pGip = NULL;
557#endif
558 pSession->fGipReferenced = 0;
559 }
560 dprintf2(("unmapping GIP - done\n"));
561}
562
563
564#ifdef VBOX_WITHOUT_IDT_PATCHING
565/**
566 * Fast path I/O Control worker.
567 *
568 * @returns 0 on success.
569 * @returns One of the SUPDRV_ERR_* on failure.
570 * @param uIOCtl Function number.
571 * @param pDevExt Device extension.
572 * @param pSession Session data.
573 */
574int VBOXCALL supdrvIOCtlFast(unsigned uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
575{
576 /*
577 * Disable interrupts before invoking VMMR0Entry() because it ASSUMES
578 * that interrupts are disabled. (We check the two prereqs after doing
579 * this only to allow the compiler to optimize things better.)
580 */
581 int rc;
582 RTCCUINTREG uFlags = ASMGetFlags();
583 ASMIntDisable();
584
585 if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0Entry))
586 {
587 switch (uIOCtl)
588 {
589 case SUP_IOCTL_FAST_DO_RAW_RUN:
590 rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_RAW_RUN, NULL);
591 break;
592 case SUP_IOCTL_FAST_DO_HWACC_RUN:
593 rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_HWACC_RUN, NULL);
594 break;
595 case SUP_IOCTL_FAST_DO_NOP:
596 rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_NOP, NULL);
597 break;
598 default:
599 rc = VERR_INTERNAL_ERROR;
600 break;
601 }
602 }
603 else
604 rc = VERR_INTERNAL_ERROR;
605
606 ASMSetFlags(uFlags);
607 return rc;
608}
609#endif /* VBOX_WITHOUT_IDT_PATCHING */
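/*
 * Sketch (not from the original source) of how an OS specific ioctl handler
 * would route the buffer-less fast codes to supdrvIOCtlFast and everything
 * else to supdrvIOCtl.  The handler name is invented for the example.
 */
#if 0 /* illustrative sketch */
static int VBoxDrvExampleIOCtl(unsigned uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession,
                               void *pvIn, unsigned cbIn, void *pvOut, unsigned cbOut, unsigned *pcbReturned)
{
# ifdef VBOX_WITHOUT_IDT_PATCHING
    if (    uIOCtl == SUP_IOCTL_FAST_DO_RAW_RUN
        ||  uIOCtl == SUP_IOCTL_FAST_DO_HWACC_RUN
        ||  uIOCtl == SUP_IOCTL_FAST_DO_NOP)
        return supdrvIOCtlFast(uIOCtl, pDevExt, pSession);
# endif
    return supdrvIOCtl(uIOCtl, pDevExt, pSession, pvIn, cbIn, pvOut, cbOut, pcbReturned);
}
#endif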
610
611
612/**
613 * I/O Control worker.
614 *
615 * @returns 0 on success.
616 * @returns One of the SUPDRV_ERR_* on failure.
617 * @param uIOCtl Function number.
618 * @param pDevExt Device extension.
619 * @param pSession Session data.
620 * @param pvIn Input data.
621 * @param cbIn Size of input data.
622 * @param pvOut Output data.
623 * IMPORTANT! This buffer may be shared with the input
624 * data, so do not write to it before the input
625 * data has been read!
626 * @param cbOut Size of output data.
627 * @param pcbReturned Size of the returned data.
628 */
629int VBOXCALL supdrvIOCtl(unsigned int uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession,
630 void *pvIn, unsigned cbIn, void *pvOut, unsigned cbOut, unsigned *pcbReturned)
631{
632 *pcbReturned = 0;
633 switch (uIOCtl)
634 {
635 case SUP_IOCTL_COOKIE:
636 {
637 PSUPCOOKIE_IN pIn = (PSUPCOOKIE_IN)pvIn;
638 PSUPCOOKIE_OUT pOut = (PSUPCOOKIE_OUT)pvOut;
639
640 /*
641 * Validate.
642 */
643 if ( cbIn != sizeof(*pIn)
644 || cbOut != sizeof(*pOut))
645 {
646 OSDBGPRINT(("SUP_IOCTL_COOKIE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
647 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
648 return SUPDRV_ERR_INVALID_PARAM;
649 }
650 if (strncmp(pIn->szMagic, SUPCOOKIE_MAGIC, sizeof(pIn->szMagic)))
651 {
652 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pIn->szMagic));
653 return SUPDRV_ERR_INVALID_MAGIC;
654 }
655
656 /*
657 * Match the version.
658 * The current logic is very simple, match the major interface version.
659 */
660 if ( pIn->u32MinVersion > SUPDRVIOC_VERSION
661 || (pIn->u32MinVersion & 0xffff0000) != (SUPDRVIOC_VERSION & 0xffff0000))
662 {
663 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
664 pIn->u32ReqVersion, pIn->u32MinVersion, SUPDRVIOC_VERSION));
665 pOut->u32Cookie = 0xffffffff;
666 pOut->u32SessionCookie = 0xffffffff;
667 pOut->u32SessionVersion = 0xffffffff;
668 pOut->u32DriverVersion = SUPDRVIOC_VERSION;
669 pOut->pSession = NULL;
670 pOut->cFunctions = 0;
671 *pcbReturned = sizeof(*pOut);
672 return SUPDRV_ERR_VERSION_MISMATCH;
673 }
674
675 /*
676 * Fill in return data and be gone.
677 * N.B. The first one to change SUPDRVIOC_VERSION shall make sure that
678 * u32SessionVersion <= u32ReqVersion!
679 */
680 /** @todo A more secure cookie negotiation? */
681 pOut->u32Cookie = pDevExt->u32Cookie;
682 pOut->u32SessionCookie = pSession->u32Cookie;
683 pOut->u32SessionVersion = SUPDRVIOC_VERSION;
684 pOut->u32DriverVersion = SUPDRVIOC_VERSION;
685 pOut->pSession = pSession;
686 pOut->cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
687 *pcbReturned = sizeof(*pOut);
688 return 0;
689 }
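 /*
  * Worked example of the version match above (the version value is
  * hypothetical): with SUPDRVIOC_VERSION = 0x00040002, a client whose
  * u32MinVersion is 0x00030005 is rejected because the upper 16 bits
  * differ, and 0x00040003 is rejected because it exceeds the driver
  * version; 0x00040000 through 0x00040002 are accepted.
  */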
690
691
692 case SUP_IOCTL_QUERY_FUNCS:
693 {
694 unsigned cFunctions;
695 PSUPQUERYFUNCS_IN pIn = (PSUPQUERYFUNCS_IN)pvIn;
696 PSUPQUERYFUNCS_OUT pOut = (PSUPQUERYFUNCS_OUT)pvOut;
697
698 /*
699 * Validate.
700 */
701 if ( cbIn != sizeof(*pIn)
702 || cbOut < sizeof(*pOut))
703 {
704 dprintf(("SUP_IOCTL_QUERY_FUNCS: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
705 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
706 return SUPDRV_ERR_INVALID_PARAM;
707 }
708 if ( pIn->u32Cookie != pDevExt->u32Cookie
709 || pIn->u32SessionCookie != pSession->u32Cookie )
710 {
711 dprintf(("SUP_IOCTL_QUERY_FUNCS: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
712 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
713 return SUPDRV_ERR_INVALID_MAGIC;
714 }
715
716 /*
717 * Copy the functions.
718 */
719 cFunctions = (cbOut - RT_OFFSETOF(SUPQUERYFUNCS_OUT, aFunctions)) / sizeof(pOut->aFunctions[0]);
720 cFunctions = RT_MIN(cFunctions, ELEMENTS(g_aFunctions));
721 AssertMsg(cFunctions == ELEMENTS(g_aFunctions),
722 ("Why isn't R3 querying all the functions!? cFunctions=%d while there are %d available\n",
723 cFunctions, ELEMENTS(g_aFunctions)));
724 pOut->cFunctions = cFunctions;
725 memcpy(&pOut->aFunctions[0], g_aFunctions, sizeof(pOut->aFunctions[0]) * cFunctions);
726 *pcbReturned = RT_OFFSETOF(SUPQUERYFUNCS_OUT, aFunctions[cFunctions]);
727 return 0;
728 }
729
730
731 case SUP_IOCTL_IDT_INSTALL:
732 {
733 PSUPIDTINSTALL_IN pIn = (PSUPIDTINSTALL_IN)pvIn;
734 PSUPIDTINSTALL_OUT pOut = (PSUPIDTINSTALL_OUT)pvOut;
735
736 /*
737 * Validate.
738 */
739 if ( cbIn != sizeof(*pIn)
740 || cbOut != sizeof(*pOut))
741 {
742 dprintf(("SUP_IOCTL_INSTALL: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
743 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
744 return SUPDRV_ERR_INVALID_PARAM;
745 }
746 if ( pIn->u32Cookie != pDevExt->u32Cookie
747 || pIn->u32SessionCookie != pSession->u32Cookie )
748 {
749 dprintf(("SUP_IOCTL_INSTALL: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
750 pIn->u32Cookie, pDevExt->u32Cookie,
751 pIn->u32SessionCookie, pSession->u32Cookie));
752 return SUPDRV_ERR_INVALID_MAGIC;
753 }
754
755 *pcbReturned = sizeof(*pOut);
756#ifndef VBOX_WITHOUT_IDT_PATCHING
757 return supdrvIOCtl_IdtInstall(pDevExt, pSession, pIn, pOut);
758#else
759 pOut->u8Idt = 3;
760 return 0;
761#endif
762 }
763
764
765 case SUP_IOCTL_IDT_REMOVE:
766 {
767 PSUPIDTREMOVE_IN pIn = (PSUPIDTREMOVE_IN)pvIn;
768
769 /*
770 * Validate.
771 */
772 if ( cbIn != sizeof(*pIn)
773 || cbOut != 0)
774 {
775 dprintf(("SUP_IOCTL_REMOVE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
776 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
777 return SUPDRV_ERR_INVALID_PARAM;
778 }
779 if ( pIn->u32Cookie != pDevExt->u32Cookie
780 || pIn->u32SessionCookie != pSession->u32Cookie )
781 {
782 dprintf(("SUP_IOCTL_REMOVE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
783 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
784 return SUPDRV_ERR_INVALID_MAGIC;
785 }
786
787#ifndef VBOX_WITHOUT_IDT_PATCHING
788 return supdrvIOCtl_IdtRemoveAll(pDevExt, pSession);
789#else
790 return 0;
791#endif
792 }
793
794
795 case SUP_IOCTL_PINPAGES:
796 {
797 int rc;
798 PSUPPINPAGES_IN pIn = (PSUPPINPAGES_IN)pvIn;
799 PSUPPINPAGES_OUT pOut = (PSUPPINPAGES_OUT)pvOut;
800
801 /*
802 * Validate.
803 */
804 if ( cbIn != sizeof(*pIn)
805 || cbOut < sizeof(*pOut))
806 {
807 dprintf(("SUP_IOCTL_PINPAGES: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
808 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
809 return SUPDRV_ERR_INVALID_PARAM;
810 }
811 if ( pIn->u32Cookie != pDevExt->u32Cookie
812 || pIn->u32SessionCookie != pSession->u32Cookie )
813 {
814 dprintf(("SUP_IOCTL_PINPAGES: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
815 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
816 return SUPDRV_ERR_INVALID_MAGIC;
817 }
818 if (pIn->cPages <= 0 || !pIn->pvR3)
819 {
820 dprintf(("SUP_IOCTL_PINPAGES: Illegal request %p %d\n", (void *)pIn->pvR3, pIn->cPages));
821 return SUPDRV_ERR_INVALID_PARAM;
822 }
823 if ((unsigned)RT_OFFSETOF(SUPPINPAGES_OUT, aPages[pIn->cPages]) > cbOut)
824 {
825 dprintf(("SUP_IOCTL_PINPAGES: Output buffer is too small! %d required %d passed in.\n",
826 RT_OFFSETOF(SUPPINPAGES_OUT, aPages[pIn->cPages]), cbOut));
827 return SUPDRV_ERR_INVALID_PARAM;
828 }
829
830 /*
831 * Execute.
832 */
833 *pcbReturned = RT_OFFSETOF(SUPPINPAGES_OUT, aPages[pIn->cPages]);
834 rc = SUPR0LockMem(pSession, pIn->pvR3, pIn->cPages, &pOut->aPages[0]);
835 if (rc)
836 *pcbReturned = 0;
837 return rc;
838 }
839
840
841 case SUP_IOCTL_UNPINPAGES:
842 {
843 PSUPUNPINPAGES_IN pIn = (PSUPUNPINPAGES_IN)pvIn;
844
845 /*
846 * Validate.
847 */
848 if ( cbIn != sizeof(*pIn)
849 || cbOut != 0)
850 {
851 dprintf(("SUP_IOCTL_UNPINPAGES: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
852 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
853 return SUPDRV_ERR_INVALID_PARAM;
854 }
855 if ( pIn->u32Cookie != pDevExt->u32Cookie
856 || pIn->u32SessionCookie != pSession->u32Cookie)
857 {
858 dprintf(("SUP_IOCTL_UNPINPAGES: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
859 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
860 return SUPDRV_ERR_INVALID_MAGIC;
861 }
862
863 /*
864 * Execute.
865 */
866 return SUPR0UnlockMem(pSession, pIn->pvR3);
867 }
868
869 case SUP_IOCTL_CONT_ALLOC:
870 {
871 int rc;
872 PSUPCONTALLOC_IN pIn = (PSUPCONTALLOC_IN)pvIn;
873 PSUPCONTALLOC_OUT pOut = (PSUPCONTALLOC_OUT)pvOut;
874
875 /*
876 * Validate.
877 */
878 if ( cbIn != sizeof(*pIn)
879 || cbOut < sizeof(*pOut))
880 {
881 dprintf(("SUP_IOCTL_CONT_ALLOC: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
882 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
883 return SUPDRV_ERR_INVALID_PARAM;
884 }
885 if ( pIn->u32Cookie != pDevExt->u32Cookie
886 || pIn->u32SessionCookie != pSession->u32Cookie )
887 {
888 dprintf(("SUP_IOCTL_CONT_ALLOC: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
889 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
890 return SUPDRV_ERR_INVALID_MAGIC;
891 }
892
893 /*
894 * Execute.
895 */
896 rc = SUPR0ContAlloc(pSession, pIn->cPages, &pOut->pvR0, &pOut->pvR3, &pOut->HCPhys);
897 if (!rc)
898 *pcbReturned = sizeof(*pOut);
899 return rc;
900 }
901
902
903 case SUP_IOCTL_CONT_FREE:
904 {
905 PSUPCONTFREE_IN pIn = (PSUPCONTFREE_IN)pvIn;
906
907 /*
908 * Validate.
909 */
910 if ( cbIn != sizeof(*pIn)
911 || cbOut != 0)
912 {
913 dprintf(("SUP_IOCTL_CONT_FREE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
914 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
915 return SUPDRV_ERR_INVALID_PARAM;
916 }
917 if ( pIn->u32Cookie != pDevExt->u32Cookie
918 || pIn->u32SessionCookie != pSession->u32Cookie)
919 {
920 dprintf(("SUP_IOCTL_CONT_FREE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
921 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
922 return SUPDRV_ERR_INVALID_MAGIC;
923 }
924
925 /*
926 * Execute.
927 */
928 return SUPR0ContFree(pSession, (RTHCUINTPTR)pIn->pvR3);
929 }
930
931
932 case SUP_IOCTL_LDR_OPEN:
933 {
934 PSUPLDROPEN_IN pIn = (PSUPLDROPEN_IN)pvIn;
935 PSUPLDROPEN_OUT pOut = (PSUPLDROPEN_OUT)pvOut;
936
937 /*
938 * Validate.
939 */
940 if ( cbIn != sizeof(*pIn)
941 || cbOut != sizeof(*pOut))
942 {
943 dprintf(("SUP_IOCTL_LDR_OPEN: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
944 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
945 return SUPDRV_ERR_INVALID_PARAM;
946 }
947 if ( pIn->u32Cookie != pDevExt->u32Cookie
948 || pIn->u32SessionCookie != pSession->u32Cookie)
949 {
950 dprintf(("SUP_IOCTL_LDR_OPEN: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
951 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
952 return SUPDRV_ERR_INVALID_MAGIC;
953 }
954 if ( pIn->cbImage <= 0
955 || pIn->cbImage >= 16*1024*1024 /*16MB*/)
956 {
957 dprintf(("SUP_IOCTL_LDR_OPEN: Invalid size %d. (max is 16MB)\n", pIn->cbImage));
958 return SUPDRV_ERR_INVALID_PARAM;
959 }
960 if (!memchr(pIn->szName, '\0', sizeof(pIn->szName)))
961 {
962 dprintf(("SUP_IOCTL_LDR_OPEN: The image name isn't terminated!\n"));
963 return SUPDRV_ERR_INVALID_PARAM;
964 }
965 if (!pIn->szName[0])
966 {
967 dprintf(("SUP_IOCTL_LDR_OPEN: The image name is too short\n"));
968 return SUPDRV_ERR_INVALID_PARAM;
969 }
970 if (strpbrk(pIn->szName, ";:()[]{}/\\|&*%#@!~`\"'"))
971 {
972 dprintf(("SUP_IOCTL_LDR_OPEN: The name is invalid '%s'\n", pIn->szName));
973 return SUPDRV_ERR_INVALID_PARAM;
974 }
975
976 *pcbReturned = sizeof(*pOut);
977 return supdrvIOCtl_LdrOpen(pDevExt, pSession, pIn, pOut);
978 }
979
980
981 case SUP_IOCTL_LDR_LOAD:
982 {
983 PSUPLDRLOAD_IN pIn = (PSUPLDRLOAD_IN)pvIn;
984
985 /*
986 * Validate.
987 */
988 if ( cbIn <= sizeof(*pIn)
989 || cbOut != 0)
990 {
991 dprintf(("SUP_IOCTL_LDR_LOAD: Invalid input/output sizes. cbIn=%ld expected greater than %ld. cbOut=%ld expected %ld.\n",
992 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
993 return SUPDRV_ERR_INVALID_PARAM;
994 }
995 if ( pIn->u32Cookie != pDevExt->u32Cookie
996 || pIn->u32SessionCookie != pSession->u32Cookie)
997 {
998 dprintf(("SUP_IOCTL_LDR_LOAD: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
999 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1000 return SUPDRV_ERR_INVALID_MAGIC;
1001 }
1002 if ((unsigned)RT_OFFSETOF(SUPLDRLOAD_IN, achImage[pIn->cbImage]) > cbIn)
1003 {
1004 dprintf(("SUP_IOCTL_LDR_LOAD: Invalid size %d. InputBufferLength=%d\n",
1005 pIn->cbImage, cbIn));
1006 return SUPDRV_ERR_INVALID_PARAM;
1007 }
1008 if (pIn->cSymbols > 16384)
1009 {
1010 dprintf(("SUP_IOCTL_LDR_LOAD: Too many symbols. cSymbols=%u max=16384\n", pIn->cSymbols));
1011 return SUPDRV_ERR_INVALID_PARAM;
1012 }
1013 if ( pIn->cSymbols
1014 && ( pIn->offSymbols >= pIn->cbImage
1015 || pIn->offSymbols + pIn->cSymbols * sizeof(SUPLDRSYM) > pIn->cbImage)
1016 )
1017 {
1018 dprintf(("SUP_IOCTL_LDR_LOAD: symbol table is outside the image bits! offSymbols=%u cSymbols=%d cbImage=%d\n",
1019 pIn->offSymbols, pIn->cSymbols, pIn->cbImage));
1020 return SUPDRV_ERR_INVALID_PARAM;
1021 }
1022 if ( pIn->cbStrTab
1023 && ( pIn->offStrTab >= pIn->cbImage
1024 || pIn->offStrTab + pIn->cbStrTab > pIn->cbImage
1025 || pIn->offStrTab + pIn->cbStrTab < pIn->offStrTab)
1026 )
1027 {
1028 dprintf(("SUP_IOCTL_LDR_LOAD: string table is outside the image bits! offStrTab=%u cbStrTab=%d cbImage=%d\n",
1029 pIn->offStrTab, pIn->cbStrTab, pIn->cbImage));
1030 return SUPDRV_ERR_INVALID_PARAM;
1031 }
1032
1033 if (pIn->cSymbols)
1034 {
1035 uint32_t i;
1036 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pIn->achImage[pIn->offSymbols];
1037 for (i = 0; i < pIn->cSymbols; i++)
1038 {
1039 if (paSyms[i].offSymbol >= pIn->cbImage)
1040 {
1041 dprintf(("SUP_IOCTL_LDR_LOAD: symbol i=%d has an invalid symbol offset: %#x (max=%#x)\n",
1042 i, paSyms[i].offSymbol, pIn->cbImage));
1043 return SUPDRV_ERR_INVALID_PARAM;
1044 }
1045 if (paSyms[i].offName >= pIn->cbStrTab)
1046 {
1047 dprintf(("SUP_IOCTL_LDR_LOAD: symbol i=%d has an invalid name offset: %#x (max=%#x)\n",
1048 i, paSyms[i].offName, pIn->cbStrTab));
1049 return SUPDRV_ERR_INVALID_PARAM;
1050 }
1051 if (!memchr(&pIn->achImage[pIn->offStrTab + paSyms[i].offName], '\0', pIn->cbStrTab - paSyms[i].offName))
1052 {
1053 dprintf(("SUP_IOCTL_LDR_LOAD: symbol i=%d has an unterminated name! offName=%#x (max=%#x)\n",
1054 i, paSyms[i].offName, pIn->cbStrTab));
1055 return SUPDRV_ERR_INVALID_PARAM;
1056 }
1057 }
1058 }
1059
1060 return supdrvIOCtl_LdrLoad(pDevExt, pSession, pIn);
1061 }
1062
1063
1064 case SUP_IOCTL_LDR_FREE:
1065 {
1066 PSUPLDRFREE_IN pIn = (PSUPLDRFREE_IN)pvIn;
1067
1068 /*
1069 * Validate.
1070 */
1071 if ( cbIn != sizeof(*pIn)
1072 || cbOut != 0)
1073 {
1074 dprintf(("SUP_IOCTL_LDR_FREE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1075 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
1076 return SUPDRV_ERR_INVALID_PARAM;
1077 }
1078 if ( pIn->u32Cookie != pDevExt->u32Cookie
1079 || pIn->u32SessionCookie != pSession->u32Cookie)
1080 {
1081 dprintf(("SUP_IOCTL_LDR_FREE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1082 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1083 return SUPDRV_ERR_INVALID_MAGIC;
1084 }
1085
1086 return supdrvIOCtl_LdrFree(pDevExt, pSession, pIn);
1087 }
1088
1089
1090 case SUP_IOCTL_LDR_GET_SYMBOL:
1091 {
1092 PSUPLDRGETSYMBOL_IN pIn = (PSUPLDRGETSYMBOL_IN)pvIn;
1093 PSUPLDRGETSYMBOL_OUT pOut = (PSUPLDRGETSYMBOL_OUT)pvOut;
1094 char *pszEnd;
1095
1096 /*
1097 * Validate.
1098 */
1099 if ( cbIn < (unsigned)RT_OFFSETOF(SUPLDRGETSYMBOL_IN, szSymbol[2])
1100 || cbOut != sizeof(*pOut))
1101 {
1102 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: Invalid input/output sizes. cbIn=%d expected >=%d. cbOut=%d expected %d.\n",
1103 cbIn, RT_OFFSETOF(SUPLDRGETSYMBOL_IN, szSymbol[2]), cbOut, (int)sizeof(*pOut)));
1104 return SUPDRV_ERR_INVALID_PARAM;
1105 }
1106 if ( pIn->u32Cookie != pDevExt->u32Cookie
1107 || pIn->u32SessionCookie != pSession->u32Cookie)
1108 {
1109 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1110 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1111 return SUPDRV_ERR_INVALID_MAGIC;
1112 }
1113 pszEnd = memchr(pIn->szSymbol, '\0', cbIn - RT_OFFSETOF(SUPLDRGETSYMBOL_IN, szSymbol));
1114 if (!pszEnd)
1115 {
1116 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: The symbol name isn't terminated!\n"));
1117 return SUPDRV_ERR_INVALID_PARAM;
1118 }
1119 if (pszEnd - &pIn->szSymbol[0] >= 1024)
1120 {
1121 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: The symbol name is too long (%ld chars, max is %d)!\n",
1122 (long)(pszEnd - &pIn->szSymbol[0]), 1024));
1123 return SUPDRV_ERR_INVALID_PARAM;
1124 }
1125
1126 pOut->pvSymbol = NULL;
1127 *pcbReturned = sizeof(*pOut);
1128 return supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pIn, pOut);
1129 }
1130
1131
1132 /** @todo this interface needs re-doing, we're accessing Ring-3 buffers directly here! */
1133 case SUP_IOCTL_CALL_VMMR0:
1134 {
1135 PSUPCALLVMMR0_IN pIn = (PSUPCALLVMMR0_IN)pvIn;
1136 PSUPCALLVMMR0_OUT pOut = (PSUPCALLVMMR0_OUT)pvOut;
1137
1138 /*
1139 * Validate.
1140 */
1141 if ( cbIn != sizeof(*pIn)
1142 || cbOut != sizeof(*pOut))
1143 {
1144 dprintf(("SUP_IOCTL_CALL_VMMR0: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1145 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
1146 return SUPDRV_ERR_INVALID_PARAM;
1147 }
1148 if ( pIn->u32Cookie != pDevExt->u32Cookie
1149 || pIn->u32SessionCookie != pSession->u32Cookie )
1150 {
1151 dprintf(("SUP_IOCTL_CALL_VMMR0: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1152 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1153 return SUPDRV_ERR_INVALID_MAGIC;
1154 }
1155
1156 /*
1157 * Do we have an entrypoint?
1158 */
1159 if (!pDevExt->pfnVMMR0Entry)
1160 return SUPDRV_ERR_GENERAL_FAILURE;
1161
1162 /*
1163 * Execute.
1164 */
1165 pOut->rc = pDevExt->pfnVMMR0Entry(pIn->pVMR0, pIn->uOperation, (void *)pIn->pvArg); /** @todo address the pvArg problem! */
1166 *pcbReturned = sizeof(*pOut);
1167 return 0;
1168 }
1169
1170
1171 case SUP_IOCTL_GET_PAGING_MODE:
1172 {
1173 int rc;
1174 PSUPGETPAGINGMODE_IN pIn = (PSUPGETPAGINGMODE_IN)pvIn;
1175 PSUPGETPAGINGMODE_OUT pOut = (PSUPGETPAGINGMODE_OUT)pvOut;
1176
1177 /*
1178 * Validate.
1179 */
1180 if ( cbIn != sizeof(*pIn)
1181 || cbOut != sizeof(*pOut))
1182 {
1183 dprintf(("SUP_IOCTL_GET_PAGING_MODE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1184 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
1185 return SUPDRV_ERR_INVALID_PARAM;
1186 }
1187 if ( pIn->u32Cookie != pDevExt->u32Cookie
1188 || pIn->u32SessionCookie != pSession->u32Cookie )
1189 {
1190 dprintf(("SUP_IOCTL_GET_PAGING_MODE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1191 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1192 return SUPDRV_ERR_INVALID_MAGIC;
1193 }
1194
1195 /*
1196 * Execute.
1197 */
1198 *pcbReturned = sizeof(*pOut);
1199 rc = supdrvIOCtl_GetPagingMode(pOut);
1200 if (rc)
1201 *pcbReturned = 0;
1202 return rc;
1203 }
1204
1205
1206 case SUP_IOCTL_LOW_ALLOC:
1207 {
1208 int rc;
1209 PSUPLOWALLOC_IN pIn = (PSUPLOWALLOC_IN)pvIn;
1210 PSUPLOWALLOC_OUT pOut = (PSUPLOWALLOC_OUT)pvOut;
1211
1212 /*
1213 * Validate.
1214 */
1215 if ( cbIn != sizeof(*pIn)
1216 || cbOut < sizeof(*pOut))
1217 {
1218 dprintf(("SUP_IOCTL_LOW_ALLOC: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1219 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
1220 return SUPDRV_ERR_INVALID_PARAM;
1221 }
1222 if ( pIn->u32Cookie != pDevExt->u32Cookie
1223 || pIn->u32SessionCookie != pSession->u32Cookie )
1224 {
1225 dprintf(("SUP_IOCTL_LOW_ALLOC: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1226 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1227 return SUPDRV_ERR_INVALID_MAGIC;
1228 }
1229 if ((unsigned)RT_OFFSETOF(SUPLOWALLOC_OUT, aPages[pIn->cPages]) > cbOut)
1230 {
1231 dprintf(("SUP_IOCTL_LOW_ALLOC: Output buffer is too small! %d required %d passed in.\n",
1232 RT_OFFSETOF(SUPLOWALLOC_OUT, aPages[pIn->cPages]), cbOut));
1233 return SUPDRV_ERR_INVALID_PARAM;
1234 }
1235
1236 /*
1237 * Execute.
1238 */
1239 *pcbReturned = RT_OFFSETOF(SUPLOWALLOC_OUT, aPages[pIn->cPages]);
1240 rc = SUPR0LowAlloc(pSession, pIn->cPages, &pOut->pvR0, &pOut->pvR3, &pOut->aPages[0]);
1241 if (rc)
1242 *pcbReturned = 0;
1243 return rc;
1244 }
1245
1246
1247 case SUP_IOCTL_LOW_FREE:
1248 {
1249 PSUPLOWFREE_IN pIn = (PSUPLOWFREE_IN)pvIn;
1250
1251 /*
1252 * Validate.
1253 */
1254 if ( cbIn != sizeof(*pIn)
1255 || cbOut != 0)
1256 {
1257 dprintf(("SUP_IOCTL_LOW_FREE: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1258 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
1259 return SUPDRV_ERR_INVALID_PARAM;
1260 }
1261 if ( pIn->u32Cookie != pDevExt->u32Cookie
1262 || pIn->u32SessionCookie != pSession->u32Cookie)
1263 {
1264 dprintf(("SUP_IOCTL_LOW_FREE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1265 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1266 return SUPDRV_ERR_INVALID_MAGIC;
1267 }
1268
1269 /*
1270 * Execute.
1271 */
1272 return SUPR0LowFree(pSession, (RTHCUINTPTR)pIn->pvR3);
1273 }
1274
1275
1276 case SUP_IOCTL_GIP_MAP:
1277 {
1278 int rc;
1279 PSUPGIPMAP_IN pIn = (PSUPGIPMAP_IN)pvIn;
1280 PSUPGIPMAP_OUT pOut = (PSUPGIPMAP_OUT)pvOut;
1281
1282 /*
1283 * Validate.
1284 */
1285 if ( cbIn != sizeof(*pIn)
1286 || cbOut != sizeof(*pOut))
1287 {
1288 dprintf(("SUP_IOCTL_GIP_MAP: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1289 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)sizeof(*pOut)));
1290 return SUPDRV_ERR_INVALID_PARAM;
1291 }
1292 if ( pIn->u32Cookie != pDevExt->u32Cookie
1293 || pIn->u32SessionCookie != pSession->u32Cookie)
1294 {
1295 dprintf(("SUP_IOCTL_GIP_MAP: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1296 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1297 return SUPDRV_ERR_INVALID_MAGIC;
1298 }
1299
1300 /*
1301 * Execute.
1302 */
1303 rc = SUPR0GipMap(pSession, &pOut->pGipR3, &pOut->HCPhysGip);
1304 if (!rc)
1305 {
1306 pOut->pGipR0 = pDevExt->pGip;
1307 *pcbReturned = sizeof(*pOut);
1308 }
1309 return rc;
1310 }
1311
1312
1313 case SUP_IOCTL_GIP_UNMAP:
1314 {
1315 PSUPGIPUNMAP_IN pIn = (PSUPGIPUNMAP_IN)pvIn;
1316
1317 /*
1318 * Validate.
1319 */
1320 if ( cbIn != sizeof(*pIn)
1321 || cbOut != 0)
1322 {
1323 dprintf(("SUP_IOCTL_GIP_UNMAP: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1324 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
1325 return SUPDRV_ERR_INVALID_PARAM;
1326 }
1327 if ( pIn->u32Cookie != pDevExt->u32Cookie
1328 || pIn->u32SessionCookie != pSession->u32Cookie)
1329 {
1330 dprintf(("SUP_IOCTL_GIP_UNMAP: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1331 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1332 return SUPDRV_ERR_INVALID_MAGIC;
1333 }
1334
1335 /*
1336 * Execute.
1337 */
1338 return SUPR0GipUnmap(pSession);
1339 }
1340
1341
1342 case SUP_IOCTL_SET_VM_FOR_FAST:
1343 {
1344 PSUPSETVMFORFAST_IN pIn = (PSUPSETVMFORFAST_IN)pvIn;
1345
1346 /*
1347 * Validate.
1348 */
1349 if ( cbIn != sizeof(*pIn)
1350 || cbOut != 0)
1351 {
1352 dprintf(("SUP_IOCTL_SET_VM_FOR_FAST: Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n",
1353 (long)cbIn, (long)sizeof(*pIn), (long)cbOut, (long)0));
1354 return SUPDRV_ERR_INVALID_PARAM;
1355 }
1356 if ( pIn->u32Cookie != pDevExt->u32Cookie
1357 || pIn->u32SessionCookie != pSession->u32Cookie)
1358 {
1359 dprintf(("SUP_IOCTL_SET_VM_FOR_FAST: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1360 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1361 return SUPDRV_ERR_INVALID_MAGIC;
1362 }
1363 if ( pIn->pVMR0 != NULL
1364 && ( !VALID_PTR(pIn->pVMR0)
1365 || ((uintptr_t)pIn->pVMR0 & (PAGE_SIZE - 1))
1366 )
1367 )
1368 {
1369 dprintf(("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p! Must be a valid, page aligned, pointer.\n", pIn->pVMR0));
1370 return SUPDRV_ERR_INVALID_POINTER;
1371 }
1372
1373 /*
1374 * Execute.
1375 */
1376#ifndef VBOX_WITHOUT_IDT_PATCHING
1377 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: !VBOX_WITHOUT_IDT_PATCHING\n"));
1378 return SUPDRV_ERR_GENERAL_FAILURE;
1379#else
1380 pSession->pVM = pIn->pVMR0;
1381 return 0;
1382#endif
1383 }
1384
1385
1386 default:
1387 dprintf(("Unknown IOCTL %#x\n", uIOCtl));
1388 break;
1389 }
1390 return SUPDRV_ERR_GENERAL_FAILURE;
1391}
1392
1393
1394/**
1395 * Register an object for reference counting.
1396 * The object is registered with one reference in the specified session.
1397 *
1398 * @returns Unique identifier on success (pointer).
1399 * All future references must use this identifier.
1400 * @returns NULL on failure.
1401 * @param pfnDestructor The destructor function which will be called when the reference count reaches 0.
1402 * @param pvUser1 The first user argument.
1403 * @param pvUser2 The second user argument.
1404 */
1405SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1406{
1407 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1408 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1409 PSUPDRVOBJ pObj;
1410 PSUPDRVUSAGE pUsage;
1411
1412 /*
1413 * Validate the input.
1414 */
1415 if (!pSession)
1416 {
1417 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1418 return NULL;
1419 }
1420 if ( enmType <= SUPDRVOBJTYPE_INVALID
1421 || enmType >= SUPDRVOBJTYPE_END)
1422 {
1423 AssertMsgFailed(("Invalid enmType=%d\n", enmType));
1424 return NULL;
1425 }
1426 if (!pfnDestructor)
1427 {
1428 AssertMsgFailed(("Invalid pfnDestructor=%d\n", pfnDestructor));
1429 return NULL;
1430 }
1431
1432 /*
1433 * Allocate and initialize the object.
1434 */
1435 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1436 if (!pObj)
1437 return NULL;
1438 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1439 pObj->enmType = enmType;
1440 pObj->pNext = NULL;
1441 pObj->cUsage = 1;
1442 pObj->pfnDestructor = pfnDestructor;
1443 pObj->pvUser1 = pvUser1;
1444 pObj->pvUser2 = pvUser2;
1445 pObj->CreatorUid = pSession->Uid;
1446 pObj->CreatorGid = pSession->Gid;
1447 pObj->CreatorProcess= pSession->Process;
1448 supdrvOSObjInitCreator(pObj, pSession);
1449
1450 /*
1451 * Allocate the usage record.
1452 * (We keep freed usage records around to simplify SUPR0ObjAddRef().)
1453 */
1454 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1455
1456 pUsage = pDevExt->pUsageFree;
1457 if (pUsage)
1458 pDevExt->pUsageFree = pUsage->pNext;
1459 else
1460 {
1461 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1462 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1463 if (!pUsage)
1464 {
1465 RTMemFree(pObj);
1466 return NULL;
1467 }
1468 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1469 }
1470
1471 /*
1472 * Insert the object and create the session usage record.
1473 */
1474 /* The object. */
1475 pObj->pNext = pDevExt->pObjs;
1476 pDevExt->pObjs = pObj;
1477
1478 /* The session record. */
1479 pUsage->cUsage = 1;
1480 pUsage->pObj = pObj;
1481 pUsage->pNext = pSession->pUsage;
1482 dprintf(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1483 pSession->pUsage = pUsage;
1484
1485 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1486
1487 dprintf(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
1488 return pObj;
1489}
1490
1491
1492/**
1493 * Increment the reference counter for the object associating the reference
1494 * with the specified session.
1495 *
1496 * @returns 0 on success.
1497 * @returns SUPDRV_ERR_* on failure.
1498 * @param pvObj The identifier returned by SUPR0ObjRegister().
1499 * @param pSession The session which is referencing the object.
1500 */
1501SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1502{
1503 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1504 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1505 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1506 PSUPDRVUSAGE pUsagePre;
1507 PSUPDRVUSAGE pUsage;
1508
1509 /*
1510 * Validate the input.
1511 */
1512 if (!pSession)
1513 {
1514 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1515 return SUPDRV_ERR_INVALID_PARAM;
1516 }
1517 if (!pObj || pObj->u32Magic != SUPDRVOBJ_MAGIC)
1518 {
1519 AssertMsgFailed(("Invalid pvObj=%p magic=%#x (expected %#x)\n",
1520 pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC));
1521 return SUPDRV_ERR_INVALID_PARAM;
1522 }
1523
1524 /*
1525 * Preallocate the usage record.
1526 */
1527 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1528
1529 pUsagePre = pDevExt->pUsageFree;
1530 if (pUsagePre)
1531 pDevExt->pUsageFree = pUsagePre->pNext;
1532 else
1533 {
1534 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1535 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
1536 if (!pUsagePre)
1537 return SUPDRV_ERR_NO_MEMORY;
1538 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1539 }
1540
1541 /*
1542 * Reference the object.
1543 */
1544 pObj->cUsage++;
1545
1546 /*
1547 * Look for the session record.
1548 */
1549 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
1550 {
1551 dprintf(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1552 if (pUsage->pObj == pObj)
1553 break;
1554 }
1555 if (pUsage)
1556 pUsage->cUsage++;
1557 else
1558 {
1559 /* create a new session record. */
1560 pUsagePre->cUsage = 1;
1561 pUsagePre->pObj = pObj;
1562 pUsagePre->pNext = pSession->pUsage;
1563 pSession->pUsage = pUsagePre;
1564 dprintf(("SUPR0ObjAddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));
1565
1566 pUsagePre = NULL;
1567 }
1568
1569 /*
1570 * Put any unused usage record into the free list.
1571 */
1572 if (pUsagePre)
1573 {
1574 pUsagePre->pNext = pDevExt->pUsageFree;
1575 pDevExt->pUsageFree = pUsagePre;
1576 }
1577
1578 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1579
1580 return 0;
1581}
1582
1583
1584/**
1585 * Decrement / destroy a reference counter record for an object.
1586 *
1587 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
1588 *
1589 * @returns 0 on success.
1590 * @returns SUPDRV_ERR_* on failure.
1591 * @param pvObj The identifier returned by SUPR0ObjRegister().
1592 * @param pSession The session which is referencing the object.
1593 */
1594SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
1595{
1596 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1597 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1598 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1599 bool fDestroy = false;
1600 PSUPDRVUSAGE pUsage;
1601 PSUPDRVUSAGE pUsagePrev;
1602
1603 /*
1604 * Validate the input.
1605 */
1606 if (!pSession)
1607 {
1608 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1609 return SUPDRV_ERR_INVALID_PARAM;
1610 }
1611 if (!pObj || pObj->u32Magic != SUPDRVOBJ_MAGIC)
1612 {
1613 AssertMsgFailed(("Invalid pvObj=%p magic=%#x (expected %#x)\n",
1614 pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC));
1615 return SUPDRV_ERR_INVALID_PARAM;
1616 }
1617
1618 /*
1619 * Acquire the spinlock and look for the usage record.
1620 */
1621 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1622
1623 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
1624 pUsage;
1625 pUsagePrev = pUsage, pUsage = pUsage->pNext)
1626 {
1627 dprintf(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1628 if (pUsage->pObj == pObj)
1629 {
1630 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
1631 if (pUsage->cUsage > 1)
1632 {
1633 pObj->cUsage--;
1634 pUsage->cUsage--;
1635 }
1636 else
1637 {
1638 /*
1639 * Free the session record.
1640 */
1641 if (pUsagePrev)
1642 pUsagePrev->pNext = pUsage->pNext;
1643 else
1644 pSession->pUsage = pUsage->pNext;
1645 pUsage->pNext = pDevExt->pUsageFree;
1646 pDevExt->pUsageFree = pUsage;
1647
1648 /* What about the object? */
1649 if (pObj->cUsage > 1)
1650 pObj->cUsage--;
1651 else
1652 {
1653 /*
1654 * Object is to be destroyed, unlink it.
1655 */
1656 fDestroy = true;
1657 if (pDevExt->pObjs == pObj)
1658 pDevExt->pObjs = pObj->pNext;
1659 else
1660 {
1661 PSUPDRVOBJ pObjPrev;
1662 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
1663 if (pObjPrev->pNext == pObj)
1664 {
1665 pObjPrev->pNext = pObj->pNext;
1666 break;
1667 }
1668 Assert(pObjPrev);
1669 }
1670 }
1671 }
1672 break;
1673 }
1674 }
1675
1676 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1677
1678 /*
1679 * Call the destructor and free the object if required.
1680 */
1681 if (fDestroy)
1682 {
1683 pObj->u32Magic++;
1684 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
1685 RTMemFree(pObj);
1686 }
1687
1688 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
1689 return pUsage ? 0 : SUPDRV_ERR_INVALID_PARAM;
1690}
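/*
 * Sketch (not from the original source) of the object reference counting API
 * as a ring-0 client might use it.  The destructor, its signature (inferred
 * from the pfnDestructor call above), the payload and the SUPDRVOBJTYPE_VM
 * type value are all assumptions made for the example.
 */
#if 0 /* illustrative sketch */
static DECLCALLBACK(void) ExampleObjDestructor(void *pvObj, void *pvUser1, void *pvUser2)
{
    /* Runs exactly once, when the last reference is released. */
    RTMemFree(pvUser1);
    NOREF(pvObj); NOREF(pvUser2);
}

static int ExampleObjUsage(PSUPDRVSESSION pSession)
{
    void *pvPayload = RTMemAllocZ(64);
    void *pvObj;
    if (!pvPayload)
        return SUPDRV_ERR_NO_MEMORY;
    pvObj = SUPR0ObjRegister(pSession, SUPDRVOBJTYPE_VM, ExampleObjDestructor, pvPayload, NULL);
    if (!pvObj)
    {
        RTMemFree(pvPayload);
        return SUPDRV_ERR_NO_MEMORY;
    }
    SUPR0ObjAddRef(pvObj, pSession);            /* second reference in this session */
    SUPR0ObjRelease(pvObj, pSession);           /* drop it again */
    return SUPR0ObjRelease(pvObj, pSession);    /* last reference: the destructor runs */
}
#endif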
1691
1692/**
1693 * Verifies that the current process can access the specified object.
1694 *
1695 * @returns 0 if access is granted.
1696 * @returns SUPDRV_ERR_PERMISSION_DENIED if denied access.
1697 * @returns SUPDRV_ERR_INVALID_PARAM if invalid parameter.
1698 *
1699 * @param pvObj The identifier returned by SUPR0ObjRegister().
1700 * @param pSession The session which wishes to access the object.
1701 * @param pszObjName Object string name. This is optional and depends on the object type.
1702 *
1703 * @remark The caller is responsible for making sure the object isn't removed while
1704 * we're inside this function. If uncertain about this, just call AddRef before calling us.
1705 */
1706SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
1707{
1708 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1709 int rc = SUPDRV_ERR_GENERAL_FAILURE;
1710
1711 /*
1712 * Validate the input.
1713 */
1714 if (!pSession)
1715 {
1716 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1717 return SUPDRV_ERR_INVALID_PARAM;
1718 }
1719 if (!pObj || pObj->u32Magic != SUPDRVOBJ_MAGIC)
1720 {
1721 AssertMsgFailed(("Invalid pvObj=%p magic=%#x (expected %#x)\n",
1722 pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC));
1723 return SUPDRV_ERR_INVALID_PARAM;
1724 }
1725
1726 /*
1727 * Check access. (returns true if a decision has been made.)
1728 */
1729 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
1730 return rc;
1731
1732 /*
1733 * Default policy is to allow the user to access his own
1734 * stuff but nothing else.
1735 */
1736 if (pObj->CreatorUid == pSession->Uid)
1737 return 0;
1738 return SUPDRV_ERR_PERMISSION_DENIED;
1739}
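/*
 * Sketch (not from the original source) of the pattern the remark above
 * suggests: take a reference before the access check so the object cannot be
 * destroyed while the check runs.  The helper name is invented.
 */
#if 0 /* illustrative sketch */
static int ExampleVerifyAccess(void *pvObj, PSUPDRVSESSION pSession)
{
    int rc = SUPR0ObjAddRef(pvObj, pSession);
    if (!rc)
    {
        rc = SUPR0ObjVerifyAccess(pvObj, pSession, NULL /* no object name */);
        SUPR0ObjRelease(pvObj, pSession);
    }
    return rc;
}
#endif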
1740
1741
1742/**
1743 * Lock pages.
1744 *
1745 * @param pSession Session to which the locked memory should be associated.
1746 * @param pvR3 Start of the memory range to lock.
1747 * This must be page aligned.
1749 * @param cPages Number of pages in the range to lock.
1750 * @param paPages Where to store the physical addresses of the locked pages.
1750 */
1751SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PSUPPAGE paPages)
1752{
1753 int rc;
1754 SUPDRVMEMREF Mem = {0};
1755 const size_t cb = (size_t)cPages << PAGE_SHIFT;
1756 dprintf(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n",
1757 pSession, (void *)pvR3, cPages, paPages));
1758
1759 /*
1760 * Verify input.
1761 */
1762 if (RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3 || !pvR3)
1763 {
1764 dprintf(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
1765 return SUPDRV_ERR_INVALID_PARAM;
1766 }
1767 if (!paPages)
1768 {
1769 dprintf(("paPages is NULL!\n"));
1770 return SUPDRV_ERR_INVALID_PARAM;
1771 }
1772
1773#ifdef USE_NEW_OS_INTERFACE
1774 /*
1775 * Let IPRT do the job.
1776 */
1777 Mem.eType = MEMREF_TYPE_LOCKED;
1778 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTR0ProcHandleSelf());
1779 if (RT_SUCCESS(rc))
1780 {
1781 AssertMsg(RTR0MemObjAddress(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddress(Mem.MemObj), pvR3));
1782 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
1783
1784 unsigned iPage = cPages;
1785 while (iPage-- > 0)
1786 {
1787 paPages[iPage].uReserved = 0;
1788 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
1789 if (RT_UNLIKELY(paPages[iPage].Phys == NIL_RTCCPHYS))
1790 {
1791 AssertMsgFailed(("iPage=%d\n", iPage));
1792 rc = VERR_INTERNAL_ERROR;
1793 break;
1794 }
1795 }
1796 if (RT_SUCCESS(rc))
1797 rc = supdrvMemAdd(&Mem, pSession);
1798 if (RT_FAILURE(rc))
1799 {
1800 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
1801 AssertRC(rc2);
1802 }
1803 }
1804
1805#else /* !USE_NEW_OS_INTERFACE */
1806
1807 /*
1808 * Let the OS specific code have a go.
1809 */
1810 Mem.pvR0 = NULL;
1811 Mem.pvR3 = pvR3;
1812 Mem.eType = MEMREF_TYPE_LOCKED;
1813 Mem.cb = cb;
1814 rc = supdrvOSLockMemOne(&Mem, paPages);
1815 if (rc)
1816 return rc;
1817
1818 /*
1819     * Everything went fine; add the memory reference to the session.
1820 */
1821 rc = supdrvMemAdd(&Mem, pSession);
1822 if (rc)
1823 supdrvOSUnlockMemOne(&Mem);
1824#endif /* !USE_NEW_OS_INTERFACE */
1825 return rc;
1826}
1827
1828
1829/**
1830 * Unlocks the memory pointed to by pv.
1831 *
1832 * @returns 0 on success.
1833 * @returns SUPDRV_ERR_* on failure
1834 * @param pSession Session to which the memory was locked.
1835 * @param pvR3 Memory to unlock.
1836 */
1837SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
1838{
1839 dprintf(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
1840 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
1841}
1842
1843
1844/**
1845 * Allocates a chunk of page aligned memory with contiguous and fixed physical
1846 * backing.
1847 *
1848 * @returns 0 on success.
1849 * @returns SUPDRV_ERR_* on failure.
1850 * @param pSession Session data.
1851 * @param   cPages      Number of pages to allocate.
1852 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
1853 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
1854 * @param pHCPhys Where to put the physical address of allocated memory.
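 *
 * Illustrative use (a sketch; error handling trimmed, the variable names are the
 * example's own):
 * @code
 *     RTR0PTR  pvR0;
 *     RTR3PTR  pvR3;
 *     RTHCPHYS HCPhys;
 *     int rc = SUPR0ContAlloc(pSession, 16, &pvR0, &pvR3, &HCPhys);
 *     if (!rc)
 *         rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pvR0);
 * @endcode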
1855 */
1856SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
1857{
1858 int rc;
1859 SUPDRVMEMREF Mem = {0};
1860 dprintf(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
1861
1862 /*
1863 * Validate input.
1864 */
1865 if (!pSession || !ppvR3 || !ppvR0 || !pHCPhys)
1866 {
1867 dprintf(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
1868 pSession, ppvR0, ppvR3, pHCPhys));
1869 return SUPDRV_ERR_INVALID_PARAM;
1870
1871 }
1872 if (cPages == 0 || cPages >= 256)
1873 {
1874 dprintf(("Illegal request cPages=%d, must be greater than 0 and smaller than 256\n", cPages));
1875 return SUPDRV_ERR_INVALID_PARAM;
1876 }
1877
1878#ifdef USE_NEW_OS_INTERFACE
1879 /*
1880 * Let IPRT do the job.
1881 */
1882 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
1883 if (RT_SUCCESS(rc))
1884 {
1885 int rc2;
1886 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (void *)-1, 0,
1887 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
1888 if (RT_SUCCESS(rc))
1889 {
1890 Mem.eType = MEMREF_TYPE_CONT;
1891 rc = supdrvMemAdd(&Mem, pSession);
1892 if (!rc)
1893 {
1894 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
1895 *ppvR3 = (RTR3PTR)RTR0MemObjAddress(Mem.MapObjR3);
1896 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
1897 return 0;
1898 }
1899
1900 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
1901 AssertRC(rc2);
1902 }
1903 rc2 = RTR0MemObjFree(Mem.MemObj, false);
1904 AssertRC(rc2);
1905 }
1906
1907#else /* !USE_NEW_OS_INTERFACE */
1908
1909 /*
1910 * Let the OS specific code have a go.
1911 */
1912 Mem.pvR0 = NULL;
1913 Mem.pvR3 = NIL_RTR3PTR;
1914 Mem.eType = MEMREF_TYPE_CONT;
1915 Mem.cb = cPages << PAGE_SHIFT;
1916 rc = supdrvOSContAllocOne(&Mem, ppvR0, ppvR3, pHCPhys);
1917 if (rc)
1918 return rc;
1919    AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)) && !(*pHCPhys & (PAGE_SIZE - 1)),
1920 ("Memory is not page aligned! *ppvR0=%p *ppvR3=%p phys=%VHp\n", ppvR0 ? *ppvR0 : NULL, *ppvR3, *pHCPhys));
1921
1922 /*
1923     * Everything went fine; add the memory reference to the session.
1924 */
1925 rc = supdrvMemAdd(&Mem, pSession);
1926 if (rc)
1927 supdrvOSContFreeOne(&Mem);
1928#endif /* !USE_NEW_OS_INTERFACE */
1929
1930 return rc;
1931}
1932
1933
1934/**
1935 * Frees memory allocated using SUPR0ContAlloc().
1936 *
1937 * @returns 0 on success.
1938 * @returns SUPDRV_ERR_* on failure.
1939 * @param pSession The session to which the memory was allocated.
1940 * @param uPtr Pointer to the memory (ring-3 or ring-0).
1941 */
1942SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
1943{
1944 dprintf(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
1945 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
1946}
1947
1948
1949/**
1950 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
1951 *
1952 * @returns 0 on success.
1953 * @returns SUPDRV_ERR_* on failure.
1954 * @param pSession Session data.
1955 * @param cPages Number of pages to allocate.
1956 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
1957 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
1958 * @param paPages Where to put the physical addresses of allocated memory.
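 *                      Must have room for one entry per allocated page (cPages entries).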
1959 */
1960SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PSUPPAGE paPages)
1961{
1962 unsigned iPage;
1963 int rc;
1964 SUPDRVMEMREF Mem = {0};
1965 dprintf(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
1966
1967 /*
1968 * Validate input.
1969 */
1970 if (!pSession || !ppvR3 || !ppvR0 || !paPages)
1971 {
1972 dprintf(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
1973 pSession, ppvR3, ppvR0, paPages));
1974 return SUPDRV_ERR_INVALID_PARAM;
1975
1976 }
1977 if (cPages < 1 || cPages > 256)
1978 {
1979        dprintf(("Illegal request cPages=%d, must be between 1 and 256.\n", cPages));
1980 return SUPDRV_ERR_INVALID_PARAM;
1981 }
1982
1983#ifdef USE_NEW_OS_INTERFACE
1984 /*
1985 * Let IPRT do the work.
1986 */
1987 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
1988 if (RT_SUCCESS(rc))
1989 {
1990 int rc2;
1991 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (void *)-1, 0,
1992 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
1993 if (RT_SUCCESS(rc))
1994 {
1995 Mem.eType = MEMREF_TYPE_LOW;
1996 rc = supdrvMemAdd(&Mem, pSession);
1997 if (!rc)
1998 {
1999 for (iPage = 0; iPage < cPages; iPage++)
2000 {
2001 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2002 paPages[iPage].uReserved = 0;
2003 AssertMsg(!(paPages[iPage].Phys & (PAGE_SIZE - 1)), ("iPage=%d Phys=%VHp\n", paPages[iPage].Phys));
2004 }
2005 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2006 *ppvR3 = RTR0MemObjAddress(Mem.MapObjR3);
2007 return 0;
2008 }
2009
2010 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2011 AssertRC(rc2);
2012 }
2013
2014 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2015 AssertRC(rc2);
2016 }
2017
2018#else /* !USE_NEW_OS_INTERFACE */
2019
2020 /*
2021 * Let the OS specific code have a go.
2022 */
2023 Mem.pvR0 = NULL;
2024 Mem.pvR3 = NIL_RTR3PTR;
2025 Mem.eType = MEMREF_TYPE_LOW;
2026 Mem.cb = cPages << PAGE_SHIFT;
2027 rc = supdrvOSLowAllocOne(&Mem, ppvR0, ppvR3, paPages);
2028 if (rc)
2029 return rc;
2030 AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)), ("Memory is not page aligned! virt=%p\n", *ppvR3));
2031 AssertMsg(!((uintptr_t)*ppvR0 & (PAGE_SIZE - 1)), ("Memory is not page aligned! virt=%p\n", *ppvR0));
2032 for (iPage = 0; iPage < cPages; iPage++)
2033 AssertMsg(!(paPages[iPage].Phys & (PAGE_SIZE - 1)), ("iPage=%d Phys=%VHp\n", paPages[iPage].Phys));
2034
2035 /*
2036     * Everything went fine; add the memory reference to the session.
2037 */
2038 rc = supdrvMemAdd(&Mem, pSession);
2039 if (rc)
2040 supdrvOSLowFreeOne(&Mem);
2041#endif /* !USE_NEW_OS_INTERFACE */
2042 return rc;
2043}
2044
2045
2046/**
2047 * Frees memory allocated using SUPR0LowAlloc().
2048 *
2049 * @returns 0 on success.
2050 * @returns SUPDRV_ERR_* on failure.
2051 * @param pSession The session to which the memory was allocated.
2052 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2053 */
2054SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2055{
2056 dprintf(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2057 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
2058}
2059
2060
2061/**
2062 * Allocates a chunk of memory with both R0 and R3 mappings.
2063 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
2064 *
2065 * @returns 0 on success.
2066 * @returns SUPDRV_ERR_* on failure.
2067 * @param   pSession    The session to associate the allocation with.
2068 * @param cb Number of bytes to allocate.
2069 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2070 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2071 */
2072SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
2073{
2074 int rc;
2075 SUPDRVMEMREF Mem = {0};
2076 dprintf(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
2077
2078 /*
2079 * Validate input.
2080 */
2081 if (!pSession || !ppvR0 || !ppvR3)
2082 {
2083 dprintf(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p\n",
2084 pSession, ppvR0, ppvR3));
2085 return SUPDRV_ERR_INVALID_PARAM;
2086
2087 }
2088 if (cb < 1 || cb >= PAGE_SIZE * 256)
2089 {
2090        dprintf(("Illegal request cb=%u; must be greater than 0 and smaller than 1MB (256 pages).\n", cb));
2091 return SUPDRV_ERR_INVALID_PARAM;
2092 }
2093
2094#ifdef USE_NEW_OS_INTERFACE
2095 /*
2096 * Let IPRT do the work.
2097 */
2098 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
2099 if (RT_SUCCESS(rc))
2100 {
2101 int rc2;
2102 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (void*)-1, 0,
2103 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2104 if (RT_SUCCESS(rc))
2105 {
2106 Mem.eType = MEMREF_TYPE_MEM;
2107 rc = supdrvMemAdd(&Mem, pSession);
2108 if (!rc)
2109 {
2110 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2111 *ppvR3 = (RTR3PTR)RTR0MemObjAddress(Mem.MapObjR3);
2112 return 0;
2113 }
2114 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2115 AssertRC(rc2);
2116 }
2117
2118 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2119 AssertRC(rc2);
2120 }
2121
2122#else /* !USE_NEW_OS_INTERFACE */
2123
2124 /*
2125 * Let the OS specific code have a go.
2126 */
2127 Mem.pvR0 = NULL;
2128 Mem.pvR3 = NIL_RTR3PTR;
2129 Mem.eType = MEMREF_TYPE_MEM;
2130 Mem.cb = cb;
2131 rc = supdrvOSMemAllocOne(&Mem, ppvR0, ppvR3);
2132 if (rc)
2133 return rc;
2134 AssertMsg(!((uintptr_t)*ppvR0 & (PAGE_SIZE - 1)), ("Memory is not page aligned! pvR0=%p\n", *ppvR0));
2135 AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)), ("Memory is not page aligned! pvR3=%p\n", *ppvR3));
2136
2137 /*
2138     * Everything went fine; add the memory reference to the session.
2139 */
2140 rc = supdrvMemAdd(&Mem, pSession);
2141 if (rc)
2142 supdrvOSMemFreeOne(&Mem);
2143#endif /* !USE_NEW_OS_INTERFACE */
2144 return rc;
2145}
2146
2147
2148/**
2149 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
2150 *
2151 * @returns 0 on success.
2152 * @returns SUPDRV_ERR_* on failure.
2153 * @param pSession The session to which the memory was allocated.
2154 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2155 * @param paPages Where to store the physical addresses.
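 *                      Must have room for one SUPPAGE entry per page of the original allocation.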
2156 */
2157SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages)
2158{
2159 PSUPDRVBUNDLE pBundle;
2160 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2161 dprintf(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
2162
2163 /*
2164 * Validate input.
2165 */
2166 if (!pSession)
2167 {
2168 dprintf(("pSession must not be NULL!"));
2169 return SUPDRV_ERR_INVALID_PARAM;
2170 }
2171 if (!uPtr || !paPages)
2172 {
2173 dprintf(("Illegal address uPtr=%p or/and paPages=%p\n", (void *)uPtr, paPages));
2174 return SUPDRV_ERR_INVALID_PARAM;
2175 }
2176
2177 /*
2178 * Search for the address.
2179 */
2180 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2181 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2182 {
2183 if (pBundle->cUsed > 0)
2184 {
2185 unsigned i;
2186 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
2187 {
2188#ifdef USE_NEW_OS_INTERFACE
2189 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2190 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2191 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2192 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2193 && (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MapObjR3) == uPtr)
2194 )
2195 )
2196 {
2197 const unsigned cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2198 unsigned iPage;
2199 for (iPage = 0; iPage < cPages; iPage++)
2200 {
2201 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2202 paPages[iPage].uReserved = 0;
2203 }
2204 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2205 return 0;
2206 }
2207#else /* !USE_NEW_OS_INTERFACE */
2208 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2209 && ( (RTHCUINTPTR)pBundle->aMem[i].pvR0 == uPtr
2210 || (RTHCUINTPTR)pBundle->aMem[i].pvR3 == uPtr))
2211 {
2212 supdrvOSMemGetPages(&pBundle->aMem[i], paPages);
2213 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2214 return 0;
2215 }
2216#endif
2217 }
2218 }
2219 }
2220 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2221 dprintf(("Failed to find %p!!!\n", (void *)uPtr));
2222 return SUPDRV_ERR_INVALID_PARAM;
2223}
2224
2225
2226/**
2227 * Free memory allocated by SUPR0MemAlloc().
2228 *
2229 * @returns 0 on success.
2230 * @returns SUPDRV_ERR_* on failure.
2231 * @param pSession The session owning the allocation.
2232 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2233 */
2234SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2235{
2236 dprintf(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2237 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
2238}
2239
2240
2241/**
2242 * Maps the GIP into userspace and/or gets the physical address of the GIP.
2243 *
2244 * @returns 0 on success.
2245 * @returns SUPDRV_ERR_* on failure.
2246 * @param pSession Session to which the GIP mapping should belong.
2247 * @param ppGipR3 Where to store the address of the ring-3 mapping. (optional)
2248 * @param   pHCPhysGid  Where to store the physical address. (optional)
2249 *
2250 * @remark  There is no reference counting on the mapping; one call to this function
2251 *          counts globally as one reference. One call to SUPR0GipUnmap() will unmap the GIP
2252 *          and remove the session as a GIP user.
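 *
 * Illustrative pairing (a sketch; error handling omitted):
 * @code
 *     RTR3PTR  pGipR3;
 *     RTHCPHYS HCPhysGip;
 *     int rc = SUPR0GipMap(pSession, &pGipR3, &HCPhysGip);
 *     // ... read the GIP through pGipR3 ...
 *     SUPR0GipUnmap(pSession);
 * @endcode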
2253 */
2254SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGid)
2255{
2256 int rc = 0;
2257 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2258 RTR3PTR pGip = NIL_RTR3PTR;
2259 RTHCPHYS HCPhys = NIL_RTHCPHYS;
2260 dprintf(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGid=%p\n", pSession, ppGipR3, pHCPhysGid));
2261
2262 /*
2263 * Validate
2264 */
2265 if (!ppGipR3 && !pHCPhysGid)
2266 return 0;
2267
2268 RTSemFastMutexRequest(pDevExt->mtxGip);
2269 if (pDevExt->pGip)
2270 {
2271 /*
2272 * Map it?
2273 */
2274 if (ppGipR3)
2275 {
2276#ifdef USE_NEW_OS_INTERFACE
2277 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
2278 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (void*)-1, 0,
2279 RTMEM_PROT_READ, RTR0ProcHandleSelf());
2280 if (RT_SUCCESS(rc))
2281 {
2282 pGip = (RTR3PTR)RTR0MemObjAddress(pSession->GipMapObjR3);
2283 rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
2284 }
2285#else /* !USE_NEW_OS_INTERFACE */
2286 if (!pSession->pGip)
2287 rc = supdrvOSGipMap(pSession->pDevExt, &pSession->pGip);
2288 if (!rc)
2289 pGip = (RTR3PTR)pSession->pGip;
2290#endif /* !USE_NEW_OS_INTERFACE */
2291 }
2292
2293 /*
2294 * Get physical address.
2295 */
2296 if (pHCPhysGid && !rc)
2297 HCPhys = pDevExt->HCPhysGip;
2298
2299 /*
2300 * Reference globally.
2301 */
2302 if (!pSession->fGipReferenced && !rc)
2303 {
2304 pSession->fGipReferenced = 1;
2305 pDevExt->cGipUsers++;
2306 if (pDevExt->cGipUsers == 1)
2307 {
2308 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2309 unsigned i;
2310
2311 dprintf(("SUPR0GipMap: Resumes GIP updating\n"));
2312
2313 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
2314 ASMAtomicXchgU32(&pGip->aCPUs[i].u32TransactionId, pGip->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
2315 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, 0);
2316
2317#ifdef USE_NEW_OS_INTERFACE
2318 rc = RTTimerStart(pDevExt->pGipTimer, 0);
2319 AssertRC(rc); rc = 0;
2320#else
2321 supdrvOSGipResume(pDevExt);
2322#endif
2323 }
2324 }
2325 }
2326 else
2327 {
2328 rc = SUPDRV_ERR_GENERAL_FAILURE;
2329 dprintf(("SUPR0GipMap: GIP is not available!\n"));
2330 }
2331 RTSemFastMutexRelease(pDevExt->mtxGip);
2332
2333 /*
2334 * Write returns.
2335 */
2336 if (pHCPhysGid)
2337 *pHCPhysGid = HCPhys;
2338 if (ppGipR3)
2339 *ppGipR3 = pGip;
2340
2341#ifdef DEBUG_DARWIN_GIP
2342 OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGid=%lx *ppGip=%p GipMapObjR3\n", rc, (unsigned long)HCPhys, pGip, pSession->GipMapObjR3));
2343#else
2344 dprintf(("SUPR0GipMap: returns %d *pHCPhysGid=%lx *ppGipR3=%p\n", rc, (unsigned long)HCPhys, (void *)(uintptr_t)pGip));
2345#endif
2346 return rc;
2347}
2348
2349
2350/**
2351 * Unmaps any user mapping of the GIP and terminates all GIP access
2352 * from this session.
2353 *
2354 * @returns 0 on success.
2355 * @returns SUPDRV_ERR_* on failure.
2356 * @param pSession Session to which the GIP mapping should belong.
2357 */
2358SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
2359{
2360 int rc = 0;
2361 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2362#ifdef DEBUG_DARWIN_GIP
2363 OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
2364 pSession,
2365 pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
2366 pSession->GipMapObjR3));
2367#else
2368 dprintf(("SUPR0GipUnmap: pSession=%p\n", pSession));
2369#endif
2370
2371 RTSemFastMutexRequest(pDevExt->mtxGip);
2372
2373 /*
2374 * Unmap anything?
2375 */
2376#ifdef USE_NEW_OS_INTERFACE
2377 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
2378 {
2379 rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
2380 AssertRC(rc);
2381 if (RT_SUCCESS(rc))
2382 pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
2383 }
2384#else
2385 if (pSession->pGip)
2386 {
2387 rc = supdrvOSGipUnmap(pDevExt, pSession->pGip);
2388 if (!rc)
2389 pSession->pGip = NULL;
2390 }
2391#endif
2392
2393 /*
2394 * Dereference global GIP.
2395 */
2396 if (pSession->fGipReferenced && !rc)
2397 {
2398 pSession->fGipReferenced = 0;
2399 if ( pDevExt->cGipUsers > 0
2400 && !--pDevExt->cGipUsers)
2401 {
2402 dprintf(("SUPR0GipUnmap: Suspends GIP updating\n"));
2403#ifdef USE_NEW_OS_INTERFACE
2404 rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = 0;
2405#else
2406 supdrvOSGipSuspend(pDevExt);
2407#endif
2408 }
2409 }
2410
2411 RTSemFastMutexRelease(pDevExt->mtxGip);
2412
2413 return rc;
2414}
2415
2416
2417/**
2418 * Adds a memory object to the session.
2419 *
2420 * @returns 0 on success.
2421 * @returns SUPDRV_ERR_* on failure.
2422 * @param pMem Memory tracking structure containing the
2423 * information to track.
2424 * @param pSession The session.
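 *
 * @remark  Tracking records live in fixed-size bundles (SUPDRVBUNDLE::aMem); when no
 *          existing bundle has a free slot, a new bundle is allocated and pushed onto
 *          the session's bundle list.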
2425 */
2426static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
2427{
2428 PSUPDRVBUNDLE pBundle;
2429 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2430
2431 /*
2432 * Find free entry and record the allocation.
2433 */
2434 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2435 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2436 {
2437 if (pBundle->cUsed < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]))
2438 {
2439 unsigned i;
2440 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
2441 {
2442#ifdef USE_NEW_OS_INTERFACE
2443 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
2444#else /* !USE_NEW_OS_INTERFACE */
2445 if ( !pBundle->aMem[i].pvR0
2446 && !pBundle->aMem[i].pvR3)
2447#endif /* !USE_NEW_OS_INTERFACE */
2448 {
2449 pBundle->cUsed++;
2450 pBundle->aMem[i] = *pMem;
2451 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2452 return 0;
2453 }
2454 }
2455 AssertFailed(); /* !!this can't be happening!!! */
2456 }
2457 }
2458 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2459
2460 /*
2461 * Need to allocate a new bundle.
2462 * Insert into the last entry in the bundle.
2463 */
2464 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
2465 if (!pBundle)
2466 return SUPDRV_ERR_NO_MEMORY;
2467
2468 /* take last entry. */
2469 pBundle->cUsed++;
2470 pBundle->aMem[sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]) - 1] = *pMem;
2471
2472 /* insert into list. */
2473 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2474 pBundle->pNext = pSession->Bundle.pNext;
2475 pSession->Bundle.pNext = pBundle;
2476 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2477
2478 return 0;
2479}
2480
2481
2482/**
2483 * Releases a memory object referenced by pointer and type.
2484 *
2485 * @returns 0 on success.
2486 * @returns SUPDRV_ERR_INVALID_PARAM on failure.
2487 * @param pSession Session data.
2488 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
2489 * @param eType Memory type.
2490 */
2491static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
2492{
2493 PSUPDRVBUNDLE pBundle;
2494 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2495
2496 /*
2497 * Validate input.
2498 */
2499 if (!pSession)
2500 {
2501 dprintf(("pSession must not be NULL!"));
2502 return SUPDRV_ERR_INVALID_PARAM;
2503 }
2504 if (!uPtr)
2505 {
2506 dprintf(("Illegal address %p\n", (void *)uPtr));
2507 return SUPDRV_ERR_INVALID_PARAM;
2508 }
2509
2510 /*
2511 * Search for the address.
2512 */
2513 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2514 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2515 {
2516 if (pBundle->cUsed > 0)
2517 {
2518 unsigned i;
2519 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
2520 {
2521#ifdef USE_NEW_OS_INTERFACE
2522 if ( pBundle->aMem[i].eType == eType
2523 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2524 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2525 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2526 && (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MapObjR3) == uPtr))
2527 )
2528 {
2529 /* Make a copy of it and release it outside the spinlock. */
2530 SUPDRVMEMREF Mem = pBundle->aMem[i];
2531 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
2532 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
2533 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
2534 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2535
2536 if (Mem.MapObjR3)
2537 {
2538 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
2539 AssertRC(rc); /** @todo figure out how to handle this. */
2540 }
2541 if (Mem.MemObj)
2542 {
2543 int rc = RTR0MemObjFree(Mem.MemObj, false);
2544 AssertRC(rc); /** @todo figure out how to handle this. */
2545 }
2546 return 0;
2547 }
2548#else /* !USE_NEW_OS_INTERFACE */
2549 if ( pBundle->aMem[i].eType == eType
2550 && ( (RTHCUINTPTR)pBundle->aMem[i].pvR0 == uPtr
2551 || (RTHCUINTPTR)pBundle->aMem[i].pvR3 == uPtr))
2552 {
2553 /* Make a copy of it and release it outside the spinlock. */
2554 SUPDRVMEMREF Mem = pBundle->aMem[i];
2555 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
2556 pBundle->aMem[i].pvR0 = NULL;
2557 pBundle->aMem[i].pvR3 = NIL_RTR3PTR;
2558 pBundle->aMem[i].cb = 0;
2559 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2560
2561 /* Type specific free operation. */
2562 switch (Mem.eType)
2563 {
2564 case MEMREF_TYPE_LOCKED:
2565 supdrvOSUnlockMemOne(&Mem);
2566 break;
2567 case MEMREF_TYPE_CONT:
2568 supdrvOSContFreeOne(&Mem);
2569 break;
2570 case MEMREF_TYPE_LOW:
2571 supdrvOSLowFreeOne(&Mem);
2572 break;
2573 case MEMREF_TYPE_MEM:
2574 supdrvOSMemFreeOne(&Mem);
2575 break;
2576 default:
2577 case MEMREF_TYPE_UNUSED:
2578 break;
2579 }
2580 return 0;
2581 }
2582#endif /* !USE_NEW_OS_INTERFACE */
2583 }
2584 }
2585 }
2586 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2587 dprintf(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
2588 return SUPDRV_ERR_INVALID_PARAM;
2589}
2590
2591
2592#ifndef VBOX_WITHOUT_IDT_PATCHING
2593/**
2594 * Install IDT for the current CPU.
2595 *
2596 * @returns 0 on success.
2597 * @returns SUPDRV_ERR_NO_MEMORY or SUPDRV_ERR_IDT_FAILED on failure.
2598 * @param pIn Input data.
2599 * @param pOut Output data.
2600 */
2601static int supdrvIOCtl_IdtInstall(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPIDTINSTALL_IN pIn, PSUPIDTINSTALL_OUT pOut)
2602{
2603 PSUPDRVPATCHUSAGE pUsagePre;
2604 PSUPDRVPATCH pPatchPre;
2605 RTIDTR Idtr;
2606 PSUPDRVPATCH pPatch;
2607 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2608 dprintf(("supdrvIOCtl_IdtInstall\n"));
2609
2610 /*
2611 * Preallocate entry for this CPU cause we don't wanna do
2612 * that inside the spinlock!
2613 */
2614 pUsagePre = (PSUPDRVPATCHUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2615 if (!pUsagePre)
2616 return SUPDRV_ERR_NO_MEMORY;
2617
2618 /*
2619 * Take the spinlock and see what we need to do.
2620 */
2621 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
2622
2623 /* check if we already got a free patch. */
2624 if (!pDevExt->pIdtPatchesFree)
2625 {
2626 /*
2627 * Allocate a patch - outside the spinlock of course.
2628 */
2629 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
2630
2631 pPatchPre = (PSUPDRVPATCH)RTMemExecAlloc(sizeof(*pPatchPre));
2632 if (!pPatchPre)
2633 return SUPDRV_ERR_NO_MEMORY;
2634
2635 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
2636 }
2637 else
2638 {
2639 pPatchPre = pDevExt->pIdtPatchesFree;
2640 pDevExt->pIdtPatchesFree = pPatchPre->pNext;
2641 }
2642
2643 /* look for matching patch entry */
2644 ASMGetIDTR(&Idtr);
2645 pPatch = pDevExt->pIdtPatches;
2646 while (pPatch && pPatch->pvIdt != (void *)Idtr.pIdt)
2647 pPatch = pPatch->pNext;
2648
2649 if (!pPatch)
2650 {
2651 /*
2652 * Create patch.
2653 */
2654 pPatch = supdrvIdtPatchOne(pDevExt, pPatchPre);
2655 if (pPatch)
2656 pPatchPre = NULL; /* mark as used. */
2657 }
2658 else
2659 {
2660 /*
2661 * Simply increment patch usage.
2662 */
2663 pPatch->cUsage++;
2664 }
2665
2666 if (pPatch)
2667 {
2668 /*
2669 * Increment and add if need be the session usage record for this patch.
2670 */
2671 PSUPDRVPATCHUSAGE pUsage = pSession->pPatchUsage;
2672 while (pUsage && pUsage->pPatch != pPatch)
2673 pUsage = pUsage->pNext;
2674
2675 if (!pUsage)
2676 {
2677 /*
2678 * Add usage record.
2679 */
2680 pUsagePre->cUsage = 1;
2681 pUsagePre->pPatch = pPatch;
2682 pUsagePre->pNext = pSession->pPatchUsage;
2683 pSession->pPatchUsage = pUsagePre;
2684 pUsagePre = NULL; /* mark as used. */
2685 }
2686 else
2687 {
2688 /*
2689 * Increment usage count.
2690 */
2691 pUsage->cUsage++;
2692 }
2693 }
2694
2695    /* free patch - we accumulate them for paranoid safety reasons. */
2696 if (pPatchPre)
2697 {
2698 pPatchPre->pNext = pDevExt->pIdtPatchesFree;
2699 pDevExt->pIdtPatchesFree = pPatchPre;
2700 }
2701
2702 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
2703
2704 /*
2705 * Free unused preallocated buffers.
2706 */
2707 if (pUsagePre)
2708 RTMemFree(pUsagePre);
2709
2710 pOut->u8Idt = pDevExt->u8Idt;
2711
2712 return pPatch ? 0 : SUPDRV_ERR_IDT_FAILED;
2713}
2714
2715
2716/**
2717 * This creates an IDT patch entry.
2718 * If it's the first patch being installed, it'll also determine the IDT entry
2719 * to use.
2720 *
2721 * @returns pPatch on success.
2722 * @returns NULL on failure.
2723 * @param pDevExt Pointer to globals.
2724 * @param pPatch Patch entry to use.
2725 * This will be linked into SUPDRVDEVEXT::pIdtPatches on
2726 * successful return.
2727 * @remark  The caller must own the SUPDRVDEVEXT::Spinlock!
2728 */
2729static PSUPDRVPATCH supdrvIdtPatchOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch)
2730{
2731 RTIDTR Idtr;
2732 PSUPDRVIDTE paIdt;
2733    dprintf(("supdrvIdtPatchOne: pPatch=%p\n", pPatch));
2734
2735 /*
2736 * Get IDT.
2737 */
2738 ASMGetIDTR(&Idtr);
2739 paIdt = (PSUPDRVIDTE)Idtr.pIdt;
2740 /*
2741     * Recent Linux kernels can be configured for a 1G user / 3G kernel split.
2742 */
2743 if ((uintptr_t)paIdt < 0x40000000)
2744 {
2745 AssertMsgFailed(("bad paIdt=%p\n", paIdt));
2746 return NULL;
2747 }
2748
2749 if (!pDevExt->u8Idt)
2750 {
2751 /*
2752 * Test out the alternatives.
2753 *
2754     * At the moment we do not support chaining, thus we ASSUME that one of
2755     * the entries listed below is unused (which is not a problem on Win32 and
2756     * Linux to my knowledge).
2757 */
2758 /** @todo we MUST change this detection to try grab an entry which is NOT in use. This can be
2759 * combined with gathering info about which guest system call gates we can hook up directly. */
2760 unsigned i;
2761 uint8_t u8Idt = 0;
2762 static uint8_t au8Ints[] =
2763 {
2764#ifdef __WIN__ /* We don't use 0xef and above because they are system stuff on linux (ef is IPI,
2765                * local apic timer, or some other frequently firing thing). */
2766 0xef, 0xee, 0xed, 0xec,
2767#endif
2768 0xeb, 0xea, 0xe9, 0xe8,
2769 0xdf, 0xde, 0xdd, 0xdc,
2770 0x7b, 0x7a, 0x79, 0x78,
2771 0xbf, 0xbe, 0xbd, 0xbc,
2772 };
2773#if defined(__AMD64__) && defined(DEBUG)
2774 static int s_iWobble = 0;
2775 unsigned iMax = !(s_iWobble++ % 2) ? 0x80 : 0x100;
2776 dprintf(("IDT: Idtr=%p:%#x\n", (void *)Idtr.pIdt, (unsigned)Idtr.cbIdt));
2777 for (i = iMax - 0x80; i*16+15 < Idtr.cbIdt && i < iMax; i++)
2778 {
2779 dprintf(("%#x: %04x:%08x%04x%04x P=%d DPL=%d IST=%d Type1=%#x u32Reserved=%#x u5Reserved=%#x\n",
2780 i, paIdt[i].u16SegSel, paIdt[i].u32OffsetTop, paIdt[i].u16OffsetHigh, paIdt[i].u16OffsetLow,
2781 paIdt[i].u1Present, paIdt[i].u2DPL, paIdt[i].u3IST, paIdt[i].u5Type2,
2782 paIdt[i].u32Reserved, paIdt[i].u5Reserved));
2783 }
2784#endif
2785 /* look for entries which are not present or otherwise unused. */
2786 for (i = 0; i < sizeof(au8Ints) / sizeof(au8Ints[0]); i++)
2787 {
2788 u8Idt = au8Ints[i];
2789 if ( u8Idt * sizeof(SUPDRVIDTE) < Idtr.cbIdt
2790 && ( !paIdt[u8Idt].u1Present
2791 || paIdt[u8Idt].u5Type2 == 0))
2792 break;
2793 u8Idt = 0;
2794 }
2795 if (!u8Idt)
2796 {
2797        /* try again, looking for a compatible entry. */
2798 for (i = 0; i < sizeof(au8Ints) / sizeof(au8Ints[0]); i++)
2799 {
2800 u8Idt = au8Ints[i];
2801 if ( u8Idt * sizeof(SUPDRVIDTE) < Idtr.cbIdt
2802 && paIdt[u8Idt].u1Present
2803 && paIdt[u8Idt].u5Type2 == SUPDRV_IDTE_TYPE2_INTERRUPT_GATE
2804 && !(paIdt[u8Idt].u16SegSel & 3))
2805 break;
2806 u8Idt = 0;
2807 }
2808 if (!u8Idt)
2809 {
2810            dprintf(("Failed to find an appropriate IDT entry!!\n"));
2811 return NULL;
2812 }
2813 }
2814 pDevExt->u8Idt = u8Idt;
2815    dprintf(("supdrvIdtPatchOne: u8Idt=%x\n", u8Idt));
2816 }
2817
2818 /*
2819 * Prepare the patch
2820 */
2821 memset(pPatch, 0, sizeof(*pPatch));
2822 pPatch->pvIdt = paIdt;
2823 pPatch->cUsage = 1;
2824 pPatch->pIdtEntry = &paIdt[pDevExt->u8Idt];
2825 pPatch->SavedIdt = paIdt[pDevExt->u8Idt];
2826 pPatch->ChangedIdt.u16OffsetLow = (uint32_t)((uintptr_t)&pPatch->auCode[0] & 0xffff);
2827 pPatch->ChangedIdt.u16OffsetHigh = (uint32_t)((uintptr_t)&pPatch->auCode[0] >> 16);
2828#ifdef __AMD64__
2829 pPatch->ChangedIdt.u32OffsetTop = (uint32_t)((uintptr_t)&pPatch->auCode[0] >> 32);
2830#endif
2831 pPatch->ChangedIdt.u16SegSel = ASMGetCS();
2832#ifdef __AMD64__
2833 pPatch->ChangedIdt.u3IST = 0;
2834 pPatch->ChangedIdt.u5Reserved = 0;
2835#else /* x86 */
2836 pPatch->ChangedIdt.u5Reserved = 0;
2837 pPatch->ChangedIdt.u3Type1 = 0;
2838#endif /* x86 */
2839 pPatch->ChangedIdt.u5Type2 = SUPDRV_IDTE_TYPE2_INTERRUPT_GATE;
2840 pPatch->ChangedIdt.u2DPL = 3;
2841 pPatch->ChangedIdt.u1Present = 1;
2842
2843 /*
2844 * Generate the patch code.
2845 */
2846 {
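        /*
         * Layout of the generated code (both variants):
         *  1) Compare the global cookie against the register value set up by the caller of
         *     the patched interrupt; on a mismatch, forward to the original / not-nested
         *     handler so foreign users of the vector keep working.
         *  2) On a match, establish a kernel context (swapgs on AMD64, ds/es/fs reload on
         *     x86), build a small call frame, call VMMR0Entry (or the local stub while
         *     VMMR0 isn't loaded yet), undo the context switch and iret.
         */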
2847#ifdef __AMD64__
2848 union
2849 {
2850 uint8_t *pb;
2851 uint32_t *pu32;
2852 uint64_t *pu64;
2853 } u, uFixJmp, uFixCall, uNotNested;
2854 u.pb = &pPatch->auCode[0];
2855
2856 /* check the cookie */
2857 *u.pb++ = 0x3d; // cmp eax, GLOBALCOOKIE
2858 *u.pu32++ = pDevExt->u32Cookie;
2859
2860 *u.pb++ = 0x74; // jz @VBoxCall
2861 *u.pb++ = 2;
2862
2863 /* jump to forwarder code. */
2864 *u.pb++ = 0xeb;
2865 uFixJmp = u;
2866 *u.pb++ = 0xfe;
2867
2868 // @VBoxCall:
2869 *u.pb++ = 0x0f; // swapgs
2870 *u.pb++ = 0x01;
2871 *u.pb++ = 0xf8;
2872
2873 /*
2874 * Call VMMR0Entry
2875     * We don't have to push the arguments here, but we have to
2876 * reserve some stack space for the interrupt forwarding.
2877 */
2878# ifdef __WIN__
2879 *u.pb++ = 0x50; // push rax ; alignment filler.
2880 *u.pb++ = 0x41; // push r8 ; uArg
2881 *u.pb++ = 0x50;
2882 *u.pb++ = 0x52; // push rdx ; uOperation
2883 *u.pb++ = 0x51; // push rcx ; pVM
2884# else
2885 *u.pb++ = 0x51; // push rcx ; alignment filler.
2886 *u.pb++ = 0x52; // push rdx ; uArg
2887 *u.pb++ = 0x56; // push rsi ; uOperation
2888 *u.pb++ = 0x57; // push rdi ; pVM
2889# endif
2890
2891 *u.pb++ = 0xff; // call qword [pfnVMMR0Entry wrt rip]
2892 *u.pb++ = 0x15;
2893 uFixCall = u;
2894 *u.pu32++ = 0;
2895
2896 *u.pb++ = 0x48; // add rsp, 20h ; remove call frame.
2897 *u.pb++ = 0x81;
2898 *u.pb++ = 0xc4;
2899 *u.pu32++ = 0x20;
2900
2901 *u.pb++ = 0x0f; // swapgs
2902 *u.pb++ = 0x01;
2903 *u.pb++ = 0xf8;
2904
2905 /* Return to R3. */
2906 uNotNested = u;
2907 *u.pb++ = 0x48; // iretq
2908 *u.pb++ = 0xcf;
2909
2910 while ((uintptr_t)u.pb & 0x7) // align 8
2911 *u.pb++ = 0xcc;
2912
2913 /* Pointer to the VMMR0Entry. */ // pfnVMMR0Entry dq StubVMMR0Entry
2914 *uFixCall.pu32 = (uint32_t)(u.pb - uFixCall.pb - 4); uFixCall.pb = NULL;
2915 pPatch->offVMMR0EntryFixup = (uint16_t)(u.pb - &pPatch->auCode[0]);
2916 *u.pu64++ = pDevExt->pvVMMR0 ? (uint64_t)pDevExt->pfnVMMR0Entry : (uint64_t)u.pb + 8;
2917
2918 /* stub entry. */ // StubVMMR0Entry:
2919 pPatch->offStub = (uint16_t)(u.pb - &pPatch->auCode[0]);
2920 *u.pb++ = 0x33; // xor eax, eax
2921 *u.pb++ = 0xc0;
2922
2923 *u.pb++ = 0x48; // dec rax
2924 *u.pb++ = 0xff;
2925 *u.pb++ = 0xc8;
2926
2927 *u.pb++ = 0xc3; // ret
2928
2929 /* forward to the original handler using a retf. */
2930 *uFixJmp.pb = (uint8_t)(u.pb - uFixJmp.pb - 1); uFixJmp.pb = NULL;
2931
2932 *u.pb++ = 0x68; // push <target cs>
2933 *u.pu32++ = !pPatch->SavedIdt.u5Type2 ? ASMGetCS() : pPatch->SavedIdt.u16SegSel;
2934
2935 *u.pb++ = 0x68; // push <low target rip>
2936 *u.pu32++ = !pPatch->SavedIdt.u5Type2
2937 ? (uint32_t)(uintptr_t)uNotNested.pb
2938 : (uint32_t)pPatch->SavedIdt.u16OffsetLow
2939 | (uint32_t)pPatch->SavedIdt.u16OffsetHigh << 16;
2940
2941 *u.pb++ = 0xc7; // mov dword [rsp + 4], <high target rip>
2942 *u.pb++ = 0x44;
2943 *u.pb++ = 0x24;
2944 *u.pb++ = 0x04;
2945 *u.pu32++ = !pPatch->SavedIdt.u5Type2
2946 ? (uint32_t)((uint64_t)uNotNested.pb >> 32)
2947 : pPatch->SavedIdt.u32OffsetTop;
2948
2949 *u.pb++ = 0x48; // retf ; does this require prefix?
2950 *u.pb++ = 0xcb;
2951
2952#else /* __X86__ */
2953
2954 union
2955 {
2956 uint8_t *pb;
2957 uint16_t *pu16;
2958 uint32_t *pu32;
2959 } u, uFixJmpNotNested, uFixJmp, uFixCall, uNotNested;
2960 u.pb = &pPatch->auCode[0];
2961
2962 /* check the cookie */
2963 *u.pb++ = 0x81; // cmp esi, GLOBALCOOKIE
2964 *u.pb++ = 0xfe;
2965 *u.pu32++ = pDevExt->u32Cookie;
2966
2967 *u.pb++ = 0x74; // jz VBoxCall
2968 uFixJmp = u;
2969 *u.pb++ = 0;
2970
2971 /* jump (far) to the original handler / not-nested-stub. */
2972 *u.pb++ = 0xea; // jmp far NotNested
2973 uFixJmpNotNested = u;
2974 *u.pu32++ = 0;
2975 *u.pu16++ = 0;
2976
2977 /* save selector registers. */ // VBoxCall:
2978 *uFixJmp.pb = (uint8_t)(u.pb - uFixJmp.pb - 1);
2979 *u.pb++ = 0x0f; // push fs
2980 *u.pb++ = 0xa0;
2981
2982 *u.pb++ = 0x1e; // push ds
2983
2984 *u.pb++ = 0x06; // push es
2985
2986 /* call frame */
2987 *u.pb++ = 0x51; // push ecx
2988
2989 *u.pb++ = 0x52; // push edx
2990
2991 *u.pb++ = 0x50; // push eax
2992
2993 /* load ds, es and perhaps fs before call. */
2994 *u.pb++ = 0xb8; // mov eax, KernelDS
2995 *u.pu32++ = ASMGetDS();
2996
2997 *u.pb++ = 0x8e; // mov ds, eax
2998 *u.pb++ = 0xd8;
2999
3000 *u.pb++ = 0x8e; // mov es, eax
3001 *u.pb++ = 0xc0;
3002
3003#ifdef __WIN__
3004 *u.pb++ = 0xb8; // mov eax, KernelFS
3005 *u.pu32++ = ASMGetFS();
3006
3007 *u.pb++ = 0x8e; // mov fs, eax
3008 *u.pb++ = 0xe0;
3009#endif
3010
3011 /* do the call. */
3012 *u.pb++ = 0xe8; // call _VMMR0Entry / StubVMMR0Entry
3013 uFixCall = u;
3014 pPatch->offVMMR0EntryFixup = (uint16_t)(u.pb - &pPatch->auCode[0]);
3015 *u.pu32++ = 0xfffffffb;
3016
3017 *u.pb++ = 0x83; // add esp, 0ch ; cdecl
3018 *u.pb++ = 0xc4;
3019 *u.pb++ = 0x0c;
3020
3021 /* restore selector registers. */
3022 *u.pb++ = 0x07; // pop es
3023 //
3024 *u.pb++ = 0x1f; // pop ds
3025
3026 *u.pb++ = 0x0f; // pop fs
3027 *u.pb++ = 0xa1;
3028
3029 uNotNested = u; // NotNested:
3030 *u.pb++ = 0xcf; // iretd
3031
3032 /* the stub VMMR0Entry. */ // StubVMMR0Entry:
3033 pPatch->offStub = (uint16_t)(u.pb - &pPatch->auCode[0]);
3034 *u.pb++ = 0x33; // xor eax, eax
3035 *u.pb++ = 0xc0;
3036
3037 *u.pb++ = 0x48; // dec eax
3038
3039 *u.pb++ = 0xc3; // ret
3040
3041 /* Fixup the VMMR0Entry call. */
3042 if (pDevExt->pvVMMR0)
3043 *uFixCall.pu32 = (uint32_t)pDevExt->pfnVMMR0Entry - (uint32_t)(uFixCall.pu32 + 1);
3044 else
3045 *uFixCall.pu32 = (uint32_t)&pPatch->auCode[pPatch->offStub] - (uint32_t)(uFixCall.pu32 + 1);
3046
3047 /* Fixup the forward / nested far jump. */
3048 if (!pPatch->SavedIdt.u5Type2)
3049 {
3050 *uFixJmpNotNested.pu32++ = (uint32_t)uNotNested.pb;
3051 *uFixJmpNotNested.pu16++ = ASMGetCS();
3052 }
3053 else
3054 {
3055 *uFixJmpNotNested.pu32++ = ((uint32_t)pPatch->SavedIdt.u16OffsetHigh << 16) | pPatch->SavedIdt.u16OffsetLow;
3056 *uFixJmpNotNested.pu16++ = pPatch->SavedIdt.u16SegSel;
3057 }
3058#endif /* __X86__ */
3059 Assert(u.pb <= &pPatch->auCode[sizeof(pPatch->auCode)]);
3060#if 0
3061 /* dump the patch code */
3062 dprintf(("patch code: %p\n", &pPatch->auCode[0]));
3063 for (uFixCall.pb = &pPatch->auCode[0]; uFixCall.pb < u.pb; uFixCall.pb++)
3064 dprintf(("0x%02x,\n", *uFixCall.pb));
3065#endif
3066 }
3067
3068 /*
3069 * Install the patch.
3070 */
3071 supdrvIdtWrite(pPatch->pIdtEntry, &pPatch->ChangedIdt);
3072 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)), ("The stupid change code didn't work!!!!!\n"));
3073
3074 /*
3075 * Link in the patch.
3076 */
3077 pPatch->pNext = pDevExt->pIdtPatches;
3078 pDevExt->pIdtPatches = pPatch;
3079
3080 return pPatch;
3081}
3082
3083
3084/**
3085 * Removes the session's IDT references.
3086 * This will uninstall our IDT patch if it is left unreferenced.
3087 *
3088 * @returns 0 indicating success.
3089 * @param pDevExt Device globals.
3090 * @param pSession Session data.
3091 */
3092static int supdrvIOCtl_IdtRemoveAll(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
3093{
3094 PSUPDRVPATCHUSAGE pUsage;
3095 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3096 dprintf(("supdrvIOCtl_IdtRemoveAll: pSession=%p\n", pSession));
3097
3098 /*
3099 * Take the spinlock.
3100 */
3101 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
3102
3103 /*
3104 * Walk usage list.
3105 */
3106 pUsage = pSession->pPatchUsage;
3107 while (pUsage)
3108 {
3109 if (pUsage->pPatch->cUsage <= pUsage->cUsage)
3110 supdrvIdtRemoveOne(pDevExt, pUsage->pPatch);
3111 else
3112 pUsage->pPatch->cUsage -= pUsage->cUsage;
3113
3114 /* next */
3115 pUsage = pUsage->pNext;
3116 }
3117
3118 /*
3119 * Empty the usage chain and we're done inside the spinlock.
3120 */
3121 pUsage = pSession->pPatchUsage;
3122 pSession->pPatchUsage = NULL;
3123
3124 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
3125
3126 /*
3127 * Free usage entries.
3128 */
3129 while (pUsage)
3130 {
3131 void *pvToFree = pUsage;
3132 pUsage->cUsage = 0;
3133 pUsage->pPatch = NULL;
3134 pUsage = pUsage->pNext;
3135 RTMemFree(pvToFree);
3136 }
3137
3138 return 0;
3139}
3140
3141
3142/**
3143 * Remove one patch.
3144 *
3145 * @param pDevExt Device globals.
3146 * @param pPatch Patch entry to remove.
3147 * @remark Caller must own SUPDRVDEVEXT::Spinlock!
3148 */
3149static void supdrvIdtRemoveOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch)
3150{
3151 dprintf(("supdrvIdtRemoveOne: pPatch=%p\n", pPatch));
3152
3153 pPatch->cUsage = 0;
3154
3155 /*
3156     * If the IDT entry has been changed by someone else, it has to stick around forever!
3157     * We'll attempt to free it again later; perhaps next time we'll succeed. :-)
3158 */
3159 if (memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)))
3160 {
3161 AssertMsgFailed(("The hijacked IDT entry has CHANGED!!!\n"));
3162 return;
3163 }
3164
3165 /*
3166 * Unlink it.
3167 */
3168 if (pDevExt->pIdtPatches != pPatch)
3169 {
3170 PSUPDRVPATCH pPatchPrev = pDevExt->pIdtPatches;
3171 while (pPatchPrev)
3172 {
3173 if (pPatchPrev->pNext == pPatch)
3174 {
3175 pPatchPrev->pNext = pPatch->pNext;
3176 break;
3177 }
3178 pPatchPrev = pPatchPrev->pNext;
3179 }
3180        Assert(pPatchPrev); /* we must have found and unlinked the patch above. */
3181 }
3182 else
3183 pDevExt->pIdtPatches = pPatch->pNext;
3184 pPatch->pNext = NULL;
3185
3186
3187 /*
3188 * Verify and restore the IDT.
3189 */
3190 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)), ("The hijacked IDT entry has CHANGED!!!\n"));
3191 supdrvIdtWrite(pPatch->pIdtEntry, &pPatch->SavedIdt);
3192 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->SavedIdt, sizeof(pPatch->SavedIdt)), ("The hijacked IDT entry has CHANGED!!!\n"));
3193
3194 /*
3195 * Put it in the free list.
3196 * (This free list stuff is to calm my paranoia.)
3197 */
3198 pPatch->pvIdt = NULL;
3199 pPatch->pIdtEntry = NULL;
3200
3201 pPatch->pNext = pDevExt->pIdtPatchesFree;
3202 pDevExt->pIdtPatchesFree = pPatch;
3203}
3204
3205
3206/**
3207 * Write to an IDT entry.
3208 *
3209 * @param pvIdtEntry Where to write.
3210 * @param pNewIDTEntry What to write.
3211 */
3212static void supdrvIdtWrite(volatile void *pvIdtEntry, const SUPDRVIDTE *pNewIDTEntry)
3213{
3214 RTUINTREG uCR0;
3215 RTUINTREG uFlags;
3216
3217 /*
3218     * On SMP machines (P4 hyperthreading included) we must perform a
3219 * 64-bit locked write when updating the IDT entry.
3220 *
3221 * The F00F bugfix for linux (and probably other OSes) causes
3222     * the IDT to point to a read-only mapping. We get around that
3223     * by temporarily turning off WP. Since we're inside a spinlock at this
3224 * point, interrupts are disabled and there isn't any way the WP bit
3225 * flipping can cause any trouble.
3226 */
3227
3228 /* Save & Clear interrupt flag; Save & clear WP. */
3229 uFlags = ASMGetFlags();
3230 ASMSetFlags(uFlags & ~(RTUINTREG)(1 << 9)); /*X86_EFL_IF*/
3231 Assert(!(ASMGetFlags() & (1 << 9)));
3232 uCR0 = ASMGetCR0();
3233 ASMSetCR0(uCR0 & ~(RTUINTREG)(1 << 16)); /*X86_CR0_WP*/
3234
3235 /* Update IDT Entry */
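    /* An IDT gate descriptor is 8 bytes on x86 and 16 bytes on AMD64, hence the
       different exchange widths below. */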
3236#ifdef __AMD64__
3237 ASMAtomicXchgU128((volatile uint128_t *)pvIdtEntry, *(uint128_t *)(uintptr_t)pNewIDTEntry);
3238#else
3239 ASMAtomicXchgU64((volatile uint64_t *)pvIdtEntry, *(uint64_t *)(uintptr_t)pNewIDTEntry);
3240#endif
3241
3242 /* Restore CR0 & Flags */
3243 ASMSetCR0(uCR0);
3244 ASMSetFlags(uFlags);
3245}
3246#endif /* !VBOX_WITHOUT_IDT_PATCHING */
3247
3248
3249/**
3250 * Opens an image. If it's the first time the image is opened, the caller must upload
3251 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
3252 *
3253 * This is the 1st step of the loading.
3254 *
3255 * @returns 0 on success.
3256 * @returns SUPDRV_ERR_* on failure.
3257 * @param pDevExt Device globals.
3258 * @param pSession Session data.
3259 * @param pIn Input.
3260 * @param pOut Output. (May overlap pIn.)
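 *
 * @remark  Typical ring-3 sequence (illustrative): SUP_IOCTL_LDR_OPEN is issued first; when
 *          the returned fNeedsLoading is set, the caller prepares the image bits for the
 *          returned pvImageBase and uploads them with SUP_IOCTL_LDR_LOAD.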
3261 */
3262static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN_IN pIn, PSUPLDROPEN_OUT pOut)
3263{
3264 PSUPDRVLDRIMAGE pImage;
3265 unsigned cb;
3266 void *pv;
3267 dprintf(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pIn->szName, pIn->cbImage));
3268
3269 /*
3270 * Check if we got an instance of the image already.
3271 */
3272 RTSemFastMutexRequest(pDevExt->mtxLdr);
3273 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3274 {
3275 if (!strcmp(pImage->szName, pIn->szName))
3276 {
3277 pImage->cUsage++;
3278 pOut->pvImageBase = pImage->pvImage;
3279 pOut->fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
3280 supdrvLdrAddUsage(pSession, pImage);
3281 RTSemFastMutexRelease(pDevExt->mtxLdr);
3282 return 0;
3283 }
3284 }
3285 /* (not found - add it!) */
3286
3287 /*
3288 * Allocate memory.
3289 */
3290 cb = pIn->cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
3291 pv = RTMemExecAlloc(cb);
3292 if (!pv)
3293 {
3294 RTSemFastMutexRelease(pDevExt->mtxLdr);
3295 return SUPDRV_ERR_NO_MEMORY;
3296 }
3297
3298 /*
3299 * Setup and link in the LDR stuff.
3300 */
3301 pImage = (PSUPDRVLDRIMAGE)pv;
3302 pImage->pvImage = ALIGNP(pImage + 1, 32);
3303 pImage->cbImage = pIn->cbImage;
3304 pImage->pfnModuleInit = NULL;
3305 pImage->pfnModuleTerm = NULL;
3306 pImage->uState = SUP_IOCTL_LDR_OPEN;
3307 pImage->cUsage = 1;
3308 strcpy(pImage->szName, pIn->szName);
3309
3310 pImage->pNext = pDevExt->pLdrImages;
3311 pDevExt->pLdrImages = pImage;
3312
3313 supdrvLdrAddUsage(pSession, pImage);
3314
3315 pOut->pvImageBase = pImage->pvImage;
3316 pOut->fNeedsLoading = 1;
3317 RTSemFastMutexRelease(pDevExt->mtxLdr);
3318 return 0;
3319}
3320
3321
3322/**
3323 * Loads the image bits.
3324 *
3325 * This is the 2nd step of the loading.
3326 *
3327 * @returns 0 on success.
3328 * @returns SUPDRV_ERR_* on failure.
3329 * @param pDevExt Device globals.
3330 * @param pSession Session data.
3331 * @param pIn Input.
3332 */
3333static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD_IN pIn)
3334{
3335 PSUPDRVLDRUSAGE pUsage;
3336 PSUPDRVLDRIMAGE pImage;
3337 int rc;
3338 dprintf(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pIn->pvImageBase, pIn->cbImage));
3339
3340 /*
3341 * Find the ldr image.
3342 */
3343 RTSemFastMutexRequest(pDevExt->mtxLdr);
3344 pUsage = pSession->pLdrUsage;
3345 while (pUsage && pUsage->pImage->pvImage != pIn->pvImageBase)
3346 pUsage = pUsage->pNext;
3347 if (!pUsage)
3348 {
3349 RTSemFastMutexRelease(pDevExt->mtxLdr);
3350 dprintf(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
3351 return SUPDRV_ERR_INVALID_HANDLE;
3352 }
3353 pImage = pUsage->pImage;
3354 if (pImage->cbImage != pIn->cbImage)
3355 {
3356 RTSemFastMutexRelease(pDevExt->mtxLdr);
3357 dprintf(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pIn->cbImage));
3358 return SUPDRV_ERR_INVALID_HANDLE;
3359 }
3360 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
3361 {
3362 unsigned uState = pImage->uState;
3363 RTSemFastMutexRelease(pDevExt->mtxLdr);
3364 if (uState != SUP_IOCTL_LDR_LOAD)
3365 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
3366 return SUPDRV_ERR_ALREADY_LOADED;
3367 }
3368 switch (pIn->eEPType)
3369 {
3370 case EP_NOTHING:
3371 break;
3372 case EP_VMMR0:
3373 if (!pIn->EP.VMMR0.pvVMMR0 || !pIn->EP.VMMR0.pvVMMR0Entry)
3374 {
3375 RTSemFastMutexRelease(pDevExt->mtxLdr);
3376 dprintf(("pvVMMR0=%p or pIn->EP.VMMR0.pvVMMR0Entry=%p is NULL!\n",
3377 pIn->EP.VMMR0.pvVMMR0, pIn->EP.VMMR0.pvVMMR0Entry));
3378 return SUPDRV_ERR_INVALID_PARAM;
3379 }
3380 if ((uintptr_t)pIn->EP.VMMR0.pvVMMR0Entry - (uintptr_t)pImage->pvImage >= pIn->cbImage)
3381 {
3382 RTSemFastMutexRelease(pDevExt->mtxLdr);
3383 dprintf(("SUP_IOCTL_LDR_LOAD: pvVMMR0Entry=%p is outside the image (%p %d bytes)\n",
3384 pIn->EP.VMMR0.pvVMMR0Entry, pImage->pvImage, pIn->cbImage));
3385 return SUPDRV_ERR_INVALID_PARAM;
3386 }
3387 break;
3388 default:
3389 RTSemFastMutexRelease(pDevExt->mtxLdr);
3390 dprintf(("Invalid eEPType=%d\n", pIn->eEPType));
3391 return SUPDRV_ERR_INVALID_PARAM;
3392 }
3393 if ( pIn->pfnModuleInit
3394 && (uintptr_t)pIn->pfnModuleInit - (uintptr_t)pImage->pvImage >= pIn->cbImage)
3395 {
3396 RTSemFastMutexRelease(pDevExt->mtxLdr);
3397 dprintf(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
3398 pIn->pfnModuleInit, pImage->pvImage, pIn->cbImage));
3399 return SUPDRV_ERR_INVALID_PARAM;
3400 }
3401 if ( pIn->pfnModuleTerm
3402 && (uintptr_t)pIn->pfnModuleTerm - (uintptr_t)pImage->pvImage >= pIn->cbImage)
3403 {
3404 RTSemFastMutexRelease(pDevExt->mtxLdr);
3405 dprintf(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
3406 pIn->pfnModuleTerm, pImage->pvImage, pIn->cbImage));
3407 return SUPDRV_ERR_INVALID_PARAM;
3408 }
3409
3410 /*
3411 * Copy the memory.
3412 */
3413 /* no need to do try/except as this is a buffered request. */
3414 memcpy(pImage->pvImage, &pIn->achImage[0], pImage->cbImage);
3415 pImage->uState = SUP_IOCTL_LDR_LOAD;
3416 pImage->pfnModuleInit = pIn->pfnModuleInit;
3417 pImage->pfnModuleTerm = pIn->pfnModuleTerm;
3418 pImage->offSymbols = pIn->offSymbols;
3419 pImage->cSymbols = pIn->cSymbols;
3420 pImage->offStrTab = pIn->offStrTab;
3421 pImage->cbStrTab = pIn->cbStrTab;
3422
3423 /*
3424 * Update any entry points.
3425 */
3426 switch (pIn->eEPType)
3427 {
3428 default:
3429 case EP_NOTHING:
3430 rc = 0;
3431 break;
3432 case EP_VMMR0:
3433 rc = supdrvLdrSetR0EP(pDevExt, pIn->EP.VMMR0.pvVMMR0, pIn->EP.VMMR0.pvVMMR0Entry);
3434 break;
3435 }
3436
3437 /*
3438 * On success call the module initialization.
3439 */
3440 dprintf(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
3441 if (!rc && pImage->pfnModuleInit)
3442 {
3443 dprintf(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
3444 rc = pImage->pfnModuleInit();
3445 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
3446 supdrvLdrUnsetR0EP(pDevExt);
3447 }
3448
3449 if (rc)
3450 pImage->uState = SUP_IOCTL_LDR_OPEN;
3451
3452 RTSemFastMutexRelease(pDevExt->mtxLdr);
3453 return rc;
3454}
3455
3456
3457/**
3458 * Frees a previously loaded (prep'ed) image.
3459 *
3460 * @returns 0 on success.
3461 * @returns SUPDRV_ERR_* on failure.
3462 * @param pDevExt Device globals.
3463 * @param pSession Session data.
3464 * @param pIn Input.
3465 */
3466static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE_IN pIn)
3467{
3468 PSUPDRVLDRUSAGE pUsagePrev;
3469 PSUPDRVLDRUSAGE pUsage;
3470 PSUPDRVLDRIMAGE pImage;
3471 dprintf(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pIn->pvImageBase));
3472
3473 /*
3474 * Find the ldr image.
3475 */
3476 RTSemFastMutexRequest(pDevExt->mtxLdr);
3477 pUsagePrev = NULL;
3478 pUsage = pSession->pLdrUsage;
3479 while (pUsage && pUsage->pImage->pvImage != pIn->pvImageBase)
3480 {
3481 pUsagePrev = pUsage;
3482 pUsage = pUsage->pNext;
3483 }
3484 if (!pUsage)
3485 {
3486 RTSemFastMutexRelease(pDevExt->mtxLdr);
3487 dprintf(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
3488 return SUPDRV_ERR_INVALID_HANDLE;
3489 }
3490
3491 /*
3492 * Check if we can remove anything.
3493 */
3494 pImage = pUsage->pImage;
3495 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
3496 {
3497 /* unlink it */
3498 if (pUsagePrev)
3499 pUsagePrev->pNext = pUsage->pNext;
3500 else
3501 pSession->pLdrUsage = pUsage->pNext;
3502 /* free it */
3503 pUsage->pImage = NULL;
3504 pUsage->pNext = NULL;
3505 RTMemFree(pUsage);
3506
3507 /*
3508         * Dereference the image.
3509 */
3510 if (pImage->cUsage <= 1)
3511 supdrvLdrFree(pDevExt, pImage);
3512 else
3513 pImage->cUsage--;
3514 }
3515 else
3516 {
3517 /*
3518 * Dereference both image and usage.
3519 */
3520 pImage->cUsage--;
3521 pUsage->cUsage--;
3522 }
3523
3524 RTSemFastMutexRelease(pDevExt->mtxLdr);
3525 return 0;
3526}
3527
3528
3529/**
3530 * Gets the address of a symbol in an open image.
3531 *
3532 * @returns 0 on success.
3533 * @returns SUPDRV_ERR_* on failure.
3534 * @param pDevExt Device globals.
3535 * @param pSession Session data.
3536 * @param pIn Input.
3537 * @param pOut Output. (May overlap pIn.)
3538 */
3539static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL_IN pIn, PSUPLDRGETSYMBOL_OUT pOut)
3540{
3541 PSUPDRVLDRIMAGE pImage;
3542 PSUPDRVLDRUSAGE pUsage;
3543 uint32_t i;
3544 PSUPLDRSYM paSyms;
3545 const char *pchStrings;
3546 const size_t cbSymbol = strlen(pIn->szSymbol) + 1;
3547 void *pvSymbol = NULL;
3548 int rc = SUPDRV_ERR_GENERAL_FAILURE; /** @todo better error code. */
3549 dprintf2(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pIn->pvImageBase, pIn->szSymbol));
3550
3551 /*
3552 * Find the ldr image.
3553 */
3554 RTSemFastMutexRequest(pDevExt->mtxLdr);
3555 pUsage = pSession->pLdrUsage;
3556 while (pUsage && pUsage->pImage->pvImage != pIn->pvImageBase)
3557 pUsage = pUsage->pNext;
3558 if (!pUsage)
3559 {
3560 RTSemFastMutexRelease(pDevExt->mtxLdr);
3561 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3562 return SUPDRV_ERR_INVALID_HANDLE;
3563 }
3564 pImage = pUsage->pImage;
3565 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3566 {
3567 unsigned uState = pImage->uState;
3568 RTSemFastMutexRelease(pDevExt->mtxLdr);
3569 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3570 return SUPDRV_ERR_ALREADY_LOADED;
3571 }
3572
3573 /*
3574 * Search the symbol string.
3575 */
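    /* The symbol table is an array of SUPLDRSYM records at offSymbols within the image;
       each record's offName is an offset into the string table at offStrTab. */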
3576 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3577 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3578 for (i = 0; i < pImage->cSymbols; i++)
3579 {
3580 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3581 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3582 && !memcmp(pchStrings + paSyms[i].offName, pIn->szSymbol, cbSymbol))
3583 {
3584 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3585 rc = 0;
3586 break;
3587 }
3588 }
3589 RTSemFastMutexRelease(pDevExt->mtxLdr);
3590 pOut->pvSymbol = pvSymbol;
3591 return rc;
3592}
3593
3594
3595/**
3596 * Updates the IDT patches to point to the specified VMM R0 entry
3597 * point (i.e. VMMR0Enter()).
3598 *
3599 * @returns 0 on success.
3600 * @returns SUPDRV_ERR_* on failure.
3601 * @param pDevExt Device globals.
3604 * @param   pvVMMR0     VMMR0 image base address.
3605 * @param   pvVMMR0Entry VMMR0Entry address.
3605 * @remark Caller must own the loader mutex.
3606 */
3607static int supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry)
3608{
3609 int rc;
3610 dprintf(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0Entry=%p\n", pvVMMR0, pvVMMR0Entry));
3611
3612
3613 /*
3614 * Check if not yet set.
3615 */
3616 rc = 0;
3617 if (!pDevExt->pvVMMR0)
3618 {
3619#ifndef VBOX_WITHOUT_IDT_PATCHING
3620 PSUPDRVPATCH pPatch;
3621#endif
3622
3623 /*
3624 * Set it and update IDT patch code.
3625 */
3626 pDevExt->pvVMMR0 = pvVMMR0;
3627 pDevExt->pfnVMMR0Entry = pvVMMR0Entry;
3628#ifndef VBOX_WITHOUT_IDT_PATCHING
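        /* On AMD64 the fixup slot holds the absolute 64-bit entry point (the patch calls
           through a RIP-relative qword); on x86 it is the rel32 displacement of a direct
           call, hence the subtraction of the address following the fixup. */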
3629 for (pPatch = pDevExt->pIdtPatches; pPatch; pPatch = pPatch->pNext)
3630 {
3631# ifdef __AMD64__
3632 ASMAtomicXchgU64((volatile uint64_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup], (uint64_t)pvVMMR0);
3633# else /* __X86__ */
3634 ASMAtomicXchgU32((volatile uint32_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
3635 (uint32_t)pvVMMR0 - (uint32_t)&pPatch->auCode[pPatch->offVMMR0EntryFixup + 4]);
3636# endif
3637 }
3638#endif /* !VBOX_WITHOUT_IDT_PATCHING */
3639 }
3640 else
3641 {
3642 /*
3643 * Return failure or success depending on whether the
3644 * values match or not.
3645 */
3646 if ( pDevExt->pvVMMR0 != pvVMMR0
3647 || (void *)pDevExt->pfnVMMR0Entry != pvVMMR0Entry)
3648 {
3649 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
3650 rc = SUPDRV_ERR_INVALID_PARAM;
3651 }
3652 }
3653 return rc;
3654}
3655
3656
3657/**
3658 * Unsets the R0 entry point installed by supdrvLdrSetR0EP.
3659 *
3660 * @param pDevExt Device globals.
3661 */
3662static void supdrvLdrUnsetR0EP(PSUPDRVDEVEXT pDevExt)
3663{
3664#ifndef VBOX_WITHOUT_IDT_PATCHING
3665 PSUPDRVPATCH pPatch;
3666#endif
3667
3668 pDevExt->pvVMMR0 = NULL;
3669 pDevExt->pfnVMMR0Entry = NULL;
3670
3671#ifndef VBOX_WITHOUT_IDT_PATCHING
3672 for (pPatch = pDevExt->pIdtPatches; pPatch; pPatch = pPatch->pNext)
3673 {
3674# ifdef __AMD64__
3675 ASMAtomicXchgU64((volatile uint64_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
3676 (uint64_t)&pPatch->auCode[pPatch->offStub]);
3677# else /* __X86__ */
3678 ASMAtomicXchgU32((volatile uint32_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
3679 (uint32_t)&pPatch->auCode[pPatch->offStub] - (uint32_t)&pPatch->auCode[pPatch->offVMMR0EntryFixup + 4]);
3680# endif
3681 }
3682#endif /* !VBOX_WITHOUT_IDT_PATCHING */
3683}
3684
3685
3686/**
3687 * Adds a usage reference in the specified session of an image.
3688 *
3689 * @param pSession Session in question.
3690 * @param pImage Image which the session is using.
3691 */
3692static void supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
3693{
3694 PSUPDRVLDRUSAGE pUsage;
3695 dprintf(("supdrvLdrAddUsage: pImage=%p\n", pImage));
3696
3697 /*
3698 * Referenced it already?
3699 */
3700 pUsage = pSession->pLdrUsage;
3701 while (pUsage)
3702 {
3703 if (pUsage->pImage == pImage)
3704 {
3705 pUsage->cUsage++;
3706 return;
3707 }
3708 pUsage = pUsage->pNext;
3709 }
3710
3711 /*
3712 * Allocate new usage record.
3713 */
3714 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
3715 Assert(pUsage);
3716 if (pUsage)
3717 {
3718 pUsage->cUsage = 1;
3719 pUsage->pImage = pImage;
3720 pUsage->pNext = pSession->pLdrUsage;
3721 pSession->pLdrUsage = pUsage;
3722 }
3723 /* ignore errors... */
3724}
3725
3726
3727/**
3728 * Frees a load image.
3729 *
3730 * @param pDevExt Pointer to device extension.
3731 * @param pImage Pointer to the image we're gonna free.
3732 * This image must exist!
3733 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
3734 */
3735static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
3736{
3737 PSUPDRVLDRIMAGE pImagePrev;
3738 dprintf(("supdrvLdrFree: pImage=%p\n", pImage));
3739
3740    /* find it - argh, should've used a doubly linked list. */
3741 Assert(pDevExt->pLdrImages);
3742 pImagePrev = NULL;
3743 if (pDevExt->pLdrImages != pImage)
3744 {
3745 pImagePrev = pDevExt->pLdrImages;
3746 while (pImagePrev->pNext != pImage)
3747 pImagePrev = pImagePrev->pNext;
3748 Assert(pImagePrev->pNext == pImage);
3749 }
3750
3751 /* unlink */
3752 if (pImagePrev)
3753 pImagePrev->pNext = pImage->pNext;
3754 else
3755 pDevExt->pLdrImages = pImage->pNext;
3756
3757 /* check if this is VMMR0.r0 and fix the Idt patches if it is. */
3758 if (pDevExt->pvVMMR0 == pImage->pvImage)
3759 supdrvLdrUnsetR0EP(pDevExt);
3760
3761 /* call termination function if fully loaded. */
3762 if ( pImage->pfnModuleTerm
3763 && pImage->uState == SUP_IOCTL_LDR_LOAD)
3764 {
3765        dprintf(("supdrvLdrFree: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
3766 pImage->pfnModuleTerm();
3767 }
3768
3769 /* free the image */
3770 pImage->cUsage = 0;
3771 pImage->pNext = 0;
3772 pImage->uState = SUP_IOCTL_LDR_FREE;
3773 RTMemExecFree(pImage);
3774}
3775
3776
3777/**
3778 * Gets the current paging mode of the CPU and stores it in pOut.
3779 */
3780static int supdrvIOCtl_GetPagingMode(PSUPGETPAGINGMODE_OUT pOut)
3781{
3782 RTUINTREG cr0 = ASMGetCR0();
3783 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3784 pOut->enmMode = SUPPAGINGMODE_INVALID;
3785 else
3786 {
3787 RTUINTREG cr4 = ASMGetCR4();
3788 uint32_t fNXEPlusLMA = 0;
3789 if (cr4 & X86_CR4_PAE)
3790 {
3791 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
3792 if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
3793 {
3794 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
3795 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
3796 fNXEPlusLMA |= BIT(0);
3797 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
3798 fNXEPlusLMA |= BIT(1);
3799 }
3800 }
3801
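        /* The switch key combines CR4.PAE/PGE with fNXEPlusLMA: bit 0 set means
           EFER.NXE is enabled, bit 1 set means long mode is active (EFER.LMA). */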
3802 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
3803 {
3804 case 0:
3805 pOut->enmMode = SUPPAGINGMODE_32_BIT;
3806 break;
3807
3808 case X86_CR4_PGE:
3809 pOut->enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
3810 break;
3811
3812 case X86_CR4_PAE:
3813 pOut->enmMode = SUPPAGINGMODE_PAE;
3814 break;
3815
3816 case X86_CR4_PAE | BIT(0):
3817 pOut->enmMode = SUPPAGINGMODE_PAE_NX;
3818 break;
3819
3820 case X86_CR4_PAE | X86_CR4_PGE:
3821 pOut->enmMode = SUPPAGINGMODE_PAE_GLOBAL;
3822 break;
3823
3824 case X86_CR4_PAE | X86_CR4_PGE | BIT(0):
3825                pOut->enmMode = SUPPAGINGMODE_PAE_GLOBAL_NX;
3826 break;
3827
3828 case BIT(1) | X86_CR4_PAE:
3829 pOut->enmMode = SUPPAGINGMODE_AMD64;
3830 break;
3831
3832 case BIT(1) | X86_CR4_PAE | BIT(0):
3833 pOut->enmMode = SUPPAGINGMODE_AMD64_NX;
3834 break;
3835
3836 case BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
3837 pOut->enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
3838 break;
3839
3840 case BIT(1) | X86_CR4_PAE | X86_CR4_PGE | BIT(0):
3841 pOut->enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
3842 break;
3843
3844 default:
3845 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
3846 pOut->enmMode = SUPPAGINGMODE_INVALID;
3847 break;
3848 }
3849 }
3850 return 0;
3851}
3852
3853
3854#if !defined(SUPDRV_OS_HAVE_LOW) && !defined(USE_NEW_OS_INTERFACE) /* Use same backend as the contiguous stuff */
3855/**
3856 * OS Specific code for allocating page aligned memory with fixed
3857 * physical backing below 4GB.
3858 *
3859 * @returns 0 on success.
3860 * @returns SUPDRV_ERR_* on failure.
3861 * @param pMem Memory reference record of the memory to be allocated.
3862 * (This is not linked in anywhere.)
3863 * @param ppvR0 Where to store the Ring-0 mapping of the allocated memory.
3864 * @param ppvR3 Where to store the Ring-3 mapping of the allocated memory.
3865 * @param paPagesOut Where to store the physical addresses.
3866 */
3867int VBOXCALL supdrvOSLowAllocOne(PSUPDRVMEMREF pMem, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PSUPPAGE paPagesOut)
3868{
3869 RTHCPHYS HCPhys;
3870 int rc = supdrvOSContAllocOne(pMem, ppvR0, ppvR3, &HCPhys);
3871 if (!rc)
3872 {
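        /* The backing is physically contiguous, so each page's address is simply
           HCPhys plus the page offset. */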
3873 unsigned iPage = pMem->cb >> PAGE_SHIFT;
3874 while (iPage-- > 0)
3875 {
3876 paPagesOut[iPage].Phys = HCPhys + (iPage << PAGE_SHIFT);
3877 paPagesOut[iPage].uReserved = 0;
3878 }
3879 }
3880 return rc;
3881}
3882
3883
3884/**
3885 * Frees low memory.
3886 *
3887 * @param pMem Memory reference record of the memory to be freed.
3888 */
3889void VBOXCALL supdrvOSLowFreeOne(PSUPDRVMEMREF pMem)
3890{
3891 supdrvOSContFreeOne(pMem);
3892}
3893#endif /* !SUPDRV_OS_HAVE_LOW && !USE_NEW_OS_INTERFACE */
3894
3895
3896#ifdef USE_NEW_OS_INTERFACE
3897/**
3898 * Creates the GIP.
3899 *
3900 * @returns negative errno.
3901 * @param pDevExt Instance data. GIP stuff may be updated.
3902 */
3903static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
3904{
3905 PSUPGLOBALINFOPAGE pGip;
3906 RTHCPHYS HCPhysGip;
3907 uint32_t u32SystemResolution;
3908 uint32_t u32Interval;
3909 int rc;
3910
3911 dprintf(("supdrvGipCreate:\n"));
3912
3913 /* assert order */
3914 Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
3915 Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
3916 Assert(!pDevExt->pGipTimer);
3917
3918 /*
3919 * Allocate a suitable page with a default kernel mapping.
3920 */
3921 rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
3922 if (RT_FAILURE(rc))
3923 {
3924 OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
3925 return rc;
3926 }
3927 pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
3928 HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);
3929
3930 /*
3931     * Try to bump up the system timer resolution.
3932 * The more interrupts the better...
3933 */
3934 if ( RT_SUCCESS(RTTimerRequestSystemGranularity( 976563 /* 1024 HZ */, &u32SystemResolution))
3935 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
3936 || RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /* 256 HZ */, &u32SystemResolution))
3937 || RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /* 250 HZ */, &u32SystemResolution))
3938 || RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /* 128 HZ */, &u32SystemResolution))
3939 || RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /* 100 HZ */, &u32SystemResolution))
3940 || RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /* 64 HZ */, &u32SystemResolution))
3941 || RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /* 32 HZ */, &u32SystemResolution))
3942 )
3943 {
3944 Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
3945 pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
3946 }
3947
3948 /*
3949     * Find a reasonable update interval (something close to 10ms would be nice)
3950     * and create a recurring timer.
3951 */
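    /* (I.e. pick the smallest multiple of the system timer granularity that is at least 10ms.) */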
3952 u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
3953 while (u32Interval < 10000000 /* 10 ms */)
3954 u32Interval += u32SystemResolution;
3955
3956 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipTimer, pDevExt);
3957 if (RT_FAILURE(rc))
3958 {
3959        OSDBGPRINT(("supdrvGipCreate: failed to create the GIP timer at %RU32 ns interval. rc=%d\n", u32Interval, rc));
3960 Assert(!pDevExt->pGipTimer);
3961 supdrvGipDestroy(pDevExt);
3962 return rc;
3963 }
3964
3965 /*
3966 * We're good.
3967 */
3968 supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);
3969 return 0;
3970}
3971
3972
3973/**
3974 * Terminates the GIP.
3975 *
3976 * @returns negative errno.
3977 * @param pDevExt Instance data. GIP stuff may be updated.
3978 */
3979static int supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
3980{
3981 int rc;
3982#ifdef DEBUG_DARWIN_GIP
3983 OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
3984 pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
3985 pDevExt->pGipTimer, pDevExt->GipMemObj));
3986#endif
3987
3988 /*
3989     * Invalidate the GIP data.
3990 */
3991 if (pDevExt->pGip)
3992 {
3993 supdrvGipTerm(pDevExt->pGip);
3994 pDevExt->pGip = 0;
3995 }
3996
3997 /*
3998 * Destroy the timer and free the GIP memory object.
3999 */
4000 if (pDevExt->pGipTimer)
4001 {
4002 rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
4003 pDevExt->pGipTimer = NULL;
4004 }
4005
4006 if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
4007 {
4008 rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
4009 pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
4010 }
4011
4012 /*
4013 * Finally, release the system timer resolution request if one succeeded.
4014 */
4015 if (pDevExt->u32SystemTimerGranularityGrant)
4016 {
4017 rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
4018 pDevExt->u32SystemTimerGranularityGrant = 0;
4019 }
4020
4021 return 0;
4022}
4023
4024
4025/**
4026 * Timer callback function.
4027 * @param pTimer The timer.
4028 * @param pvUser The device extension.
4029 */
4030static DECLCALLBACK(void) supdrvGipTimer(PRTTIMER pTimer, void *pvUser)
4031{
4032 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4033 supdrvGipUpdate(pDevExt->pGip, RTTimeSystemNanoTS());
4034}
4035#endif /* USE_NEW_OS_INTERFACE */
4036
4037
4038/**
4039 * Initializes the GIP data.
4040 *
4041 * @returns VBox status code.
4042 * @param pDevExt Pointer to the device instance data.
4043 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4044 * @param HCPhys The physical address of the GIP.
4045 * @param u64NanoTS The current nanosecond timestamp.
4046 * @param uUpdateHz The update frequency.
4047 */
4048int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
4049{
4050 unsigned i;
4051#ifdef DEBUG_DARWIN_GIP
4052 OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4053#else
4054 dprintf(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4055#endif
4056
4057 /*
4058 * Initialize the structure.
4059 */
4060 memset(pGip, 0, PAGE_SIZE);
4061 pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
4062 pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
4063 pGip->u32Mode = supdrvGipDeterminTscMode();
4064 pGip->u32UpdateHz = uUpdateHz;
4065 pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
4066 pGip->u64NanoTSLastUpdateHz = u64NanoTS;
4067
4068 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4069 {
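        /* The transaction id starts out even; an odd value indicates an update in progress. */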
4070 pGip->aCPUs[i].u32TransactionId = 2;
4071 pGip->aCPUs[i].u64NanoTS = u64NanoTS;
4072 pGip->aCPUs[i].u64TSC = ASMReadTSC();
4073
4074 /*
4075 * We don't know the following values until we've executed updates.
4076 * So, we'll just insert very high values.
4077 */
4078 pGip->aCPUs[i].u64CpuHz = _4G + 1;
4079 pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
4080 pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
4081 pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
4082 pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
4083 pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
4084 pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
4085 pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
4086 pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
4087 pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
4088 }
4089
4090 /*
4091 * Link it to the device extension.
4092 */
4093 pDevExt->pGip = pGip;
4094 pDevExt->HCPhysGip = HCPhys;
4095 pDevExt->cGipUsers = 0;
4096
4097 return 0;
4098}
4099
4100
4101/**
4102 * Determine the GIP TSC mode.
4103 *
4104 * @returns The most suitable TSC mode.
4105 */
4106static SUPGIPMODE supdrvGipDeterminTscMode(void)
4107{
4108#ifndef USE_NEW_OS_INTERFACE
4109 /*
4110 * The problem here is that AMD processors with power management features
4111 * may easily end up with different TSCs because the CPUs or even cores
4112 * on the same physical chip run at different frequencies to save power.
4113 *
4114 * It is rumoured that this will be corrected with Barcelona and it's
4115 * expected that this will be indicated by the TscInvariant bit in
4116 * cpuid(0x80000007). So, the "difficult" bit here is to correctly
4117 * identify the older CPUs which don't run the cores at different
4118 * frequencies and can be relied upon to have a somewhat uniform TSC
4119 * across the cpus.
4120 if (supdrvOSGetCPUCount() > 1)
4121 {
4122 uint32_t uEAX, uEBX, uECX, uEDX;
4123
4124        /* Permit the user to override. */
4125 if (supdrvOSGetForcedAsyncTscMode())
4126 return SUPGIPMODE_ASYNC_TSC;
4127
4128 /* Check for "AuthenticAMD" */
4129 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
4130 if (uEAX >= 1 && uEBX == 0x68747541 && uECX == 0x444d4163 && uEDX == 0x69746e65)
4131 {
4132 /* Check for APM support and that TscInvariant is cleared. */
4133 ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
4134 if (uEAX >= 0x80000007)
4135 {
4136 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
4137 if ( !(uEDX & BIT(8))/* TscInvariant */
4138 && (uEDX & 0x3e)) /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
4139 return SUPGIPMODE_ASYNC_TSC;
4140 }
4141 }
4142 }
4143#endif
4144 return SUPGIPMODE_SYNC_TSC;
4145}
4146
4147
4148/**
4149 * Invalidates the GIP data upon termination.
4150 *
4151 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4152 */
4153void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
4154{
4155 unsigned i;
4156 pGip->u32Magic = 0;
4157 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4158 {
4159 pGip->aCPUs[i].u64NanoTS = 0;
4160 pGip->aCPUs[i].u64TSC = 0;
4161 pGip->aCPUs[i].iTSCHistoryHead = 0;
4162 }
4163}
4164
4165
4166/**
4167 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
4168 * updates all the per cpu data except the transaction id.
4169 *
4170 * @param pGip The GIP.
4171 * @param pGipCpu Pointer to the per cpu data.
4172 * @param u64NanoTS The current time stamp.
4173 */
4174static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS)
4175{
4176 uint64_t u64TSC;
4177 uint64_t u64TSCDelta;
4178 uint32_t u32UpdateIntervalTSC;
4179 uint32_t u32UpdateIntervalTSCSlack;
4180 unsigned iTSCHistoryHead;
4181 uint64_t u64CpuHz;
4182
4183 /*
4184 * Update the NanoTS.
4185 */
4186 ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);
4187
4188 /*
4189 * Calc TSC delta.
4190 */
4191 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
4192 u64TSC = ASMReadTSC();
4193 u64TSCDelta = u64TSC - pGipCpu->u64TSC;
4194 ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);
4195
4196 if (u64TSCDelta >> 32)
4197 {
4198 u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
4199 pGipCpu->cErrors++;
4200 }
4201
4202 /*
4203 * TSC History.
4204 */
4205 Assert(ELEMENTS(pGipCpu->au32TSCHistory) == 8);
4206
4207 iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
4208 ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
4209 ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
4210
4211 /*
4212 * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
4213 */
4214 if (pGip->u32UpdateHz >= 1000)
4215 {
4216 uint32_t u32;
4217 u32 = pGipCpu->au32TSCHistory[0];
4218 u32 += pGipCpu->au32TSCHistory[1];
4219 u32 += pGipCpu->au32TSCHistory[2];
4220 u32 += pGipCpu->au32TSCHistory[3];
4221 u32 >>= 2;
4222 u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
4223 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
4224 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
4225 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
4226 u32UpdateIntervalTSC >>= 2;
4227 u32UpdateIntervalTSC += u32;
4228 u32UpdateIntervalTSC >>= 1;
4229
4230        /* Value chosen for a 2GHz Athlon64 running linux 2.6.10/11. */
4231 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
4232 }
4233 else if (pGip->u32UpdateHz >= 90)
4234 {
4235 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4236 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
4237 u32UpdateIntervalTSC >>= 1;
4238
4239        /* value chosen on a 2GHz thinkpad running windows */
4240 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
4241 }
4242 else
4243 {
4244 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4245
4246        /* This value hasn't been checked yet... waiting for OS/2 and 33Hz timers. :-) */
4247 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
4248 }
4249 ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
4250
4251 /*
4252 * CpuHz.
4253 */
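    /* E.g. roughly 20 000 000 TSC ticks per interval at a 100Hz update rate works out to ~2GHz. */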
4254 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
4255 ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
4256}
4257
4258
4259/**
4260 * Updates the GIP.
4261 *
4262 * @param pGip Pointer to the GIP.
4263 * @param u64NanoTS The current nanosecond timestamp.
4264 */
4265void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS)
4266{
4267 /*
4268     * Determine the relevant CPU data.
4269 */
4270 PSUPGIPCPU pGipCpu;
4271 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4272 pGipCpu = &pGip->aCPUs[0];
4273 else
4274 {
4275 unsigned iCpu = ASMGetApicId();
4276        if (RT_UNLIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
4277 return;
4278 pGipCpu = &pGip->aCPUs[iCpu];
4279 }
4280
4281 /*
4282 * Start update transaction.
4283 */
4284 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4285 {
4286        /* this can happen on win32 if we're taking too long and there are more CPUs around. shouldn't happen though. */
4287 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4288 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4289 pGipCpu->cErrors++;
4290 return;
4291 }
4292
4293 /*
4294 * Recalc the update frequency every 0x800th time.
4295 */
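    /* The transaction id advances by two per update, so the masked test below is
       zero exactly once every GIP_UPDATEHZ_RECALC_FREQ updates. */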
4296 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4297 {
4298 if (pGip->u64NanoTSLastUpdateHz)
4299 {
4300#ifdef __AMD64__ /** @todo fix 64-bit div here to work on x86 linux. */
4301 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4302 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
4303 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4304 {
4305 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4306 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4307 }
4308#endif
4309 }
4310 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4311 }
4312
4313 /*
4314 * Update the data.
4315 */
4316 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4317
4318 /*
4319 * Complete transaction.
4320 */
4321 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4322}
4323
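/**
 * Reader-side note (a minimal sketch, not part of the driver): the per-cpu
 * u32TransactionId behaves like a seqlock - it is bumped to an odd value
 * before an update and back to an even value afterwards - so a hypothetical
 * GIP consumer would copy the fields and retry while the id changed or was odd:
 *
 *      PSUPGIPCPU pGipCpu = &pGip->aCPUs[0];  (or aCPUs[ASMGetApicId()] in async mode)
 *      uint32_t   u32TransactionId;
 *      uint64_t   u64NanoTS, u64TSC;
 *      do
 *      {
 *          u32TransactionId = pGipCpu->u32TransactionId;
 *          u64NanoTS        = pGipCpu->u64NanoTS;
 *          u64TSC           = pGipCpu->u64TSC;
 *      } while (   pGipCpu->u32TransactionId != u32TransactionId
 *               || (u32TransactionId & 1));
 *
 * (A real reader would also need compiler/memory barriers around the copies.)
 */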
4324
4325/**
4326 * Updates the per cpu GIP data for the calling cpu.
4327 *
4328 * @param pGip Pointer to the GIP.
4329 * @param u64NanoTS The current nanosecond timestamp.
4330 * @param iCpu The CPU index.
4331 */
4332void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, unsigned iCpu)
4333{
4334 PSUPGIPCPU pGipCpu;
4335
4336    if (RT_LIKELY(iCpu < RT_ELEMENTS(pGip->aCPUs)))
4337 {
4338 pGipCpu = &pGip->aCPUs[iCpu];
4339
4340 /*
4341 * Start update transaction.
4342 */
4343 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4344 {
4345 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4346 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4347 pGipCpu->cErrors++;
4348 return;
4349 }
4350
4351 /*
4352 * Update the data.
4353 */
4354 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4355
4356 /*
4357 * Complete transaction.
4358 */
4359 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4360 }
4361}
4362
4363
4364#ifndef DEBUG /** @todo change #ifndef DEBUG -> #ifdef LOG_ENABLED */
4365/**
4366 * Stub function for non-debug builds.
4367 */
4368RTDECL(PRTLOGGER) RTLogDefaultInstance(void)
4369{
4370 return NULL;
4371}
4372
4373RTDECL(PRTLOGGER) RTLogRelDefaultInstance(void)
4374{
4375 return NULL;
4376}
4377
4378/**
4379 * Stub function for non-debug builds.
4380 */
4381RTDECL(int) RTLogSetDefaultInstanceThread(PRTLOGGER pLogger, uintptr_t uKey)
4382{
4383 return 0;
4384}
4385
4386/**
4387 * Stub function for non-debug builds.
4388 */
4389RTDECL(void) RTLogLogger(PRTLOGGER pLogger, void *pvCallerRet, const char *pszFormat, ...)
4390{
4391}
4392
4393/**
4394 * Stub function for non-debug builds.
4395 */
4396RTDECL(void) RTLogLoggerEx(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, ...)
4397{
4398}
4399
4400/**
4401 * Stub function for non-debug builds.
4402 */
4403RTDECL(void) RTLogLoggerExV(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args)
4404{
4405}
4406#endif /* !DEBUG */
4407