VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDRVShared.c@389

Last change on this file since 389 was 387, checked in by vboxsync, 18 years ago

Use RTR0ProcHandleSelf / RTProcSelf. Implemented clientDied() cleanup on darwin.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 136.4 KB
1/** @file
2 *
3 * VBox host drivers - Ring-0 support drivers - Shared code:
4 * Driver code for all host platforms
5 */
6
7/*
8 * Copyright (C) 2006 InnoTek Systemberatung GmbH
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License as published by the Free Software Foundation,
14 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
15 * distribution. VirtualBox OSE is distributed in the hope that it will
16 * be useful, but WITHOUT ANY WARRANTY of any kind.
17 *
18 * If you received this file as part of a commercial VirtualBox
19 * distribution, then only the terms of your commercial VirtualBox
20 * license agreement apply instead of the previous paragraph.
21 */
22
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#include "SUPDRV.h"
28#ifndef PAGE_SHIFT
29# include <iprt/param.h>
30#endif
31#include <iprt/alloc.h>
32#include <iprt/semaphore.h>
33#include <iprt/spinlock.h>
34#include <iprt/thread.h>
35#include <iprt/log.h>
36#ifdef VBOX_WITHOUT_IDT_PATCHING
37# include <VBox/vmm.h>
38#endif
39
40
41/*******************************************************************************
42* Defined Constants And Macros *
43*******************************************************************************/
44/* from x86.h - clashes with Linux, thus this duplication */
45#undef X86_CR0_PG
46#define X86_CR0_PG BIT(31)
47#undef X86_CR0_PE
48#define X86_CR0_PE BIT(0)
49#undef X86_CPUID_AMD_FEATURE_EDX_NX
50#define X86_CPUID_AMD_FEATURE_EDX_NX BIT(20)
51#undef MSR_K6_EFER
52#define MSR_K6_EFER 0xc0000080
53#undef MSR_K6_EFER_NXE
54#define MSR_K6_EFER_NXE BIT(11)
55#undef MSR_K6_EFER_LMA
56#define MSR_K6_EFER_LMA BIT(10)
57#undef X86_CR4_PGE
58#define X86_CR4_PGE BIT(7)
59#undef X86_CR4_PAE
60#define X86_CR4_PAE BIT(5)
61#undef X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
62#define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE BIT(29)
63
64
65/** The frequency by which we recalculate the u32UpdateHz and
66 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
67#define GIP_UPDATEHZ_RECALC_FREQ 0x800
68
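/* A compiled-out sketch (not part of the driver) of why the constant above
 * must be a power of 2: a power-of-two period lets periodic code test for a
 * recalculation tick with a cheap mask instead of a division. Whether the
 * actual supdrvGipTimer() code uses exactly this form is not shown in this
 * excerpt; the tick counter name below is purely illustrative. */
#if 0
static void supdrvExampleGipTick(uint64_t cTicks)
{
    /* True once every GIP_UPDATEHZ_RECALC_FREQ (0x800 == 2048) ticks. */
    if (!(cTicks & (GIP_UPDATEHZ_RECALC_FREQ - 1)))
    {
        /* ...recalculate the u32UpdateHz and u32UpdateIntervalNS GIP members here... */
    }
}
#endif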
69
70/*******************************************************************************
71* Global Variables *
72*******************************************************************************/
73/**
74 * Array of the R0 SUP API.
75 */
76static SUPFUNC g_aFunctions[] =
77{
78 /* name function */
79 { "SUPR0ObjRegister", (void *)SUPR0ObjRegister },
80 { "SUPR0ObjAddRef", (void *)SUPR0ObjAddRef },
81 { "SUPR0ObjRelease", (void *)SUPR0ObjRelease },
82 { "SUPR0ObjVerifyAccess", (void *)SUPR0ObjVerifyAccess },
83 { "SUPR0LockMem", (void *)SUPR0LockMem },
84 { "SUPR0UnlockMem", (void *)SUPR0UnlockMem },
85 { "SUPR0ContAlloc", (void *)SUPR0ContAlloc },
86 { "SUPR0ContFree", (void *)SUPR0ContFree },
87 { "SUPR0MemAlloc", (void *)SUPR0MemAlloc },
88 { "SUPR0MemGetPhys", (void *)SUPR0MemGetPhys },
89 { "SUPR0MemFree", (void *)SUPR0MemFree },
90 { "SUPR0Printf", (void *)SUPR0Printf },
91 { "RTMemAlloc", (void *)RTMemAlloc },
92 { "RTMemAllocZ", (void *)RTMemAllocZ },
93 { "RTMemFree", (void *)RTMemFree },
94/* These don't work yet on Linux - use fast mutexes!
95 { "RTSemMutexCreate", (void *)RTSemMutexCreate },
96 { "RTSemMutexRequest", (void *)RTSemMutexRequest },
97 { "RTSemMutexRelease", (void *)RTSemMutexRelease },
98 { "RTSemMutexDestroy", (void *)RTSemMutexDestroy },
99*/
100 { "RTSemFastMutexCreate", (void *)RTSemFastMutexCreate },
101 { "RTSemFastMutexDestroy", (void *)RTSemFastMutexDestroy },
102 { "RTSemFastMutexRequest", (void *)RTSemFastMutexRequest },
103 { "RTSemFastMutexRelease", (void *)RTSemFastMutexRelease },
104 { "RTSemEventCreate", (void *)RTSemEventCreate },
105 { "RTSemEventSignal", (void *)RTSemEventSignal },
106 { "RTSemEventWait", (void *)RTSemEventWait },
107 { "RTSemEventDestroy", (void *)RTSemEventDestroy },
108 { "RTSpinlockCreate", (void *)RTSpinlockCreate },
109 { "RTSpinlockDestroy", (void *)RTSpinlockDestroy },
110 { "RTSpinlockAcquire", (void *)RTSpinlockAcquire },
111 { "RTSpinlockRelease", (void *)RTSpinlockRelease },
112 { "RTSpinlockAcquireNoInts", (void *)RTSpinlockAcquireNoInts },
113 { "RTSpinlockReleaseNoInts", (void *)RTSpinlockReleaseNoInts },
114 { "RTThreadSelf", (void *)RTThreadSelf },
115 { "RTThreadSleep", (void *)RTThreadSleep },
116 { "RTThreadYield", (void *)RTThreadYield },
117 { "RTLogDefaultInstance", (void *)RTLogDefaultInstance },
118 { "RTLogRelDefaultInstance", (void *)RTLogRelDefaultInstance },
119 { "RTLogSetDefaultInstanceThread", (void *)RTLogSetDefaultInstanceThread },
120 { "RTLogLogger", (void *)RTLogLogger },
121 { "RTLogLoggerEx", (void *)RTLogLoggerEx },
122 { "RTLogLoggerExV", (void *)RTLogLoggerExV },
123 { "AssertMsg1", (void *)AssertMsg1 },
124 { "AssertMsg2", (void *)AssertMsg2 },
125};
126
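/* A compiled-out sketch of how a symbol name could be resolved against the
 * g_aFunctions table above using a plain linear scan. The SUPFUNC member
 * names (szName, pfn) are assumptions here, and the real resolution done by
 * the loader code may differ; supdrvExampleLookup is illustration only. */
#if 0
static void *supdrvExampleLookup(const char *pszName)
{
    unsigned i;
    for (i = 0; i < sizeof(g_aFunctions) / sizeof(g_aFunctions[0]); i++)
        if (!strcmp(g_aFunctions[i].szName, pszName))
            return g_aFunctions[i].pfn;
    return NULL;    /* not exported to ring-0 modules */
}
#endif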
127
128/*******************************************************************************
129* Internal Functions *
130*******************************************************************************/
131__BEGIN_DECLS
132static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
133static int supdrvMemRelease(PSUPDRVSESSION pSession, void *pv, SUPDRVMEMREFTYPE eType);
134#ifndef VBOX_WITHOUT_IDT_PATCHING
135static int supdrvIOCtl_IdtInstall(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPIDTINSTALL_IN pIn, PSUPIDTINSTALL_OUT pOut);
136static PSUPDRVPATCH supdrvIdtPatchOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch);
137static int supdrvIOCtl_IdtRemoveAll(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession);
138static void supdrvIdtRemoveOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch);
139static void supdrvIdtWrite(volatile void *pvIdtEntry, const SUPDRVIDTE *pNewIDTEntry);
140#endif /* !VBOX_WITHOUT_IDT_PATCHING */
141static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN_IN pIn, PSUPLDROPEN_OUT pOut);
142static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD_IN pIn);
143static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE_IN pIn);
144static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL_IN pIn, PSUPLDRGETSYMBOL_OUT pOut);
145static int supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry);
146static void supdrvLdrUnsetR0EP(PSUPDRVDEVEXT pDevExt);
147static void supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
148static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
149static int supdrvIOCtl_GetPagingMode(PSUPGETPAGINGMODE_OUT pOut);
150#ifdef USE_NEW_OS_INTERFACE
151static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
152static int supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
153static DECLCALLBACK(void) supdrvGipTimer(PRTTIMER pTimer, void *pvUser);
154#endif
155
156__END_DECLS
157
158
159/**
160 * Initializes the device extension structure.
161 *
162 * @returns 0 on success.
163 * @returns SUPDRV_ERR_ on failure.
164 * @param pDevExt The device extension to initialize.
165 */
166int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
167{
168 /*
169 * Initialize it.
170 */
171 int rc;
172 memset(pDevExt, 0, sizeof(*pDevExt));
173 rc = RTSpinlockCreate(&pDevExt->Spinlock);
174 if (!rc)
175 {
176 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
177 if (!rc)
178 {
179 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
180 if (!rc)
181 {
182#ifdef USE_NEW_OS_INTERFACE
183 rc = supdrvGipCreate(pDevExt);
184 if (RT_SUCCESS(rc))
185 {
186 pDevExt->u32Cookie = BIRD;
187 return 0;
188 }
189#else
190 pDevExt->u32Cookie = BIRD;
191 return 0;
192#endif
193 }
194 RTSemFastMutexDestroy(pDevExt->mtxLdr);
195 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
196 }
197 RTSpinlockDestroy(pDevExt->Spinlock);
198 pDevExt->Spinlock = NIL_RTSPINLOCK;
199 }
200 return rc;
201}
202
203/**
204 * Delete the device extension (e.g. cleanup members).
205 *
206 * @returns 0.
207 * @param pDevExt The device extension to delete.
208 */
209int VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
210{
211#ifndef VBOX_WITHOUT_IDT_PATCHING
212 PSUPDRVPATCH pPatch;
213#endif
214 PSUPDRVOBJ pObj;
215 PSUPDRVUSAGE pUsage;
216
217 /*
218 * Kill mutexes and spinlocks.
219 */
220 RTSemFastMutexDestroy(pDevExt->mtxGip);
221 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
222 RTSemFastMutexDestroy(pDevExt->mtxLdr);
223 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
224 RTSpinlockDestroy(pDevExt->Spinlock);
225 pDevExt->Spinlock = NIL_RTSPINLOCK;
226
227 /*
228 * Free lists.
229 */
230
231#ifndef VBOX_WITHOUT_IDT_PATCHING
232 /* patches */
233 /** @todo make sure we don't uninstall patches which have been patched by someone else. */
234 pPatch = pDevExt->pIdtPatchesFree;
235 pDevExt->pIdtPatchesFree = NULL;
236 while (pPatch)
237 {
238 void *pvFree = pPatch;
239 pPatch = pPatch->pNext;
240 RTMemExecFree(pvFree);
241 }
242#endif /* !VBOX_WITHOUT_IDT_PATCHING */
243
244 /* objects. */
245 pObj = pDevExt->pObjs;
246#if !defined(DEBUG_bird) || !defined(__LINUX__) /* breaks unloading, temporary, remove me! */
247 Assert(!pObj); /* (can trigger on forced unloads) */
248#endif
249 pDevExt->pObjs = NULL;
250 while (pObj)
251 {
252 void *pvFree = pObj;
253 pObj = pObj->pNext;
254 RTMemFree(pvFree);
255 }
256
257 /* usage records. */
258 pUsage = pDevExt->pUsageFree;
259 pDevExt->pUsageFree = NULL;
260 while (pUsage)
261 {
262 void *pvFree = pUsage;
263 pUsage = pUsage->pNext;
264 RTMemFree(pvFree);
265 }
266
267#ifdef USE_NEW_OS_INTERFACE
268 /* kill the GIP */
269 supdrvGipDestroy(pDevExt);
270#endif
271
272 return 0;
273}
274
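/* A compiled-out sketch of how the OS specific glue is expected to pair the
 * two routines above: allocate a SUPDRVDEVEXT, initialize it with
 * supdrvInitDevExt() at load time and tear it down with supdrvDeleteDevExt()
 * at unload time. The surrounding function names and the static instance are
 * illustrative assumptions, not part of this driver. */
#if 0
static SUPDRVDEVEXT g_ExampleDevExt;

static int supdrvExampleModuleInit(void)
{
    int rc = supdrvInitDevExt(&g_ExampleDevExt);
    if (rc)
        return rc;                  /* 0 on success, SUPDRV_ERR_* on failure. */
    /* ...register the OS specific device node here... */
    return 0;
}

static void supdrvExampleModuleTerm(void)
{
    /* ...deregister the device node and close any remaining sessions first... */
    supdrvDeleteDevExt(&g_ExampleDevExt);
}
#endif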
275
276/**
277 * Create session.
278 *
279 * @returns 0 on success.
280 * @returns SUPDRV_ERR_ on failure.
281 * @param pDevExt Device extension.
282 * @param ppSession Where to store the pointer to the session data.
283 */
284int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION *ppSession)
285{
286 /*
287 * Allocate memory for the session data.
288 */
289 int rc = SUPDRV_ERR_NO_MEMORY;
290 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
291 if (pSession)
292 {
293 /* Initialize session data. */
294 rc = RTSpinlockCreate(&pSession->Spinlock);
295 if (!rc)
296 {
297 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
298 pSession->pDevExt = pDevExt;
299 pSession->u32Cookie = BIRD_INV;
300 //pSession->pLdrUsage = NULL;
301 //pSession->pPatchUsage = NULL;
302 //pSession->pUsage = NULL;
303 //pSession->pGip = NULL;
304 //pSession->fGipReferenced = false;
305 //pSession->Bundle.cUsed = 0
306
307 dprintf(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
308 return 0;
309 }
310
311 RTMemFree(pSession);
312 *ppSession = NULL;
313 }
314
315 dprintf(("Failed to create spinlock, rc=%d!\n", rc));
316 return rc;
317}
318
319
320/**
321 * Shared code for cleaning up a session.
322 *
323 * @param pDevExt Device extension.
324 * @param pSession Session data.
325 * This data will be freed by this routine.
326 */
327void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
328{
329 /*
330 * Cleanup the session first.
331 */
332 supdrvCleanupSession(pDevExt, pSession);
333
334 /*
335 * Free the rest of the session stuff.
336 */
337 RTSpinlockDestroy(pSession->Spinlock);
338 pSession->Spinlock = NIL_RTSPINLOCK;
339 pSession->pDevExt = NULL;
340 RTMemFree(pSession);
341 dprintf2(("supdrvCloseSession: returns\n"));
342}
343
344
345/**
346 * Shared code for cleaning up a session (but not quite freeing it).
347 *
348 * This is primarily intended for Mac OS X where we have to clean up the memory
349 * stuff before the file handle is closed.
350 *
351 * @param pDevExt Device extension.
352 * @param pSession Session data.
353 * This data will be freed by this routine.
354 */
355void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
356{
357 PSUPDRVBUNDLE pBundle;
358 dprintf(("supdrvCleanupSession: pSession=%p\n", pSession));
359
360 /*
361 * Remove logger instances related to this session.
362 * (This assumes the dprintf and dprintf2 macros don't use the normal logging.)
363 */
364 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
365
366#ifndef VBOX_WITHOUT_IDT_PATCHING
367 /*
368 * Uninstall any IDT patches installed for this session.
369 */
370 supdrvIOCtl_IdtRemoveAll(pDevExt, pSession);
371#endif
372
373 /*
374 * Release object references made in this session.
375 * In theory there should be no one racing us in this session.
376 */
377 dprintf2(("release objects - start\n"));
378 if (pSession->pUsage)
379 {
380 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
381 PSUPDRVUSAGE pUsage;
382 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
383
384 while ((pUsage = pSession->pUsage) != NULL)
385 {
386 PSUPDRVOBJ pObj = pUsage->pObj;
387 pSession->pUsage = pUsage->pNext;
388
389 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
390 if (pUsage->cUsage < pObj->cUsage)
391 {
392 pObj->cUsage -= pUsage->cUsage;
393 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
394 }
395 else
396 {
397 /* Destroy the object and free the record. */
398 if (pDevExt->pObjs == pObj)
399 pDevExt->pObjs = pObj->pNext;
400 else
401 {
402 PSUPDRVOBJ pObjPrev;
403 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
404 if (pObjPrev->pNext == pObj)
405 {
406 pObjPrev->pNext = pObj->pNext;
407 break;
408 }
409 Assert(pObjPrev);
410 }
411 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
412
413 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
414 RTMemFree(pObj);
415 }
416
417 /* free it and continue. */
418 RTMemFree(pUsage);
419
420 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
421 }
422
423 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
424 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
425 }
426 dprintf2(("release objects - done\n"));
427
428 /*
429 * Release memory allocated in the session.
430 *
431 * We do not serialize this as we assume that the application will
432 * not allocate memory while closing the file handle object.
433 */
434 dprintf2(("freeing memory:\n"));
435 pBundle = &pSession->Bundle;
436 while (pBundle)
437 {
438 PSUPDRVBUNDLE pToFree;
439 unsigned i;
440
441 /*
442 * Check and unlock all entries in the bundle.
443 */
444 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
445 {
446#ifdef USE_NEW_OS_INTERFACE
447 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
448 {
449 int rc;
450 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
451 {
452 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
453 AssertRC(rc); /** @todo figure out how to handle this. */
454 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
455 }
456 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, false);
457 AssertRC(rc); /** @todo figure out how to handle this. */
458 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
459 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
460 }
461
462#else /* !USE_NEW_OS_INTERFACE */
463 if ( pBundle->aMem[i].pvR0
464 || pBundle->aMem[i].pvR3)
465 {
466 dprintf2(("eType=%d pvR0=%p pvR3=%p cb=%d\n", pBundle->aMem[i].eType,
467 pBundle->aMem[i].pvR0, pBundle->aMem[i].pvR3, pBundle->aMem[i].cb));
468 switch (pBundle->aMem[i].eType)
469 {
470 case MEMREF_TYPE_LOCKED:
471 supdrvOSUnlockMemOne(&pBundle->aMem[i]);
472 break;
473 case MEMREF_TYPE_CONT:
474 supdrvOSContFreeOne(&pBundle->aMem[i]);
475 break;
476 case MEMREF_TYPE_LOW:
477 supdrvOSLowFreeOne(&pBundle->aMem[i]);
478 break;
479 case MEMREF_TYPE_MEM:
480 supdrvOSMemFreeOne(&pBundle->aMem[i]);
481 break;
482 default:
483 break;
484 }
485 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
486 }
487#endif /* !USE_NEW_OS_INTERFACE */
488 }
489
490 /*
491 * Advance and free previous bundle.
492 */
493 pToFree = pBundle;
494 pBundle = pBundle->pNext;
495
496 pToFree->pNext = NULL;
497 pToFree->cUsed = 0;
498 if (pToFree != &pSession->Bundle)
499 RTMemFree(pToFree);
500 }
501 dprintf2(("freeing memory - done\n"));
502
503 /*
504 * Loaded images need to be dereferenced and possibly freed up.
505 */
506 RTSemFastMutexRequest(pDevExt->mtxLdr);
507 dprintf2(("freeing images:\n"));
508 if (pSession->pLdrUsage)
509 {
510 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
511 pSession->pLdrUsage = NULL;
512 while (pUsage)
513 {
514 void *pvFree = pUsage;
515 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
516 if (pImage->cUsage > pUsage->cUsage)
517 pImage->cUsage -= pUsage->cUsage;
518 else
519 supdrvLdrFree(pDevExt, pImage);
520 pUsage->pImage = NULL;
521 pUsage = pUsage->pNext;
522 RTMemFree(pvFree);
523 }
524 }
525 RTSemFastMutexRelease(pDevExt->mtxLdr);
526 dprintf2(("freeing images - done\n"));
527
528 /*
529 * Unmap the GIP.
530 */
531 dprintf2(("umapping GIP:\n"));
532#ifdef USE_NEW_OS_INTERFACE
533 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
534#else
535 if (pSession->pGip)
536#endif
537 {
538 SUPR0GipUnmap(pSession);
539#ifndef USE_NEW_OS_INTERFACE
540 pSession->pGip = NULL;
541#endif
542 pSession->fGipReferenced = 0;
543 }
544 dprintf2(("umapping GIP - done\n"));
545}
546
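/* A compiled-out sketch of the session lifecycle driven by the OS specific
 * glue around the three routines above: a session is created when the device
 * is opened and normally torn down in one go with supdrvCloseSession(); on
 * Darwin, where clientDied() fires before the file handle goes away,
 * supdrvCleanupSession() is called first and the final close follows later.
 * The glue function names are illustrative assumptions. */
#if 0
static int supdrvExampleDeviceOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION *ppSession)
{
    return supdrvCreateSession(pDevExt, ppSession);     /* 0 on success */
}

static void supdrvExampleDeviceClose(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    supdrvCloseSession(pDevExt, pSession);  /* cleans up and frees the session */
}
#endif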
547
548#ifdef VBOX_WITHOUT_IDT_PATCHING
549/**
550 * Fast path I/O Control worker.
551 *
552 * @returns 0 on success.
553 * @returns One of the SUPDRV_ERR_* on failure.
554 * @param uIOCtl Function number.
555 * @param pDevExt Device extension.
556 * @param pSession Session data.
557 */
558int VBOXCALL supdrvIOCtlFast(unsigned uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
559{
560 if ( !pSession->pVM
561 || !pDevExt->pfnVMMR0Entry)
562 return VERR_INTERNAL_ERROR;
563 switch (uIOCtl)
564 {
565 case SUP_IOCTL_FAST_DO_RAW_RUN:
566 return pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_RUN_GC, NULL);
567 case SUP_IOCTL_FAST_DO_HWACC_RUN:
568 return pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_HWACC_RUN_GUEST, NULL);
569 default:
570 return VERR_INTERNAL_ERROR;
571 }
572}
573#endif /* VBOX_WITHOUT_IDT_PATCHING */
574
575
576/**
577 * I/O Control worker.
578 *
579 * @returns 0 on success.
580 * @returns One of the SUPDRV_ERR_* on failure.
581 * @param uIOCtl Function number.
582 * @param pDevExt Device extension.
583 * @param pSession Session data.
584 * @param pvIn Input data.
585 * @param cbIn Size of input data.
586 * @param pvOut Output data.
587 * IMPORTANT! This buffer may be shared with the input
588 * data, so do not write to it before you are done
589 * reading the input data!
590 * @param cbOut Size of output data.
591 * @param pcbReturned Size of the returned data.
592 */
593int VBOXCALL supdrvIOCtl(unsigned int uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession,
594 void *pvIn, unsigned cbIn, void *pvOut, unsigned cbOut, unsigned *pcbReturned)
595{
596 *pcbReturned = 0;
597 switch (uIOCtl)
598 {
599 case SUP_IOCTL_COOKIE:
600 {
601 PSUPCOOKIE_IN pIn = (PSUPCOOKIE_IN)pvIn;
602 PSUPCOOKIE_OUT pOut = (PSUPCOOKIE_OUT)pvOut;
603
604 /*
605 * Validate.
606 */
607 if ( cbIn != sizeof(*pIn)
608 || cbOut != sizeof(*pOut))
609 {
610 dprintf(("SUP_IOCTL_COOKIE: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
611 cbIn, sizeof(*pIn), cbOut, sizeof(*pOut)));
612 return SUPDRV_ERR_INVALID_PARAM;
613 }
614 if (strncmp(pIn->szMagic, SUPCOOKIE_MAGIC, sizeof(pIn->szMagic)))
615 {
616 dprintf(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pIn->szMagic));
617 return SUPDRV_ERR_INVALID_MAGIC;
618 }
619 if (pIn->u32Version != SUPDRVIOC_VERSION)
620 {
621 dprintf(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Current: %#x\n", pIn->u32Version, SUPDRVIOC_VERSION));
622 return SUPDRV_ERR_VERSION_MISMATCH;
623 }
624
625 /*
626 * Fill in return data and be gone.
627 */
628 /** @todo secure cookie negotiation? */
629 pOut->u32Cookie = pDevExt->u32Cookie;
630 pOut->u32SessionCookie = pSession->u32Cookie;
631 pOut->u32Version = SUPDRVIOC_VERSION;
632 pOut->pSession = pSession;
633 pOut->cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
634 *pcbReturned = sizeof(*pOut);
635 return 0;
636 }
637
638
639 case SUP_IOCTL_QUERY_FUNCS:
640 {
641 unsigned cFunctions;
642 PSUPQUERYFUNCS_IN pIn = (PSUPQUERYFUNCS_IN)pvIn;
643 PSUPQUERYFUNCS_OUT pOut = (PSUPQUERYFUNCS_OUT)pvOut;
644
645 /*
646 * Validate.
647 */
648 if ( cbIn != sizeof(*pIn)
649 || cbOut < sizeof(*pOut))
650 {
651 dprintf(("SUP_IOCTL_QUERY_FUNCS: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
652 cbIn, sizeof(*pIn), cbOut, sizeof(*pOut)));
653 return SUPDRV_ERR_INVALID_PARAM;
654 }
655 if ( pIn->u32Cookie != pDevExt->u32Cookie
656 || pIn->u32SessionCookie != pSession->u32Cookie )
657 {
658 dprintf(("SUP_IOCTL_QUERY_FUNCS: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
659 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
660 return SUPDRV_ERR_INVALID_MAGIC;
661 }
662
663 /*
664 * Copy the functions.
665 */
666 cFunctions = (cbOut - RT_OFFSETOF(SUPQUERYFUNCS_OUT, aFunctions)) / sizeof(pOut->aFunctions[0]);
667 cFunctions = RT_MIN(cFunctions, ELEMENTS(g_aFunctions));
668 AssertMsg(cFunctions == ELEMENTS(g_aFunctions),
669 ("Why aren't R3 querying all the functions!?! cFunctions=%d while there are %d available\n",
670 cFunctions, ELEMENTS(g_aFunctions)));
671 pOut->cFunctions = cFunctions;
672 memcpy(&pOut->aFunctions[0], g_aFunctions, sizeof(pOut->aFunctions[0]) * cFunctions);
673 *pcbReturned = RT_OFFSETOF(SUPQUERYFUNCS_OUT, aFunctions[cFunctions]);
674 return 0;
675 }
676
677
678 case SUP_IOCTL_IDT_INSTALL:
679 {
680 PSUPIDTINSTALL_IN pIn = (PSUPIDTINSTALL_IN)pvIn;
681 PSUPIDTINSTALL_OUT pOut = (PSUPIDTINSTALL_OUT)pvOut;
682
683 /*
684 * Validate.
685 */
686 if ( cbIn != sizeof(*pIn)
687 || cbOut != sizeof(*pOut))
688 {
689 dprintf(("SUP_IOCTL_INSTALL: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
690 cbIn, sizeof(*pIn), cbOut, sizeof(*pOut)));
691 return SUPDRV_ERR_INVALID_PARAM;
692 }
693 if ( pIn->u32Cookie != pDevExt->u32Cookie
694 || pIn->u32SessionCookie != pSession->u32Cookie )
695 {
696 dprintf(("SUP_IOCTL_INSTALL: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
697 pIn->u32Cookie, pDevExt->u32Cookie,
698 pIn->u32SessionCookie, pSession->u32Cookie));
699 return SUPDRV_ERR_INVALID_MAGIC;
700 }
701
702 *pcbReturned = sizeof(*pOut);
703#ifndef VBOX_WITHOUT_IDT_PATCHING
704 return supdrvIOCtl_IdtInstall(pDevExt, pSession, pIn, pOut);
705#else
706 pOut->u8Idt = 3;
707 return 0;
708#endif
709 }
710
711
712 case SUP_IOCTL_IDT_REMOVE:
713 {
714 PSUPIDTREMOVE_IN pIn = (PSUPIDTREMOVE_IN)pvIn;
715
716 /*
717 * Validate.
718 */
719 if ( cbIn != sizeof(*pIn)
720 || cbOut != 0)
721 {
722 dprintf(("SUP_IOCTL_REMOVE: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
723 cbIn, sizeof(*pIn), cbOut, 0));
724 return SUPDRV_ERR_INVALID_PARAM;
725 }
726 if ( pIn->u32Cookie != pDevExt->u32Cookie
727 || pIn->u32SessionCookie != pSession->u32Cookie )
728 {
729 dprintf(("SUP_IOCTL_REMOVE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
730 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
731 return SUPDRV_ERR_INVALID_MAGIC;
732 }
733
734#ifndef VBOX_WITHOUT_IDT_PATCHING
735 return supdrvIOCtl_IdtRemoveAll(pDevExt, pSession);
736#else
737 return 0;
738#endif
739 }
740
741
742 case SUP_IOCTL_PINPAGES:
743 {
744 int rc;
745 PSUPPINPAGES_IN pIn = (PSUPPINPAGES_IN)pvIn;
746 PSUPPINPAGES_OUT pOut = (PSUPPINPAGES_OUT)pvOut;
747
748 /*
749 * Validate.
750 */
751 if ( cbIn != sizeof(*pIn)
752 || cbOut < sizeof(*pOut))
753 {
754 dprintf(("SUP_IOCTL_PINPAGES: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
755 cbIn, sizeof(*pIn), cbOut, sizeof(*pOut)));
756 return SUPDRV_ERR_INVALID_PARAM;
757 }
758 if ( pIn->u32Cookie != pDevExt->u32Cookie
759 || pIn->u32SessionCookie != pSession->u32Cookie )
760 {
761 dprintf(("SUP_IOCTL_PINPAGES: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
762 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
763 return SUPDRV_ERR_INVALID_MAGIC;
764 }
765 if (pIn->cb <= 0 || !pIn->pv)
766 {
767 dprintf(("SUP_IOCTL_PINPAGES: Illegal request %p %d\n", pIn->pv, pIn->cb));
768 return SUPDRV_ERR_INVALID_PARAM;
769 }
770 if ((unsigned)RT_OFFSETOF(SUPPINPAGES_OUT, aPages[pIn->cb >> PAGE_SHIFT]) > cbOut)
771 {
772 dprintf(("SUP_IOCTL_PINPAGES: Output buffer is too small! %d required %d passed in.\n",
773 RT_OFFSETOF(SUPPINPAGES_OUT, aPages[pIn->cb >> PAGE_SHIFT]), cbOut));
774 return SUPDRV_ERR_INVALID_PARAM;
775 }
776
777 /*
778 * Execute.
779 */
780 *pcbReturned = RT_OFFSETOF(SUPPINPAGES_OUT, aPages[pIn->cb >> PAGE_SHIFT]);
781 rc = SUPR0LockMem(pSession, pIn->pv, pIn->cb, &pOut->aPages[0]);
782 if (rc)
783 *pcbReturned = 0;
784 return rc;
785 }
786
787
788 case SUP_IOCTL_UNPINPAGES:
789 {
790 PSUPUNPINPAGES_IN pIn = (PSUPUNPINPAGES_IN)pvIn;
791
792 /*
793 * Validate.
794 */
795 if ( cbIn != sizeof(*pIn)
796 || cbOut != 0)
797 {
798 dprintf(("SUP_IOCTL_UNPINPAGES: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
799 cbIn, sizeof(*pIn), cbOut, 0));
800 return SUPDRV_ERR_INVALID_PARAM;
801 }
802 if ( pIn->u32Cookie != pDevExt->u32Cookie
803 || pIn->u32SessionCookie != pSession->u32Cookie)
804 {
805 dprintf(("SUP_IOCTL_UNPINPAGES: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
806 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
807 return SUPDRV_ERR_INVALID_MAGIC;
808 }
809
810 /*
811 * Execute.
812 */
813 return SUPR0UnlockMem(pSession, pIn->pv);
814 }
815
816 case SUP_IOCTL_CONT_ALLOC:
817 {
818 int rc;
819 PSUPCONTALLOC_IN pIn = (PSUPCONTALLOC_IN)pvIn;
820 PSUPCONTALLOC_OUT pOut = (PSUPCONTALLOC_OUT)pvOut;
821
822 /*
823 * Validate.
824 */
825 if ( cbIn != sizeof(*pIn)
826 || cbOut < sizeof(*pOut))
827 {
828 dprintf(("SUP_IOCTL_CONT_ALLOC: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
829 cbIn, sizeof(*pIn), cbOut, sizeof(*pOut)));
830 return SUPDRV_ERR_INVALID_PARAM;
831 }
832 if ( pIn->u32Cookie != pDevExt->u32Cookie
833 || pIn->u32SessionCookie != pSession->u32Cookie )
834 {
835 dprintf(("SUP_IOCTL_CONT_ALLOC: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
836 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
837 return SUPDRV_ERR_INVALID_MAGIC;
838 }
839
840 /*
841 * Execute.
842 */
843 rc = SUPR0ContAlloc(pSession, pIn->cb, &pOut->pvR0, &pOut->pvR3, &pOut->HCPhys);
844 if (!rc)
845 *pcbReturned = sizeof(*pOut);
846 return rc;
847 }
848
849
850 case SUP_IOCTL_CONT_FREE:
851 {
852 PSUPCONTFREE_IN pIn = (PSUPCONTFREE_IN)pvIn;
853
854 /*
855 * Validate.
856 */
857 if ( cbIn != sizeof(*pIn)
858 || cbOut != 0)
859 {
860 dprintf(("SUP_IOCTL_CONT_FREE: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
861 cbIn, sizeof(*pIn), cbOut, 0));
862 return SUPDRV_ERR_INVALID_PARAM;
863 }
864 if ( pIn->u32Cookie != pDevExt->u32Cookie
865 || pIn->u32SessionCookie != pSession->u32Cookie)
866 {
867 dprintf(("SUP_IOCTL_CONT_FREE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
868 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
869 return SUPDRV_ERR_INVALID_MAGIC;
870 }
871
872 /*
873 * Execute.
874 */
875 return SUPR0ContFree(pSession, pIn->pv);
876 }
877
878
879 case SUP_IOCTL_LDR_OPEN:
880 {
881 PSUPLDROPEN_IN pIn = (PSUPLDROPEN_IN)pvIn;
882 PSUPLDROPEN_OUT pOut = (PSUPLDROPEN_OUT)pvOut;
883
884 /*
885 * Validate.
886 */
887 if ( cbIn != sizeof(*pIn)
888 || cbOut != sizeof(*pOut))
889 {
890 dprintf(("SUP_IOCTL_LDR_OPEN: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
891 cbIn, sizeof(*pIn), cbOut, sizeof(*pOut)));
892 return SUPDRV_ERR_INVALID_PARAM;
893 }
894 if ( pIn->u32Cookie != pDevExt->u32Cookie
895 || pIn->u32SessionCookie != pSession->u32Cookie)
896 {
897 dprintf(("SUP_IOCTL_LDR_OPEN: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
898 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
899 return SUPDRV_ERR_INVALID_MAGIC;
900 }
901 if ( pIn->cbImage <= 0
902 || pIn->cbImage >= 16*1024*1024 /*16MB*/)
903 {
904 dprintf(("SUP_IOCTL_LDR_OPEN: Invalid size %d. (max is 16MB)\n", pIn->cbImage));
905 return SUPDRV_ERR_INVALID_PARAM;
906 }
907 if (!memchr(pIn->szName, '\0', sizeof(pIn->szName)))
908 {
909 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: The image name isn't terminated!\n"));
910 return SUPDRV_ERR_INVALID_PARAM;
911 }
912 if (!pIn->szName[0])
913 {
914 dprintf(("SUP_IOCTL_LDR_OPEN: The image name is too short\n"));
915 return SUPDRV_ERR_INVALID_PARAM;
916 }
917 if (strpbrk(pIn->szName, ";:()[]{}/\\|&*%#@!~`\"'"))
918 {
919 dprintf(("SUP_IOCTL_LDR_OPEN: The name is invalid '%s'\n", pIn->szName));
920 return SUPDRV_ERR_INVALID_PARAM;
921 }
922
923 *pcbReturned = sizeof(*pOut);
924 return supdrvIOCtl_LdrOpen(pDevExt, pSession, pIn, pOut);
925 }
926
927
928 case SUP_IOCTL_LDR_LOAD:
929 {
930 PSUPLDRLOAD_IN pIn = (PSUPLDRLOAD_IN)pvIn;
931
932 /*
933 * Validate.
934 */
935 if ( cbIn <= sizeof(*pIn)
936 || cbOut != 0)
937 {
938 dprintf(("SUP_IOCTL_LDR_LOAD: Invalid input/output sizes. cbIn=%d expected greater than %d. cbOut=%d expected %d.\n",
939 cbIn, sizeof(*pIn), cbOut, 0));
940 return SUPDRV_ERR_INVALID_PARAM;
941 }
942 if ( pIn->u32Cookie != pDevExt->u32Cookie
943 || pIn->u32SessionCookie != pSession->u32Cookie)
944 {
945 dprintf(("SUP_IOCTL_LDR_LOAD: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
946 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
947 return SUPDRV_ERR_INVALID_MAGIC;
948 }
949 if ((unsigned)RT_OFFSETOF(SUPLDRLOAD_IN, achImage[pIn->cbImage]) > cbIn)
950 {
951 dprintf(("SUP_IOCTL_LDR_LOAD: Invalid size %d. InputBufferLength=%d\n",
952 pIn->cbImage, cbIn));
953 return SUPDRV_ERR_INVALID_PARAM;
954 }
955 if (pIn->cSymbols > 16384)
956 {
957 dprintf(("SUP_IOCTL_LDR_LOAD: Too many symbols. cSymbols=%u max=16384\n", pIn->cSymbols));
958 return SUPDRV_ERR_INVALID_PARAM;
959 }
960 if ( pIn->cSymbols
961 && ( pIn->offSymbols >= pIn->cbImage
962 || pIn->offSymbols + pIn->cSymbols * sizeof(SUPLDRSYM) > pIn->cbImage)
963 )
964 {
965 dprintf(("SUP_IOCTL_LDR_LOAD: symbol table is outside the image bits! offSymbols=%u cSymbols=%d cbImage=%d\n",
966 pIn->offSymbols, pIn->cSymbols, pIn->cbImage));
967 return SUPDRV_ERR_INVALID_PARAM;
968 }
969 if ( pIn->cbStrTab
970 && ( pIn->offStrTab >= pIn->cbImage
971 || pIn->offStrTab + pIn->cbStrTab > pIn->cbImage
972 || pIn->offStrTab + pIn->cbStrTab < pIn->offStrTab)
973 )
974 {
975 dprintf(("SUP_IOCTL_LDR_LOAD: string table is outside the image bits! offStrTab=%u cbStrTab=%d cbImage=%d\n",
976 pIn->offStrTab, pIn->cbStrTab, pIn->cbImage));
977 return SUPDRV_ERR_INVALID_PARAM;
978 }
979
980 if (pIn->cSymbols)
981 {
982 uint32_t i;
983 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pIn->achImage[pIn->offSymbols];
984 for (i = 0; i < pIn->cSymbols; i++)
985 {
986 if (paSyms[i].offSymbol >= pIn->cbImage)
987 {
988 dprintf(("SUP_IOCTL_LDR_LOAD: symbol i=%d has an invalid symbol offset: %#x (max=%#x)\n",
989 i, paSyms[i].offSymbol, pIn->cbImage));
990 return SUPDRV_ERR_INVALID_PARAM;
991 }
992 if (paSyms[i].offName >= pIn->cbStrTab)
993 {
994 dprintf(("SUP_IOCTL_LDR_LOAD: symbol i=%d has an invalid name offset: %#x (max=%#x)\n",
995 i, paSyms[i].offName, pIn->cbStrTab));
996 return SUPDRV_ERR_INVALID_PARAM;
997 }
998 if (!memchr(&pIn->achImage[pIn->offStrTab + paSyms[i].offName], '\0', pIn->cbStrTab - paSyms[i].offName))
999 {
1000 dprintf(("SUP_IOCTL_LDR_LOAD: symbol i=%d has an unterminated name! offName=%#x (max=%#x)\n",
1001 i, paSyms[i].offName, pIn->cbStrTab));
1002 return SUPDRV_ERR_INVALID_PARAM;
1003 }
1004 }
1005 }
1006
1007 return supdrvIOCtl_LdrLoad(pDevExt, pSession, pIn);
1008 }
1009
1010
1011 case SUP_IOCTL_LDR_FREE:
1012 {
1013 PSUPLDRFREE_IN pIn = (PSUPLDRFREE_IN)pvIn;
1014
1015 /*
1016 * Validate.
1017 */
1018 if ( cbIn != sizeof(*pIn)
1019 || cbOut != 0)
1020 {
1021 dprintf(("SUP_IOCTL_LDR_FREE: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
1022 cbIn, sizeof(*pIn), cbOut, 0));
1023 return SUPDRV_ERR_INVALID_PARAM;
1024 }
1025 if ( pIn->u32Cookie != pDevExt->u32Cookie
1026 || pIn->u32SessionCookie != pSession->u32Cookie)
1027 {
1028 dprintf(("SUP_IOCTL_LDR_FREE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1029 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1030 return SUPDRV_ERR_INVALID_MAGIC;
1031 }
1032
1033 return supdrvIOCtl_LdrFree(pDevExt, pSession, pIn);
1034 }
1035
1036
1037 case SUP_IOCTL_LDR_GET_SYMBOL:
1038 {
1039 PSUPLDRGETSYMBOL_IN pIn = (PSUPLDRGETSYMBOL_IN)pvIn;
1040 PSUPLDRGETSYMBOL_OUT pOut = (PSUPLDRGETSYMBOL_OUT)pvOut;
1041 char *pszEnd;
1042
1043 /*
1044 * Validate.
1045 */
1046 if ( cbIn < (unsigned)RT_OFFSETOF(SUPLDRGETSYMBOL_IN, szSymbol[2])
1047 || cbOut != sizeof(*pOut))
1048 {
1049 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: Invalid input/output sizes. cbIn=%d expected >=%d. cbOut=%d expected at%d.\n",
1050 cbIn, RT_OFFSETOF(SUPLDRGETSYMBOL_IN, szSymbol[2]), cbOut, 0));
1051 return SUPDRV_ERR_INVALID_PARAM;
1052 }
1053 if ( pIn->u32Cookie != pDevExt->u32Cookie
1054 || pIn->u32SessionCookie != pSession->u32Cookie)
1055 {
1056 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1057 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1058 return SUPDRV_ERR_INVALID_MAGIC;
1059 }
1060 pszEnd = memchr(pIn->szSymbol, '\0', cbIn - RT_OFFSETOF(SUPLDRGETSYMBOL_IN, szSymbol));
1061 if (!pszEnd)
1062 {
1063 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: The symbol name isn't terminated!\n"));
1064 return SUPDRV_ERR_INVALID_PARAM;
1065 }
1066 if (pszEnd - &pIn->szSymbol[0] >= 1024)
1067 {
1068 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: The symbol name too long (%d chars, max is %d)!\n",
1069 pszEnd - &pIn->szSymbol[0], 1024));
1070 return SUPDRV_ERR_INVALID_PARAM;
1071 }
1072
1073 pOut->pvSymbol = NULL;
1074 *pcbReturned = sizeof(*pOut);
1075 return supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pIn, pOut);
1076 }
1077
1078
1079 /** @todo this interface needs re-doing, we're accessing Ring-3 buffers directly here! */
1080 case SUP_IOCTL_CALL_VMMR0:
1081 {
1082 PSUPCALLVMMR0_IN pIn = (PSUPCALLVMMR0_IN)pvIn;
1083 PSUPCALLVMMR0_OUT pOut = (PSUPCALLVMMR0_OUT)pvOut;
1084
1085 /*
1086 * Validate.
1087 */
1088 if ( cbIn != sizeof(*pIn)
1089 || cbOut != sizeof(*pOut))
1090 {
1091 dprintf(("SUP_IOCTL_CALL_VMMR0: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
1092 cbIn, sizeof(*pIn), cbOut, sizeof(*pOut)));
1093 return SUPDRV_ERR_INVALID_PARAM;
1094 }
1095 if ( pIn->u32Cookie != pDevExt->u32Cookie
1096 || pIn->u32SessionCookie != pSession->u32Cookie )
1097 {
1098 dprintf(("SUP_IOCTL_CALL_VMMR0: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1099 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1100 return SUPDRV_ERR_INVALID_MAGIC;
1101 }
1102
1103 /*
1104 * Do we have an entrypoint?
1105 */
1106 if (!pDevExt->pfnVMMR0Entry)
1107 return SUPDRV_ERR_GENERAL_FAILURE;
1108
1109 /*
1110 * Execute.
1111 */
1112 pOut->rc = pDevExt->pfnVMMR0Entry(pIn->pVM, pIn->uOperation, pIn->pvArg);
1113 *pcbReturned = sizeof(*pOut);
1114 return 0;
1115 }
1116
1117
1118 case SUP_IOCTL_GET_PAGING_MODE:
1119 {
1120 int rc;
1121 PSUPGETPAGINGMODE_IN pIn = (PSUPGETPAGINGMODE_IN)pvIn;
1122 PSUPGETPAGINGMODE_OUT pOut = (PSUPGETPAGINGMODE_OUT)pvOut;
1123
1124 /*
1125 * Validate.
1126 */
1127 if ( cbIn != sizeof(*pIn)
1128 || cbOut != sizeof(*pOut))
1129 {
1130 dprintf(("SUP_IOCTL_GET_PAGING_MODE: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
1131 cbIn, sizeof(*pIn), cbOut, sizeof(*pOut)));
1132 return SUPDRV_ERR_INVALID_PARAM;
1133 }
1134 if ( pIn->u32Cookie != pDevExt->u32Cookie
1135 || pIn->u32SessionCookie != pSession->u32Cookie )
1136 {
1137 dprintf(("SUP_IOCTL_GET_PAGING_MODE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1138 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1139 return SUPDRV_ERR_INVALID_MAGIC;
1140 }
1141
1142 /*
1143 * Execute.
1144 */
1145 *pcbReturned = sizeof(*pOut);
1146 rc = supdrvIOCtl_GetPagingMode(pOut);
1147 if (rc)
1148 *pcbReturned = 0;
1149 return rc;
1150 }
1151
1152
1153 case SUP_IOCTL_LOW_ALLOC:
1154 {
1155 int rc;
1156 PSUPLOWALLOC_IN pIn = (PSUPLOWALLOC_IN)pvIn;
1157 PSUPLOWALLOC_OUT pOut = (PSUPLOWALLOC_OUT)pvOut;
1158
1159 /*
1160 * Validate.
1161 */
1162 if ( cbIn != sizeof(*pIn)
1163 || cbOut < sizeof(*pOut))
1164 {
1165 dprintf(("SUP_IOCTL_LOW_ALLOC: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
1166 cbIn, sizeof(*pIn), cbOut, sizeof(*pOut)));
1167 return SUPDRV_ERR_INVALID_PARAM;
1168 }
1169 if ( pIn->u32Cookie != pDevExt->u32Cookie
1170 || pIn->u32SessionCookie != pSession->u32Cookie )
1171 {
1172 dprintf(("SUP_IOCTL_LOW_ALLOC: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1173 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1174 return SUPDRV_ERR_INVALID_MAGIC;
1175 }
1176 if ((unsigned)RT_OFFSETOF(SUPLOWALLOC_OUT, aPages[pIn->cPages]) > cbOut)
1177 {
1178 dprintf(("SUP_IOCTL_LOW_ALLOC: Output buffer is too small! %d required %d passed in.\n",
1179 RT_OFFSETOF(SUPLOWALLOC_OUT, aPages[pIn->cPages]), cbOut));
1180 return SUPDRV_ERR_INVALID_PARAM;
1181 }
1182
1183 /*
1184 * Execute.
1185 */
1186 *pcbReturned = RT_OFFSETOF(SUPLOWALLOC_OUT, aPages[pIn->cPages]);
1187 rc = SUPR0LowAlloc(pSession, pIn->cPages, &pOut->pvVirt, &pOut->aPages[0]);
1188 if (rc)
1189 *pcbReturned = 0;
1190 return rc;
1191 }
1192
1193
1194 case SUP_IOCTL_LOW_FREE:
1195 {
1196 PSUPLOWFREE_IN pIn = (PSUPLOWFREE_IN)pvIn;
1197
1198 /*
1199 * Validate.
1200 */
1201 if ( cbIn != sizeof(*pIn)
1202 || cbOut != 0)
1203 {
1204 dprintf(("SUP_IOCTL_LOW_FREE: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
1205 cbIn, sizeof(*pIn), cbOut, 0));
1206 return SUPDRV_ERR_INVALID_PARAM;
1207 }
1208 if ( pIn->u32Cookie != pDevExt->u32Cookie
1209 || pIn->u32SessionCookie != pSession->u32Cookie)
1210 {
1211 dprintf(("SUP_IOCTL_LOW_FREE: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1212 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1213 return SUPDRV_ERR_INVALID_MAGIC;
1214 }
1215
1216 /*
1217 * Execute.
1218 */
1219 return SUPR0LowFree(pSession, pIn->pv);
1220 }
1221
1222
1223 case SUP_IOCTL_GIP_MAP:
1224 {
1225 int rc;
1226 PSUPGIPMAP_IN pIn = (PSUPGIPMAP_IN)pvIn;
1227 PSUPGIPMAP_OUT pOut = (PSUPGIPMAP_OUT)pvOut;
1228
1229 /*
1230 * Validate.
1231 */
1232 if ( cbIn != sizeof(*pIn)
1233 || cbOut != sizeof(*pOut))
1234 {
1235 dprintf(("SUP_IOCTL_GIP_MAP: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
1236 cbIn, sizeof(*pIn), cbOut, 0));
1237 return SUPDRV_ERR_INVALID_PARAM;
1238 }
1239 if ( pIn->u32Cookie != pDevExt->u32Cookie
1240 || pIn->u32SessionCookie != pSession->u32Cookie)
1241 {
1242 dprintf(("SUP_IOCTL_GIP_MAP: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1243 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1244 return SUPDRV_ERR_INVALID_MAGIC;
1245 }
1246
1247 /*
1248 * Execute.
1249 */
1250 rc = SUPR0GipMap(pSession, &pOut->pGipR3, &pOut->HCPhysGip);
1251 if (!rc)
1252 {
1253 pOut->pGipR0 = pDevExt->pGip;
1254 *pcbReturned = sizeof(*pOut);
1255 }
1256 return rc;
1257 }
1258
1259
1260 case SUP_IOCTL_GIP_UNMAP:
1261 {
1262 PSUPGIPUNMAP_IN pIn = (PSUPGIPUNMAP_IN)pvIn;
1263
1264 /*
1265 * Validate.
1266 */
1267 if ( cbIn != sizeof(*pIn)
1268 || cbOut != 0)
1269 {
1270 dprintf(("SUP_IOCTL_GIP_UNMAP: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
1271 cbIn, sizeof(*pIn), cbOut, 0));
1272 return SUPDRV_ERR_INVALID_PARAM;
1273 }
1274 if ( pIn->u32Cookie != pDevExt->u32Cookie
1275 || pIn->u32SessionCookie != pSession->u32Cookie)
1276 {
1277 dprintf(("SUP_IOCTL_GIP_UNMAP: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1278 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1279 return SUPDRV_ERR_INVALID_MAGIC;
1280 }
1281
1282 /*
1283 * Execute.
1284 */
1285 return SUPR0GipUnmap(pSession);
1286 }
1287
1288
1289 case SUP_IOCTL_SET_VM_FOR_FAST:
1290 {
1291 PSUPSETVMFORFAST_IN pIn = (PSUPSETVMFORFAST_IN)pvIn;
1292
1293 /*
1294 * Validate.
1295 */
1296 if ( cbIn != sizeof(*pIn)
1297 || cbOut != 0)
1298 {
1299 dprintf(("SUP_IOCTL_SET_VM_FOR_FAST: Invalid input/output sizes. cbIn=%d expected %d. cbOut=%d expected %d.\n",
1300 cbIn, sizeof(*pIn), cbOut, 0));
1301 return SUPDRV_ERR_INVALID_PARAM;
1302 }
1303 if ( pIn->u32Cookie != pDevExt->u32Cookie
1304 || pIn->u32SessionCookie != pSession->u32Cookie)
1305 {
1306 dprintf(("SUP_IOCTL_SET_VM_FOR_FAST: Cookie mismatch {%#x,%#x} != {%#x,%#x}!\n",
1307 pIn->u32Cookie, pDevExt->u32Cookie, pIn->u32SessionCookie, pSession->u32Cookie));
1308 return SUPDRV_ERR_INVALID_MAGIC;
1309 }
1310 if ( pIn->pVMR0 != NULL
1311 && ( !VALID_PTR(pIn->pVMR0)
1312 || ((uintptr_t)pIn->pVMR0 & (PAGE_SIZE - 1))
1313 )
1314 )
1315 {
1316 dprintf(("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p! Must be a valid, page aligned, pointer.\n", pIn->pVMR0));
1317 return SUPDRV_ERR_INVALID_POINTER;
1318 }
1319
1320 /*
1321 * Execute.
1322 */
1323#ifndef VBOX_WITHOUT_IDT_PATCHING
1324 OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: !VBOX_WITHOUT_IDT_PATCHING\n"));
1325 return SUPDRV_ERR_GENERAL_FAILURE;
1326#else
1327 pSession->pVM = pIn->pVMR0;
1328 return 0;
1329#endif
1330 }
1331
1332
1333 default:
1334 dprintf(("Unknown IOCTL %#x\n", uIOCtl));
1335 break;
1336 }
1337 return SUPDRV_ERR_GENERAL_FAILURE;
1338}
1339
1340
1341/**
1342 * Register an object for reference counting.
1343 * The object is registered with one reference in the specified session.
1344 *
1345 * @returns Unique identifier on success (pointer).
1346 * All future references must use this identifier.
1347 * @returns NULL on failure.
1348 * @param pfnDestructor The destructor function which will be called when the reference count reaches 0.
1349 * @param pvUser1 The first user argument.
1350 * @param pvUser2 The second user argument.
1351 */
1352SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1353{
1354 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1355 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1356 PSUPDRVOBJ pObj;
1357 PSUPDRVUSAGE pUsage;
1358
1359 /*
1360 * Validate the input.
1361 */
1362 if (!pSession)
1363 {
1364 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1365 return NULL;
1366 }
1367 if ( enmType <= SUPDRVOBJTYPE_INVALID
1368 || enmType >= SUPDRVOBJTYPE_END)
1369 {
1370 AssertMsgFailed(("Invalid enmType=%d\n", enmType));
1371 return NULL;
1372 }
1373 if (!pfnDestructor)
1374 {
1375 AssertMsgFailed(("Invalid pfnDestructor=%d\n", pfnDestructor));
1376 return NULL;
1377 }
1378
1379 /*
1380 * Allocate and initialize the object.
1381 */
1382 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1383 if (!pObj)
1384 return NULL;
1385 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1386 pObj->enmType = enmType;
1387 pObj->pNext = NULL;
1388 pObj->cUsage = 1;
1389 pObj->pfnDestructor = pfnDestructor;
1390 pObj->pvUser1 = pvUser1;
1391 pObj->pvUser2 = pvUser2;
1392 pObj->CreatorUid = pSession->Uid;
1393 pObj->CreatorGid = pSession->Gid;
1394 pObj->CreatorProcess= pSession->Process;
1395 supdrvOSObjInitCreator(pObj, pSession);
1396
1397 /*
1398 * Allocate the usage record.
1399 * (We keep freed usage records around to simplify SUPR0ObjAddRef().)
1400 */
1401 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1402
1403 pUsage = pDevExt->pUsageFree;
1404 if (pUsage)
1405 pDevExt->pUsageFree = pUsage->pNext;
1406 else
1407 {
1408 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1409 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1410 if (!pUsage)
1411 {
1412 RTMemFree(pObj);
1413 return NULL;
1414 }
1415 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1416 }
1417
1418 /*
1419 * Insert the object and create the session usage record.
1420 */
1421 /* The object. */
1422 pObj->pNext = pDevExt->pObjs;
1423 pDevExt->pObjs = pObj;
1424
1425 /* The session record. */
1426 pUsage->cUsage = 1;
1427 pUsage->pObj = pObj;
1428 pUsage->pNext = pSession->pUsage;
1429 dprintf(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1430 pSession->pUsage = pUsage;
1431
1432 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1433
1434 dprintf(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
1435 return pObj;
1436}
1437
1438
1439/**
1440 * Increment the reference counter for the object associating the reference
1441 * with the specified session.
1442 *
1443 * @returns 0 on success.
1444 * @returns SUPDRV_ERR_* on failure.
1445 * @param pvObj The identifier returned by SUPR0ObjRegister().
1446 * @param pSession The session which is referencing the object.
1447 */
1448SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1449{
1450 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1451 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1452 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1453 PSUPDRVUSAGE pUsagePre;
1454 PSUPDRVUSAGE pUsage;
1455
1456 /*
1457 * Validate the input.
1458 */
1459 if (!pSession)
1460 {
1461 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1462 return SUPDRV_ERR_INVALID_PARAM;
1463 }
1464 if (!pObj || pObj->u32Magic != SUPDRVOBJ_MAGIC)
1465 {
1466 AssertMsgFailed(("Invalid pvObj=%p magic=%#x (exepcted %#x)\n",
1467 pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC));
1468 return SUPDRV_ERR_INVALID_PARAM;
1469 }
1470
1471 /*
1472 * Preallocate the usage record.
1473 */
1474 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1475
1476 pUsagePre = pDevExt->pUsageFree;
1477 if (pUsagePre)
1478 pDevExt->pUsageFree = pUsagePre->pNext;
1479 else
1480 {
1481 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1482 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
1483 if (!pUsagePre)
1484 return SUPDRV_ERR_NO_MEMORY;
1485 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1486 }
1487
1488 /*
1489 * Reference the object.
1490 */
1491 pObj->cUsage++;
1492
1493 /*
1494 * Look for the session record.
1495 */
1496 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
1497 {
1498 dprintf(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1499 if (pUsage->pObj == pObj)
1500 break;
1501 }
1502 if (pUsage)
1503 pUsage->cUsage++;
1504 else
1505 {
1506 /* create a new session record. */
1507 pUsagePre->cUsage = 1;
1508 pUsagePre->pObj = pObj;
1509 pUsagePre->pNext = pSession->pUsage;
1510 pSession->pUsage = pUsagePre;
1511 dprintf(("SUPR0ObjRelease: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));
1512
1513 pUsagePre = NULL;
1514 }
1515
1516 /*
1517 * Put any unused usage record into the free list.
1518 */
1519 if (pUsagePre)
1520 {
1521 pUsagePre->pNext = pDevExt->pUsageFree;
1522 pDevExt->pUsageFree = pUsagePre;
1523 }
1524
1525 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1526
1527 return 0;
1528}
1529
1530
1531/**
1532 * Decrement / destroy a reference counter record for an object.
1533 *
1534 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
1535 *
1536 * @returns 0 on success.
1537 * @returns SUPDRV_ERR_* on failure.
1538 * @param pvObj The identifier returned by SUPR0ObjRegister().
1539 * @param pSession The session which is referencing the object.
1540 */
1541SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
1542{
1543 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1544 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1545 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1546 bool fDestroy = false;
1547 PSUPDRVUSAGE pUsage;
1548 PSUPDRVUSAGE pUsagePrev;
1549
1550 /*
1551 * Validate the input.
1552 */
1553 if (!pSession)
1554 {
1555 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1556 return SUPDRV_ERR_INVALID_PARAM;
1557 }
1558 if (!pObj || pObj->u32Magic != SUPDRVOBJ_MAGIC)
1559 {
1560 AssertMsgFailed(("Invalid pvObj=%p magic=%#x (exepcted %#x)\n",
1561 pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC));
1562 return SUPDRV_ERR_INVALID_PARAM;
1563 }
1564
1565 /*
1566 * Acquire the spinlock and look for the usage record.
1567 */
1568 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1569
1570 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
1571 pUsage;
1572 pUsagePrev = pUsage, pUsage = pUsage->pNext)
1573 {
1574 dprintf(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));
1575 if (pUsage->pObj == pObj)
1576 {
1577 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
1578 if (pUsage->cUsage > 1)
1579 {
1580 pObj->cUsage--;
1581 pUsage->cUsage--;
1582 }
1583 else
1584 {
1585 /*
1586 * Free the session record.
1587 */
1588 if (pUsagePrev)
1589 pUsagePrev->pNext = pUsage->pNext;
1590 else
1591 pSession->pUsage = pUsage->pNext;
1592 pUsage->pNext = pDevExt->pUsageFree;
1593 pDevExt->pUsageFree = pUsage;
1594
1595 /* What about the object? */
1596 if (pObj->cUsage > 1)
1597 pObj->cUsage--;
1598 else
1599 {
1600 /*
1601 * Object is to be destroyed, unlink it.
1602 */
1603 fDestroy = true;
1604 if (pDevExt->pObjs == pObj)
1605 pDevExt->pObjs = pObj->pNext;
1606 else
1607 {
1608 PSUPDRVOBJ pObjPrev;
1609 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
1610 if (pObjPrev->pNext == pObj)
1611 {
1612 pObjPrev->pNext = pObj->pNext;
1613 break;
1614 }
1615 Assert(pObjPrev);
1616 }
1617 }
1618 }
1619 break;
1620 }
1621 }
1622
1623 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1624
1625 /*
1626 * Call the destructor and free the object if required.
1627 */
1628 if (fDestroy)
1629 {
1630 pObj->u32Magic++;
1631 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
1632 RTMemFree(pObj);
1633 }
1634
1635 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
1636 return pUsage ? 0 : SUPDRV_ERR_INVALID_PARAM;
1637}
1638
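/* A compiled-out sketch of the reference counting API implemented above:
 * register an object with one reference in the creating session, let a second
 * session add and drop a reference, and release the creator's reference so
 * the destructor runs exactly once. The object type SUPDRVOBJTYPE_VM, the
 * destructor prototype (written from the call made in supdrvCleanupSession)
 * and the helper names are assumptions for illustration only. */
#if 0
static void supdrvExampleDestructor(void *pvObj, void *pvUser1, void *pvUser2)
{
    /* Invoked by SUPR0ObjRelease() when the last reference is dropped. */
    RTMemFree(pvUser1);
}

static void supdrvExampleObjUsage(PSUPDRVSESSION pOwner, PSUPDRVSESSION pOther)
{
    void *pvPayload = RTMemAllocZ(64);
    void *pvObj = SUPR0ObjRegister(pOwner, SUPDRVOBJTYPE_VM, supdrvExampleDestructor, pvPayload, NULL);
    if (pvObj)
    {
        if (!SUPR0ObjAddRef(pvObj, pOther))     /* 0 == success */
            SUPR0ObjRelease(pvObj, pOther);     /* drop the second session's reference */
        SUPR0ObjRelease(pvObj, pOwner);         /* last reference -> destructor + free */
    }
    else
        RTMemFree(pvPayload);
}
#endif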
1639/**
1640 * Verifies that the current process can access the specified object.
1641 *
1642 * @returns 0 if access is granted.
1643 * @returns SUPDRV_ERR_PERMISSION_DENIED if denied access.
1644 * @returns SUPDRV_ERR_INVALID_PARAM if invalid parameter.
1645 *
1646 * @param pvObj The identifier returned by SUPR0ObjRegister().
1647 * @param pSession The session which wishes to access the object.
1648 * @param pszObjName Object string name. This is optional and depends on the object type.
1649 *
1650 * @remark The caller is responsible for making sure the object isn't removed while
1651 * we're inside this function. If uncertain about this, just call AddRef before calling us.
1652 */
1653SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
1654{
1655 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1656 int rc = SUPDRV_ERR_GENERAL_FAILURE;
1657
1658 /*
1659 * Validate the input.
1660 */
1661 if (!pSession)
1662 {
1663 AssertMsgFailed(("Invalid pSession=%p\n", pSession));
1664 return SUPDRV_ERR_INVALID_PARAM;
1665 }
1666 if (!pObj || pObj->u32Magic != SUPDRVOBJ_MAGIC)
1667 {
1668 AssertMsgFailed(("Invalid pvObj=%p magic=%#x (exepcted %#x)\n",
1669 pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC));
1670 return SUPDRV_ERR_INVALID_PARAM;
1671 }
1672
1673 /*
1674 * Check access. (returns true if a decision has been made.)
1675 */
1676 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
1677 return rc;
1678
1679 /*
1680 * Default policy is to allow the user to access his own
1681 * stuff but nothing else.
1682 */
1683 if (pObj->CreatorUid == pSession->Uid)
1684 return 0;
1685 return SUPDRV_ERR_PERMISSION_DENIED;
1686}
1687
1688
1689/**
1690 * Lock pages.
1691 *
1692 * @param pSession Session to which the locked memory should be associated.
1693 * @param pvR3 Start of the memory range to lock.
1694 * This must be page aligned.
1695 * @param cb Size of the memory range to lock.
1696 * This must be page aligned.
1697 */
1698SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, void *pvR3, unsigned cb, PSUPPAGE paPages)
1699{
1700 int rc;
1701 SUPDRVMEMREF Mem = {0};
1702 dprintf(("SUPR0LockMem: pSession=%p pvR3=%p cb=%d paPages=%p\n",
1703 pSession, pvR3, cb, paPages));
1704
1705 /*
1706 * Verify input.
1707 */
1708 if (RT_ALIGN_R3PT(pvR3, PAGE_SIZE, void *) != pvR3 || !pvR3)
1709 {
1710 dprintf(("pvR3 (%p) must be page aligned and not NULL!\n", pvR3));
1711 return SUPDRV_ERR_INVALID_PARAM;
1712 }
1713 if (RT_ALIGN(cb, PAGE_SIZE) != cb)
1714 {
1715 dprintf(("cb (%u) must be page aligned!\n", cb));
1716 return SUPDRV_ERR_INVALID_PARAM;
1717 }
1718 if (!paPages)
1719 {
1720 dprintf(("paPages is NULL!\n"));
1721 return SUPDRV_ERR_INVALID_PARAM;
1722 }
1723
1724#ifdef USE_NEW_OS_INTERFACE
1725 /*
1726 * Let IPRT do the job.
1727 */
1728 Mem.eType = MEMREF_TYPE_LOCKED;
1729 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb);
1730 if (RT_SUCCESS(rc))
1731 {
1732 unsigned iPage = cb >> PAGE_SHIFT;
1733 while (iPage-- > 0)
1734 {
1735 paPages[iPage].uReserved = 0;
1736 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
1737 if (RT_UNLIKELY(paPages[iPage].Phys == NIL_RTCCPHYS))
1738 {
1739 AssertMsgFailed(("iPage=%d\n", iPage));
1740 rc = VERR_INTERNAL_ERROR;
1741 break;
1742 }
1743 }
1744 if (RT_SUCCESS(rc))
1745 rc = supdrvMemAdd(&Mem, pSession);
1746 if (RT_FAILURE(rc))
1747 {
1748 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
1749 AssertRC(rc2);
1750 }
1751 }
1752
1753#else /* !USE_NEW_OS_INTERFACE */
1754
1755 /*
1756 * Let the OS specific code have a go.
1757 */
1758 Mem.pvR0 = NULL;
1759 Mem.pvR3 = pvR3;
1760 Mem.eType = MEMREF_TYPE_LOCKED;
1761 Mem.cb = cb;
1762 rc = supdrvOSLockMemOne(&Mem, paPages);
1763 if (rc)
1764 return rc;
1765
1766 /*
1767 * Everything went fine; add the memory reference to the session.
1768 */
1769 rc = supdrvMemAdd(&Mem, pSession);
1770 if (rc)
1771 supdrvOSUnlockMemOne(&Mem);
1772#endif /* !USE_NEW_OS_INTERFACE */
1773 return rc;
1774}
1775
1776
1777/**
1778 * Unlocks the memory pointed to by pv.
1779 *
1780 * @returns 0 on success.
1781 * @returns SUPDRV_ERR_* on failure
1782 * @param pSession Session to which the memory was locked.
1783 * @param pvR3 Memory to unlock.
1784 */
1785SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, void *pvR3)
1786{
1787 dprintf(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, pvR3));
1788 return supdrvMemRelease(pSession, pvR3, MEMREF_TYPE_LOCKED);
1789}
1790
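/* A compiled-out sketch of using SUPR0LockMem() / SUPR0UnlockMem() from other
 * ring-0 code: both the ring-3 address and the byte count must be page
 * aligned, and the caller provides one SUPPAGE entry per page to receive the
 * physical addresses. Locking exactly two pages into a stack array is an
 * illustrative assumption. */
#if 0
static int supdrvExampleLockTwoPages(PSUPDRVSESSION pSession, void *pvR3)
{
    SUPPAGE aPages[2];                          /* one entry per locked page */
    int rc = SUPR0LockMem(pSession, pvR3, 2 * PAGE_SIZE, &aPages[0]);
    if (!rc)
    {
        dprintf(("page 0 at %VHp, page 1 at %VHp\n", aPages[0].Phys, aPages[1].Phys));
        rc = SUPR0UnlockMem(pSession, pvR3);    /* undo the locking again */
    }
    return rc;
}
#endif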
1791
1792/**
1793 * Allocates a chunk of page aligned memory with contiguous and fixed physical
1794 * backing.
1795 *
1796 * @returns 0 on success.
1797 * @returns SUPDRV_ERR_* on failure.
1798 * @param pSession Session data.
1799 * @param cb Number of bytes to allocate.
1800 * @param ppvR0 Where to put the address of the Ring-0 mapping of the allocated memory. Optional.
1801 * @param ppvR3 Where to put the address of the Ring-3 mapping of the allocated memory.
1802 * @param pHCPhys Where to put the physical address of allocated memory.
1803 */
1804SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, unsigned cb, void **ppvR0, void **ppvR3, PRTHCPHYS pHCPhys)
1805{
1806 int rc;
1807 SUPDRVMEMREF Mem = {0};
1808 dprintf(("SUPR0ContAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cb, ppvR0, ppvR3, pHCPhys));
1809
1810 /*
1811 * Validate input.
1812 */
1813 if (!pSession || !ppvR3 || !pHCPhys)
1814 {
1815 dprintf(("Null pointer. All of these should be set: pSession=%p ppvR3=%p pHCPhys=%p\n",
1816 pSession, ppvR3, pHCPhys));
1817 return SUPDRV_ERR_INVALID_PARAM;
1818
1819 }
1820 if (cb <= 64 || cb >= PAGE_SIZE * 256)
1821 {
1822 dprintf(("Illegal request cb=%d, must be greater than 64 and smaller than PAGE_SIZE*256\n", cb));
1823 return SUPDRV_ERR_INVALID_PARAM;
1824 }
1825
1826#ifdef USE_NEW_OS_INTERFACE
1827 /*
1828 * Let IPRT do the job.
1829 */
1830 rc = RTR0MemObjAllocCont(&Mem.MemObj, cb, true /* executable R0 mapping */);
1831 if (RT_SUCCESS(rc))
1832 {
1833 int rc2;
1834 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (void *)-1, 0, RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ);
1835 if (RT_SUCCESS(rc))
1836 {
1837 Mem.eType = MEMREF_TYPE_CONT;
1838 rc = supdrvMemAdd(&Mem, pSession);
1839 if (!rc)
1840 {
1841 if (ppvR0)
1842 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
1843 *ppvR3 = RTR0MemObjAddress(Mem.MapObjR3);
1844 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
1845 return 0;
1846 }
1847
1848 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
1849 AssertRC(rc2);
1850 }
1851 rc2 = RTR0MemObjFree(Mem.MemObj, false);
1852 AssertRC(rc2);
1853 }
1854
1855#else /* !USE_NEW_OS_INTERFACE */
1856
1857 /*
1858 * Let the OS specific code have a go.
1859 */
1860 Mem.pvR0 = NULL;
1861 Mem.pvR3 = NULL;
1862 Mem.eType = MEMREF_TYPE_CONT;
1863 Mem.cb = cb;
1864 rc = supdrvOSContAllocOne(&Mem, ppvR0, ppvR3, pHCPhys);
1865 if (rc)
1866 return rc;
1867 AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)) || !(*pHCPhys & (PAGE_SIZE - 1)),
1868 ("Memory is not page aligned! *ppvR0=%p *ppvR3=%p phys=%VHp\n", ppvR0 ? *ppvR0 : NULL, *ppvR3, *pHCPhys));
1869
1870 /*
1871     * Everything went fine; add the memory reference to the session.
1872 */
1873 rc = supdrvMemAdd(&Mem, pSession);
1874 if (rc)
1875 supdrvOSContFreeOne(&Mem);
1876#endif /* !USE_NEW_OS_INTERFACE */
1877
1878 return rc;
1879}
1880
1881
1882/**
1883 * Frees memory allocated using SUPR0ContAlloc().
1884 *
1885 * @returns 0 on success.
1886 * @returns SUPDRV_ERR_* on failure.
1887 * @param pSession The session to which the memory was allocated.
1888 * @param pv Pointer to the memory.
1889 */
1890SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, void *pv)
1891{
1892 dprintf(("SUPR0ContFree: pSession=%p pv=%p\n", pSession, pv));
1893 return supdrvMemRelease(pSession, pv, MEMREF_TYPE_CONT);
1894}
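
/* A sketch of how a ring-0 client might pair SUPR0ContAlloc() and SUPR0ContFree()
 * for a buffer a device must see as one physical block. Only the two SUPR0Cont*
 * calls are taken from the code above; the function name, the size and the
 * memset are illustrative. */
#if 0 /* illustrative sketch, not built */
static int exampleContiguousBuffer(PSUPDRVSESSION pSession)
{
    void    *pvR0   = NULL;
    void    *pvR3   = NULL;
    RTHCPHYS HCPhys = NIL_RTHCPHYS;
    int rc = SUPR0ContAlloc(pSession, 8 * PAGE_SIZE, &pvR0, &pvR3, &HCPhys);
    if (!rc)
    {
        /* pvR0 and pvR3 map the same physically contiguous pages starting at HCPhys. */
        memset(pvR0, 0, 8 * PAGE_SIZE);
        dprintf(("cont buffer: r0=%p r3=%p phys=%VHp\n", pvR0, pvR3, HCPhys));

        /* Freeing by either address works; supdrvMemRelease() matches both mappings. */
        rc = SUPR0ContFree(pSession, pvR3);
    }
    return rc;
}
#endif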
1895
1896
1897/**
1898 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
1899 *
1900 * @returns 0 on success.
1901 * @returns SUPDRV_ERR_* on failure.
1902 * @param pSession Session data.
1903 * @param cPages Number of pages to allocate.
1904 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
1905 * @param paPages Where to put the physical addresses of allocated memory.
1906 */
1907SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, unsigned cPages, void **ppvR3, PSUPPAGE paPages)
1908{
1909 unsigned iPage;
1910 int rc;
1911 SUPDRVMEMREF Mem = {0};
1912 dprintf(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p paPages=%p\n", pSession, cPages, ppvR3, paPages));
1913
1914 /*
1915 * Validate input.
1916 */
1917 if (!pSession || !ppvR3 || !paPages)
1918 {
1919 dprintf(("Null pointer. All of these should be set: pSession=%p ppvR3=%p paPages=%p\n",
1920 pSession, ppvR3, paPages));
1921 return SUPDRV_ERR_INVALID_PARAM;
1922
1923 }
1924 if (cPages < 1 || cPages > 256)
1925 {
1926        dprintf(("Illegal request cPages=%d, must be between 1 and 256.\n", cPages));
1927 return SUPDRV_ERR_INVALID_PARAM;
1928 }
1929
1930#ifdef USE_NEW_OS_INTERFACE
1931 /*
1932 * Let IPRT do the work.
1933 */
1934 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
1935 if (RT_SUCCESS(rc))
1936 {
1937 int rc2;
1938 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (void *)-1, 0, RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ);
1939 if (RT_SUCCESS(rc))
1940 {
1941 Mem.eType = MEMREF_TYPE_LOW;
1942 rc = supdrvMemAdd(&Mem, pSession);
1943 if (!rc)
1944 {
1945 for (iPage = 0; iPage < cPages; iPage++)
1946 {
1947 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
1948 paPages[iPage].uReserved = 0;
1949 AssertMsg(!(paPages[iPage].Phys & (PAGE_SIZE - 1)), ("iPage=%d Phys=%VHp\n", paPages[iPage].Phys));
1950 }
1951 /*if (ppvR0)
1952 *ppvR0 = RTR0MemObjAddress(Mem.MemObj); */
1953 *ppvR3 = RTR0MemObjAddress(Mem.MapObjR3);
1954 return 0;
1955 }
1956
1957 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
1958 AssertRC(rc2);
1959 }
1960
1961 rc2 = RTR0MemObjFree(Mem.MemObj, false);
1962 AssertRC(rc2);
1963 }
1964
1965#else /* !USE_NEW_OS_INTERFACE */
1966
1967 /*
1968 * Let the OS specific code have a go.
1969 */
1970 Mem.pvR0 = NULL;
1971 Mem.pvR3 = NULL;
1972 Mem.eType = MEMREF_TYPE_LOW;
1973 Mem.cb = cPages << PAGE_SHIFT;
1974 rc = supdrvOSLowAllocOne(&Mem, ppvR3, paPages);
1975 if (rc)
1976 return rc;
1977 AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)), ("Memory is not page aligned! virt=%p\n", *ppvR3));
1978 for (iPage = 0; iPage < cPages; iPage++)
1979 AssertMsg(!(paPages[iPage].Phys & (PAGE_SIZE - 1)), ("iPage=%d Phys=%VHp\n", paPages[iPage].Phys));
1980
1981 /*
1982     * Everything went fine; add the memory reference to the session.
1983 */
1984 rc = supdrvMemAdd(&Mem, pSession);
1985 if (rc)
1986 supdrvOSLowFreeOne(&Mem);
1987#endif /* !USE_NEW_OS_INTERFACE */
1988 return rc;
1989}
1990
1991
1992/**
1993 * Frees memory allocated using SUPR0LowAlloc().
1994 *
1995 * @returns 0 on success.
1996 * @returns SUPDRV_ERR_* on failure.
1997 * @param pSession The session to which the memory was allocated.
1998 * @param pv Pointer to the memory.
1999 */
2000SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, void *pv)
2001{
2002 dprintf(("SUPR0LowFree: pSession=%p pv=%p\n", pSession, pv));
2003 return supdrvMemRelease(pSession, pv, MEMREF_TYPE_LOW);
2004}
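
/* A sketch of allocating a few pages with below-4GB physical backing, e.g. for
 * structures that must be addressable with 32-bit physical addresses. Only
 * SUPR0LowAlloc() and SUPR0LowFree() are taken from the code above; everything
 * else is illustrative. */
#if 0 /* illustrative sketch, not built */
static int exampleLowPages(PSUPDRVSESSION pSession)
{
    SUPPAGE aPages[4];
    void   *pvR3 = NULL;
    int     rc = SUPR0LowAlloc(pSession, 4, &pvR3, &aPages[0]);
    if (!rc)
    {
        unsigned iPage;
        for (iPage = 0; iPage < 4; iPage++)
            Assert(!(aPages[iPage].Phys >> 32));    /* each page is backed below 4GB */
        rc = SUPR0LowFree(pSession, pvR3);
    }
    return rc;
}
#endif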
2005
2006
2007/**
2008 * Allocates a chunk of memory with both R0 and R3 mappings.
2009 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
2010 *
2011 * @returns 0 on success.
2012 * @returns SUPDRV_ERR_* on failure.
2013 * @param pSession The session to associate the allocation with.
2014 * @param cb Number of bytes to allocate.
2015 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2016 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2017 */
2018SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, unsigned cb, void **ppvR0, void **ppvR3)
2019{
2020 int rc;
2021 SUPDRVMEMREF Mem = {0};
2022 dprintf(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
2023
2024 /*
2025 * Validate input.
2026 */
2027 if (!pSession || !ppvR0 || !ppvR3)
2028 {
2029 dprintf(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p\n",
2030 pSession, ppvR0, ppvR3));
2031 return SUPDRV_ERR_INVALID_PARAM;
2032
2033 }
2034 if (cb < 1 || cb >= PAGE_SIZE * 256)
2035 {
2036        dprintf(("Illegal request cb=%u; must be greater than 0 and smaller than PAGE_SIZE * 256.\n", cb));
2037 return SUPDRV_ERR_INVALID_PARAM;
2038 }
2039
2040#ifdef USE_NEW_OS_INTERFACE
2041 /*
2042 * Let IPRT do the work.
2043 */
2044 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
2045 if (RT_SUCCESS(rc))
2046 {
2047 int rc2;
2048 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (void*)-1, 0, RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ);
2049 if (RT_SUCCESS(rc))
2050 {
2051 Mem.eType = MEMREF_TYPE_MEM;
2052 rc = supdrvMemAdd(&Mem, pSession);
2053 if (!rc)
2054 {
2055 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2056 *ppvR3 = RTR0MemObjAddress(Mem.MapObjR3);
2057 return 0;
2058 }
2059 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2060 AssertRC(rc2);
2061 }
2062
2063 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2064 AssertRC(rc2);
2065 }
2066
2067#else /* !USE_NEW_OS_INTERFACE */
2068
2069 /*
2070 * Let the OS specific code have a go.
2071 */
2072 Mem.pvR0 = NULL;
2073 Mem.pvR3 = NULL;
2074 Mem.eType = MEMREF_TYPE_MEM;
2075 Mem.cb = cb;
2076 rc = supdrvOSMemAllocOne(&Mem, ppvR0, ppvR3);
2077 if (rc)
2078 return rc;
2079 AssertMsg(!((uintptr_t)*ppvR0 & (PAGE_SIZE - 1)), ("Memory is not page aligned! pvR0=%p\n", *ppvR0));
2080 AssertMsg(!((uintptr_t)*ppvR3 & (PAGE_SIZE - 1)), ("Memory is not page aligned! pvR3=%p\n", *ppvR3));
2081
2082 /*
2083     * Everything went fine; add the memory reference to the session.
2084 */
2085 rc = supdrvMemAdd(&Mem, pSession);
2086 if (rc)
2087 supdrvOSMemFreeOne(&Mem);
2088#endif /* !USE_NEW_OS_INTERFACE */
2089 return rc;
2090}
2091
2092
2093/**
2094 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
2095 *
2096 * @returns 0 on success.
2097 * @returns SUPDRV_ERR_* on failure.
2098 * @param pSession The session to which the memory was allocated.
2099 * @param pv The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
 * @param paPages Where to store the physical addresses of the allocated pages.
2100 */
2101SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, void *pv, PSUPPAGE paPages)
2102{
2103 PSUPDRVBUNDLE pBundle;
2104 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2105 dprintf(("SUPR0MemGetPhys: pSession=%p pv=%p paPages=%p\n", pSession, pv, paPages));
2106
2107 /*
2108 * Validate input.
2109 */
2110 if (!pSession)
2111 {
2112 dprintf(("pSession must not be NULL!"));
2113 return SUPDRV_ERR_INVALID_PARAM;
2114 }
2115 if (!pv || !paPages)
2116 {
2117 dprintf(("Illegal address pv=%p or/and paPages=%p\n", pv, paPages));
2118 return SUPDRV_ERR_INVALID_PARAM;
2119 }
2120
2121 /*
2122 * Search for the address.
2123 */
2124 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2125 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2126 {
2127 if (pBundle->cUsed > 0)
2128 {
2129 unsigned i;
2130 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
2131 {
2132#ifdef USE_NEW_OS_INTERFACE
2133 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2134 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2135 && ( RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pv
2136 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2137 && RTR0MemObjAddress(pBundle->aMem[i].MapObjR3) == pv)
2138 )
2139 )
2140 {
2141 const unsigned cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2142 unsigned iPage;
2143 for (iPage = 0; iPage < cPages; iPage++)
2144 {
2145 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2146 paPages[iPage].uReserved = 0;
2147 }
2148 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2149 return 0;
2150 }
2151#else /* !USE_NEW_OS_INTERFACE */
2152 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2153 && ( pBundle->aMem[i].pvR0 == pv
2154 || pBundle->aMem[i].pvR3 == pv))
2155 {
2156 supdrvOSMemGetPages(&pBundle->aMem[i], paPages);
2157 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2158 return 0;
2159 }
2160#endif
2161 }
2162 }
2163 }
2164 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2165 dprintf(("Failed to find %p!!!\n", pv));
2166 return SUPDRV_ERR_INVALID_PARAM;
2167}
2168
2169
2170/**
2171 * Free memory allocated by SUPR0MemAlloc().
2172 *
2173 * @returns 0 on success.
2174 * @returns SUPDRV_ERR_* on failure.
2175 * @param pSession The session owning the allocation.
2176 * @param pv The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2177 */
2178SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, void *pv)
2179{
2180 dprintf(("SUPR0MemFree: pSession=%p pv=%p\n", pSession, pv));
2181 return supdrvMemRelease(pSession, pv, MEMREF_TYPE_MEM);
2182}
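
/* A sketch combining SUPR0MemAlloc(), SUPR0MemGetPhys() and SUPR0MemFree().
 * The three calls are the ones defined above; the function name, the size and
 * the local names are illustrative. */
#if 0 /* illustrative sketch, not built */
static int exampleDualMapping(PSUPDRVSESSION pSession)
{
    void *pvR0 = NULL;
    void *pvR3 = NULL;
    int   rc = SUPR0MemAlloc(pSession, 2 * PAGE_SIZE, &pvR0, &pvR3);
    if (!rc)
    {
        SUPPAGE aPages[2];                  /* one entry per allocated page */
        rc = SUPR0MemGetPhys(pSession, pvR0, &aPages[0]);   /* pvR3 would match too */
        if (!rc)
            dprintf(("pages at %VHp and %VHp\n", aPages[0].Phys, aPages[1].Phys));

        rc = SUPR0MemFree(pSession, pvR0);  /* releases both the R0 and R3 mappings */
    }
    return rc;
}
#endif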
2183
2184
2185/**
2186 * Maps the GIP into userspace and/or gets the physical address of the GIP.
2187 *
2188 * @returns 0 on success.
2189 * @returns SUPDRV_ERR_* on failure.
2190 * @param pSession Session to which the GIP mapping should belong.
2191 * @param ppGip Where to store the address of the mapping. (optional)
2192 * @param pHCPhysGip Where to store the physical address. (optional)
2193 *
2194 * @remark There is no reference counting on the mapping, so one call to this function
2195 * counts globally as one reference. One call to SUPR0GipUnmap() will unmap the GIP
2196 * and remove the session as a GIP user.
2197 */
2198SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PCSUPGLOBALINFOPAGE *ppGip, RTHCPHYS *pHCPhysGid)
2199{
2200 int rc = 0;
2201 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2202 PCSUPGLOBALINFOPAGE pGip = NULL;
2203 RTHCPHYS HCPhys = NIL_RTHCPHYS;
2204 dprintf(("SUPR0GipMap: pSession=%p ppGip=%p pHCPhysGid=%p\n", pSession, ppGip, pHCPhysGid));
2205
2206 /*
2207 * Validate
2208 */
2209 if (!ppGip && !pHCPhysGid)
2210 return 0;
2211
2212 RTSemFastMutexRequest(pDevExt->mtxGip);
2213 if (pDevExt->pGip)
2214 {
2215 /*
2216 * Map it?
2217 */
2218 if (ppGip)
2219 {
2220#ifdef USE_NEW_OS_INTERFACE
2221 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
2222 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (void*)-1, 0, RTMEM_PROT_READ);
2223 if (RT_SUCCESS(rc))
2224 {
2225 pGip = (PCSUPGLOBALINFOPAGE)RTR0MemObjAddress(pSession->GipMapObjR3);
2226 rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
2227 }
2228#else /* !USE_NEW_OS_INTERFACE */
2229 if (!pSession->pGip)
2230 rc = supdrvOSGipMap(pSession->pDevExt, &pSession->pGip);
2231 if (!rc)
2232 pGip = pSession->pGip;
2233#endif /* !USE_NEW_OS_INTERFACE */
2234 }
2235
2236 /*
2237 * Get physical address.
2238 */
2239 if (pHCPhysGid && !rc)
2240 HCPhys = pDevExt->HCPhysGip;
2241
2242 /*
2243 * Reference globally.
2244 */
2245 if (!pSession->fGipReferenced && !rc)
2246 {
2247 pSession->fGipReferenced = 1;
2248 pDevExt->cGipUsers++;
2249 if (pDevExt->cGipUsers == 1)
2250 {
2251 dprintf(("SUPR0GipMap: Resumes GIP updating\n"));
2252 ASMAtomicXchgU32(&pDevExt->pGip->u32TransactionId,
2253 pDevExt->pGip->u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
2254 ASMAtomicXchgU64(&pDevExt->pGip->u64NanoTSLastUpdateHz, 0);
2255#ifdef USE_NEW_OS_INTERFACE
2256 rc = RTTimerStart(pDevExt->pGipTimer, 0); AssertRC(rc); rc = 0;
2257#else
2258 supdrvOSGipResume(pDevExt);
2259#endif
2260 }
2261 }
2262 }
2263 else
2264 {
2265 rc = SUPDRV_ERR_GENERAL_FAILURE;
2266 dprintf(("SUPR0GipMap: GIP is not available!\n"));
2267 }
2268 RTSemFastMutexRelease(pDevExt->mtxGip);
2269
2270 /*
2271 * Write returns.
2272 */
2273 if (pHCPhysGid)
2274 *pHCPhysGid = HCPhys;
2275 if (ppGip)
2276 *ppGip = pGip;
2277
2278 dprintf(("SUPR0GipMap: returns %d *pHCPhysGid=%lx *ppGip=%p\n", rc, (unsigned long)HCPhys, pGip));
2279 return rc;
2280}
2281
2282
2283/**
2284 * Unmaps any user mapping of the GIP and terminates all GIP access
2285 * from this session.
2286 *
2287 * @returns 0 on success.
2288 * @returns SUPDRV_ERR_* on failure.
2289 * @param pSession Session to which the GIP mapping should belong.
2290 */
2291SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
2292{
2293 int rc = 0;
2294 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2295 dprintf(("SUPR0GipUnmap: pSession=%p\n", pSession));
2296
2297 RTSemFastMutexRequest(pDevExt->mtxGip);
2298
2299 /*
2300 * Unmap anything?
2301 */
2302#ifdef USE_NEW_OS_INTERFACE
2303 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
2304 {
2305 rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
2306 AssertRC(rc);
2307 if (RT_SUCCESS(rc))
2308 pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
2309 }
2310#else
2311 if (pSession->pGip)
2312 {
2313 rc = supdrvOSGipUnmap(pDevExt, pSession->pGip);
2314 if (!rc)
2315 pSession->pGip = NULL;
2316 }
2317#endif
2318
2319 /*
2320 * Dereference global GIP.
2321 */
2322 if (pSession->fGipReferenced && !rc)
2323 {
2324 pSession->fGipReferenced = 0;
2325 if ( pDevExt->cGipUsers > 0
2326 && !--pDevExt->cGipUsers)
2327 {
2328 dprintf(("SUPR0GipUnmap: Suspends GIP updating\n"));
2329#ifdef USE_NEW_OS_INTERFACE
2330 rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = 0;
2331#else
2332 supdrvOSGipSuspend(pDevExt);
2333#endif
2334 }
2335 }
2336
2337 RTSemFastMutexRelease(pDevExt->mtxGip);
2338
2339 return rc;
2340}
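
/* A sketch of a session mapping the GIP and unmapping it again. SUPR0GipMap()
 * and SUPR0GipUnmap() are the functions defined above; the rest of the names
 * are illustrative. */
#if 0 /* illustrative sketch, not built */
static int exampleGipPeek(PSUPDRVSESSION pSession)
{
    PCSUPGLOBALINFOPAGE pGip      = NULL;
    RTHCPHYS            HCPhysGip = NIL_RTHCPHYS;
    int rc = SUPR0GipMap(pSession, &pGip, &HCPhysGip);
    if (!rc)
    {
        /* pGip is the ring-3 mapping address handed back to the client;
         * HCPhysGip is the physical address of the GIP page. */
        dprintf(("GIP mapped at %p, phys %lx\n", pGip, (unsigned long)HCPhysGip));

        /* One unmap call drops both the ring-3 mapping and the GIP reference. */
        rc = SUPR0GipUnmap(pSession);
    }
    return rc;
}
#endif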
2341
2342
2343/**
2344 * Adds a memory object to the session.
2345 *
2346 * @returns 0 on success.
2347 * @returns SUPDRV_ERR_* on failure.
2348 * @param pMem Memory tracking structure containing the
2349 * information to track.
2350 * @param pSession The session.
2351 */
2352static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
2353{
2354 PSUPDRVBUNDLE pBundle;
2355 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2356
2357 /*
2358 * Find free entry and record the allocation.
2359 */
2360 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2361 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2362 {
2363 if (pBundle->cUsed < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]))
2364 {
2365 unsigned i;
2366 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
2367 {
2368#ifdef USE_NEW_OS_INTERFACE
2369 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
2370#else /* !USE_NEW_OS_INTERFACE */
2371 if ( !pBundle->aMem[i].pvR0
2372 && !pBundle->aMem[i].pvR3)
2373#endif /* !USE_NEW_OS_INTERFACE */
2374 {
2375 pBundle->cUsed++;
2376 pBundle->aMem[i] = *pMem;
2377 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2378 return 0;
2379 }
2380 }
2381 AssertFailed(); /* !!this can't be happening!!! */
2382 }
2383 }
2384 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2385
2386 /*
2387 * Need to allocate a new bundle.
2388 * Insert into the last entry in the bundle.
2389 */
2390 pBundle = (PSUPDRVBUNDLE)RTMemAlloc(sizeof(*pBundle));
2391 if (!pBundle)
2392 return SUPDRV_ERR_NO_MEMORY;
2393 memset(pBundle, 0, sizeof(*pBundle));
2394
2395 /* take last entry. */
2396 pBundle->cUsed++;
2397 pBundle->aMem[sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]) - 1] = *pMem;
2398
2399 /* insert into list. */
2400 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2401 pBundle->pNext = pSession->Bundle.pNext;
2402 pSession->Bundle.pNext = pBundle;
2403 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2404
2405 return 0;
2406}
2407
2408
2409/**
2410 * Releases a memory object referenced by pointer and type.
2411 *
2412 * @returns 0 on success.
2413 * @returns SUPDRV_ERR_INVALID_PARAM on failure.
2414 * @param pSession Session data.
2415 * @param pv Pointer to memory. This is matched against both the R0 and R3 addresses.
2416 * @param eType Memory type.
2417 */
2418static int supdrvMemRelease(PSUPDRVSESSION pSession, void *pv, SUPDRVMEMREFTYPE eType)
2419{
2420 PSUPDRVBUNDLE pBundle;
2421 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2422
2423 /*
2424 * Validate input.
2425 */
2426 if (!pSession)
2427 {
2428 dprintf(("pSession must not be NULL!"));
2429 return SUPDRV_ERR_INVALID_PARAM;
2430 }
2431 if (!pv)
2432 {
2433 dprintf(("Illegal address %p\n", pv));
2434 return SUPDRV_ERR_INVALID_PARAM;
2435 }
2436
2437 /*
2438 * Search for the address.
2439 */
2440 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2441 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2442 {
2443 if (pBundle->cUsed > 0)
2444 {
2445 unsigned i;
2446 for (i = 0; i < sizeof(pBundle->aMem) / sizeof(pBundle->aMem[0]); i++)
2447 {
2448#ifdef USE_NEW_OS_INTERFACE
2449 if ( pBundle->aMem[i].eType == eType
2450 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2451 && ( RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pv
2452 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2453 && RTR0MemObjAddress(pBundle->aMem[i].MapObjR3) == pv))
2454 )
2455 {
2456 /* Make a copy of it and release it outside the spinlock. */
2457 SUPDRVMEMREF Mem = pBundle->aMem[i];
2458 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
2459 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
2460 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
2461 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2462
2463 if (Mem.MapObjR3)
2464 {
2465 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
2466 AssertRC(rc); /** @todo figure out how to handle this. */
2467 }
2468 if (Mem.MemObj)
2469 {
2470 int rc = RTR0MemObjFree(Mem.MemObj, false);
2471 AssertRC(rc); /** @todo figure out how to handle this. */
2472 }
2473 return 0;
2474 }
2475#else /* !USE_NEW_OS_INTERFACE */
2476 if ( pBundle->aMem[i].eType == eType
2477 && ( pBundle->aMem[i].pvR0 == pv
2478 || pBundle->aMem[i].pvR3 == pv))
2479 {
2480 /* Make a copy of it and release it outside the spinlock. */
2481 SUPDRVMEMREF Mem = pBundle->aMem[i];
2482 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
2483 pBundle->aMem[i].pvR0 = NULL;
2484 pBundle->aMem[i].pvR3 = NULL;
2485 pBundle->aMem[i].cb = 0;
2486 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2487
2488 /* Type specific free operation. */
2489 switch (Mem.eType)
2490 {
2491 case MEMREF_TYPE_LOCKED:
2492 supdrvOSUnlockMemOne(&Mem);
2493 break;
2494 case MEMREF_TYPE_CONT:
2495 supdrvOSContFreeOne(&Mem);
2496 break;
2497 case MEMREF_TYPE_LOW:
2498 supdrvOSLowFreeOne(&Mem);
2499 break;
2500 case MEMREF_TYPE_MEM:
2501 supdrvOSMemFreeOne(&Mem);
2502 break;
2503 default:
2504 case MEMREF_TYPE_UNUSED:
2505 break;
2506 }
2507 return 0;
2508 }
2509#endif /* !USE_NEW_OS_INTERFACE */
2510 }
2511 }
2512 }
2513 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2514 dprintf(("Failed to find %p!!! (eType=%d)\n", pv, eType));
2515 return SUPDRV_ERR_INVALID_PARAM;
2516}
2517
2518
2519#ifndef VBOX_WITHOUT_IDT_PATCHING
2520/**
2521 * Install IDT for the current CPU.
2522 *
2523 * @returns 0 on success.
2524 * @returns SUPDRV_ERR_NO_MEMORY or SUPDRV_ERR_IDT_FAILED on failure.
2525 * @param pIn Input data.
2526 * @param pOut Output data.
2527 */
2528static int supdrvIOCtl_IdtInstall(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPIDTINSTALL_IN pIn, PSUPIDTINSTALL_OUT pOut)
2529{
2530 PSUPDRVPATCHUSAGE pUsagePre;
2531 PSUPDRVPATCH pPatchPre;
2532 RTIDTR Idtr;
2533 PSUPDRVPATCH pPatch;
2534 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2535 dprintf(("supdrvIOCtl_IdtInstall\n"));
2536
2537 /*
2538     * Preallocate an entry for this CPU because we don't want to do
2539 * that inside the spinlock!
2540 */
2541 pUsagePre = (PSUPDRVPATCHUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2542 if (!pUsagePre)
2543 return SUPDRV_ERR_NO_MEMORY;
2544
2545 /*
2546 * Take the spinlock and see what we need to do.
2547 */
2548 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
2549
2550 /* check if we already got a free patch. */
2551 if (!pDevExt->pIdtPatchesFree)
2552 {
2553 /*
2554 * Allocate a patch - outside the spinlock of course.
2555 */
2556 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
2557
2558 pPatchPre = (PSUPDRVPATCH)RTMemExecAlloc(sizeof(*pPatchPre));
2559 if (!pPatchPre)
2560 return SUPDRV_ERR_NO_MEMORY;
2561
2562 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
2563 }
2564 else
2565 {
2566 pPatchPre = pDevExt->pIdtPatchesFree;
2567 pDevExt->pIdtPatchesFree = pPatchPre->pNext;
2568 }
2569
2570 /* look for matching patch entry */
2571 ASMGetIDTR(&Idtr);
2572 pPatch = pDevExt->pIdtPatches;
2573 while (pPatch && pPatch->pvIdt != (void *)Idtr.pIdt)
2574 pPatch = pPatch->pNext;
2575
2576 if (!pPatch)
2577 {
2578 /*
2579 * Create patch.
2580 */
2581 pPatch = supdrvIdtPatchOne(pDevExt, pPatchPre);
2582 if (pPatch)
2583 pPatchPre = NULL; /* mark as used. */
2584 }
2585 else
2586 {
2587 /*
2588 * Simply increment patch usage.
2589 */
2590 pPatch->cUsage++;
2591 }
2592
2593 if (pPatch)
2594 {
2595 /*
2596         * Increment the session usage record for this patch, adding it if necessary.
2597 */
2598 PSUPDRVPATCHUSAGE pUsage = pSession->pPatchUsage;
2599 while (pUsage && pUsage->pPatch != pPatch)
2600 pUsage = pUsage->pNext;
2601
2602 if (!pUsage)
2603 {
2604 /*
2605 * Add usage record.
2606 */
2607 pUsagePre->cUsage = 1;
2608 pUsagePre->pPatch = pPatch;
2609 pUsagePre->pNext = pSession->pPatchUsage;
2610 pSession->pPatchUsage = pUsagePre;
2611 pUsagePre = NULL; /* mark as used. */
2612 }
2613 else
2614 {
2615 /*
2616 * Increment usage count.
2617 */
2618 pUsage->cUsage++;
2619 }
2620 }
2621
2622    /* free patch - we accumulate them for paranoid safety reasons. */
2623 if (pPatchPre)
2624 {
2625 pPatchPre->pNext = pDevExt->pIdtPatchesFree;
2626 pDevExt->pIdtPatchesFree = pPatchPre;
2627 }
2628
2629 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
2630
2631 /*
2632 * Free unused preallocated buffers.
2633 */
2634 if (pUsagePre)
2635 RTMemFree(pUsagePre);
2636
2637 pOut->u8Idt = pDevExt->u8Idt;
2638
2639 return pPatch ? 0 : SUPDRV_ERR_IDT_FAILED;
2640}
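
/* The function above follows a preallocate-outside-the-lock pattern: anything
 * it might need is allocated before the spinlock is taken, and whatever ends
 * up unused is freed after the lock is dropped. A stripped down sketch of the
 * same pattern, with generic, illustrative names: */
#if 0 /* illustrative sketch, not built */
static int examplePreallocPattern(PSUPDRVDEVEXT pDevExt)
{
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    void *pvPrealloc = RTMemAlloc(64);          /* allocate before taking the lock */
    if (!pvPrealloc)
        return SUPDRV_ERR_NO_MEMORY;

    RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
    /* ... consume pvPrealloc here if it turns out to be needed and set it
     *     to NULL to mark it as used ... */
    RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);

    if (pvPrealloc)                             /* not needed - free outside the lock */
        RTMemFree(pvPrealloc);
    return 0;
}
#endif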
2641
2642
2643/**
2644 * This creates an IDT patch entry.
2645 * If it is the first patch being installed, it will also determine the IDT entry
2646 * to use.
2647 *
2648 * @returns pPatch on success.
2649 * @returns NULL on failure.
2650 * @param pDevExt Pointer to globals.
2651 * @param pPatch Patch entry to use.
2652 * This will be linked into SUPDRVDEVEXT::pIdtPatches on
2653 * successful return.
2654 * @remark Caller must own the SUPDRVDEVEXT::Spinlock!
2655 */
2656static PSUPDRVPATCH supdrvIdtPatchOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch)
2657{
2658 RTIDTR Idtr;
2659 PSUPDRVIDTE paIdt;
2660 dprintf(("supdrvIOCtl_IdtPatchOne: pPatch=%p\n", pPatch));
2661
2662 /*
2663 * Get IDT.
2664 */
2665 ASMGetIDTR(&Idtr);
2666 paIdt = (PSUPDRVIDTE)Idtr.pIdt;
2667 if ((uintptr_t)paIdt < 0x80000000)
2668 {
2669 AssertMsgFailed(("bad paIdt=%p\n", paIdt));
2670 return NULL;
2671 }
2672
2673 if (!pDevExt->u8Idt)
2674 {
2675 /*
2676 * Test out the alternatives.
2677 *
2678 * At the moment we do not support chaining thus we ASSUME that one of
2679 * these 48 entries is unused (which is not a problem on Win32 and
2680 * Linux to my knowledge).
2681 */
2682 /** @todo we MUST change this detection to try grab an entry which is NOT in use. This can be
2683 * combined with gathering info about which guest system call gates we can hook up directly. */
2684 unsigned i;
2685 uint8_t u8Idt = 0;
2686 static uint8_t au8Ints[] =
2687 {
2688#ifdef __WIN32__ /* We don't use 0xef and above because they are system stuff on linux (ef is IPI,
2689                  * local apic timer, or some other frequently firing thing). */
2690 0xef, 0xee, 0xed, 0xec,
2691#endif
2692 0xeb, 0xea, 0xe9, 0xe8,
2693 0xdf, 0xde, 0xdd, 0xdc,
2694 0x7b, 0x7a, 0x79, 0x78,
2695 0xbf, 0xbe, 0xbd, 0xbc,
2696 };
2697#if defined(__AMD64__)
2698 dprintf(("IDT: Idtr=%p:%#x\n", (void *)Idtr.pIdt, (unsigned)Idtr.cbIdt));
2699 for (i = 0; i*16+15 < Idtr.cbIdt; i++)
2700 {
2701 dprintf(("%#x: %04x:%08x%04x%04x P=%d DPL=%d IST=%d Type1=%#x u32Reserved=%#x u5Reserved=%#x\n",
2702 i, paIdt[i].u16SegSel, paIdt[i].u32OffsetTop, paIdt[i].u16OffsetHigh, paIdt[i].u16OffsetLow,
2703 paIdt[i].u1Present, paIdt[i].u2DPL, paIdt[i].u3IST, paIdt[i].u5Type2,
2704 paIdt[i].u32Reserved, paIdt[i].u5Reserved));
2705 }
2706#endif
2707 /* look for entries which are not present or otherwise unused. */
2708 for (i = 0; i < sizeof(au8Ints) / sizeof(au8Ints[0]); i++)
2709 {
2710 u8Idt = au8Ints[i];
2711 if ( u8Idt * sizeof(SUPDRVIDTE) < Idtr.cbIdt
2712 && ( !paIdt[u8Idt].u1Present
2713 || paIdt[u8Idt].u5Type2 == 0))
2714 break;
2715 u8Idt = 0;
2716 }
2717 if (!u8Idt)
2718 {
2719            /* try again, look for a compatible entry. */
2720 for (i = 0; i < sizeof(au8Ints) / sizeof(au8Ints[0]); i++)
2721 {
2722 u8Idt = au8Ints[i];
2723 if ( u8Idt * sizeof(SUPDRVIDTE) < Idtr.cbIdt
2724 && paIdt[u8Idt].u1Present
2725 && paIdt[u8Idt].u5Type2 == SUPDRV_IDTE_TYPE2_INTERRUPT_GATE
2726 && !(paIdt[u8Idt].u16SegSel & 3))
2727 break;
2728 u8Idt = 0;
2729 }
2730 if (!u8Idt)
2731 {
2732                dprintf(("Failed to find an appropriate IDT entry!!\n"));
2733 return NULL;
2734 }
2735 }
2736 pDevExt->u8Idt = u8Idt;
2737 dprintf(("supdrvIOCtl_IdtPatchOne: u8Idt=%x\n", u8Idt));
2738 }
2739
2740 /*
2741 * Prepare the patch
2742 */
2743 memset(pPatch, 0, sizeof(*pPatch));
2744 pPatch->pvIdt = paIdt;
2745 pPatch->cUsage = 1;
2746 pPatch->pIdtEntry = &paIdt[pDevExt->u8Idt];
2747 pPatch->SavedIdt = paIdt[pDevExt->u8Idt];
2748 pPatch->ChangedIdt.u16OffsetLow = (uint32_t)((uintptr_t)&pPatch->auCode[0] & 0xffff);
2749 pPatch->ChangedIdt.u16OffsetHigh = (uint32_t)((uintptr_t)&pPatch->auCode[0] >> 16);
2750#ifdef __AMD64__
2751 pPatch->ChangedIdt.u32OffsetTop = (uint32_t)((uintptr_t)&pPatch->auCode[0] >> 32);
2752#endif
2753 pPatch->ChangedIdt.u16SegSel = ASMGetCS();
2754#ifdef __AMD64__
2755 pPatch->ChangedIdt.u3IST = 0;
2756 pPatch->ChangedIdt.u5Reserved = 0;
2757#else /* x86 */
2758 pPatch->ChangedIdt.u5Reserved = 0;
2759 pPatch->ChangedIdt.u3Type1 = 0;
2760#endif /* x86 */
2761 pPatch->ChangedIdt.u5Type2 = SUPDRV_IDTE_TYPE2_INTERRUPT_GATE;
2762 pPatch->ChangedIdt.u2DPL = 3;
2763 pPatch->ChangedIdt.u1Present = 1;
2764
2765 /*
2766 * Generate the patch code.
2767 */
2768 {
2769#ifdef __AMD64__
2770 union
2771 {
2772 uint8_t *pb;
2773 uint32_t *pu32;
2774 uint64_t *pu64;
2775 } u, uFixJmp, uFixCall, uNotNested;
2776 u.pb = &pPatch->auCode[0];
2777
2778 /* check the cookie */
2779 *u.pb++ = 0x3d; // cmp eax, GLOBALCOOKIE
2780 *u.pu32++ = pDevExt->u32Cookie;
2781
2782 *u.pb++ = 0x74; // jz @VBoxCall
2783 *u.pb++ = 2;
2784
2785 /* jump to forward code. */
2786 *u.pb++ = 0xeb;
2787 uFixJmp = u;
2788 *u.pb++ = 0xfe;
2789
2790 /* Call _VMMR0Entry */ // @VBoxCall:
2791 /* (This pushing of the arguments is NOT necessary, but it may ease debugging.) */
2792# ifdef __WIN64__
2793 *u.pb++ = 0x50; // push rax ; alignment filler.
2794 *u.pb++ = 0x41; // push r8 ; uArg
2795 *u.pb++ = 0x50;
2796 *u.pb++ = 0x52; // push rdx ; uOperation
2797 *u.pb++ = 0x51; // push rcx ; pVM
2798# else
2799 *u.pb++ = 0x51; // push rcx ; alignment filler.
2800 *u.pb++ = 0x52; // push rdx ; uArg
2801 *u.pb++ = 0x56; // push rsi ; uOperation
2802 *u.pb++ = 0x57; // push rdi ; pVM
2803# endif
2804
2805 *u.pb++ = 0xff; // call qword [pfnVMMR0Entry wrt rip]
2806 *u.pb++ = 0x15;
2807 uFixCall = u;
2808 *u.pu32++ = 0;
2809
2810 *u.pb++ = 0x48; // add rsp, 20h ; remove call frame.
2811 *u.pb++ = 0x81;
2812 *u.pb++ = 0xc4;
2813 *u.pu32++ = 0x20;
2814
2815 /* Return to R3. */
2816 uNotNested = u;
2817 *u.pb++ = 0x48; // iretq
2818 *u.pb++ = 0xcf;
2819
2820 while ((uintptr_t)u.pb & 0x7) // align 8
2821 *u.pb++ = 0xcc;
2822
2823 /* Pointer to the VMMR0Entry. */ // pfnVMMR0Entry dq StubVMMR0Entry
2824 *uFixCall.pu32 = (uint32_t)(u.pb - uFixCall.pb - 4); uFixCall.pb = NULL;
2825 pPatch->offVMMR0EntryFixup = (uint16_t)(u.pb - &pPatch->auCode[0]);
2826 *u.pu64++ = pDevExt->pvVMMR0 ? (uint64_t)pDevExt->pfnVMMR0Entry : (uint64_t)u.pb + 8;
2827
2828 /* stub entry. */ // StubVMMR0Entry:
2829 pPatch->offStub = (uint16_t)(u.pb - &pPatch->auCode[0]);
2830 *u.pb++ = 0x33; // xor eax, eax
2831 *u.pb++ = 0xc0;
2832
2833 *u.pb++ = 0x48; // dec rax
2834 *u.pb++ = 0xff;
2835 *u.pb++ = 0xc8;
2836
2837 *u.pb++ = 0xc3; // ret
2838
2839 /* forward to the original handler using a retf. */
2840 *uFixJmp.pb = (uint8_t)(u.pb - uFixJmp.pb - 1); uFixJmp.pb = NULL;
2841
2842 *u.pb++ = 0x68; // push <target cs>
2843 *u.pu32++ = !pPatch->SavedIdt.u5Type2 ? ASMGetCS() : pPatch->SavedIdt.u16SegSel;
2844
2845 *u.pb++ = 0x68; // push <low target rip>
2846 *u.pu32++ = !pPatch->SavedIdt.u5Type2
2847 ? (uint32_t)(uintptr_t)uNotNested.pb
2848 : (uint32_t)pPatch->SavedIdt.u16OffsetLow
2849 | (uint32_t)pPatch->SavedIdt.u16OffsetHigh << 16;
2850
2851 *u.pb++ = 0xc7; // mov dword [rsp + 4], <high target rip>
2852 *u.pb++ = 0x44;
2853 *u.pb++ = 0x24;
2854 *u.pb++ = 0x04;
2855 *u.pu32++ = !pPatch->SavedIdt.u5Type2
2856 ? (uint32_t)((uint64_t)uNotNested.pb >> 32)
2857 : pPatch->SavedIdt.u32OffsetTop;
2858
2859 *u.pb++ = 0x48; // retf ; does this require prefix?
2860 *u.pb++ = 0xcb;
2861
2862#else /* __X86__ */
2863
2864 union
2865 {
2866 uint8_t *pb;
2867 uint16_t *pu16;
2868 uint32_t *pu32;
2869 } u, uFixJmpNotNested, uFixJmp, uFixCall, uNotNested;
2870 u.pb = &pPatch->auCode[0];
2871
2872 /* check the cookie */
2873 *u.pb++ = 0x81; // cmp esi, GLOBALCOOKIE
2874 *u.pb++ = 0xfe;
2875 *u.pu32++ = pDevExt->u32Cookie;
2876
2877 *u.pb++ = 0x74; // jz VBoxCall
2878 uFixJmp = u;
2879 *u.pb++ = 0;
2880
2881 /* jump (far) to the original handler / not-nested-stub. */
2882 *u.pb++ = 0xea; // jmp far NotNested
2883 uFixJmpNotNested = u;
2884 *u.pu32++ = 0;
2885 *u.pu16++ = 0;
2886
2887 /* save selector registers. */ // VBoxCall:
2888 *uFixJmp.pb = (uint8_t)(u.pb - uFixJmp.pb - 1);
2889 *u.pb++ = 0x0f; // push fs
2890 *u.pb++ = 0xa0;
2891
2892 *u.pb++ = 0x1e; // push ds
2893
2894 *u.pb++ = 0x06; // push es
2895
2896 /* call frame */
2897 *u.pb++ = 0x51; // push ecx
2898
2899 *u.pb++ = 0x52; // push edx
2900
2901 *u.pb++ = 0x50; // push eax
2902
2903 /* load ds, es and perhaps fs before call. */
2904 *u.pb++ = 0xb8; // mov eax, KernelDS
2905 *u.pu32++ = ASMGetDS();
2906
2907 *u.pb++ = 0x8e; // mov ds, eax
2908 *u.pb++ = 0xd8;
2909
2910 *u.pb++ = 0x8e; // mov es, eax
2911 *u.pb++ = 0xc0;
2912
2913#ifdef __WIN32__
2914 *u.pb++ = 0xb8; // mov eax, KernelFS
2915 *u.pu32++ = ASMGetFS();
2916
2917 *u.pb++ = 0x8e; // mov fs, eax
2918 *u.pb++ = 0xe0;
2919#endif
2920
2921 /* do the call. */
2922 *u.pb++ = 0xe8; // call _VMMR0Entry / StubVMMR0Entry
2923 uFixCall = u;
2924 pPatch->offVMMR0EntryFixup = (uint16_t)(u.pb - &pPatch->auCode[0]);
2925 *u.pu32++ = 0xfffffffb;
2926
2927 *u.pb++ = 0x83; // add esp, 0ch ; cdecl
2928 *u.pb++ = 0xc4;
2929 *u.pb++ = 0x0c;
2930
2931 /* restore selector registers. */
2932 *u.pb++ = 0x07; // pop es
2933 //
2934 *u.pb++ = 0x1f; // pop ds
2935
2936 *u.pb++ = 0x0f; // pop fs
2937 *u.pb++ = 0xa1;
2938
2939 uNotNested = u; // NotNested:
2940 *u.pb++ = 0xcf; // iretd
2941
2942 /* the stub VMMR0Entry. */ // StubVMMR0Entry:
2943 pPatch->offStub = (uint16_t)(u.pb - &pPatch->auCode[0]);
2944 *u.pb++ = 0x33; // xor eax, eax
2945 *u.pb++ = 0xc0;
2946
2947 *u.pb++ = 0x48; // dec eax
2948
2949 *u.pb++ = 0xc3; // ret
2950
2951 /* Fixup the VMMR0Entry call. */
2952 if (pDevExt->pvVMMR0)
2953 *uFixCall.pu32 = (uint32_t)pDevExt->pfnVMMR0Entry - (uint32_t)(uFixCall.pu32 + 1);
2954 else
2955 *uFixCall.pu32 = (uint32_t)&pPatch->auCode[pPatch->offStub] - (uint32_t)(uFixCall.pu32 + 1);
2956
2957 /* Fixup the forward / nested far jump. */
2958 if (!pPatch->SavedIdt.u5Type2)
2959 {
2960 *uFixJmpNotNested.pu32++ = (uint32_t)uNotNested.pb;
2961 *uFixJmpNotNested.pu16++ = ASMGetCS();
2962 }
2963 else
2964 {
2965 *uFixJmpNotNested.pu32++ = ((uint32_t)pPatch->SavedIdt.u16OffsetHigh << 16) | pPatch->SavedIdt.u16OffsetLow;
2966 *uFixJmpNotNested.pu16++ = pPatch->SavedIdt.u16SegSel;
2967 }
2968#endif /* __X86__ */
2969 Assert(u.pb <= &pPatch->auCode[sizeof(pPatch->auCode)]);
2970#if 0
2971 /* dump the patch code */
2972 dprintf(("patch code: %p\n", &pPatch->auCode[0]));
2973 for (uFixCall.pb = &pPatch->auCode[0]; uFixCall.pb < u.pb; uFixCall.pb++)
2974 dprintf(("0x%02x,\n", *uFixCall.pb));
2975#endif
2976 }
2977
2978 /*
2979 * Install the patch.
2980 */
2981 supdrvIdtWrite(pPatch->pIdtEntry, &pPatch->ChangedIdt);
2982 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)), ("The stupid change code didn't work!!!!!\n"));
2983
2984 /*
2985 * Link in the patch.
2986 */
2987 pPatch->pNext = pDevExt->pIdtPatches;
2988 pDevExt->pIdtPatches = pPatch;
2989
2990 return pPatch;
2991}
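
/* Worked example for the ChangedIdt setup above (the address is invented for
 * illustration): on AMD64 a patch stub at 0xfffff80012345678 is split into
 *      u16OffsetLow  = 0x5678      (bits  0..15)
 *      u16OffsetHigh = 0x1234      (bits 16..31)
 *      u32OffsetTop  = 0xfffff800  (bits 32..63)
 * which is exactly what the shifts used when filling in pPatch->ChangedIdt
 * produce; on x86 only the low and high words are used. */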
2992
2993
2994/**
2995 * Removes the session's IDT references.
2996 * This will uninstall our IDT patch if it is left unreferenced.
2997 *
2998 * @returns 0 indicating success.
2999 * @param pDevExt Device globals.
3000 * @param pSession Session data.
3001 */
3002static int supdrvIOCtl_IdtRemoveAll(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
3003{
3004 PSUPDRVPATCHUSAGE pUsage;
3005 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3006 dprintf(("supdrvIOCtl_IdtRemoveAll: pSession=%p\n", pSession));
3007
3008 /*
3009 * Take the spinlock.
3010 */
3011 RTSpinlockAcquireNoInts(pDevExt->Spinlock, &SpinlockTmp);
3012
3013 /*
3014 * Walk usage list.
3015 */
3016 pUsage = pSession->pPatchUsage;
3017 while (pUsage)
3018 {
3019 if (pUsage->pPatch->cUsage <= pUsage->cUsage)
3020 supdrvIdtRemoveOne(pDevExt, pUsage->pPatch);
3021 else
3022 pUsage->pPatch->cUsage -= pUsage->cUsage;
3023
3024 /* next */
3025 pUsage = pUsage->pNext;
3026 }
3027
3028 /*
3029 * Empty the usage chain and we're done inside the spinlock.
3030 */
3031 pUsage = pSession->pPatchUsage;
3032 pSession->pPatchUsage = NULL;
3033
3034 RTSpinlockReleaseNoInts(pDevExt->Spinlock, &SpinlockTmp);
3035
3036 /*
3037 * Free usage entries.
3038 */
3039 while (pUsage)
3040 {
3041 void *pvToFree = pUsage;
3042 pUsage->cUsage = 0;
3043 pUsage->pPatch = NULL;
3044 pUsage = pUsage->pNext;
3045 RTMemFree(pvToFree);
3046 }
3047
3048 return 0;
3049}
3050
3051
3052/**
3053 * Remove one patch.
3054 *
3055 * @param pDevExt Device globals.
3056 * @param pPatch Patch entry to remove.
3057 * @remark Caller must own SUPDRVDEVEXT::Spinlock!
3058 */
3059static void supdrvIdtRemoveOne(PSUPDRVDEVEXT pDevExt, PSUPDRVPATCH pPatch)
3060{
3061 dprintf(("supdrvIdtRemoveOne: pPatch=%p\n", pPatch));
3062
3063 pPatch->cUsage = 0;
3064
3065 /*
3066     * If the IDT entry was changed it has to stick around forever!
3067     * We will attempt to free it again; perhaps next time we'll succeed. :-)
3068 */
3069 if (memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)))
3070 {
3071 AssertMsgFailed(("The hijacked IDT entry has CHANGED!!!\n"));
3072 return;
3073 }
3074
3075 /*
3076 * Unlink it.
3077 */
3078 if (pDevExt->pIdtPatches != pPatch)
3079 {
3080 PSUPDRVPATCH pPatchPrev = pDevExt->pIdtPatches;
3081 while (pPatchPrev)
3082 {
3083 if (pPatchPrev->pNext == pPatch)
3084 {
3085 pPatchPrev->pNext = pPatch->pNext;
3086 break;
3087 }
3088 pPatchPrev = pPatchPrev->pNext;
3089 }
3090 Assert(!pPatchPrev);
3091 }
3092 else
3093 pDevExt->pIdtPatches = pPatch->pNext;
3094 pPatch->pNext = NULL;
3095
3096
3097 /*
3098 * Verify and restore the IDT.
3099 */
3100 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->ChangedIdt, sizeof(pPatch->ChangedIdt)), ("The hijacked IDT entry has CHANGED!!!\n"));
3101 supdrvIdtWrite(pPatch->pIdtEntry, &pPatch->SavedIdt);
3102 AssertMsg(!memcmp((void *)pPatch->pIdtEntry, &pPatch->SavedIdt, sizeof(pPatch->SavedIdt)), ("The hijacked IDT entry has CHANGED!!!\n"));
3103
3104 /*
3105 * Put it in the free list.
3106 * (This free list stuff is to calm my paranoia.)
3107 */
3108 pPatch->pvIdt = NULL;
3109 pPatch->pIdtEntry = NULL;
3110
3111 pPatch->pNext = pDevExt->pIdtPatchesFree;
3112 pDevExt->pIdtPatchesFree = pPatch;
3113}
3114
3115
3116/**
3117 * Write to an IDT entry.
3118 *
3119 * @param pvIdtEntry Where to write.
3120 * @param pNewIDTEntry What to write.
3121 */
3122static void supdrvIdtWrite(volatile void *pvIdtEntry, const SUPDRVIDTE *pNewIDTEntry)
3123{
3124 RTUINTREG uCR0;
3125 RTUINTREG uFlags;
3126
3127 /*
3128     * On SMP machines (P4 hyperthreading included) we must perform a
3129 * 64-bit locked write when updating the IDT entry.
3130 *
3131 * The F00F bugfix for linux (and probably other OSes) causes
3132     * the IDT to point to a read-only mapping. We get around that
3133     * by temporarily turning off WP. Since we're inside a spinlock at this
3134 * point, interrupts are disabled and there isn't any way the WP bit
3135 * flipping can cause any trouble.
3136 */
3137
3138 /* Save & Clear interrupt flag; Save & clear WP. */
3139 uFlags = ASMGetFlags();
3140 ASMSetFlags(uFlags & ~(RTUINTREG)(1 << 9)); /*X86_EFL_IF*/
3141 Assert(!(ASMGetFlags() & (1 << 9)));
3142 uCR0 = ASMGetCR0();
3143 ASMSetCR0(uCR0 & ~(RTUINTREG)(1 << 16)); /*X86_CR0_WP*/
3144
3145 /* Update IDT Entry */
3146#ifdef __AMD64__
3147 ASMAtomicXchgU128((volatile uint128_t *)pvIdtEntry, *(uint128_t *)(uintptr_t)pNewIDTEntry);
3148#else
3149 ASMAtomicXchgU64((volatile uint64_t *)pvIdtEntry, *(uint64_t *)(uintptr_t)pNewIDTEntry);
3150#endif
3151
3152 /* Restore CR0 & Flags */
3153 ASMSetCR0(uCR0);
3154 ASMSetFlags(uFlags);
3155}
3156#endif /* !VBOX_WITHOUT_IDT_PATCHING */
3157
3158
3159/**
3160 * Opens an image. If it's the first time it's opened, the caller must upload
3161 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
3162 *
3163 * This is the 1st step of the loading.
3164 *
3165 * @returns 0 on success.
3166 * @returns SUPDRV_ERR_* on failure.
3167 * @param pDevExt Device globals.
3168 * @param pSession Session data.
3169 * @param pIn Input.
3170 * @param pOut Output. (May overlap pIn.)
3171 */
3172static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN_IN pIn, PSUPLDROPEN_OUT pOut)
3173{
3174 PSUPDRVLDRIMAGE pImage;
3175 unsigned cb;
3176 void *pv;
3177 dprintf(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pIn->szName, pIn->cbImage));
3178
3179 /*
3180 * Check if we got an instance of the image already.
3181 */
3182 RTSemFastMutexRequest(pDevExt->mtxLdr);
3183 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3184 {
3185 if (!strcmp(pImage->szName, pIn->szName))
3186 {
3187 pImage->cUsage++;
3188 pOut->pvImageBase = pImage->pvImage;
3189 pOut->fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
3190 supdrvLdrAddUsage(pSession, pImage);
3191 RTSemFastMutexRelease(pDevExt->mtxLdr);
3192 return 0;
3193 }
3194 }
3195 /* (not found - add it!) */
3196
3197 /*
3198 * Allocate memory.
3199 */
3200 cb = pIn->cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
3201 pv = RTMemExecAlloc(cb);
3202 if (!pv)
3203 {
3204 RTSemFastMutexRelease(pDevExt->mtxLdr);
3205 return SUPDRV_ERR_NO_MEMORY;
3206 }
3207
3208 /*
3209 * Setup and link in the LDR stuff.
3210 */
3211 pImage = (PSUPDRVLDRIMAGE)pv;
3212 pImage->pvImage = ALIGNP(pImage + 1, 32);
3213 pImage->cbImage = pIn->cbImage;
3214 pImage->pfnModuleInit = NULL;
3215 pImage->pfnModuleTerm = NULL;
3216 pImage->uState = SUP_IOCTL_LDR_OPEN;
3217 pImage->cUsage = 1;
3218 strcpy(pImage->szName, pIn->szName);
3219
3220 pImage->pNext = pDevExt->pLdrImages;
3221 pDevExt->pLdrImages = pImage;
3222
3223 supdrvLdrAddUsage(pSession, pImage);
3224
3225 pOut->pvImageBase = pImage->pvImage;
3226 pOut->fNeedsLoading = 1;
3227 RTSemFastMutexRelease(pDevExt->mtxLdr);
3228 return 0;
3229}
3230
3231
3232/**
3233 * Loads the image bits.
3234 *
3235 * This is the 2nd step of the loading.
3236 *
3237 * @returns 0 on success.
3238 * @returns SUPDRV_ERR_* on failure.
3239 * @param pDevExt Device globals.
3240 * @param pSession Session data.
3241 * @param pIn Input.
3242 */
3243static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD_IN pIn)
3244{
3245 PSUPDRVLDRUSAGE pUsage;
3246 PSUPDRVLDRIMAGE pImage;
3247 int rc;
3248 dprintf(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pIn->pvImageBase, pIn->cbImage));
3249
3250 /*
3251 * Find the ldr image.
3252 */
3253 RTSemFastMutexRequest(pDevExt->mtxLdr);
3254 pUsage = pSession->pLdrUsage;
3255 while (pUsage && pUsage->pImage->pvImage != pIn->pvImageBase)
3256 pUsage = pUsage->pNext;
3257 if (!pUsage)
3258 {
3259 RTSemFastMutexRelease(pDevExt->mtxLdr);
3260 dprintf(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
3261 return SUPDRV_ERR_INVALID_HANDLE;
3262 }
3263 pImage = pUsage->pImage;
3264 if (pImage->cbImage != pIn->cbImage)
3265 {
3266 RTSemFastMutexRelease(pDevExt->mtxLdr);
3267 dprintf(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pIn->cbImage));
3268 return SUPDRV_ERR_INVALID_HANDLE;
3269 }
3270 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
3271 {
3272 unsigned uState = pImage->uState;
3273 RTSemFastMutexRelease(pDevExt->mtxLdr);
3274 if (uState != SUP_IOCTL_LDR_LOAD)
3275 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
3276 return SUPDRV_ERR_ALREADY_LOADED;
3277 }
3278 switch (pIn->eEPType)
3279 {
3280 case EP_NOTHING:
3281 break;
3282 case EP_VMMR0:
3283 if (!pIn->EP.VMMR0.pvVMMR0 || !pIn->EP.VMMR0.pvVMMR0Entry)
3284 {
3285 RTSemFastMutexRelease(pDevExt->mtxLdr);
3286 dprintf(("pvVMMR0=%p or pIn->EP.VMMR0.pvVMMR0Entry=%p is NULL!\n",
3287 pIn->EP.VMMR0.pvVMMR0, pIn->EP.VMMR0.pvVMMR0Entry));
3288 return SUPDRV_ERR_INVALID_PARAM;
3289 }
3290 if ((uintptr_t)pIn->EP.VMMR0.pvVMMR0Entry - (uintptr_t)pImage->pvImage >= pIn->cbImage)
3291 {
3292 RTSemFastMutexRelease(pDevExt->mtxLdr);
3293 dprintf(("SUP_IOCTL_LDR_LOAD: pvVMMR0Entry=%p is outside the image (%p %d bytes)\n",
3294 pIn->EP.VMMR0.pvVMMR0Entry, pImage->pvImage, pIn->cbImage));
3295 return SUPDRV_ERR_INVALID_PARAM;
3296 }
3297 break;
3298 default:
3299 RTSemFastMutexRelease(pDevExt->mtxLdr);
3300 dprintf(("Invalid eEPType=%d\n", pIn->eEPType));
3301 return SUPDRV_ERR_INVALID_PARAM;
3302 }
3303 if ( pIn->pfnModuleInit
3304 && (uintptr_t)pIn->pfnModuleInit - (uintptr_t)pImage->pvImage >= pIn->cbImage)
3305 {
3306 RTSemFastMutexRelease(pDevExt->mtxLdr);
3307 dprintf(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
3308 pIn->pfnModuleInit, pImage->pvImage, pIn->cbImage));
3309 return SUPDRV_ERR_INVALID_PARAM;
3310 }
3311 if ( pIn->pfnModuleTerm
3312 && (uintptr_t)pIn->pfnModuleTerm - (uintptr_t)pImage->pvImage >= pIn->cbImage)
3313 {
3314 RTSemFastMutexRelease(pDevExt->mtxLdr);
3315 dprintf(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
3316 pIn->pfnModuleTerm, pImage->pvImage, pIn->cbImage));
3317 return SUPDRV_ERR_INVALID_PARAM;
3318 }
3319
3320 /*
3321 * Copy the memory.
3322 */
3323 /* no need to do try/except as this is a buffered request. */
3324 memcpy(pImage->pvImage, &pIn->achImage[0], pImage->cbImage);
3325 pImage->uState = SUP_IOCTL_LDR_LOAD;
3326 pImage->pfnModuleInit = pIn->pfnModuleInit;
3327 pImage->pfnModuleTerm = pIn->pfnModuleTerm;
3328 pImage->offSymbols = pIn->offSymbols;
3329 pImage->cSymbols = pIn->cSymbols;
3330 pImage->offStrTab = pIn->offStrTab;
3331 pImage->cbStrTab = pIn->cbStrTab;
3332
3333 /*
3334 * Update any entry points.
3335 */
3336 switch (pIn->eEPType)
3337 {
3338 default:
3339 case EP_NOTHING:
3340 rc = 0;
3341 break;
3342 case EP_VMMR0:
3343 rc = supdrvLdrSetR0EP(pDevExt, pIn->EP.VMMR0.pvVMMR0, pIn->EP.VMMR0.pvVMMR0Entry);
3344 break;
3345 }
3346
3347 /*
3348 * On success call the module initialization.
3349 */
3350 dprintf(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
3351 if (!rc && pImage->pfnModuleInit)
3352 {
3353 dprintf(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
3354 rc = pImage->pfnModuleInit();
3355 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
3356 supdrvLdrUnsetR0EP(pDevExt);
3357 }
3358
3359 if (rc)
3360 pImage->uState = SUP_IOCTL_LDR_OPEN;
3361
3362 RTSemFastMutexRelease(pDevExt->mtxLdr);
3363 return rc;
3364}
3365
3366
3367/**
3368 * Frees a previously loaded (prep'ed) image.
3369 *
3370 * @returns 0 on success.
3371 * @returns SUPDRV_ERR_* on failure.
3372 * @param pDevExt Device globals.
3373 * @param pSession Session data.
3374 * @param pIn Input.
3375 */
3376static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE_IN pIn)
3377{
3378 PSUPDRVLDRUSAGE pUsagePrev;
3379 PSUPDRVLDRUSAGE pUsage;
3380 PSUPDRVLDRIMAGE pImage;
3381 dprintf(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pIn->pvImageBase));
3382
3383 /*
3384 * Find the ldr image.
3385 */
3386 RTSemFastMutexRequest(pDevExt->mtxLdr);
3387 pUsagePrev = NULL;
3388 pUsage = pSession->pLdrUsage;
3389 while (pUsage && pUsage->pImage->pvImage != pIn->pvImageBase)
3390 {
3391 pUsagePrev = pUsage;
3392 pUsage = pUsage->pNext;
3393 }
3394 if (!pUsage)
3395 {
3396 RTSemFastMutexRelease(pDevExt->mtxLdr);
3397 dprintf(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
3398 return SUPDRV_ERR_INVALID_HANDLE;
3399 }
3400
3401 /*
3402 * Check if we can remove anything.
3403 */
3404 pImage = pUsage->pImage;
3405 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
3406 {
3407 /* unlink it */
3408 if (pUsagePrev)
3409 pUsagePrev->pNext = pUsage->pNext;
3410 else
3411 pSession->pLdrUsage = pUsage->pNext;
3412 /* free it */
3413 pUsage->pImage = NULL;
3414 pUsage->pNext = NULL;
3415 RTMemFree(pUsage);
3416
3417 /*
3418         * Dereference the image.
3419 */
3420 if (pImage->cUsage <= 1)
3421 supdrvLdrFree(pDevExt, pImage);
3422 else
3423 pImage->cUsage--;
3424 }
3425 else
3426 {
3427 /*
3428 * Dereference both image and usage.
3429 */
3430 pImage->cUsage--;
3431 pUsage->cUsage--;
3432 }
3433
3434 RTSemFastMutexRelease(pDevExt->mtxLdr);
3435 return 0;
3436}
3437
3438
3439/**
3440 * Gets the address of a symbol in an open image.
3441 *
3442 * @returns 0 on success.
3443 * @returns SUPDRV_ERR_* on failure.
3444 * @param pDevExt Device globals.
3445 * @param pSession Session data.
3446 * @param pIn Input.
3447 * @param pOut Output. (May overlap pIn.)
3448 */
3449static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL_IN pIn, PSUPLDRGETSYMBOL_OUT pOut)
3450{
3451 PSUPDRVLDRIMAGE pImage;
3452 PSUPDRVLDRUSAGE pUsage;
3453 uint32_t i;
3454 PSUPLDRSYM paSyms;
3455 const char *pchStrings;
3456 const size_t cbSymbol = strlen(pIn->szSymbol) + 1;
3457 void *pvSymbol = NULL;
3458 int rc = SUPDRV_ERR_GENERAL_FAILURE; /** @todo better error code. */
3459 dprintf2(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pIn->pvImageBase, pIn->szSymbol));
3460
3461 /*
3462 * Find the ldr image.
3463 */
3464 RTSemFastMutexRequest(pDevExt->mtxLdr);
3465 pUsage = pSession->pLdrUsage;
3466 while (pUsage && pUsage->pImage->pvImage != pIn->pvImageBase)
3467 pUsage = pUsage->pNext;
3468 if (!pUsage)
3469 {
3470 RTSemFastMutexRelease(pDevExt->mtxLdr);
3471 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3472 return SUPDRV_ERR_INVALID_HANDLE;
3473 }
3474 pImage = pUsage->pImage;
3475 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3476 {
3477 unsigned uState = pImage->uState;
3478 RTSemFastMutexRelease(pDevExt->mtxLdr);
3479 dprintf(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3480 return SUPDRV_ERR_ALREADY_LOADED;
3481 }
3482
3483 /*
3484 * Search the symbol string.
3485 */
3486 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3487 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3488 for (i = 0; i < pImage->cSymbols; i++)
3489 {
3490 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3491 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3492 && !memcmp(pchStrings + paSyms[i].offName, pIn->szSymbol, cbSymbol))
3493 {
3494 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3495 rc = 0;
3496 break;
3497 }
3498 }
3499 RTSemFastMutexRelease(pDevExt->mtxLdr);
3500 pOut->pvSymbol = pvSymbol;
3501 return rc;
3502}
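
/* The three loader ioctls above form the load protocol driven from ring-3:
 * LDR_OPEN reserves memory for the image, LDR_LOAD uploads the bits and hooks
 * up the entry points, and LDR_GET_SYMBOL resolves symbols afterwards. A rough
 * sketch of the call order (input packet setup and error reporting omitted;
 * the function and parameter names of the sketch itself are illustrative): */
#if 0 /* illustrative sketch, not built */
static int exampleLoadFlow(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession,
                           PSUPLDROPEN_IN pOpenIn, PSUPLDRLOAD_IN pLoadIn,
                           PSUPLDRGETSYMBOL_IN pSymIn)
{
    SUPLDROPEN_OUT      OpenOut;
    SUPLDRGETSYMBOL_OUT SymOut;
    int rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pOpenIn, &OpenOut);     /* step 1: reserve */
    if (!rc && OpenOut.fNeedsLoading)
    {
        pLoadIn->pvImageBase = OpenOut.pvImageBase;                         /* tie the upload to the reservation */
        rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pLoadIn);               /* step 2: upload the bits */
    }
    if (!rc)
    {
        pSymIn->pvImageBase = OpenOut.pvImageBase;
        rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pSymIn, &SymOut);  /* step 3: resolve a symbol */
    }
    return rc;
}
#endif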
3503
3504
3505/**
3506 * Updates the IDT patches to point to the specified VMM R0 entry
3507 * point (i.e. VMMR0Enter()).
3508 *
3509 * @returns 0 on success.
3510 * @returns SUPDRV_ERR_* on failure.
3511 * @param pDevExt Device globals.
3512 * @param pvVMMR0 VMMR0 image handle.
3513 * @param pvVMMR0Entry VMMR0Entry address.
3515 * @remark Caller must own the loader mutex.
3516 */
3517static int supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry)
3518{
3519 int rc;
3520 dprintf(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0Entry=%p\n", pvVMMR0, pvVMMR0Entry));
3521
3522
3523 /*
3524 * Check if not yet set.
3525 */
3526 rc = 0;
3527 if (!pDevExt->pvVMMR0)
3528 {
3529#ifndef VBOX_WITHOUT_IDT_PATCHING
3530 PSUPDRVPATCH pPatch;
3531#endif
3532
3533 /*
3534 * Set it and update IDT patch code.
3535 */
3536 pDevExt->pvVMMR0 = pvVMMR0;
3537 pDevExt->pfnVMMR0Entry = pvVMMR0Entry;
3538#ifndef VBOX_WITHOUT_IDT_PATCHING
3539 for (pPatch = pDevExt->pIdtPatches; pPatch; pPatch = pPatch->pNext)
3540 {
3541# ifdef __AMD64__
3542 ASMAtomicXchgU64((volatile uint64_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup], (uint64_t)pvVMMR0);
3543# else /* __X86__ */
3544 ASMAtomicXchgU32((volatile uint32_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
3545 (uint32_t)pvVMMR0 - (uint32_t)&pPatch->auCode[pPatch->offVMMR0EntryFixup + 4]);
3546# endif
3547 }
3548#endif /* !VBOX_WITHOUT_IDT_PATCHING */
3549 }
3550 else
3551 {
3552 /*
3553 * Return failure or success depending on whether the
3554 * values match or not.
3555 */
3556 if ( pDevExt->pvVMMR0 != pvVMMR0
3557 || (void *)pDevExt->pfnVMMR0Entry != pvVMMR0Entry)
3558 {
3559 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
3560 rc = SUPDRV_ERR_INVALID_PARAM;
3561 }
3562 }
3563 return rc;
3564}
3565
3566
3567/**
3568 * Unsets the R0 entry point installed by supdrvLdrSetR0EP.
3569 *
3570 * @param pDevExt Device globals.
3571 */
3572static void supdrvLdrUnsetR0EP(PSUPDRVDEVEXT pDevExt)
3573{
3574#ifndef VBOX_WITHOUT_IDT_PATCHING
3575 PSUPDRVPATCH pPatch;
3576#endif
3577
3578 pDevExt->pvVMMR0 = NULL;
3579 pDevExt->pfnVMMR0Entry = NULL;
3580
3581#ifndef VBOX_WITHOUT_IDT_PATCHING
3582 for (pPatch = pDevExt->pIdtPatches; pPatch; pPatch = pPatch->pNext)
3583 {
3584# ifdef __AMD64__
3585 ASMAtomicXchgU64((volatile uint64_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
3586 (uint64_t)&pPatch->auCode[pPatch->offStub]);
3587# else /* __X86__ */
3588 ASMAtomicXchgU32((volatile uint32_t *)&pPatch->auCode[pPatch->offVMMR0EntryFixup],
3589 (uint32_t)&pPatch->auCode[pPatch->offStub] - (uint32_t)&pPatch->auCode[pPatch->offVMMR0EntryFixup + 4]);
3590# endif
3591 }
3592#endif /* !VBOX_WITHOUT_IDT_PATCHING */
3593}
3594
3595
3596/**
3597 * Adds a usage reference in the specified session of an image.
3598 *
3599 * @param pSession Session in question.
3600 * @param pImage Image which the session is using.
3601 */
3602static void supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
3603{
3604 PSUPDRVLDRUSAGE pUsage;
3605 dprintf(("supdrvLdrAddUsage: pImage=%p\n", pImage));
3606
3607 /*
3608 * Referenced it already?
3609 */
3610 pUsage = pSession->pLdrUsage;
3611 while (pUsage)
3612 {
3613 if (pUsage->pImage == pImage)
3614 {
3615 pUsage->cUsage++;
3616 return;
3617 }
3618 pUsage = pUsage->pNext;
3619 }
3620
3621 /*
3622 * Allocate new usage record.
3623 */
3624 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
3625 Assert(pUsage);
3626 if (pUsage)
3627 {
3628 pUsage->cUsage = 1;
3629 pUsage->pImage = pImage;
3630 pUsage->pNext = pSession->pLdrUsage;
3631 pSession->pLdrUsage = pUsage;
3632 }
3633 /* ignore errors... */
3634}
3635
3636
3637/**
3638 * Frees a load image.
3639 *
3640 * @param pDevExt Pointer to device extension.
3641 * @param pImage Pointer to the image we're going to free.
3642 *                      This image must exist!
3643 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
3644 */
3645static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
3646{
3647 PSUPDRVLDRIMAGE pImagePrev;
3648 dprintf(("supdrvLdrFree: pImage=%p\n", pImage));
3649
3650    /* find it - argh, should've used a doubly linked list. */
3651 Assert(pDevExt->pLdrImages);
3652 pImagePrev = NULL;
3653 if (pDevExt->pLdrImages != pImage)
3654 {
3655 pImagePrev = pDevExt->pLdrImages;
3656 while (pImagePrev->pNext != pImage)
3657 pImagePrev = pImagePrev->pNext;
3658 Assert(pImagePrev->pNext == pImage);
3659 }
3660
3661 /* unlink */
3662 if (pImagePrev)
3663 pImagePrev->pNext = pImage->pNext;
3664 else
3665 pDevExt->pLdrImages = pImage->pNext;
3666
3667 /* check if this is VMMR0.r0 and fix the Idt patches if it is. */
3668 if (pDevExt->pvVMMR0 == pImage->pvImage)
3669 supdrvLdrUnsetR0EP(pDevExt);
3670
3671 /* call termination function if fully loaded. */
3672 if ( pImage->pfnModuleTerm
3673 && pImage->uState == SUP_IOCTL_LDR_LOAD)
3674 {
3675        dprintf(("supdrvLdrFree: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
3676 pImage->pfnModuleTerm();
3677 }
3678
3679 /* free the image */
3680 pImage->cUsage = 0;
3681 pImage->pNext = 0;
3682 pImage->uState = SUP_IOCTL_LDR_FREE;
3683 RTMemExecFree(pImage);
3684}
3685
3686
3687/**
3688 * Gets the current paging mode of the CPU and stores it in pOut.
3689 */
3690static int supdrvIOCtl_GetPagingMode(PSUPGETPAGINGMODE_OUT pOut)
3691{
3692 RTUINTREG cr0 = ASMGetCR0();
3693 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
3694 pOut->enmMode = SUPPAGINGMODE_INVALID;
3695 else
3696 {
3697 RTUINTREG cr4 = ASMGetCR4();
3698 uint32_t fNXEPlusLMA = 0;
3699 if (cr4 & X86_CR4_PAE)
3700 {
3701 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
3702 if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
3703 {
3704 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
3705 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
3706 fNXEPlusLMA |= BIT(0);
3707 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
3708 fNXEPlusLMA |= BIT(1);
3709 }
3710 }
3711
3712 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
3713 {
3714 case 0:
3715 pOut->enmMode = SUPPAGINGMODE_32_BIT;
3716 break;
3717
3718 case X86_CR4_PGE:
3719 pOut->enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
3720 break;
3721
3722 case X86_CR4_PAE:
3723 pOut->enmMode = SUPPAGINGMODE_PAE;
3724 break;
3725
3726 case X86_CR4_PAE | BIT(0):
3727 pOut->enmMode = SUPPAGINGMODE_PAE_NX;
3728 break;
3729
3730 case X86_CR4_PAE | X86_CR4_PGE:
3731 pOut->enmMode = SUPPAGINGMODE_PAE_GLOBAL;
3732 break;
3733
3734 case X86_CR4_PAE | X86_CR4_PGE | BIT(0):
3735                pOut->enmMode = SUPPAGINGMODE_PAE_GLOBAL_NX;
3736 break;
3737
3738 case BIT(1) | X86_CR4_PAE:
3739 pOut->enmMode = SUPPAGINGMODE_AMD64;
3740 break;
3741
3742 case BIT(1) | X86_CR4_PAE | BIT(0):
3743 pOut->enmMode = SUPPAGINGMODE_AMD64_NX;
3744 break;
3745
3746 case BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
3747 pOut->enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
3748 break;
3749
3750 case BIT(1) | X86_CR4_PAE | X86_CR4_PGE | BIT(0):
3751 pOut->enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
3752 break;
3753
3754 default:
3755 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
3756 pOut->enmMode = SUPPAGINGMODE_INVALID;
3757 break;
3758 }
3759 }
3760 return 0;
3761}
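
/* Decode examples for the switch above (register values invented for
 * illustration):
 *   - CR4.PAE set, CR4.PGE clear, EFER.NXE set, EFER.LMA clear
 *        -> fNXEPlusLMA = BIT(0)            -> SUPPAGINGMODE_PAE_NX
 *   - CR4.PAE and CR4.PGE set, EFER.NXE and EFER.LMA set
 *        -> fNXEPlusLMA = BIT(0) | BIT(1)   -> SUPPAGINGMODE_AMD64_GLOBAL_NX
 *   - CR4.PAE clear, CR4.PGE set (EFER is not consulted without PAE)
 *                                           -> SUPPAGINGMODE_32_BIT_GLOBAL */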
3762
3763
3764#if !defined(SUPDRV_OS_HAVE_LOW) && !defined(USE_NEW_OS_INTERFACE) /* Use same backend as the contiguous stuff */
3765/**
3766 * OS Specific code for allocating page aligned memory with fixed
3767 * physical backing below 4GB.
3768 *
3769 * @returns 0 on success.
3770 * @returns SUPDRV_ERR_* on failure.
3771 * @param pMem Memory reference record of the memory to be allocated.
3772 * (This is not linked in anywhere.)
3773 * @param ppvR3 Where to store the Ring-3 mapping of the allocated memory.
3774 * @param paPagesOut Where to store the physical addresses.
3775 */
3776int VBOXCALL supdrvOSLowAllocOne(PSUPDRVMEMREF pMem, void **ppvR3, PSUPPAGE paPagesOut)
3777{
3778 RTHCPHYS HCPhys;
3779 int rc = supdrvOSContAllocOne(pMem, NULL, ppvR3, &HCPhys);
3780 if (!rc)
3781 {
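        /* The contiguous backend hands back one physically contiguous block, so each
           page's physical address is the base address plus its offset into the block. */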
3782 unsigned iPage = pMem->cb >> PAGE_SHIFT;
3783 while (iPage-- > 0)
3784 {
3785 paPagesOut[iPage].Phys = HCPhys + (iPage << PAGE_SHIFT);
3786 paPagesOut[iPage].uReserved = 0;
3787 }
3788 }
3789 return rc;
3790}
3791
3792
3793/**
3794 * Frees low memory.
3795 *
3796 * @param pMem Memory reference record of the memory to be freed.
3797 */
3798void VBOXCALL supdrvOSLowFreeOne(PSUPDRVMEMREF pMem)
3799{
3800 supdrvOSContFreeOne(pMem);
3801}
3802#endif /* !SUPDRV_OS_HAVE_LOW && !USE_NEW_OS_INTERFACE */
3803
3804
3805#ifdef USE_NEW_OS_INTERFACE
3806/**
3807 * Creates the GIP.
3808 *
3809 * @returns VBox status code.
3810 * @param pDevExt Instance data. GIP stuff may be updated.
3811 */
3812static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
3813{
3814 PSUPGLOBALINFOPAGE pGip;
3815 RTHCPHYS HCPhysGip;
3816 uint32_t u32SystemResolution;
3817 uint32_t u32Interval;
3818 int rc;
3819
3820 dprintf(("supdrvGipCreate:\n"));
3821
3822 /* assert order */
3823 Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
3824 Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
3825 Assert(!pDevExt->pGipTimer);
3826
3827 /*
3828 * Allocate a suitable page with a default kernel mapping.
3829 */
3830 rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
3831 if (RT_FAILURE(rc))
3832 {
3833 OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
3834 return rc;
3835 }
3836 pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
3837 HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);
3838
3839 /*
3840      * Try to bump up the system timer resolution.
3841 * The more interrupts the better...
3842 */
3843 if ( RT_SUCCESS(RTTimerRequestSystemGranularity( 976563 /* 1024 HZ */, &u32SystemResolution))
3844 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
3845 || RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /* 256 HZ */, &u32SystemResolution))
3846 || RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /* 250 HZ */, &u32SystemResolution))
3847 || RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /* 128 HZ */, &u32SystemResolution))
3848 || RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /* 100 HZ */, &u32SystemResolution))
3849 || RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /* 64 HZ */, &u32SystemResolution))
3850 || RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /* 32 HZ */, &u32SystemResolution))
3851 )
3852 {
3853 Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
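        /* Remember the granted resolution so supdrvGipDestroy() can release it again. */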
3854 pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
3855 }
3856
3857 /*
3858 * Find a reasonable update interval, something close to 10ms would be nice,
3859 * and create a recurring timer.
3860 */
3861 u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
3862 while (u32Interval < 10000000 /* 10 ms */)
3863 u32Interval += u32SystemResolution;
3864
3865 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipTimer, pDevExt);
3866 if (RT_FAILURE(rc))
3867 {
3868         OSDBGPRINT(("supdrvGipCreate: failed to create the GIP timer at %RU32 ns interval. rc=%d\n", u32Interval, rc));
3869 Assert(!pDevExt->pGipTimer);
3870 supdrvGipDestroy(pDevExt);
3871 return rc;
3872 }
3873
3874 /*
3875 * We're good.
3876 */
3877 supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);
3878 return 0;
3879}
3880
3881
3882/**
3883 * Terminates the GIP.
3884 *
3885 * @returns VBox status code.
3886 * @param pDevExt Instance data. GIP stuff may be updated.
3887 */
3888static int supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
3889{
3890 int rc;
3891
3892 /*
3893      * Invalidate the GIP data.
3894 */
3895 if (pDevExt->pGip)
3896 {
3897 supdrvGipTerm(pDevExt->pGip);
3898 pDevExt->pGip = 0;
3899 }
3900
3901 /*
3902 * Destroy the timer and free the GIP memory object.
3903 */
3904 if (pDevExt->pGipTimer)
3905 {
3906 rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
3907 pDevExt->pGipTimer = NULL;
3908 }
3909
3910 if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
3911 {
3912 rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
3913 pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
3914 }
3915
3916 /*
3917 * Finally, release the system timer resolution request if one succeeded.
3918 */
3919 if (pDevExt->u32SystemTimerGranularityGrant)
3920 {
3921 rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
3922 pDevExt->u32SystemTimerGranularityGrant = 0;
3923 }
3924
3925 return 0;
3926}
3927
3928
3929/**
3930 * Timer callback function.
3931 * @param pTimer The timer.
3932 * @param pvUser The device extension.
3933 */
3934static DECLCALLBACK(void) supdrvGipTimer(PRTTIMER pTimer, void *pvUser)
3935{
3936 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
3937 supdrvGipUpdate(pDevExt->pGip, RTTimeSystemNanoTS());
3938}
3939#endif /* USE_NEW_OS_INTERFACE */
3940
3941
3942/**
3943 * Initializes the GIP data.
3944 *
3945 * @returns VBox status code.
3946 * @param pDevExt Pointer to the device instance data.
3947 * @param pGip Pointer to the read-write kernel mapping of the GIP.
3948 * @param HCPhys The physical address of the GIP.
3949 * @param u64NanoTS The current nanosecond timestamp.
3950 * @param uUpdateHz The update frequency.
3951 */
3952int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
3953{
3954 dprintf(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
3955
3956 memset(pGip, 0, PAGE_SIZE);
3957 pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
3958 pGip->u32UpdateHz = uUpdateHz;
3959 pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
3960 pGip->u32TransactionId = 2;
3961 pGip->u64NanoTS = u64NanoTS;
3962 pGip->u64NanoTSLastUpdateHz = u64NanoTS;
3963 pGip->u64TSC = ASMReadTSC();
3964
3965 /*
3966 * We don't know the following values until we've executed updates.
3967 * So, we'll just insert very high values.
3968 */
3969 pGip->u64CpuHz = _4G + 1;
3970 pGip->u32UpdateIntervalTSC = _2G / 4;
3971 pGip->au32TSCHistory[0] = _2G / 4;
3972 pGip->au32TSCHistory[1] = _2G / 4;
3973 pGip->au32TSCHistory[2] = _2G / 4;
3974 pGip->au32TSCHistory[3] = _2G / 4;
3975 pGip->au32TSCHistory[4] = _2G / 4;
3976 pGip->au32TSCHistory[5] = _2G / 4;
3977 pGip->au32TSCHistory[6] = _2G / 4;
3978 pGip->au32TSCHistory[7] = _2G / 4;
3979
3980 /*
3981 * Link it to the device extension.
3982 */
3983 pDevExt->pGip = pGip;
3984 pDevExt->HCPhysGip = HCPhys;
3985 pDevExt->cGipUsers = 0;
3986
3987 return 0;
3988}
3989
3990
3991/**
3992 * Invalidates the GIP data upon termination.
3993 *
3994 * @param pGip Pointer to the read-write kernel mapping of the GIP.
3995 */
3996void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
3997{
3998 pGip->iTSCHistoryHead = ~0;
3999 pGip->u64NanoTS = 0;
4000 pGip->u64TSC = 0;
4001 pGip->u32Magic = 0;
4002 pGip->iTSCHistoryHead = 0;
4003}
4004
4005
4006/**
4007 * Updates the GIP.
4008 *
4009 * @param pGip Pointer to the GIP.
4010 * @param u64NanoTS The current nanosecond timestamp.
4011 */
4012void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS)
4013{
4014 uint64_t u64TSC;
4015 uint64_t u64TSCDelta;
4016 uint32_t u32UpdateIntervalTSC;
4017 uint32_t u32UpdateIntervalTSCSlack;
4018 unsigned iTSCHistoryHead;
4019 uint64_t u64CpuHz;
4020
4021 /*
4022 * Start update transaction.
4023 */
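    /* The transaction id doubles as a sequence counter: it's incremented once here
       and once when the update completes, so an odd value tells readers that an
       update is in progress and the GIP data may be inconsistent. */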
4024 if (!(ASMAtomicIncU32(&pGip->u32TransactionId) & 1))
4025 {
4026         /* This can happen on win32 if we're taking too long and there are more CPUs around. It shouldn't happen, though. */
4027 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGip->u32TransactionId));
4028 ASMAtomicIncU32(&pGip->u32TransactionId);
4029 pGip->cErrors++;
4030 return;
4031 }
4032
4033 ASMAtomicXchgU64(&pGip->u64NanoTS, u64NanoTS);
4034
4035 /*
4036 * Recalc the update frequency every 0x800th time.
4037 */
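    /* Each update bumps the transaction id by two (start + complete), so masking with
       GIP_UPDATEHZ_RECALC_FREQ * 2 - 2 triggers on every GIP_UPDATEHZ_RECALC_FREQ'th update. */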
4038 if (!(pGip->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4039 {
4040 if (pGip->u64NanoTSLastUpdateHz)
4041 {
4042#ifdef __AMD64__ /** @todo fix 64-bit div here to work on x86 linux. */
4043 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4044 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
4045 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4046 {
4047 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4048 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4049 }
4050#endif
4051 }
4052 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4053 }
4054
4055 /*
4056 * Calc TSC delta.
4057 */
4058 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
4059 u64TSC = ASMReadTSC();
4060 u64TSCDelta = u64TSC - pGip->u64TSC;
4061 ASMAtomicXchgU64(&pGip->u64TSC, u64TSC);
4062
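    /* A delta that doesn't fit in 32 bits is treated as bogus (the timer was delayed
       or the TSC jumped); fall back to the previous interval and count it as an error. */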
4063 if (u64TSCDelta >> 32)
4064 {
4065 u64TSCDelta = pGip->u32UpdateIntervalTSC;
4066 pGip->cErrors++;
4067 }
4068
4069 /*
4070 * TSC History.
4071 */
4072 Assert(ELEMENTS(pGip->au32TSCHistory) == 8);
4073
4074 iTSCHistoryHead = (pGip->iTSCHistoryHead + 1) & 7;
4075 ASMAtomicXchgU32(&pGip->iTSCHistoryHead, iTSCHistoryHead);
4076 ASMAtomicXchgU32(&pGip->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
4077
4078 /*
4079 * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
4080 */
4081 if (pGip->u32UpdateHz >= 1000)
4082 {
4083 uint32_t u32;
4084 u32 = pGip->au32TSCHistory[0];
4085 u32 += pGip->au32TSCHistory[1];
4086 u32 += pGip->au32TSCHistory[2];
4087 u32 += pGip->au32TSCHistory[3];
4088 u32 >>= 2;
4089 u32UpdateIntervalTSC = pGip->au32TSCHistory[4];
4090 u32UpdateIntervalTSC += pGip->au32TSCHistory[5];
4091 u32UpdateIntervalTSC += pGip->au32TSCHistory[6];
4092 u32UpdateIntervalTSC += pGip->au32TSCHistory[7];
4093 u32UpdateIntervalTSC >>= 2;
4094 u32UpdateIntervalTSC += u32;
4095 u32UpdateIntervalTSC >>= 1;
4096
4097             /* Value chosen for a 2GHz Athlon64 running Linux 2.6.10/11. */
4098 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
4099 }
4100 else if (pGip->u32UpdateHz >= 90)
4101 {
4102 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4103 u32UpdateIntervalTSC += pGip->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
4104 u32UpdateIntervalTSC >>= 1;
4105
4106         /* Value chosen on a 2GHz ThinkPad running Windows. */
4107 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
4108 }
4109 else
4110 {
4111 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4112
4113         /* This value hasn't been checked yet; waiting for OS/2 and 33Hz timers. :-) */
4114 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
4115 }
4116 ASMAtomicXchgU32(&pGip->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
4117
4118 /*
4119 * CpuHz.
4120 */
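    /* Estimate the CPU frequency as TSC ticks per update interval times the update rate. */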
4121 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
4122 ASMAtomicXchgU64(&pGip->u64CpuHz, u64CpuHz);
4123
4124 /*
4125 * Complete transaction.
4126 */
4127 ASMAtomicIncU32(&pGip->u32TransactionId);
4128}
4129
4130
4131#ifndef DEBUG /** @todo change #ifndef DEBUG -> #ifdef LOG_ENABLED */
4132/**
4133 * Stub function for non-debug builds.
4134 */
4135RTDECL(PRTLOGGER) RTLogDefaultInstance(void)
4136{
4137 return NULL;
4138}
4139
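/**
 * Stub function for non-debug builds.
 */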
4140RTDECL(PRTLOGGER) RTLogRelDefaultInstance(void)
4141{
4142 return NULL;
4143}
4144
4145/**
4146 * Stub function for non-debug builds.
4147 */
4148RTDECL(int) RTLogSetDefaultInstanceThread(PRTLOGGER pLogger, uintptr_t uKey)
4149{
4150 return 0;
4151}
4152
4153/**
4154 * Stub function for non-debug builds.
4155 */
4156RTDECL(void) RTLogLogger(PRTLOGGER pLogger, void *pvCallerRet, const char *pszFormat, ...)
4157{
4158}
4159
4160/**
4161 * Stub function for non-debug builds.
4162 */
4163RTDECL(void) RTLogLoggerEx(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, ...)
4164{
4165}
4166
4167/**
4168 * Stub function for non-debug builds.
4169 */
4170RTDECL(void) RTLogLoggerExV(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args)
4171{
4172}
4173#endif /* !DEBUG */
4174