VirtualBox

Changeset 4811 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Sep 14, 2007 5:53:56 PM (18 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
24514
Message:

Split VMMR0Entry into VMMR0EntryInt, VMMR0EntryFast and VMMR0EntryEx. This will prevent the SUPCallVMMR0Ex path from causing harm and messing up the paths that have to be optimized.

Location:
trunk/src/VBox
Files:
20 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/HostDrivers/Support/SUPDRV.h

    r4800 r4811  
    565565    /** Patch usage records. (protected by SUPDRVDEVEXT::SpinLock) */
    566566    PSUPDRVPATCHUSAGE volatile  pPatchUsage;
    567 #else
     567#endif
    568568    /** The VM associated with the session. */
    569569    PVM                         pVM;
    570 #endif
    571570    /** List of generic usage records. (protected by SUPDRVDEVEXT::SpinLock) */
    572571    PSUPDRVUSAGE volatile       pUsage;
     
    642641     * 0 if the code VMM isn't loaded and Idt are nops. */
    643642    void * volatile         pvVMMR0;
    644     /** VMMR0Entry() pointer. */
    645     DECLR0CALLBACKMEMBER(int, pfnVMMR0Entry, (PVM pVM, unsigned uOperation, void *pvArg));
     643    /** VMMR0EntryInt() pointer. */
     644    DECLR0CALLBACKMEMBER(int, pfnVMMR0EntryInt, (PVM pVM, unsigned uOperation, void *pvArg));
     645    /** VMMR0EntryFast() pointer. */
     646    DECLR0CALLBACKMEMBER(int, pfnVMMR0EntryFast, (PVM pVM, unsigned uOperation));
     647    /** VMMR0EntryEx() pointer. */
     648    DECLR0CALLBACKMEMBER(int, pfnVMMR0EntryEx, (PVM pVM, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg));
    646649
    647650    /** Linked list of loaded code. */
     
    736739*******************************************************************************/
    737740int  VBOXCALL   supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr);
    738 #ifdef VBOX_WITHOUT_IDT_PATCHING
    739741int  VBOXCALL   supdrvIOCtlFast(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession);
    740 #endif
    741742int  VBOXCALL   supdrvInitDevExt(PSUPDRVDEVEXT pDevExt);
    742743void VBOXCALL   supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt);
  • trunk/src/VBox/HostDrivers/Support/SUPDRVIOC.h

    r4806 r4811  
    388388                    /** The module handle (i.e. address). */
    389389                    RTR0PTR         pvVMMR0;
    390                     /** Address of VMMR0Entry function. */
    391                     RTR0PTR         pvVMMR0Entry;
     390                    /** Address of VMMR0EntryInt function. */
     391                    RTR0PTR         pvVMMR0EntryInt;
     392                    /** Address of VMMR0EntryFast function. */
     393                    RTR0PTR         pvVMMR0EntryFast;
     394                    /** Address of VMMR0EntryEx function. */
     395                    RTR0PTR         pvVMMR0EntryEx;
    392396                } VMMR0;
    393397            }               EP;
     
    496500#endif
    497501            /** Argument to use when no request packet is supplied. */
    498             RTR0UINTPTR     uArg;
     502            uint64_t        u64Arg;
    499503        } In;
    500504    } u;
  • trunk/src/VBox/HostDrivers/Support/SUPDRVShared.c

    r4806 r4811  
    3030#include <iprt/process.h>
    3131#include <iprt/log.h>
    32 #ifdef VBOX_WITHOUT_IDT_PATCHING
    33 # include <VBox/vmm.h>
    34 # include <VBox/err.h>
    35 #endif
    3632
    3733
     
    169165static int      supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
    170166static int      supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
    171 static int      supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry);
     167static int      supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
    172168static void     supdrvLdrUnsetR0EP(PSUPDRVDEVEXT pDevExt);
    173169static void     supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
     
    572568
    573569
    574 #ifdef VBOX_WITHOUT_IDT_PATCHING
    575570/**
    576571 * Fast path I/O Control worker.
     
    592587    ASMIntDisable();
    593588
    594     if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0Entry))
     589    if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
    595590    {
    596591        switch (uIOCtl)
    597592        {
    598593            case SUP_IOCTL_FAST_DO_RAW_RUN:
    599                 rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_RAW_RUN, NULL);
     594                rc = pDevExt->pfnVMMR0EntryFast(pSession->pVM, SUP_VMMR0_DO_RAW_RUN);
    600595                break;
    601596            case SUP_IOCTL_FAST_DO_HWACC_RUN:
    602                 rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_HWACC_RUN, NULL);
     597                rc = pDevExt->pfnVMMR0EntryFast(pSession->pVM, SUP_VMMR0_DO_HWACC_RUN);
    603598                break;
    604599            case SUP_IOCTL_FAST_DO_NOP:
    605                 rc = pDevExt->pfnVMMR0Entry(pSession->pVM, VMMR0_DO_NOP, NULL);
     600                rc = pDevExt->pfnVMMR0EntryFast(pSession->pVM, SUP_VMMR0_DO_NOP);
    606601                break;
    607602            default:
     
    616611    return rc;
    617612}
    618 #endif /* VBOX_WITHOUT_IDT_PATCHING */
    619613
    620614
     
    964958
    965959                /* execute */
    966                 if (RT_LIKELY(pDevExt->pfnVMMR0Entry))
    967                     pReq->Hdr.rc = pDevExt->pfnVMMR0Entry(pReq->u.In.pVMR0, pReq->u.In.uOperation, (void *)pReq->u.In.uArg);
     960                if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
     961                    pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg);
    968962                else
    969963                    pReq->Hdr.rc = VERR_WRONG_ORDER;
     
    977971
    978972                /* execute */
    979                 if (RT_LIKELY(pDevExt->pfnVMMR0Entry))
    980                     pReq->Hdr.rc = pDevExt->pfnVMMR0Entry(pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq);
     973                if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
     974                    pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg);
    981975                else
    982976                    pReq->Hdr.rc = VERR_WRONG_ORDER;
     
    10561050                               ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
    10571051            /* execute */
    1058 #ifndef VBOX_WITHOUT_IDT_PATCHING
    1059             OSDBGPRINT(("SUP_IOCTL_SET_VM_FOR_FAST: !VBOX_WITHOUT_IDT_PATCHING\n"));
    1060             pReq->Hdr.rc = VERR_NOT_SUPPORTED;
    1061 #else
    10621052            pSession->pVM = pReq->u.In.pVMR0;
    10631053            pReq->Hdr.rc = VINF_SUCCESS;
    1064 #endif
    10651054            return 0;
    10661055        }
     
    27372726# endif
    27382727
    2739     *u.pb++ = 0xff;                     //  call    qword [pfnVMMR0Entry wrt rip]
     2728    *u.pb++ = 0xff;                     //  call    qword [pfnVMMR0EntryInt wrt rip]
    27402729    *u.pb++ = 0x15;
    27412730    uFixCall = u;
     
    27592748        *u.pb++ = 0xcc;
    27602749
    2761     /* Pointer to the VMMR0Entry. */    //  pfnVMMR0Entry dq StubVMMR0Entry
     2750    /* Pointer to the VMMR0Entry. */    //  pfnVMMR0EntryInt dq StubVMMR0Entry
    27622751    *uFixCall.pu32 = (uint32_t)(u.pb - uFixCall.pb - 4);                uFixCall.pb = NULL;
    27632752    pPatch->offVMMR0EntryFixup = (uint16_t)(u.pb - &pPatch->auCode[0]);
    2764     *u.pu64++ = pDevExt->pvVMMR0 ? (uint64_t)pDevExt->pfnVMMR0Entry : (uint64_t)u.pb + 8;
     2753    *u.pu64++ = pDevExt->pvVMMR0 ? (uint64_t)pDevExt->pfnVMMR0EntryInt : (uint64_t)u.pb + 8;
    27652754
    27662755    /* stub entry. */                   //  StubVMMR0Entry:
     
    28892878    /* Fixup the VMMR0Entry call. */
    28902879    if (pDevExt->pvVMMR0)
    2891         *uFixCall.pu32 = (uint32_t)pDevExt->pfnVMMR0Entry - (uint32_t)(uFixCall.pu32 + 1);
     2880        *uFixCall.pu32 = (uint32_t)pDevExt->pfnVMMR0EntryInt - (uint32_t)(uFixCall.pu32 + 1);
    28922881    else
    28932882        *uFixCall.pu32 = (uint32_t)&pPatch->auCode[pPatch->offStub] - (uint32_t)(uFixCall.pu32 + 1);
     
    32183207            break;
    32193208        case SUPLDRLOADEP_VMMR0:
    3220             if (!pReq->u.In.EP.VMMR0.pvVMMR0 || !pReq->u.In.EP.VMMR0.pvVMMR0Entry)
     3209            if (    !pReq->u.In.EP.VMMR0.pvVMMR0
     3210                ||  !pReq->u.In.EP.VMMR0.pvVMMR0EntryInt
     3211                ||  !pReq->u.In.EP.VMMR0.pvVMMR0EntryFast
     3212                ||  !pReq->u.In.EP.VMMR0.pvVMMR0EntryEx)
    32213213            {
    32223214                RTSemFastMutexRelease(pDevExt->mtxLdr);
    3223                 dprintf(("pvVMMR0=%p or pReq->u.In.EP.VMMR0.pvVMMR0Entry=%p is NULL!\n",
    3224                          pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0Entry));
     3215                dprintf(("NULL pointer: pvVMMR0=%p pvVMMR0EntryInt=%p pvVMMR0EntryFast=%p pvVMMR0EntryEx=%p!\n",
     3216                         pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
     3217                         pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
    32253218                return VERR_INVALID_PARAMETER;
    32263219            }
    3227             if ((uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0Entry - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
     3220            if (    (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryInt  - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
     3221                ||  (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryFast - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
     3222                ||  (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryEx   - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
    32283223            {
    32293224                RTSemFastMutexRelease(pDevExt->mtxLdr);
    3230                 dprintf(("SUP_IOCTL_LDR_LOAD: pvVMMR0Entry=%p is outside the image (%p %d bytes)\n",
    3231                          pReq->u.In.EP.VMMR0.pvVMMR0Entry, pImage->pvImage, pReq->u.In.cbImage));
     3225                dprintf(("Out of range (%p LB %#x): pvVMMR0EntryInt=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p is NULL!\n",
     3226                         pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
     3227                         pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
    32323228                return VERR_INVALID_PARAMETER;
    32333229            }
     
    32783274            break;
    32793275        case SUPLDRLOADEP_VMMR0:
    3280             rc = supdrvLdrSetR0EP(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0Entry);
     3276            rc = supdrvLdrSetR0EP(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
     3277                                  pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
    32813278            break;
    32823279    }
     
    32863283     */
    32873284    dprintf(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
    3288     if (!rc && pImage->pfnModuleInit)
     3285    if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
    32893286    {
    32903287        dprintf(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
     
    34433440 *
    34443441 * @returns IPRT status code.
    3445  * @param   pDevExt     Device globals.
    3446  * @param   pSession    Session data.
    3447  * @param   pVMMR0      VMMR0 image handle.
    3448  * @param   pVMMR0Entry VMMR0Entry address.
     3442 * @param   pDevExt             Device globals.
     3443 * @param   pSession            Session data.
     3444 * @param   pVMMR0              VMMR0 image handle.
     3445 * @param   pvVMMR0EntryInt     VMMR0EntryInt address.
     3446 * @param   pvVMMR0EntryFast    VMMR0EntryFast address.
     3447 * @param   pvVMMR0EntryEx      VMMR0EntryEx address.
    34493448 * @remark  Caller must own the loader mutex.
    34503449 */
    3451 static int supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0Entry)
     3450static int supdrvLdrSetR0EP(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
    34523451{
    34533452    int rc = VINF_SUCCESS;
    3454     dprintf(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0Entry=%p\n", pvVMMR0, pvVMMR0Entry));
     3453    dprintf(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryInt=%p\n", pvVMMR0, pvVMMR0EntryInt));
    34553454
    34563455
     
    34673466         * Set it and update IDT patch code.
    34683467         */
    3469         pDevExt->pvVMMR0        = pvVMMR0;
    3470         pDevExt->pfnVMMR0Entry  = pvVMMR0Entry;
     3468        pDevExt->pvVMMR0            = pvVMMR0;
     3469        pDevExt->pfnVMMR0EntryInt   = pvVMMR0EntryInt;
     3470        pDevExt->pfnVMMR0EntryFast  = pvVMMR0EntryFast;
     3471        pDevExt->pfnVMMR0EntryEx    = pvVMMR0EntryEx;
    34713472#ifndef VBOX_WITHOUT_IDT_PATCHING
    34723473        for (pPatch = pDevExt->pIdtPatches; pPatch; pPatch = pPatch->pNext)
     
    34843485    {
    34853486        /*
    3486          * Return failure or success depending on whether the
    3487          * values match or not.
     3487         * Return failure or success depending on whether the values match or not.
    34883488         */
    34893489        if (    pDevExt->pvVMMR0 != pvVMMR0
    3490             ||  (void *)pDevExt->pfnVMMR0Entry != pvVMMR0Entry)
     3490            ||  (void *)pDevExt->pfnVMMR0EntryInt   != pvVMMR0EntryInt
     3491            ||  (void *)pDevExt->pfnVMMR0EntryFast  != pvVMMR0EntryFast
     3492            ||  (void *)pDevExt->pfnVMMR0EntryEx    != pvVMMR0EntryEx)
    34913493        {
    34923494            AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
     
    35093511#endif
    35103512
    3511     pDevExt->pvVMMR0        = NULL;
    3512     pDevExt->pfnVMMR0Entry  = NULL;
     3513    pDevExt->pvVMMR0            = NULL;
     3514    pDevExt->pfnVMMR0EntryInt   = NULL;
     3515    pDevExt->pfnVMMR0EntryFast  = NULL;
     3516    pDevExt->pfnVMMR0EntryEx    = NULL;
    35133517
    35143518#ifndef VBOX_WITHOUT_IDT_PATCHING
  • trunk/src/VBox/HostDrivers/Support/SUPLib.cpp

    r4806 r4811  
    459459 * For later.
    460460 */
    461 static int supCallVMMR0ExFake(PVMR0 pVMR0, unsigned uOperation, void *pvVMMReq, size_t cbVMMReq)
     461static int supCallVMMR0ExFake(PVMR0 pVMR0, unsigned uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
    462462{
    463463    AssertMsgFailed(("%d\n", uOperation));
     
    466466
    467467
    468 SUPR3DECL(int) SUPCallVMMR0Ex(PVMR0 pVMR0, unsigned uOperation, void *pvVMMReq, size_t cbVMMReq)
    469 {
     468SUPR3DECL(int) SUPCallVMMR0Fast(PVMR0 pVMR0, unsigned uOperation)
     469{
     470    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_RAW_RUN))
     471        return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_RAW_RUN);
     472    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_HWACC_RUN))
     473        return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_HWACC_RUN);
     474    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_NOP))
     475        return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_NOP);
     476
     477    AssertMsgFailed(("%#x\n", uOperation));
     478    return VERR_INTERNAL_ERROR;
     479}
     480
     481
     482SUPR3DECL(int) SUPCallVMMR0Ex(PVMR0 pVMR0, unsigned uOperation, uint64_t u64Arg, PSUPVMMR0REQHDR pReqHdr)
     483{
     484#if 0 /* temp hack. */
    470485    /*
    471486     * The following operations don't belong here.
    472487     */
    473     AssertMsgReturn(    uOperation != VMMR0_DO_RAW_RUN
    474                     &&  uOperation != VMMR0_DO_HWACC_RUN
    475                     &&  uOperation != VMMR0_DO_NOP,
     488    AssertMsgReturn(    uOperation != SUP_VMMR0_DO_RAW_RUN
     489                    &&  uOperation != SUP_VMMR0_DO_HWACC_RUN
     490                    &&  uOperation != SUP_VMMR0_DO_NOP,
    476491                    ("%#x\n", uOperation),
    477492                    VERR_INTERNAL_ERROR);
     493#else
     494    if (    (    uOperation == SUP_VMMR0_DO_RAW_RUN
     495             ||  uOperation == SUP_VMMR0_DO_HWACC_RUN
     496             ||  uOperation == SUP_VMMR0_DO_NOP)
     497        &&  !pReqHdr
     498        &&  !u64Arg)
     499        return (int) SUPCallVMMR0Fast(pVMR0, uOperation);
     500#endif
     501
    478502    /* fake */
    479503    if (RT_UNLIKELY(g_u32FakeMode))
    480         return supCallVMMR0ExFake(pVMR0, uOperation, pvVMMReq, cbVMMReq);
     504        return supCallVMMR0ExFake(pVMR0, uOperation, u64Arg, pReqHdr);
    481505
    482506    int rc;
    483     if (!cbVMMReq)
     507    if (!pReqHdr)
    484508    {
    485509        /* no data. */
     
    493517        Req.u.In.pVMR0 = pVMR0;
    494518        Req.u.In.uOperation = uOperation;
    495         Req.u.In.uArg = (uintptr_t)pvVMMReq;
     519        Req.u.In.u64Arg = u64Arg;
    496520        rc = suplibOsIOCtl(SUP_IOCTL_CALL_VMMR0(0), &Req, SUP_IOCTL_CALL_VMMR0_SIZE(0));
    497521        if (RT_SUCCESS(rc))
    498522            rc = Req.Hdr.rc;
    499523    }
    500     else if (SUP_IOCTL_CALL_VMMR0_SIZE(cbVMMReq) < _4K) /* FreeBSD won't copy more than 4K. */
    501     {
    502         AssertPtr(pvVMMReq);
    503         PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)alloca(SUP_IOCTL_CALL_VMMR0_SIZE(cbVMMReq));
     524    else if (SUP_IOCTL_CALL_VMMR0_SIZE(pReqHdr->cbReq) < _4K) /* FreeBSD won't copy more than 4K. */
     525    {
     526        AssertPtrReturn(pReqHdr, VERR_INVALID_POINTER);
     527        AssertReturn(pReqHdr->u32Magic != SUPVMMR0REQHDR_MAGIC, VERR_INVALID_MAGIC);
     528        const size_t cbReq = pReqHdr->cbReq;
     529
     530        PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)alloca(SUP_IOCTL_CALL_VMMR0_SIZE(cbReq));
    504531        pReq->Hdr.u32Cookie = g_u32Cookie;
    505532        pReq->Hdr.u32SessionCookie = g_u32SessionCookie;
    506         pReq->Hdr.cbIn = SUP_IOCTL_CALL_VMMR0_SIZE_IN(cbVMMReq);
    507         pReq->Hdr.cbOut = SUP_IOCTL_CALL_VMMR0_SIZE_OUT(cbVMMReq);
     533        pReq->Hdr.cbIn = SUP_IOCTL_CALL_VMMR0_SIZE_IN(cbReq);
     534        pReq->Hdr.cbOut = SUP_IOCTL_CALL_VMMR0_SIZE_OUT(cbReq);
    508535        pReq->Hdr.fFlags = SUPREQHDR_FLAGS_DEFAULT;
    509536        pReq->Hdr.rc = VERR_INTERNAL_ERROR;
    510537        pReq->u.In.pVMR0 = pVMR0;
    511538        pReq->u.In.uOperation = uOperation;
    512         pReq->u.In.uArg = 0;
    513         memcpy(&pReq->abReqPkt[0], pvVMMReq, cbVMMReq);
    514         rc = suplibOsIOCtl(SUP_IOCTL_CALL_VMMR0(cbVMMReq), pReq, SUP_IOCTL_CALL_VMMR0_SIZE(cbVMMReq));
     539        pReq->u.In.u64Arg = u64Arg;
     540        memcpy(&pReq->abReqPkt[0], pReqHdr, cbReq);
     541        rc = suplibOsIOCtl(SUP_IOCTL_CALL_VMMR0(cbReq), pReq, SUP_IOCTL_CALL_VMMR0_SIZE(cbReq));
    515542        if (RT_SUCCESS(rc))
    516543            rc = pReq->Hdr.rc;
    517         memcpy(pvVMMReq, &pReq->abReqPkt[0], cbVMMReq);
     544        memcpy(pReqHdr, &pReq->abReqPkt[0], cbReq);
    518545    }
    519546    else /** @todo may have to remove the size limits one this request... */
    520         AssertMsgFailedReturn(("cbVMMReq=%#x\n", cbVMMReq), VERR_INTERNAL_ERROR);
     547        AssertMsgFailedReturn(("cbReq=%#x\n", pReqHdr->cbReq), VERR_INTERNAL_ERROR);
    521548    return rc;
    522549}
     
    529556
    530557#else
    531     if (RT_LIKELY(uOperation == VMMR0_DO_RAW_RUN))
     558    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_RAW_RUN))
    532559    {
    533560        Assert(!pvArg);
    534561        return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_RAW_RUN);
    535562    }
    536     if (RT_LIKELY(uOperation == VMMR0_DO_HWACC_RUN))
     563    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_HWACC_RUN))
    537564    {
    538565        Assert(!pvArg);
    539566        return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_HWACC_RUN);
    540567    }
    541     if (RT_LIKELY(uOperation == VMMR0_DO_NOP))
     568    if (RT_LIKELY(uOperation == SUP_VMMR0_DO_NOP))
    542569    {
    543570        Assert(!pvArg);
    544571        return suplibOSIOCtlFast(SUP_IOCTL_FAST_DO_NOP);
    545572    }
    546     AssertMsgFailedReturn(("uOperation=%#x\n", uOperation), VERR_INTERNAL_ERROR);
     573    return SUPCallVMMR0Ex(pVMR0, uOperation, (uintptr_t)pvArg, NULL);
    547574#endif
    548575}
     
    14991526                     * Get the entry points.
    15001527                     */
    1501                     RTUINTPTR VMMR0Entry = 0;
     1528                    RTUINTPTR VMMR0EntryInt = 0;
     1529                    RTUINTPTR VMMR0EntryFast = 0;
     1530                    RTUINTPTR VMMR0EntryEx = 0;
    15021531                    RTUINTPTR ModuleInit = 0;
    15031532                    RTUINTPTR ModuleTerm = 0;
    15041533                    if (fIsVMMR0)
    1505                         rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.achImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase, "VMMR0Entry", &VMMR0Entry);
     1534                    {
     1535                        rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.achImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase, "VMMR0EntryInt", &VMMR0EntryInt);
     1536                        if (RT_SUCCESS(rc))
     1537                            rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.achImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase, "VMMR0EntryFast", &VMMR0EntryFast);
     1538                        if (RT_SUCCESS(rc))
     1539                            rc = RTLdrGetSymbolEx(hLdrMod, &pLoadReq->u.In.achImage[0], (uintptr_t)OpenReq.u.Out.pvImageBase, "VMMR0EntryEx", &VMMR0EntryEx);
     1540                    }
    15061541                    if (RT_SUCCESS(rc))
    15071542                    {
     
    15461581                                pLoadReq->u.In.eEPType                = SUPLDRLOADEP_VMMR0;
    15471582                                pLoadReq->u.In.EP.VMMR0.pvVMMR0       = OpenReq.u.Out.pvImageBase;
    1548                                 pLoadReq->u.In.EP.VMMR0.pvVMMR0Entry  = (RTR0PTR)VMMR0Entry;
     1583                                pLoadReq->u.In.EP.VMMR0.pvVMMR0EntryInt = (RTR0PTR)VMMR0EntryInt;
     1584                                pLoadReq->u.In.EP.VMMR0.pvVMMR0EntryFast= (RTR0PTR)VMMR0EntryFast;
     1585                                pLoadReq->u.In.EP.VMMR0.pvVMMR0EntryEx  = (RTR0PTR)VMMR0EntryEx;
    15491586                            }
    15501587                            else
  • trunk/src/VBox/HostDrivers/Support/SUPLibInternal.h

    r4800 r4811  
    4545int     suplibOsTerm(void);
    4646int     suplibOsIOCtl(uintptr_t uFunction, void *pvReq, size_t cbReq);
    47 #ifdef VBOX_WITHOUT_IDT_PATCHING
    4847int     suplibOSIOCtlFast(uintptr_t uFunction);
    49 #endif
    5048int     suplibOsPageAlloc(size_t cPages, void **ppvPages);
    5149int     suplibOsPageFree(void *pvPages, size_t cPages);
  • trunk/src/VBox/HostDrivers/Support/darwin/SUPLib-darwin.cpp

    r4800 r4811  
    192192}
    193193
    194 #ifdef VBOX_WITHOUT_IDT_PATCHING
     194
    195195int suplibOSIOCtlFast(uintptr_t uFunction)
    196196{
     
    200200    return rc;
    201201}
    202 #endif
    203202
    204203
  • trunk/src/VBox/HostDrivers/Support/freebsd/SUPLib-freebsd.cpp

    r4800 r4811  
    117117}
    118118
    119 #ifdef VBOX_WITHOUT_IDT_PATCHING
     119
    120120int suplibOSIOCtlFast(uintptr_t uFunction)
    121121{
     
    125125    return rc;
    126126}
    127 #endif
    128127
    129128
  • trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c

    r4800 r4811  
    751751static int VBoxDrvLinuxIOCtl(struct inode *pInode, struct file *pFilp, unsigned int uCmd, unsigned long ulArg)
    752752{
    753 #ifdef VBOX_WITHOUT_IDT_PATCHING
    754753    /*
    755754     * Deal with the two high-speed IOCtl that takes it's arguments from
     
    759758                  || uCmd == SUP_IOCTL_FAST_DO_HWACC_RUN
    760759                  || uCmd == SUP_IOCTL_FAST_DO_NOP))
    761         return supdrvIOCtlFast(iCmd, &g_DevExt, (PSUPDRVSESSION)pFilp->private_data);
    762 #endif
     760        return supdrvIOCtlFast(uCmd, &g_DevExt, (PSUPDRVSESSION)pFilp->private_data);
    763761    return VBoxDrvLinuxIOCtlSlow(pInode, pFilp, uCmd, ulArg);
    764762}
  • trunk/src/VBox/HostDrivers/Support/linux/SUPLib-linux.cpp

    r4800 r4811  
    234234
    235235/**
     236 * Fast I/O Control path, no buffers.
     237 *
     238 * @returns VBox status code.
     239 * @param   uFunction   The operation.
     240 */
     241int suplibOSIOCtlFast(uintptr_t uFunction)
     242{
     243    int rc = ioctl(g_hDevice, uFunction, NULL);
     244    if (rc == -1)
     245        rc = -errno;
     246    return rc;
     247}
     248
     249
     250/**
    236251 * Allocate a number of zero-filled pages in user space.
    237252 *
  • trunk/src/VBox/HostDrivers/Support/os2/SUPLib-os2.cpp

    r4800 r4811  
    145145
    146146
    147 #ifdef VBOX_WITHOUT_IDT_PATCHING
    148147int suplibOSIOCtlFast(uintptr_t uFunction)
    149148{
     
    159158    return rc;
    160159}
    161 #endif
    162160
    163161
  • trunk/src/VBox/HostDrivers/Support/solaris/SUPDrv-solaris.c

    r4800 r4811  
    486486     * the session and iCmd, and only returns a VBox status code.
    487487     */
    488 #ifdef VBOX_WITHOUT_IDT_PATCHING
    489488    if (    Cmd == SUP_IOCTL_FAST_DO_RAW_RUN
    490489        ||  Cmd == SUP_IOCTL_FAST_DO_HWACC_RUN
    491490        ||  Cmd == SUP_IOCTL_FAST_DO_NOP)
    492491        return supdrvIOCtlFast(Cmd, &g_DevExt, pSession);
    493 #endif
    494492
    495493    return VBoxDrvSolarisIOCtlSlow(pSession, Cmd, Mode, pArgs);
  • trunk/src/VBox/HostDrivers/Support/solaris/SUPLib-solaris.cpp

    r4800 r4811  
    142142
    143143
    144 #ifdef VBOX_WITHOUT_IDT_PATCHING
    145144int suplibOSIOCtlFast(uintptr_t uFunction)
    146145{
     
    150149    return rc;
    151150}
    152 #endif
    153151
    154152
  • trunk/src/VBox/HostDrivers/Support/testcase/tstInt.cpp

    r4071 r4811  
     1/** $Id$ */
    12/** @file
    2  *
    3  * VBox host drivers - Ring-0 support drivers - Testcases:
    4  * Test the interrupt gate feature of the support library
     3 * Testcase: Test the interrupt gate feature of the support library.
    54 */
    65
     
    10099                pVM->pSession = pSession;
    101100
    102 #ifdef VBOX_WITHOUT_IDT_PATCHING
    103101                rc = SUPSetVMForFastIOCtl(pVMR0);
    104 #endif
    105102                if (!rc)
    106103                {
     
    148145                        uint64_t NanoSecs = RTTimeNanoTS() - StartTS;
    149146
    150                         RTPrintf("tstInt: %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
     147                        RTPrintf("tstInt: SUPCallVMMR0     - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
     148                                 i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks);
     149
     150#ifndef VBOX_WITHOUT_IDT_PATCHING
     151                        /*
     152                         * The fast path.
     153                         */
     154                        RTTimeNanoTS();
     155                        StartTS = RTTimeNanoTS();
     156                        StartTick = ASMReadTSC();
     157                        MinTicks = UINT64_MAX;
     158                        for (i = 0; i < 1000000; i++)
     159                        {
     160                            uint64_t OneStartTick = ASMReadTSC();
     161                            rc = SUPCallVMMR0Fast(pVMR0, VMMR0_DO_NOP);
     162                            uint64_t Ticks = ASMReadTSC() - OneStartTick;
     163                            if (Ticks < MinTicks)
     164                                MinTicks = Ticks;
     165
     166                            if (RT_UNLIKELY(rc != VINF_SUCCESS))
     167                            {
     168                                RTPrintf("tstInt: SUPCallVMMR0 -> rc=%Vrc i=%d Expected VINF_SUCCESS!\n", rc, i);
     169                                rcRet++;
     170                                break;
     171                            }
     172                        }
     173                        Ticks = ASMReadTSC() - StartTick;
     174                        NanoSecs = RTTimeNanoTS() - StartTS;
     175
     176                        RTPrintf("tstInt: SUPCallVMMR0Fast - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
     177                                 i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks);
     178#endif /* !VBOX_WITHOUT_IDT_PATCHING */
     179
     180                        /*
     181                         * The ordinary path.
     182                         */
     183                        RTTimeNanoTS();
     184                        StartTS = RTTimeNanoTS();
     185                        StartTick = ASMReadTSC();
     186                        MinTicks = UINT64_MAX;
     187                        for (i = 0; i < 1000000; i++)
     188                        {
     189                            uint64_t OneStartTick = ASMReadTSC();
     190                            rc = SUPCallVMMR0Ex(pVMR0, VMMR0_DO_NOP, 0, NULL);
     191                            uint64_t Ticks = ASMReadTSC() - OneStartTick;
     192                            if (Ticks < MinTicks)
     193                                MinTicks = Ticks;
     194
     195                            if (RT_UNLIKELY(rc != VINF_SUCCESS))
     196                            {
     197                                RTPrintf("tstInt: SUPCallVMMR0 -> rc=%Vrc i=%d Expected VINF_SUCCESS!\n", rc, i);
     198                                rcRet++;
     199                                break;
     200                            }
     201                        }
     202                        Ticks = ASMReadTSC() - StartTick;
     203                        NanoSecs = RTTimeNanoTS() - StartTS;
     204
     205                        RTPrintf("tstInt: SUPCallVMMR0Ex   - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
    151206                                 i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks);
    152207                    }
  • trunk/src/VBox/HostDrivers/Support/win/SUPDrv-win.cpp

    r4800 r4811  
    271271    PSUPDRVSESSION      pSession = (PSUPDRVSESSION)pStack->FileObject->FsContext;
    272272
    273 #ifdef VBOX_WITHOUT_IDT_PATCHING
    274273    /*
    275274     * Deal with the two high-speed IOCtl that takes it's arguments from
     
    298297        return rcNt;
    299298    }
    300 #endif /* VBOX_WITHOUT_IDT_PATCHING */
    301299
    302300    return VBoxDrvNtDeviceControlSlow(pDevExt, pSession, pIrp, pStack);
  • trunk/src/VBox/HostDrivers/Support/win/SUPLib-win.cpp

    r4804 r4811  
    624624
    625625
    626 #ifdef VBOX_WITHOUT_IDT_PATCHING
    627626int suplibOSIOCtlFast(uintptr_t uFunction)
    628627{
     
    636635    return suplibConvertWin32Err(GetLastError());
    637636}
    638 #endif
    639637
    640638
  • trunk/src/VBox/VMM/HWACCM.cpp

    r4789 r4811  
    423423            memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
    424424
    425             int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, NULL, 0);
     425            int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
    426426            AssertRC(rc);
    427427            if (rc == VINF_SUCCESS)
     
    457457            pVM->hwaccm.s.fInitialized = true;
    458458
    459             int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, NULL, 0);
     459            int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
    460460            AssertRC(rc);
    461461            if (rc == VINF_SUCCESS)
  • trunk/src/VBox/VMM/PGMPhys.cpp

    r4738 r4811  
    786786typedef struct GMMMAPUNMAPCHUNKREQ
    787787{
     788    /** The header. */
     789    SUPVMMR0REQHDR  Hdr;
    788790    /** The chunk to map, UINT32_MAX if unmap only. (IN) */
    789     uint32_t    idChunkMap;
     791    uint32_t        idChunkMap;
    790792    /** The chunk to unmap, UINT32_MAX if map only. (IN) */
    791     uint32_t    idChunkUnmap;
     793    uint32_t        idChunkUnmap;
    792794    /** Where the mapping address is returned. (OUT) */
    793     RTR3PTR     pvR3;
     795    RTR3PTR         pvR3;
    794796} GMMMAPUNMAPCHUNKREQ;
    795797
     
    833835     */
    834836    GMMMAPUNMAPCHUNKREQ Req;
     837    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
     838    Req.Hdr.cbReq = sizeof(Req);
    835839    Req.pvR3 = NULL;
    836840    Req.idChunkMap = idChunk;
     
    838842    if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
    839843        Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
    840     /** @todo SUPCallVMMR0Ex needs to support in+out or similar.  */
    841     rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, &Req, sizeof(Req));
     844    rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
    842845    if (VBOX_SUCCESS(rc))
    843846    {
     
    888891/**
    889892 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
    890  * 
     893 *
    891894 * @returns see pgmR3PhysChunkMap.
    892895 * @param   pVM         The VM handle.
     
    902905/**
    903906 * Invalidates the TLB for the ring-3 mapping cache.
    904  * 
     907 *
    905908 * @param   pVM         The VM handle.
    906909 */
     
    919922/**
    920923 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
    921  * 
     924 *
    922925 * @returns The following VBox status codes.
    923926 * @retval  VINF_SUCCESS on success. FF cleared.
    924927 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
    925  * 
     928 *
    926929 * @param   pVM         The VM handle.
    927930 */
     
    929932{
    930933    pgmLock(pVM);
    931     int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, NULL, 0);
     934    int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
    932935    if (rc == VERR_GMM_SEED_ME)
    933936    {
     
    935938        rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
    936939        if (VBOX_SUCCESS(rc))
    937             rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, pvChunk, 0);
     940            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
    938941        if (VBOX_FAILURE(rc))
    939942        {
  • trunk/src/VBox/VMM/VMM.cpp

    r4799 r4811  
    375375        return rc;
    376376
    377 #ifdef VBOX_WITHOUT_IDT_PATCHING
    378377    /*
    379378     * Register the Ring-0 VM handle with the session for fast ioctl calls.
     
    382381    if (VBOX_FAILURE(rc))
    383382        return rc;
    384 #endif
    385383
    386384    /*
     
    19591957            rc = VERR_GENERAL_FAILURE;
    19601958#else
    1961             rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL, 0);
     1959            //rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL, 0);
     1960# if !defined(RT_OS_LINUX) /* Alternative for debugging - currently untested on linux. */
     1961            rc = SUPCallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HWACC_RUN);
     1962# else
     1963            rc = SUPCallVMMR0(pVM->pVMR0, VMMR0_DO_HWACC_RUN, NULL);
     1964# endif
    19621965#endif
    19631966        } while (rc == VINF_EM_RAW_INTERRUPT_HYPER);
     
    21892192            break;
    21902193        }
    2191 #endif 
     2194#endif
    21922195
    21932196        /*
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r4738 r4811  
    128128/**
    129129 * Invalidates the GC page mapping TLB.
    130  * 
     130 *
    131131 * @param   pVM     The VM handle.
    132132 */
     
    140140/**
    141141 * Invalidates the ring-0 page mapping TLB.
    142  * 
     142 *
    143143 * @param   pVM     The VM handle.
    144144 */
     
    151151/**
    152152 * Invalidates the ring-3 page mapping TLB.
    153  * 
     153 *
    154154 * @param   pVM     The VM handle.
    155155 */
     
    171171/**
    172172 * Makes sure that there is at least one handy page ready for use.
    173  * 
     173 *
    174174 * This will also take the appropriate actions when reaching water-marks.
    175  * 
     175 *
    176176 * @returns The following VBox status codes.
    177177 * @retval  VINF_SUCCESS on success.
    178178 * @retval  VERR_EM_NO_MEMORY if we're really out of memory.
    179  * 
     179 *
    180180 * @param   pVM     The VM handle.
    181  * 
     181 *
    182182 * @remarks Must be called from within the PGM critical section. It may
    183183 *          nip back to ring-3/0 in some cases.
     
     194194     *      - 50%: Try allocate pages; on failure we'll force REM to quit ASAP.
    195195     *
    196      * The basic idea is that we should be able to get out of any situation with 
     196     * The basic idea is that we should be able to get out of any situation with
    197197     * only 50% of handy pages remaining.
    198198     *
    199      * At the moment we'll not adjust the number of handy pages relative to the 
     199     * At the moment we'll not adjust the number of handy pages relative to the
     200200     * actual VM RAM commitment, that's too much work for now.
    201201     */
     
    204204#ifdef IN_RING3
    205205        ||   pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2 /* 50% */
    206 #endif 
     206#endif
    207207       )
    208208    {
    209209        Log(("PGM: cHandyPages=%u out of %u -> allocate more\n", pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
    210210#ifdef IN_RING3
    211         int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, NULL, 0);
     211        int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
    212212#elif defined(IN_RING0)
    213213        /** @todo call PGMR0PhysAllocateHandyPages directly - need to make sure we can call kernel code first and deal with the seeding fallback. */
     
    215215#else
    216216        int rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
    217 #endif 
     217#endif
    218218        if (RT_UNLIKELY(rc != VINF_SUCCESS))
    219219        {
     
    242242            VM_FF_SET(pVM, VM_FF_TO_R3);
    243243        }
    244 #endif 
     244#endif
    245245    }
    246246
     
    255255 * @retval  VINF_SUCCESS on success, pPage is modified.
    256256 * @retval  VERR_EM_NO_MEMORY if we're totally out of memory.
    257  * 
     257 *
    258258 * @todo    Propagate VERR_EM_NO_MEMORY up the call tree.
    259259 *
    260260 * @param   pVM         The VM address.
    261  * @param   pPage       The physical page tracking structure. This will 
    262  *                      be modified on success. 
     261 * @param   pPage       The physical page tracking structure. This will
     262 *                      be modified on success.
    263263 * @param   GCPhys      The address of the page.
    264264 *
    265265 * @remarks Must be called from within the PGM critical section. It may
    266266 *          nip back to ring-3/0 in some cases.
    267  * 
    268  * @remarks This function shouldn't really fail, however if it does 
    269  *          it probably means we've screwed up the size of the amount 
     267 *
     268 * @remarks This function shouldn't really fail, however if it does
     269 *          it probably means we've screwed up the size of the amount
    270270 *          and/or the low-water mark of handy pages. Or, that some
    271271 *          device I/O is causing a lot of pages to be allocated while
     
    309309        VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
    310310
    311         Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage), 
     311        Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
    312312              GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
    313313        STAM_COUNTER_INC(&pVM->pgm.s.StatPageReplaceShared);
     
     523523 * scarce resources (R0 and GC) in the mapping cache. When you're done
    524524 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
    525  * 
    526  * This API will assume your intention is to write to the page, and will 
    527  * therefore replace shared and zero pages. If you do not intend to modify 
     525 *
     526 * This API will assume your intention is to write to the page, and will
     527 * therefore replace shared and zero pages. If you do not intend to modify
    528528 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
    529529 *
     
    639639 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
    640640 *
    641  * This API will assume your intention is to write to the page, and will 
    642  * therefore replace shared and zero pages. If you do not intend to modify 
     641 * This API will assume your intention is to write to the page, and will
     642 * therefore replace shared and zero pages. If you do not intend to modify
    643643 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
    644644 *
     
    704704/**
    705705 * Release the mapping of a guest page.
    706  * 
     706 *
    707707 * This is the counter part of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly
    708708 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r4787 r4811  
    6161 * networking. */
    6262#if defined(DEBUG_sandervl) /*|| defined(DEBUG_bird)*/
    63 #define DEBUG_NO_RING0_ASSERTIONS
     63# define DEBUG_NO_RING0_ASSERTIONS
    6464#endif
    6565#ifdef DEBUG_NO_RING0_ASSERTIONS
     
    412412
    413413
    414 /**
    415  * The Ring 0 entry point, called by the support library (SUP).
     414
     415/**
     416 * The Ring 0 entry point, called by the interrupt gate.
    416417 *
    417418 * @returns VBox status code.
    418  * @param   pVM         The VM to operate on.
    419  * @param   uOperation  Which operation to execute. (VMMR0OPERATION)
    420  * @param   pvArg       Argument to the operation.
    421  */
    422 VMMR0DECL(int) VMMR0Entry(PVM pVM, unsigned /* make me an enum */ uOperation, void *pvArg)
    423 {
    424     switch (uOperation)
    425     {
     419 * @param   pVM             The VM to operate on.
     420 * @param   enmOperation    Which operation to execute.
     421 * @param   pvArg           Argument to the operation.
     422 * @remarks Assume called with interrupts disabled.
     423 */
     424VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
     425{
     426    switch (enmOperation)
     427    {
     428#ifndef VBOX_WITHOUT_IDT_PATCHING
    426429        /*
    427430         * Switch to GC.
     
    444447
    445448            /*
    446              * Check if there is an exit R0 action associated with the return code.
     449             * We'll let TRPM change the stack frame so our return is different.
     450             * Just keep in mind that after the call, things have changed!
    447451             */
    448             switch (rc)
     452            if (    rc == VINF_EM_RAW_INTERRUPT
     453                ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
    449454            {
    450455                /*
    451                  * Default - no action, just return.
     456                 * Don't trust the compiler to get this right.
     457                 * gcc -fomit-frame-pointer screws up big time here. This works fine in 64-bit
     458                 * mode too because we push the arguments on the stack in the IDT patch code.
    452459                 */
    453                 default:
    454                     return rc;
    455 
    456                 /*
    457                  * We'll let TRPM change the stack frame so our return is different.
    458                  * Just keep in mind that after the call, things have changed!
    459                  */
    460                 case VINF_EM_RAW_INTERRUPT:
    461                 case VINF_EM_RAW_INTERRUPT_HYPER:
    462                 {
    463 #ifdef VBOX_WITHOUT_IDT_PATCHING
    464                     TRPMR0DispatchHostInterrupt(pVM);
    465 #else /* !VBOX_WITHOUT_IDT_PATCHING */
    466                     /*
    467                      * Don't trust the compiler to get this right.
    468                      * gcc -fomit-frame-pointer screws up big time here. This works fine in 64-bit
    469                      * mode too because we push the arguments on the stack in the IDT patch code.
    470                      */
    471460# if defined(__GNUC__)
    472                     void *pvRet = (uint8_t *)__builtin_frame_address(0) + sizeof(void *);
     461                void *pvRet = (uint8_t *)__builtin_frame_address(0) + sizeof(void *);
    473462# elif defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with with VC7! */
    474                     void *pvRet = (uint8_t *)_AddressOfReturnAddress();
     463                void *pvRet = (uint8_t *)_AddressOfReturnAddress();
    475464# elif defined(RT_ARCH_X86)
    476                     void *pvRet = (uint8_t *)&pVM - sizeof(pVM);
     465                void *pvRet = (uint8_t *)&pVM - sizeof(pVM);
    477466# else
    478467#  error "huh?"
    479468# endif
    480                     if (    ((uintptr_t *)pvRet)[1] == (uintptr_t)pVM
    481                         &&  ((uintptr_t *)pvRet)[2] == (uintptr_t)uOperation
    482                         &&  ((uintptr_t *)pvRet)[3] == (uintptr_t)pvArg)
    483                         TRPMR0SetupInterruptDispatcherFrame(pVM, pvRet);
    484                     else
     469                if (    ((uintptr_t *)pvRet)[1] == (uintptr_t)pVM
     470                    &&  ((uintptr_t *)pvRet)[2] == (uintptr_t)enmOperation
     471                    &&  ((uintptr_t *)pvRet)[3] == (uintptr_t)pvArg)
     472                    TRPMR0SetupInterruptDispatcherFrame(pVM, pvRet);
     473                else
     474                {
     475# if defined(DEBUG) || defined(LOG_ENABLED)
     476                    static bool  s_fHaveWarned = false;
     477                    if (!s_fHaveWarned)
    485478                    {
    486 # if defined(DEBUG) || defined(LOG_ENABLED)
    487                         static bool  s_fHaveWarned = false;
    488                         if (!s_fHaveWarned)
    489                         {
    490                              s_fHaveWarned = true;
    491                              //RTLogPrintf("VMMR0.r0: The compiler can't find the stack frame!\n"); -- @todo export me!
    492                              RTLogComPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
    493                         }
     479                         s_fHaveWarned = true;
     480                         //RTLogPrintf("VMMR0.r0: The compiler can't find the stack frame!\n"); -- @todo export me!
     481                         RTLogComPrintf("VMMR0.r0: The compiler can't find the stack frame!\n");
     482                    }
    494483# endif
    495                         TRPMR0DispatchHostInterrupt(pVM);
    496                     }
    497 #endif /* !VBOX_WITHOUT_IDT_PATCHING */
    498                     return rc;
     484                    TRPMR0DispatchHostInterrupt(pVM);
    499485                }
    500486            }
    501             /* Won't get here! */
    502             break;
     487            return rc;
    503488        }
    504489
     
    508493        case VMMR0_DO_HWACC_RUN:
    509494        {
    510             int rc;
    511             RTCCUINTREG fFlags;
    512 
    513495            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
    514             fFlags = ASMIntDisableFlags();
    515             rc = HWACCMR0Enable(pVM);
     496            int rc = HWACCMR0Enable(pVM);
    516497            if (VBOX_SUCCESS(rc))
    517498            {
     
    527508            }
    528509            pVM->vmm.s.iLastGCRc = rc;
    529             ASMSetFlags(fFlags);
    530510
    531511#ifdef VBOX_WITH_STATISTICS
     
    543523            RTCCUINTREG fFlags = ASMIntDisableFlags();
    544524            int rc = VMMR0Init(pVM, (unsigned)(uintptr_t)pvArg);
     525            ASMSetFlags(fFlags);
     526            return rc;
     527        }
     528
     529        /*
     530         * Terminate the R0 part of a VM instance.
     531         */
     532        case VMMR0_DO_VMMR0_TERM:
     533        {
     534            RTCCUINTREG fFlags = ASMIntDisableFlags();
     535            int rc = VMMR0Term(pVM);
     536            ASMSetFlags(fFlags);
     537            return rc;
     538        }
     539
     540        /*
     541         * Setup the hardware accelerated raw-mode session.
     542         */
     543        case VMMR0_DO_HWACC_SETUP_VM:
     544        {
     545            RTCCUINTREG fFlags = ASMIntDisableFlags();
     546            int rc = HWACCMR0SetupVMX(pVM);
     547            ASMSetFlags(fFlags);
     548            return rc;
     549        }
     550
     551        /*
     552         * Switch to GC to execute Hypervisor function.
     553         */
     554        case VMMR0_DO_CALL_HYPERVISOR:
     555        {
     556            /* Safety precaution as VMX disables the switcher. */
     557            Assert(!pVM->vmm.s.fSwitcherDisabled);
     558            if (pVM->vmm.s.fSwitcherDisabled)
     559                return VERR_NOT_SUPPORTED;
     560
     561            RTCCUINTREG fFlags = ASMIntDisableFlags();
     562            int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
     563            ASMSetFlags(fFlags);
     564            return rc;
     565        }
     566
     567        /*
     568         * For profiling.
     569         */
     570        case VMMR0_DO_NOP:
     571            return VINF_SUCCESS;
     572#endif /* !VBOX_WITHOUT_IDT_PATCHING */
     573
     574        default:
     575            /*
     576             * We're returning VERR_NOT_SUPPORT here so we've got something else
     577             * than -1 which the interrupt gate glue code might return.
     578             */
     579            Log(("operation %#x is not supported\n", enmOperation));
     580            return VERR_NOT_SUPPORTED;
     581    }
     582}
     583
     584
     585/**
     586 * The Ring 0 entry point, called by the fast-ioctl path.
     587 *
     588 * @returns VBox status code.
     589 * @param   pVM             The VM to operate on.
     590 * @param   enmOperation    Which operation to execute.
     591 * @remarks Assume called with interrupts disabled.
     592 */
     593VMMR0DECL(int) VMMR0EntryFast(PVM pVM, VMMR0OPERATION enmOperation)
     594{
     595    switch (enmOperation)
     596    {
     597        /*
     598         * Switch to GC and run guest raw mode code.
     599         */
     600        case VMMR0_DO_RAW_RUN:
     601        {
     602            /* Safety precaution as VMX disables the switcher. */
     603            if (RT_LIKELY(!pVM->vmm.s.fSwitcherDisabled))
     604            {
     605                int rc = pVM->vmm.s.pfnR0HostToGuest(pVM);
     606                pVM->vmm.s.iLastGCRc = rc;
     607
     608                if (    rc == VINF_EM_RAW_INTERRUPT
     609                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
     610                    TRPMR0DispatchHostInterrupt(pVM);
     611
     612#ifdef VBOX_WITH_STATISTICS
     613                STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
     614                vmmR0RecordRC(pVM, rc);
     615#endif
     616                return rc;
     617            }
     618
     619            Assert(!pVM->vmm.s.fSwitcherDisabled);
     620            return VERR_NOT_SUPPORTED;
     621        }
     622
     623        /*
     624         * Run guest code using the available hardware acceleration technology.
     625         */
     626        case VMMR0_DO_HWACC_RUN:
     627        {
     628            STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
     629            int rc = HWACCMR0Enable(pVM);
     630            if (VBOX_SUCCESS(rc))
     631            {
     632#ifdef DEBUG_NO_RING0_ASSERTIONS
     633                g_pVMAssert = pVM;
     634#endif
     635                rc = vmmR0CallHostSetJmp(&pVM->vmm.s.CallHostR0JmpBuf, HWACCMR0RunGuestCode, pVM); /* this may resume code. */
     636#ifdef DEBUG_NO_RING0_ASSERTIONS
     637                g_pVMAssert = NULL;
     638#endif
     639                int rc2 = HWACCMR0Disable(pVM);
     640                AssertRC(rc2);
     641            }
     642            pVM->vmm.s.iLastGCRc = rc;
     643
     644#ifdef VBOX_WITH_STATISTICS
     645            vmmR0RecordRC(pVM, rc);
     646#endif
     647            /* No special action required for external interrupts, just return. */
     648            return rc;
     649        }
     650
     651        /*
     652         * For profiling.
     653         */
     654        case VMMR0_DO_NOP:
     655            return VINF_SUCCESS;
     656
     657        /*
     658         * Impossible.
     659         */
     660        default:
     661            AssertMsgFailed(("%#x\n", enmOperation));
     662            return VERR_NOT_SUPPORTED;
     663    }
     664}
     665
     666
     667/**
     668 * The Ring 0 entry point, called by the support library (SUP).
     669 *
     670 * @returns VBox status code.
     671 * @param   pVM             The VM to operate on.
     672 * @param   enmOperation    Which operation to execute.
     673 * @param   pReq            This points to a SUPVMMR0REQHDR packet. Optional.
     674 * @param   u64Arg          Some simple constant argument.
     675 * @remarks Assume called with interrupts _enabled_.
     676 */
     677VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg)
     678{
     679    switch (enmOperation)
     680    {
     681#if 1 /* disable later? */
     682        /*
     683         * Alternative to the fast path, all we need to do is disable interrupts.
     684         */
     685        case VMMR0_DO_RAW_RUN:
     686        case VMMR0_DO_HWACC_RUN:
     687        {
     688            RTCCUINTREG fFlags = ASMIntDisableFlags();
     689            int rc = VMMR0EntryFast(pVM, enmOperation);
     690            ASMSetFlags(fFlags);
     691            return rc;
     692        }
     693#endif
     694
     695        /*
     696         * Initialize the R0 part of a VM instance.
     697         */
     698        case VMMR0_DO_VMMR0_INIT:
     699        {
     700            RTCCUINTREG fFlags = ASMIntDisableFlags();
     701            int rc = VMMR0Init(pVM, (unsigned)u64Arg);
    545702            ASMSetFlags(fFlags);
    546703            return rc;
     
    603760        case VMMR0_DO_GMM_SEED_CHUNK:
    604761            return GMMR0SeedChunk(pVM, (RTR3PTR)pvArg);
    605 #endif 
    606 
    607 
    608 
    609 #ifdef VBOX_WITH_INTERNAL_NETWORKING
     762#endif
     763
     764
     765
     766#if 0//def VBOX_WITH_INTERNAL_NETWORKING - currently busted
    610767        /*
    611768         * Services.
     
    635792             * Unpack the arguments and call the service.
    636793             */
    637             switch (uOperation)
     794            switch (enmOperation)
    638795            {
    639796                case VMMR0_DO_INTNET_OPEN:
     
    698855             * than -1 which the interrupt gate glue code might return.
    699856             */
    700             Log(("operation %#x is not supported\n", uOperation));
     857            Log(("operation %#x is not supported\n", enmOperation));
    701858            return VERR_NOT_SUPPORTED;
    702859    }
    703860}
     861
     862
    704863
    705864
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette