VirtualBox

Changeset 13025 in vbox for trunk


Timestamp:
Oct 7, 2008 7:28:54 AM
Author:
vboxsync
Message:

Updates for EPT.

Location:
trunk/src/VBox/VMM
Files:
6 edited
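
In short: this changeset lays the groundwork for Intel EPT (nested paging) and VPID (tagged TLBs) in the VT-x backend. The TLB/ASID bookkeeping (fForceTLBFlush, idLastCpu, cTLBFlushes, uCurrentASID, uMaxASID) moves from the AMD-V-specific structures into the common HWACCM state so both backends share it, the VMX world switch gains the same ASID-recycling logic the SVM path already used, the VMCS setup learns about EPT (EPTP write, #PF interception dropped outside debug builds, INVLPG/CR3 exits made conditional on nested paging), and a new VMX_EXIT_EPT_VIOLATION handler forwards EPT faults to PGMR0Trap0eHandlerNestedPaging.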

Legend:

Unmodified (context) lines carry a single leading space; lines removed in r13025 are prefixed with "-", lines added with "+".
  • trunk/src/VBox/VMM/HWACCM.cpp

r12989 → r13025

@@ -521,4 +521,7 @@
             if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
                 pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
+
+            if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
+                pVM->hwaccm.s.vmx.fVPID = true;
 #endif
 

@@ -609,5 +612,5 @@
             LogRel(("HWACMM: cpuid 0x80000001.u32AMDFeatureEDX = %VX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
             LogRel(("HWACCM: SVM revision                      = %X\n", pVM->hwaccm.s.svm.u32Rev));
-            LogRel(("HWACCM: SVM max ASID                      = %d\n", pVM->hwaccm.s.svm.u32MaxASID));
+            LogRel(("HWACCM: SVM max ASID                      = %d\n", pVM->hwaccm.s.uMaxASID));
             LogRel(("HWACCM: SVM features                      = %X\n", pVM->hwaccm.s.svm.u32Features));
 
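
The new VPID check mirrors the EPT one above it: a VT-x secondary processor-based execution control may be enabled only if its allowed-1 bit is set in the IA32_VMX_PROCBASED_CTLS2 capability MSR (bits 63:32 of that MSR are the allowed-1 settings). A minimal sketch of that gate; the helper name is made up, only the MSR semantics are architectural:

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_IA32_VMX_PROCBASED_CTLS2 0x48B   /* architectural MSR index */

    /* A secondary control bit may be set to 1 in the VMCS only if the
     * corresponding bit is 1 in the high (allowed-1) half of the MSR. */
    static bool vmxIsCtrl2Allowed(uint64_t uMsrProcCtls2, uint32_t fControl)
    {
        return ((uMsrProcCtls2 >> 32) & fControl) != 0;
    }

In the hunk above, VMX_VMCS_CTRL_PROC_EXEC2_EPT and VMX_VMCS_CTRL_PROC_EXEC2_VPID play the role of fControl, with the allowed-1 word already split out into msr.vmx_proc_ctls2.n.allowed1.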
  • trunk/src/VBox/VMM/HWACCMInternal.h

r12989 → r13025

@@ -138,5 +138,5 @@
 
     RTR0MEMOBJ  pMemObj;
-    /* Current ASID (AMD-V only) */
+    /* Current ASID (AMD-V)/VPID (Intel) */
     uint32_t    uCurrentASID;
     /* TLB flush count */

@@ -185,13 +185,28 @@
     bool                        fAllowNestedPaging;
 
+    /** Set if we need to flush the TLB during the world switch. */
+    bool                        fForceTLBFlush;
+
+    /** Old style FPU reporting trap mask override performed (optimization) */
+    bool                        fFPUOldStyleOverride;
+
     /** Explicit alignment padding to make 32-bit gcc align u64RegisterMask
      *  naturally. */
-    bool                        padding[3+4];
+    bool                        padding[1];
 
     /** HWACCM_CHANGED_* flags. */
-    uint32_t                    fContextUseFlags;
-
-    /** Old style FPU reporting trap mask override performed (optimization) */
-    uint32_t                    fFPUOldStyleOverride;
+    RTUINT                      fContextUseFlags;
+
+    /* Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
+    RTCPUID                     idLastCpu;
+
+    /* TLB flush count */
+    RTUINT                      cTLBFlushes;
+
+    /* Current ASID in use by the VM */
+    RTUINT                      uCurrentASID;
+
+    /** Maximum ASID allowed. */
+    RTUINT                      uMaxASID;
 
     /** And mask for copying register contents. */

@@ -207,4 +222,7 @@
         /** Set if we can use VMXResume to execute guest code. */
         bool                        fResumeVM;
+
+        /** Set if VPID is supported. */
+        bool                        fVPID;
 
         /** R0 memory object for the VM control structure (VMCS). */

@@ -344,15 +362,4 @@
         /** Set if erratum 170 affects the AMD cpu. */
         bool                        fAlwaysFlushTLB;
-        /** Set if we need to flush the TLB during the world switch. */
-        bool                        fForceTLBFlush;
-
-        /* Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
-        RTCPUID                     idLastCpu;
-
-        /* TLB flush count */
-        uint32_t                    cTLBFlushes;
-
-        /* Current ASID in use by the VM */
-        uint32_t                    uCurrentASID;
 
         /** R0 memory object for the VM control block (VMCB). */

@@ -389,7 +396,4 @@
         /** SVM revision. */
         uint32_t                    u32Rev;
-
-        /** Maximum ASID allowed. */
-        uint32_t                    u32MaxASID;
 
         /** SVM feature bits from cpuid 0x8000000a */
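
The net effect of these header changes is to hoist the tagged-TLB bookkeeping out of the AMD-V-only struct into the common per-VM HWACCM state, since VT-x now needs the same fields for VPID. Roughly, as a sketch (field names and comments taken from the diff, the surrounding struct trimmed):

    /* Shared per-VM TLB state after this change; illustrative, not verbatim. */
    typedef struct HWACCMTLBSTATE
    {
        bool      fForceTLBFlush;   /* flush (or re-tag) on the next world switch */
        RTCPUID   idLastCpu;        /* host cpu we last ran on; NIL_RTCPUID initially */
        RTUINT    cTLBFlushes;      /* per-cpu flush generation we last observed */
        RTUINT    uCurrentASID;     /* ASID (AMD-V) / VPID (VT-x) currently in use */
        RTUINT    uMaxASID;         /* first out-of-range ASID value (exclusive bound) */
    } HWACCMTLBSTATE;

Matching per-cpu copies of uCurrentASID and the TLB flush count already live in the HWACCM_CPUINFO entries; the per-VM fields cache what this VM last saw on its current host cpu.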
  • trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp

r12989 → r13025

@@ -53,8 +53,10 @@
 VMMDECL(int) HWACCMInvalidatePage(PVM pVM, RTGCPTR GCVirt)
 {
-    /** @todo Intel for nested paging */
 #ifdef IN_RING0
-    if (pVM->hwaccm.s.svm.fSupported)
-        return SVMR0InvalidatePage(pVM, GCVirt);
+    if (pVM->hwaccm.s.vmx.fSupported)
+        return VMXR0InvalidatePage(pVM, GCVirt);
+
+    Assert(pVM->hwaccm.s.svm.fSupported);
+    return SVMR0InvalidatePage(pVM, GCVirt);
 #endif
 

@@ -70,11 +72,8 @@
 VMMDECL(int) HWACCMFlushTLB(PVM pVM)
 {
-    /** @todo Intel for nested paging */
-    if (pVM->hwaccm.s.svm.fSupported)
-    {
-        LogFlow(("HWACCMFlushTLB\n"));
-        pVM->hwaccm.s.svm.fForceTLBFlush = true;
-        STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBManual);
-    }
+    LogFlow(("HWACCMFlushTLB\n"));
+
+    pVM->hwaccm.s.fForceTLBFlush = true;
+    STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBManual);
     return VINF_SUCCESS;
 }

@@ -121,9 +120,9 @@
 
 #ifdef IN_RING0
-    /** @todo Intel for nested paging */
-    if (pVM->hwaccm.s.svm.fSupported)
-    {
-        SVMR0InvalidatePhysPage(pVM, GCPhys);
-    }
+    if (pVM->hwaccm.s.vmx.fSupported)
+        return VMXR0InvalidatePhysPage(pVM, GCPhys);
+
+    Assert(pVM->hwaccm.s.svm.fSupported);
+    SVMR0InvalidatePhysPage(pVM, GCPhys);
 #else
     HWACCMFlushTLB(pVM);
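
With a VT-x page-invalidation backend in place, the common entry points now dispatch on the vendor instead of being SVM-only, and HWACCMFlushTLB just sets the shared fForceTLBFlush flag. A hedged sketch of the resulting shape (the ring-3 fallback for the virtual-address variant is inferred from the physical-address hunk, which shows HWACCMFlushTLB in its #else branch):

    /* Illustrative only; in ring-3 there is no R0 backend to call, so the
     * per-page operation degrades to queueing a full TLB flush. */
    VMMDECL(int) hwaccmInvalidatePageSketch(PVM pVM, RTGCPTR GCVirt)
    {
    #ifdef IN_RING0
        if (pVM->hwaccm.s.vmx.fSupported)
            return VMXR0InvalidatePage(pVM, GCVirt);
        Assert(pVM->hwaccm.s.svm.fSupported);
        return SVMR0InvalidatePage(pVM, GCVirt);
    #else
        return HWACCMFlushTLB(pVM);   /* coarse but safe */
    #endif
    }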
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp

r12989 → r13025

@@ -74,4 +74,7 @@
     DECLR0CALLBACKMEMBER(int, pfnSetupVM, (PVM pVM));
 
+    /** Maximum ASID allowed. */
+    uint32_t                  uMaxASID;
+
     struct
     {

@@ -111,7 +114,4 @@
         uint32_t                    u32Rev;
 
-        /** Maximum ASID allowed. */
-        uint32_t                    u32MaxASID;
-
         /** SVM feature bits from cpuid 0x8000000a */
         uint32_t                    u32Features;

@@ -224,4 +224,6 @@
                         HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1  = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
                         HWACCMR0Globals.vmx.msr.vmx_vmcs_enum   = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
+                        /* VPID 16 bits ASID. */
+                        HWACCMR0Globals.uMaxASID                = 0x10000; /* exclusive */
 
                         if (HWACCMR0Globals.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)

@@ -331,5 +333,5 @@
                 {
                     /* Query AMD features. */
-                    ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &HWACCMR0Globals.svm.u32Features);
+                    ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.uMaxASID, &u32Dummy, &HWACCMR0Globals.svm.u32Features);
 
                     HWACCMR0Globals.svm.fSupported = true;

@@ -591,6 +593,9 @@
     pCpu->fFlushTLB = true;
 
+    pCpu->uCurrentASID = 0;   /* we'll aways increment this the first time (host uses ASID 0) */
+    pCpu->cTLBFlushes  = 0;
+
     /* Should never happen */
-    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
+    if (!pCpu->pMemObj)
     {
         AssertFailed();

@@ -599,11 +604,11 @@
     }
 
-    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
-    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);
+    pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
+    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
 
     paRc[idCpu]  = HWACCMR0Globals.pfnEnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
     AssertRC(paRc[idCpu]);
     if (VBOX_SUCCESS(paRc[idCpu]))
-        HWACCMR0Globals.aCpuInfo[idCpu].fConfigured = true;
+        pCpu->fConfigured = true;
 
     return;

@@ -620,20 +625,24 @@
 static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
 {
-    void    *pvPageCpu;
-    RTHCPHYS pPageCpuPhys;
-    int     *paRc = (int *)pvUser1;
+    void           *pvPageCpu;
+    RTHCPHYS        pPageCpuPhys;
+    int            *paRc = (int *)pvUser1;
+    PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
 
     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
     Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
 
-    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
+    if (!pCpu->pMemObj)
         return;
 
-    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
-    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);
-
-    paRc[idCpu] = HWACCMR0Globals.pfnDisableCpu(&HWACCMR0Globals.aCpuInfo[idCpu], pvPageCpu, pPageCpuPhys);
+    pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
+    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
+
+    paRc[idCpu] = HWACCMR0Globals.pfnDisableCpu(pCpu, pvPageCpu, pPageCpuPhys);
     AssertRC(paRc[idCpu]);
     HWACCMR0Globals.aCpuInfo[idCpu].fConfigured = false;
+
+    pCpu->uCurrentASID = 0;
+
     return;
 }

@@ -676,5 +685,4 @@
     pVM->hwaccm.s.vmx.msr.vmx_eptcaps       = HWACCMR0Globals.vmx.msr.vmx_eptcaps;
     pVM->hwaccm.s.svm.u32Rev                = HWACCMR0Globals.svm.u32Rev;
-    pVM->hwaccm.s.svm.u32MaxASID            = HWACCMR0Globals.svm.u32MaxASID;
     pVM->hwaccm.s.svm.u32Features           = HWACCMR0Globals.svm.u32Features;
     pVM->hwaccm.s.cpuid.u32AMDFeatureECX    = HWACCMR0Globals.cpuid.u32AMDFeatureECX;

@@ -684,4 +692,12 @@
     pVM->hwaccm.s.idEnteredCpu              = NIL_RTCPUID;
 #endif
+
+    pVM->hwaccm.s.uMaxASID                  = HWACCMR0Globals.uMaxASID;
+
+    /* Invalidate the last cpu we were running on. */
+    pVM->hwaccm.s.idLastCpu                 = NIL_RTCPUID;
+
+    /* we'll aways increment this the first time (host uses ASID 0) */
+    pVM->hwaccm.s.uCurrentASID              = 0;
 
     /* Init a VT-x or AMD-V VM. */
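
uMaxASID now lives once in the globals (and is copied per VM) and is an exclusive bound: on VT-x it is hardwired to 0x10000 because a VPID is a 16-bit tag, while on AMD-V it comes from CPUID leaf 0x8000000A, whose EBX reports the number of ASIDs (NASID). A sketch of the two sources; the helper name is invented, the CPUID/VPID facts are architectural:

    /* Illustrative: where the exclusive ASID bound comes from on each vendor. */
    static uint32_t hwaccmQueryMaxASIDSketch(bool fIntel)
    {
        if (fIntel)
            return 0x10000;   /* 2^16 VPID values; 0 stays reserved for the host */

        uint32_t uEAX, uEBX, uECX, uEDX;
        ASMCpuId(0x8000000A, &uEAX, &uEBX, &uECX, &uEDX);   /* SVM feature leaf */
        return uEBX;          /* NASID: number of address space identifiers */
    }

The per-cpu ASID counters are also reset here, in HWACCMR0EnableCPU/HWACCMR0DisableCPU, rather than in the vendor-specific enable paths, which is why the equivalent lines disappear from HWSVMR0.cpp below.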
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

r12989 → r13025

@@ -86,6 +86,4 @@
     ASMWrMsr(MSR_K8_VM_HSAVE_PA, pPageCpuPhys);
 
-    pCpu->uCurrentASID = 0;   /* we'll aways increment this the first time (host uses ASID 0) */
-    pCpu->cTLBFlushes  = 0;
     return VINF_SUCCESS;
 }

@@ -114,5 +112,4 @@
     /* Invalidate host state physical address. */
     ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
-    pCpu->uCurrentASID = 0;
 
     return VINF_SUCCESS;

@@ -203,10 +200,4 @@
         pVM->hwaccm.s.svm.fAlwaysFlushTLB = true;
     }
-
-    /* Invalidate the last cpu we were running on. */
-    pVM->hwaccm.s.svm.idLastCpu    = NIL_RTCPUID;
-
-    /* we'll aways increment this the first time (host uses ASID 0) */
-    pVM->hwaccm.s.svm.uCurrentASID = 0;
     return VINF_SUCCESS;
 }

@@ -917,11 +908,11 @@
 #ifdef LOG_ENABLED
     pCpu = HWACCMR0GetCurrentCpu();
-    if (    pVM->hwaccm.s.svm.idLastCpu != pCpu->idCpu
-        ||  pVM->hwaccm.s.svm.cTLBFlushes != pCpu->cTLBFlushes)
-    {
-        if (pVM->hwaccm.s.svm.idLastCpu != pCpu->idCpu)
-            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVM->hwaccm.s.svm.idLastCpu, pCpu->idCpu));
+    if (    pVM->hwaccm.s.idLastCpu  != pCpu->idCpu
+        ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+    {
+        if (pVM->hwaccm.s.idLastCpu != pCpu->idCpu)
+            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVM->hwaccm.s.idLastCpu, pCpu->idCpu));
         else
-            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVM->hwaccm.s.svm.cTLBFlushes, pCpu->cTLBFlushes));
+            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     }
     if (pCpu->fFlushTLB)

@@ -949,21 +940,21 @@
     /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
     /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
-    if (    pVM->hwaccm.s.svm.idLastCpu != pCpu->idCpu
+    if (    pVM->hwaccm.s.idLastCpu != pCpu->idCpu
             /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
-        ||  pVM->hwaccm.s.svm.cTLBFlushes != pCpu->cTLBFlushes)
+        ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     {
         /* Force a TLB flush on VM entry. */
-        pVM->hwaccm.s.svm.fForceTLBFlush = true;
+        pVM->hwaccm.s.fForceTLBFlush = true;
     }
     else
         Assert(!pCpu->fFlushTLB || pVM->hwaccm.s.svm.fAlwaysFlushTLB);
 
-    pVM->hwaccm.s.svm.idLastCpu = pCpu->idCpu;
+    pVM->hwaccm.s.idLastCpu = pCpu->idCpu;
 
     /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
-    if (    pVM->hwaccm.s.svm.fForceTLBFlush
+    if (    pVM->hwaccm.s.fForceTLBFlush
         && !pVM->hwaccm.s.svm.fAlwaysFlushTLB)
     {
-        if (    ++pCpu->uCurrentASID >= pVM->hwaccm.s.svm.u32MaxASID
+        if (    ++pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID
             ||  pCpu->fFlushTLB)
         {

@@ -976,6 +967,6 @@
             STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushASID);
 
-        pVM->hwaccm.s.svm.cTLBFlushes  = pCpu->cTLBFlushes;
-        pVM->hwaccm.s.svm.uCurrentASID = pCpu->uCurrentASID;
+        pVM->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
+        pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
     }
     else

@@ -984,14 +975,14 @@
 
         /* We never increase uCurrentASID in the fAlwaysFlushTLB (erratum 170) case. */
-        if (!pCpu->uCurrentASID || !pVM->hwaccm.s.svm.uCurrentASID)
-            pVM->hwaccm.s.svm.uCurrentASID = pCpu->uCurrentASID = 1;
-
-        Assert(!pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVM->hwaccm.s.svm.fForceTLBFlush);
-        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVM->hwaccm.s.svm.fForceTLBFlush;
-    }
-    AssertMsg(pVM->hwaccm.s.svm.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVM->hwaccm.s.svm.cTLBFlushes, pCpu->cTLBFlushes));
-    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.svm.u32MaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
-    AssertMsg(pVM->hwaccm.s.svm.uCurrentASID >= 1 && pVM->hwaccm.s.svm.uCurrentASID < pVM->hwaccm.s.svm.u32MaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVM->hwaccm.s.svm.uCurrentASID));
-    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVM->hwaccm.s.svm.uCurrentASID;
+        if (!pCpu->uCurrentASID || !pVM->hwaccm.s.uCurrentASID)
+            pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
+
+        Assert(!pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVM->hwaccm.s.fForceTLBFlush);
+        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVM->hwaccm.s.fForceTLBFlush;
+    }
+    AssertMsg(pVM->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
+    AssertMsg(pVM->hwaccm.s.uCurrentASID >= 1 && pVM->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVM->hwaccm.s.uCurrentASID));
+    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVM->hwaccm.s.uCurrentASID;
 
 #ifdef VBOX_WITH_STATISTICS

@@ -1004,5 +995,5 @@
     /* In case we execute a goto ResumeExecution later on. */
     pVM->hwaccm.s.svm.fResumeVM      = true;
-    pVM->hwaccm.s.svm.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;
+    pVM->hwaccm.s.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;
 
     Assert(sizeof(pVM->hwaccm.s.svm.pVMCBPhys) == 8);

@@ -1661,5 +1652,5 @@
 
             /* Must be set by PGMSyncCR3 */
-            Assert(PGMGetGuestMode(pVM) <= PGMMODE_PROTECTED || pVM->hwaccm.s.svm.fForceTLBFlush);
+            Assert(PGMGetGuestMode(pVM) <= PGMMODE_PROTECTED || pVM->hwaccm.s.fForceTLBFlush);
         }
         if (rc == VINF_SUCCESS)

@@ -2059,5 +2050,5 @@
     Assert(pVM->hwaccm.s.svm.fSupported);
 
-    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVM->hwaccm.s.svm.idLastCpu, pVM->hwaccm.s.svm.uCurrentASID));
+    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVM->hwaccm.s.idLastCpu, pVM->hwaccm.s.uCurrentASID));
     pVM->hwaccm.s.svm.fResumeVM = false;
 

@@ -2195,5 +2186,5 @@
 VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
 {
-    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.svm.fForceTLBFlush;
+    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.fForceTLBFlush;
 
     /* Skip it if a TLB flush is already pending. */

@@ -2227,5 +2218,5 @@
 VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
 {
-    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.svm.fForceTLBFlush;
+    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.fForceTLBFlush;
 
     Assert(pVM->hwaccm.s.fNestedPaging);
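
The SVM hunks above are largely mechanical renames (svm.* fields becoming the shared ones), but together they spell out the tagged-TLB scheme both backends now share: each host cpu hands out ASIDs monotonically, and running out of them costs one real TLB flush plus a generation bump that invalidates every other VM's cached ASID. A simplified sketch of that allocation (one invented helper; the erratum-170 always-flush path and the statistics counters are omitted):

    static uint32_t hwaccmAllocASIDSketch(PHWACCM_CPUINFO pCpu, PVM pVM)
    {
        /* Rescheduled to another cpu, or someone flushed this cpu's TLB since
         * we last ran here? Then our previous ASID can no longer be trusted. */
        if (    pVM->hwaccm.s.idLastCpu   != pCpu->idCpu
            ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
            pVM->hwaccm.s.fForceTLBFlush = true;

        pVM->hwaccm.s.idLastCpu = pCpu->idCpu;

        if (pVM->hwaccm.s.fForceTLBFlush)
        {
            if (++pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID)
            {
                pCpu->uCurrentASID = 1;   /* wrap; ASID 0 belongs to the host */
                pCpu->cTLBFlushes++;      /* new generation: a real flush follows */
            }
            pVM->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
            pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
        }
        return pVM->hwaccm.s.uCurrentASID;   /* -> VMCB TLBCtrl.n.u32ASID, or the VPID field */
    }

In the SVM code the flush request ends up in the VMCB's u1TLBFlush bit; the equivalent VPID flush on the VT-x side is still a stub in this changeset (see the empty fForceTLBFlush block below).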
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

r12989 → r13025

@@ -275,8 +275,13 @@
     val = val | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET
-              | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT;    /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
+
+    /* Without nested paging we should intercept invlpg and cr3 mov instructions. */
+    if (!pVM->hwaccm.s.fNestedPaging)
+        val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
+               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
+               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
 
     /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT might cause a vmlaunch failure with an invalid control fields error. (combined with some other exit reasons) */

@@ -321,4 +326,8 @@
         val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;
 
+#ifdef HWACCM_VTX_WITH_EPT
+        if (pVM->hwaccm.s.fNestedPaging)
+            val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;
+#endif
         /* Mask away the bits that the CPU doesn't support */
         /** @todo make sure they don't conflict with the above requirements. */

@@ -362,4 +371,8 @@
      */
     pVM->hwaccm.s.vmx.u32TrapMask = HWACCM_VMX_TRAP_MASK;
+#ifndef DEBUG
+    if (pVM->hwaccm.s.fNestedPaging)
+        pVM->hwaccm.s.vmx.u32TrapMask &= ~RT_BIT(X86_XCPT_PF);   /* no longer need to intercept #PF. */
+#endif
     rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXCEPTION_BITMAP, pVM->hwaccm.s.vmx.u32TrapMask);
     AssertRC(rc);

@@ -994,6 +1007,9 @@
         /* Note: protected mode & paging are always enabled; we use them for emulating real and protected mode without paging too. */
         val |= X86_CR0_PE | X86_CR0_PG;
-        /* Note: We must also set this as we rely on protecting various pages for which supervisor writes must be caught. */
-        val |= X86_CR0_WP;
+        if (!pVM->hwaccm.s.fNestedPaging)
+        {
+            /* Note: We must also set this as we rely on protecting various pages for which supervisor writes must be caught. */
+            val |= X86_CR0_WP;
+        }
 
         /* Always enable caching. */

@@ -1027,29 +1043,34 @@
         /* Set the required bits in cr4 too (currently X86_CR4_VMXE). */
         val = pCtx->cr4 | (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
-        switch(pVM->hwaccm.s.enmShadowMode)
-        {
-        case PGMMODE_REAL:          /* Real mode                 -> emulated using v86 mode */
-        case PGMMODE_PROTECTED:     /* Protected mode, no paging -> emulated using identity mapping. */
-        case PGMMODE_32_BIT:        /* 32-bit paging. */
-            break;
-
-        case PGMMODE_PAE:           /* PAE paging. */
-        case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
-            /** @todo use normal 32 bits paging */
-            val |= X86_CR4_PAE;
-            break;
-
-        case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
-        case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
+
+        if (!pVM->hwaccm.s.fNestedPaging)
+        {
+            switch(pVM->hwaccm.s.enmShadowMode)
+            {
+            case PGMMODE_REAL:          /* Real mode                 -> emulated using v86 mode */
+            case PGMMODE_PROTECTED:     /* Protected mode, no paging -> emulated using identity mapping. */
+            case PGMMODE_32_BIT:        /* 32-bit paging. */
+                break;
+
+            case PGMMODE_PAE:           /* PAE paging. */
+            case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
+                /** @todo use normal 32 bits paging */
+                val |= X86_CR4_PAE;
+                break;
+
+            case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
+            case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
 #ifdef VBOX_ENABLE_64_BITS_GUESTS
-            break;
+                break;
 #else
-            AssertFailed();
-            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
-#endif
-        default:                   /* shut up gcc */
-            AssertFailed();
-            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
-        }
+                AssertFailed();
+                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
+#endif
+            default:                   /* shut up gcc */
+                AssertFailed();
+                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
+            }
+        }
+
 #ifdef HWACCM_VMX_EMULATE_REALMODE
         /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */

@@ -1080,7 +1101,20 @@
     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
     {
-        /* Save our shadow CR3 register. */
         val = PGMGetHyperCR3(pVM);
         Assert(val);
+        if (pVM->hwaccm.s.fNestedPaging)
+        {
+#if HC_ARCH_BITS == 64
+            rc = VMXWriteVMCS(VMX_VMCS_CTRL_EPTP_FULL, val);
+#else
+            rc = VMXWriteVMCS(VMX_VMCS_CTRL_EPTP_FULL, val);
+            rc = VMXWriteVMCS(VMX_VMCS_CTRL_EPTP_HIGH, val);
+#endif
+            AssertRC(rc);
+
+            /* Save the real guest CR3 in VMX_VMCS_GUEST_CR3 */
+            val = pCtx->cr3;
+        }
+        /* Save our shadow CR3 register. */
         rc = VMXWriteVMCS(VMX_VMCS_GUEST_CR3, val);
         AssertRC(rc);

@@ -1255,4 +1289,5 @@
     RTGCUINTPTR errCode, instrInfo, uInterruptState;
     bool        fSyncTPR = false;
+    PHWACCM_CPUINFO pCpu = 0;
     unsigned    cResume = 0;
 #ifdef VBOX_STRICT

@@ -1412,4 +1447,18 @@
     }
 
+#ifdef LOG_ENABLED
+    pCpu = HWACCMR0GetCurrentCpu();
+    if (    pVM->hwaccm.s.idLastCpu   != pCpu->idCpu
+        ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+    {
+        if (pVM->hwaccm.s.idLastCpu != pCpu->idCpu)
+            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVM->hwaccm.s.idLastCpu, pCpu->idCpu));
+        else
+            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+    }
+    if (pCpu->fFlushTLB)
+        Log(("Force TLB flush: first time cpu %d is used -> flush\n", pCpu->idCpu));
+#endif
+
     /*
      * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING 3!

@@ -1432,4 +1481,69 @@
         STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
         goto end;
+    }
+
+    /* Deal with tagged TLBs if VPID is supported. */
+    if (pVM->hwaccm.s.vmx.fVPID)
+    {
+        pCpu = HWACCMR0GetCurrentCpu();
+        /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
+        /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
+        if (    pVM->hwaccm.s.idLastCpu != pCpu->idCpu
+                /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
+            ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+        {
+            /* Force a TLB flush on VM entry. */
+            pVM->hwaccm.s.fForceTLBFlush = true;
+        }
+        else
+            Assert(!pCpu->fFlushTLB);
+
+        pVM->hwaccm.s.idLastCpu = pCpu->idCpu;
+
+        /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
+        if (pVM->hwaccm.s.fForceTLBFlush)
+        {
+            if (    ++pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID
+                ||  pCpu->fFlushTLB)
+            {
+                pCpu->fFlushTLB                  = false;
+                pCpu->uCurrentASID               = 1;       /* start at 1; host uses 0 */
+                pCpu->cTLBFlushes++;
+            }
+            else
+            {
+                STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushASID);
+                pVM->hwaccm.s.fForceTLBFlush     = false;
+            }
+
+            pVM->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
+            pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
+        }
+        else
+        {
+            Assert(!pCpu->fFlushTLB);
+
+            if (!pCpu->uCurrentASID || !pVM->hwaccm.s.uCurrentASID)
+                pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
+        }
+        AssertMsg(pVM->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+        AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
+        AssertMsg(pVM->hwaccm.s.uCurrentASID >= 1 && pVM->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVM->hwaccm.s.uCurrentASID));
+
+        rc  = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_VPID, pVM->hwaccm.s.uCurrentASID);
+        AssertRC(rc);
+
+        if (pVM->hwaccm.s.fForceTLBFlush)
+        {
+
+        }
+
+#ifdef VBOX_WITH_STATISTICS
+        if (pVM->hwaccm.s.fForceTLBFlush)
+            STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBWorldSwitch);
+        else
+            STAM_COUNTER_INC(&pVM->hwaccm.s.StatNoFlushTLBWorldSwitch);
+#endif
+
     }
 

@@ -1463,5 +1577,6 @@
 
     /* In case we execute a goto ResumeExecution later on. */
-    pVM->hwaccm.s.vmx.fResumeVM = true;
+    pVM->hwaccm.s.vmx.fResumeVM  = true;
+    pVM->hwaccm.s.fForceTLBFlush = false;
 
     /*

@@ -1675,5 +1790,19 @@
     CPUMSetGuestCR4(pVM, val);
 
+    /* Can be updated behind our back in the nested paging case. */
     CPUMSetGuestCR2(pVM, ASMGetCR2());
+
+    /* Note: no reason to sync back the CRx registers. They can't be changed by the guest. */
+    /* Note: only in the nested paging case can CR3 & CR4 be changed by the guest. */
+    if (pVM->hwaccm.s.fNestedPaging)
+    {
+        VMXReadVMCS(VMX_VMCS_GUEST_CR3, &val);
+
+        if (val != pCtx->cr3)
+        {
+            CPUMSetGuestCR3(pVM, val);
+            PGMUpdateCR3(pVM, val);
+        }
+    }
 
     /* Sync back DR7 here. */

@@ -1857,5 +1986,29 @@
             case X86_XCPT_PF: /* Page fault */
             {
-                Log2(("Page fault at %VGv error code %x\n", exitQualification ,errCode));
+#ifdef DEBUG
+                if (pVM->hwaccm.s.fNestedPaging)
+                {   /* A genuine pagefault.
+                     * Forward the trap to the guest by injecting the exception and resuming execution.
+                     */
+                    Log(("Guest page fault at %VGv cr2=%VGv error code %x rsp=%VGv\n", (RTGCPTR)pCtx->rip, exitQualification, errCode, (RTGCPTR)pCtx->rsp));
+
+                    STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);
+                    /* The error code might have been changed. */
+                    errCode = TRPMGetErrorCode(pVM);
+
+                    TRPMResetTrap(pVM);
+
+                    /* Now we must update CR2. */
+                    pCtx->cr2 = exitQualification;
+                    rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
+                    AssertRC(rc);
+
+                    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
+                    goto ResumeExecution;
+                }
+#endif
+                Assert(!pVM->hwaccm.s.fNestedPaging);
+
+                Log2(("Page fault at %VGv error code %x\n", exitQualification, errCode));
                 /* Exit qualification contains the linear address of the page fault. */
                 TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);

@@ -2056,4 +2209,37 @@
     }
 
+    case VMX_EXIT_EPT_VIOLATION:        /* 48 EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures. */
+    {
+        Log2(("EPT Page fault at %VGv error code %x\n", exitQualification ,errCode));
+        Assert(pVM->hwaccm.s.fNestedPaging);
+
+        /* Exit qualification contains the linear address of the page fault. */
+        TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
+        TRPMSetErrorCode(pVM, errCode);
+        TRPMSetFaultAddress(pVM, exitQualification );
+
+        /* Handle the pagefault trap for the nested shadow table. */
+        rc = PGMR0Trap0eHandlerNestedPaging(pVM, PGMMODE_EPT, errCode, CPUMCTX2CORE(pCtx), exitQualification );
+        Log2(("PGMR0Trap0eHandlerNestedPaging %VGv returned %Vrc\n", pCtx->rip, rc));
+        if (rc == VINF_SUCCESS)
+        {   /* We've successfully synced our shadow pages, so let's just continue execution. */
+            Log2(("Shadow page fault at %VGv cr2=%VGp error code %x\n", pCtx->rip, exitQualification , errCode));
+            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);
+
+            TRPMResetTrap(pVM);
+
+            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
+            goto ResumeExecution;
+        }
+
+#ifdef VBOX_STRICT
+        if (rc != VINF_EM_RAW_EMULATE_INSTR)
+            LogFlow(("PGMTrap0eHandlerNestedPaging failed with %d\n", rc));
+#endif
+        /* Need to go back to the recompiler to emulate the instruction. */
+        TRPMResetTrap(pVM);
+        break;
+    }
+
     case VMX_EXIT_IRQ_WINDOW:           /* 7 Interrupt window. */
         /* Clear VM-exit on IF=1 change. */

@@ -2113,4 +2299,6 @@
     {
         Log2(("VMX: invlpg\n"));
+        Assert(!pVM->hwaccm.s.fNestedPaging);
+
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvpg);
         rc = EMInterpretInvlpg(pVM, CPUMCTX2CORE(pCtx), exitQualification);

@@ -2165,4 +2353,5 @@
                 break;
             case 3:
+                Assert(!pVM->hwaccm.s.fNestedPaging);
                 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
                 break;

@@ -2191,4 +2380,6 @@
             Log2(("VMX: mov x, crx\n"));
             STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxRead);
+
+            Assert(!pVM->hwaccm.s.fNestedPaging || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != USE_REG_CR3);
 
             /* CR8 reads only cause an exit when the TPR shadow feature isn't present. */

@@ -2692,2 +2883,43 @@
 }
 
+/**
+ * Invalidates a guest page
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM to operate on.
+ * @param   GCVirt      Page to invalidate
+ */
+VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
+{
+    bool fFlushPending = pVM->hwaccm.s.fForceTLBFlush;
+
+    /* @todo Only relevant if we want to use VPID. */
+
+    /* Skip it if a TLB flush is already pending. */
+    if (!fFlushPending)
+    {
+    }
+    return VINF_SUCCESS;
+}
+
+/**
+ * Invalidates a guest page by physical address
+ *
+ * NOTE: Assumes the current instruction references this physical page though a virtual address!!
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM to operate on.
+ * @param   GCPhys      Page to invalidate
+ */
+VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
+{
+    bool fFlushPending = pVM->hwaccm.s.fForceTLBFlush;
+
+    Assert(pVM->hwaccm.s.fNestedPaging);
+
+    /* Skip it if a TLB flush is already pending. */
+    if (!fFlushPending)
+    {
+    }
+    return VINF_SUCCESS;
+}
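
The new VMX_EXIT_EPT_VIOLATION case (exit reason 48) reuses the existing #PF plumbing: it raises a TRPM page-fault trap and hands it to PGMR0Trap0eHandlerNestedPaging with PGMMODE_EPT, resuming the guest if the nested shadow table could be synced. Architecturally, the exit qualification of an EPT violation carries access-type flags, while the faulting guest-physical address is reported in its own VMCS field. A decode sketch following the Intel SDM bit layout; the struct and helper names are made up:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct EPTVIOLATIONINFO
    {
        bool fRead;         /* bit 0: data read access */
        bool fWrite;        /* bit 1: data write access */
        bool fExec;         /* bit 2: instruction fetch */
        bool fLinearValid;  /* bit 7: the guest-linear address field is valid */
    } EPTVIOLATIONINFO;

    static EPTVIOLATIONINFO eptDecodeQualificationSketch(uint64_t uQual)
    {
        EPTVIOLATIONINFO Info;
        Info.fRead        = (uQual & (1ULL << 0)) != 0;
        Info.fWrite       = (uQual & (1ULL << 1)) != 0;
        Info.fExec        = (uQual & (1ULL << 2)) != 0;
        Info.fLinearValid = (uQual & (1ULL << 7)) != 0;
        return Info;
    }

Note that the VPID plumbing is deliberately incomplete at this revision: the fForceTLBFlush block after the VMX_VMCS_GUEST_FIELD_VPID write is empty, and both VMXR0InvalidatePage and VMXR0InvalidatePhysPage are stubs that only check for a pending flush, so the actual tagged flush is left for a later changeset.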