VirtualBox

Changeset 43494 in vbox for trunk/src/VBox/VMM/VMMR0


Ignore:
Timestamp:
Oct 1, 2012 2:29:11 PM (12 years ago)
Author:
vboxsync
Message:

VMM/VMMR0,R3: HM cleanup.

Location:
trunk/src/VBox/VMM/VMMR0
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r43430 r43494  
    100100
    101101    /** Maximum ASID allowed. */
    102     uint32_t                        uMaxASID;
     102    uint32_t                        uMaxAsid;
    103103
    104104    /** VT-x data. */
     
    145145    struct
    146146    {
    147         /* HWCR msr (for diagnostics) */
    148         uint64_t                    msrHWCR;
     147        /* HWCR MSR (for diagnostics) */
     148        uint64_t                    msrHwcr;
    149149
    150150        /** SVM revision. */
     
    419419                g_HvmR0.vmx.hostEFER            = ASMRdMsr(MSR_K6_EFER);
    420420                /* VPID 16 bits ASID. */
    421                 g_HvmR0.uMaxASID                = 0x10000; /* exclusive */
     421                g_HvmR0.uMaxAsid                = 0x10000; /* exclusive */
    422422
    423423                if (g_HvmR0.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     
    569569        /* Query AMD features. */
    570570        uint32_t u32Dummy;
    571         ASMCpuId(0x8000000A, &g_HvmR0.svm.u32Rev, &g_HvmR0.uMaxASID,
    572                  &u32Dummy, &g_HvmR0.svm.u32Features);
     571        ASMCpuId(0x8000000A, &g_HvmR0.svm.u32Rev, &g_HvmR0.uMaxAsid, &u32Dummy, &g_HvmR0.svm.u32Features);
    573572
    574573        /*
     
    588587        if (RT_SUCCESS(rc))
    589588        {
    590             /* Read the HWCR msr for diagnostics. */
    591             g_HvmR0.svm.msrHWCR    = ASMRdMsr(MSR_K8_HWCR);
     589            /* Read the HWCR MSR for diagnostics. */
     590            g_HvmR0.svm.msrHwcr    = ASMRdMsr(MSR_K8_HWCR);
    592591            g_HvmR0.svm.fSupported = true;
    593592        }
     
    871870
    872871    pCpu->idCpu         = idCpu;
    873     pCpu->uCurrentASID  = 0;    /* we'll aways increment this the first time (host uses ASID 0) */
    874     /* Do NOT reset cTLBFlushes here, see @bugref{6255}. */
     872    pCpu->uCurrentAsid  = 0;    /* we'll always increment this the first time (host uses ASID 0) */
     873    /* Do NOT reset cTlbFlushes here, see @bugref{6255}. */
    875874
    876875    int rc;
     
    939938        Assert(g_HvmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
    940939        g_HvmR0.aCpuInfo[i].fConfigured = false;
    941         g_HvmR0.aCpuInfo[i].cTLBFlushes = 0;
     940        g_HvmR0.aCpuInfo[i].cTlbFlushes = 0;
    942941    }
    943942
     
    10511050        rc = VINF_SUCCESS; /* nothing to do */
    10521051
    1053     pCpu->uCurrentASID = 0;
     1052    pCpu->uCurrentAsid = 0;
    10541053    return rc;
    10551054}
     
    12151214    pVM->hm.s.vmx.msr.vmx_vmcs_enum     = g_HvmR0.vmx.msr.vmx_vmcs_enum;
    12161215    pVM->hm.s.vmx.msr.vmx_eptcaps       = g_HvmR0.vmx.msr.vmx_eptcaps;
    1217     pVM->hm.s.svm.msrHWCR               = g_HvmR0.svm.msrHWCR;
     1216    pVM->hm.s.svm.msrHwcr               = g_HvmR0.svm.msrHwcr;
    12181217    pVM->hm.s.svm.u32Rev                = g_HvmR0.svm.u32Rev;
    12191218    pVM->hm.s.svm.u32Features           = g_HvmR0.svm.u32Features;
     
    12221221    pVM->hm.s.lLastError                = g_HvmR0.lLastError;
    12231222
    1224     pVM->hm.s.uMaxASID                  = g_HvmR0.uMaxASID;
     1223    pVM->hm.s.uMaxAsid                  = g_HvmR0.uMaxAsid;
    12251224
    12261225
     
    12471246
    12481247        /* We'll always increment this the first time (host uses ASID 0) */
    1249         pVCpu->hm.s.uCurrentASID        = 0;
     1248        pVCpu->hm.s.uCurrentAsid        = 0;
    12501249    }
    12511250
     
    15001499        /* Reset these to force a TLB flush for the next entry. (-> EXPENSIVE) */
    15011500        pVCpu->hm.s.idLastCpu    = NIL_RTCPUID;
    1502         pVCpu->hm.s.uCurrentASID = 0;
     1501        pVCpu->hm.s.uCurrentAsid = 0;
    15031502        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    15041503    }
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r43455 r43494  
    138138     * Theoretically, other hypervisors may have used ASIDs, ideally we should flush all non-zero ASIDs
    139139     * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
    140      * upon VMRUN). Therefore, just set the fFlushASIDBeforeUse flag which instructs hmR0SvmSetupTLB()
     140     * upon VMRUN). Therefore, just set the fFlushAsidBeforeUse flag which instructs hmR0SvmSetupTLB()
    141141     * to flush the TLB with before using a new ASID.
    142142     */
    143     pCpu->fFlushASIDBeforeUse = true;
     143    pCpu->fFlushAsidBeforeUse = true;
    144144
    145145    /*
    146146     * Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}.
    147147     */
    148     ++pCpu->cTLBFlushes;
     148    ++pCpu->cTlbFlushes;
    149149
    150150    return VINF_SUCCESS;
     
    237237        pVCpu->hm.s.svm.hMemObjVMCBHost  = NIL_RTR0MEMOBJ;
    238238        pVCpu->hm.s.svm.hMemObjVMCB      = NIL_RTR0MEMOBJ;
    239         pVCpu->hm.s.svm.hMemObjMSRBitmap = NIL_RTR0MEMOBJ;
     239        pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
    240240
    241241        /* Allocate one page for the host context */
     
    260260
    261261        /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it) */
    262         rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMSRBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
     262        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
    263263        if (RT_FAILURE(rc))
    264264            return rc;
    265265
    266         pVCpu->hm.s.svm.pvMSRBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMSRBitmap);
    267         pVCpu->hm.s.svm.HCPhysMSRBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMSRBitmap, 0);
     266        pVCpu->hm.s.svm.pvMsrBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
     267        pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0);
    268268        /* Set all bits to intercept all MSR accesses. */
    269         ASMMemFill32(pVCpu->hm.s.svm.pvMSRBitmap, 2 << PAGE_SHIFT, 0xffffffff);
     269        ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, 0xffffffff);
    270270    }
    271271
     
    301301            pVCpu->hm.s.svm.hMemObjVMCB = NIL_RTR0MEMOBJ;
    302302        }
    303         if (pVCpu->hm.s.svm.hMemObjMSRBitmap != NIL_RTR0MEMOBJ)
    304         {
    305             RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMSRBitmap, false);
    306             pVCpu->hm.s.svm.pvMSRBitmap      = 0;
    307             pVCpu->hm.s.svm.HCPhysMSRBitmap  = 0;
    308             pVCpu->hm.s.svm.hMemObjMSRBitmap = NIL_RTR0MEMOBJ;
     303        if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
     304        {
     305            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);
     306            pVCpu->hm.s.svm.pvMsrBitmap      = 0;
     307            pVCpu->hm.s.svm.HCPhysMsrBitmap  = 0;
     308            pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
    309309        }
    310310    }
     
    411411        /* Set IO and MSR bitmap addresses. */
    412412        pvVMCB->ctrl.u64IOPMPhysAddr  = pVM->hm.s.svm.HCPhysIOBitmap;
    413         pvVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMSRBitmap;
     413        pvVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;
    414414
    415415        /* No LBR virtualization. */
     
    476476{
    477477    unsigned ulBit;
    478     uint8_t *pvMSRBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMSRBitmap;
     478    uint8_t *pvMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
    479479
    480480    if (ulMSR <= 0x00001FFF)
     
    488488        /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
    489489        ulBit = (ulMSR - 0xC0000000) * 2;
    490         pvMSRBitmap += 0x800;
     490        pvMsrBitmap += 0x800;
    491491    }
    492492    else if (   ulMSR >= 0xC0010000
     
    495495        /* AMD Seventh and Eighth Generation Processor MSRs */
    496496        ulBit = (ulMSR - 0xC0001000) * 2;
    497         pvMSRBitmap += 0x1000;
     497        pvMsrBitmap += 0x1000;
    498498    }
    499499    else
     
    504504    Assert(ulBit < 16 * 1024 - 1);
    505505    if (fRead)
    506         ASMBitClear(pvMSRBitmap, ulBit);
     506        ASMBitClear(pvMsrBitmap, ulBit);
    507507    else
    508         ASMBitSet(pvMSRBitmap, ulBit);
     508        ASMBitSet(pvMsrBitmap, ulBit);
    509509
    510510    if (fWrite)
    511         ASMBitClear(pvMSRBitmap, ulBit + 1);
     511        ASMBitClear(pvMsrBitmap, ulBit + 1);
    512512    else
    513         ASMBitSet(pvMSRBitmap, ulBit + 1);
     513        ASMBitSet(pvMsrBitmap, ulBit + 1);
    514514}
    515515
     
    10691069     * so we cannot reuse the ASIDs without flushing.
    10701070     */
    1071     bool fNewASID = false;
     1071    bool fNewAsid = false;
    10721072    if (    pVCpu->hm.s.idLastCpu   != pCpu->idCpu
    1073         ||  pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     1073        ||  pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
    10741074    {
    10751075        pVCpu->hm.s.fForceTLBFlush = true;
    1076         fNewASID = true;
     1076        fNewAsid = true;
    10771077    }
    10781078
     
    10961096         * This is the AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
    10971097         */
    1098         pCpu->uCurrentASID               = 1;
    1099         pVCpu->hm.s.uCurrentASID     = 1;
    1100         pVCpu->hm.s.cTLBFlushes      = pCpu->cTLBFlushes;
     1098        pCpu->uCurrentAsid               = 1;
     1099        pVCpu->hm.s.uCurrentAsid     = 1;
     1100        pVCpu->hm.s.cTlbFlushes      = pCpu->cTlbFlushes;
    11011101        pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
    11021102    }
    11031103    else if (pVCpu->hm.s.fForceTLBFlush)
    11041104    {
    1105         if (fNewASID)
    1106         {
    1107             ++pCpu->uCurrentASID;
     1105        if (fNewAsid)
     1106        {
     1107            ++pCpu->uCurrentAsid;
    11081108            bool fHitASIDLimit = false;
    1109             if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
     1109            if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
    11101110            {
    1111                 pCpu->uCurrentASID        = 1;  /* start at 1; host uses 0 */
    1112                 pCpu->cTLBFlushes++;
     1111                pCpu->uCurrentAsid        = 1;  /* start at 1; host uses 0 */
     1112                pCpu->cTlbFlushes++;
    11131113                fHitASIDLimit             = true;
    11141114
     
    11161116                {
    11171117                    pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    1118                     pCpu->fFlushASIDBeforeUse = true;
     1118                    pCpu->fFlushAsidBeforeUse = true;
    11191119                }
    11201120                else
    11211121                {
    11221122                    pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
    1123                     pCpu->fFlushASIDBeforeUse = false;
     1123                    pCpu->fFlushAsidBeforeUse = false;
    11241124                }
    11251125            }
    11261126
    11271127            if (   !fHitASIDLimit
    1128                 && pCpu->fFlushASIDBeforeUse)
     1128                && pCpu->fFlushAsidBeforeUse)
    11291129            {
    11301130                if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     
    11331133                {
    11341134                    pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
    1135                     pCpu->fFlushASIDBeforeUse = false;
     1135                    pCpu->fFlushAsidBeforeUse = false;
    11361136                }
    11371137            }
    11381138
    1139             pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID;
    1140             pVCpu->hm.s.cTLBFlushes  = pCpu->cTLBFlushes;
     1139            pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
     1140            pVCpu->hm.s.cTlbFlushes  = pCpu->cTlbFlushes;
    11411141        }
    11421142        else
     
    11681168
    11691169    /* Update VMCB with the ASID. */
    1170     pvVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentASID;
    1171 
    1172     AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
    1173               ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
    1174     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
    1175               ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    1176     AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
    1177               ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
     1170    pvVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
     1171
     1172    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
     1173              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
     1174    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
     1175              ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
     1176    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
     1177              ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
    11781178
    11791179#ifdef VBOX_WITH_STATISTICS
     
    14331433    if (pVCpu->hm.s.idLastCpu != pCpu->idCpu)
    14341434        LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu));
    1435     else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
    1436         LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
     1435    else if (pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
     1436        LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    14371437    else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
    14381438        LogFlow(("Manual TLB flush\n"));
     
    14781478    Assert(pvVMCB->ctrl.IntCtrl.n.u1VIrqMasking);
    14791479    Assert(pvVMCB->ctrl.u64IOPMPhysAddr  == pVM->hm.s.svm.HCPhysIOBitmap);
    1480     Assert(pvVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.HCPhysMSRBitmap);
     1480    Assert(pvVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.HCPhysMsrBitmap);
    14811481    Assert(pvVMCB->ctrl.u64LBRVirt == 0);
    14821482
     
    14941494        && !(pvVMCB->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
    14951495    {
    1496         pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
    1497         uint64_t u64GuestTSCAux = 0;
    1498         rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux);
     1496        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
     1497        uint64_t u64GuestTscAux = 0;
     1498        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
    14991499        AssertRC(rc2);
    1500         ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTSCAux);
     1500        ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
    15011501    }
    15021502
     
    15071507    pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVMCBHost, pVCpu->hm.s.svm.HCPhysVMCB, pCtx, pVM, pVCpu);
    15081508#endif
     1509
    15091510    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);
    15101511    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);
     
    15141515        /* Restore host's TSC_AUX. */
    15151516        if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
    1516             ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux);
     1517            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
    15171518
    15181519        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() +
     
    28952896    Assert(pVM->hm.s.svm.fSupported);
    28962897
    2897     LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.uCurrentASID));
     2898    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.uCurrentAsid));
    28982899    pVCpu->hm.s.fResumeVM = false;
    28992900
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r43469 r43494  
    165165     */
    166166    if (   pVM
    167         && pVM->hm.s.vmx.fVPID
     167        && pVM->hm.s.vmx.fVpid
    168168        && (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS))
    169169    {
    170170        hmR0VmxFlushVPID(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
    171         pCpu->fFlushASIDBeforeUse = false;
     171        pCpu->fFlushAsidBeforeUse = false;
    172172    }
    173173    else
    174         pCpu->fFlushASIDBeforeUse = true;
     174        pCpu->fFlushAsidBeforeUse = true;
    175175
    176176    /*
    177177     * Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}.
    178178     */
    179     ++pCpu->cTLBFlushes;
     179    ++pCpu->cTlbFlushes;
    180180
    181181    return VINF_SUCCESS;
     
    425425
    426426    /* Initialize these always, see hmR3InitFinalizeR0().*/
    427     pVM->hm.s.vmx.enmFlushEPT  = VMX_FLUSH_EPT_NONE;
    428     pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NONE;
     427    pVM->hm.s.vmx.enmFlushEpt  = VMX_FLUSH_EPT_NONE;
     428    pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
    429429
    430430    /* Determine optimal flush type for EPT. */
     
    434434        {
    435435            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)
    436                 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT;
     436                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
    437437            else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)
    438                 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS;
     438                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
    439439            else
    440440            {
     
    443443                 * We cannot ignore EPT at this point as we've already setup Unrestricted Guest execution.
    444444                 */
    445                 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
     445                pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
    446446                return VERR_VMX_GENERIC;
    447447            }
     
    452452             * Should never really happen. EPT is supported but INVEPT instruction is not supported.
    453453             */
    454             pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
     454            pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
    455455            return VERR_VMX_GENERIC;
    456456        }
     
    458458
    459459    /* Determine optimal flush type for VPID. */
    460     if (pVM->hm.s.vmx.fVPID)
     460    if (pVM->hm.s.vmx.fVpid)
    461461    {
    462462        if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
    463463        {
    464464            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
    465                 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT;
     465                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
    466466            else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)
    467                 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS;
     467                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
    468468            else
    469469            {
     
    476476                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)
    477477                    Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
    478                 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
    479                 pVM->hm.s.vmx.fVPID = false;
     478                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
     479                pVM->hm.s.vmx.fVpid = false;
    480480            }
    481481        }
     
    487487             */
    488488            Log(("VMXR0SetupVM: VPID supported without INVEPT support. Ignoring VPID.\n"));
    489             pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
    490             pVM->hm.s.vmx.fVPID = false;
     489            pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
     490            pVM->hm.s.vmx.fVpid = false;
    491491        }
    492492    }
     
    597597                val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;
    598598
    599             if (pVM->hm.s.vmx.fVPID)
     599            if (pVM->hm.s.vmx.fVpid)
    600600                val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;
    601601
     
    782782     * Setup the right TLB function based on CPU capabilities.
    783783     */
    784     if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID)
    785         pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth;
     784    if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
     785        pVM->hm.s.vmx.pfnSetupTaggedTlb = hmR0VmxSetupTLBBoth;
    786786    else if (pVM->hm.s.fNestedPaging)
    787         pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBEPT;
    788     else if (pVM->hm.s.vmx.fVPID)
    789         pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBVPID;
     787        pVM->hm.s.vmx.pfnSetupTaggedTlb = hmR0VmxSetupTLBEPT;
     788    else if (pVM->hm.s.vmx.fVpid)
     789        pVM->hm.s.vmx.pfnSetupTaggedTlb = hmR0VmxSetupTLBVPID;
    790790    else
    791         pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBDummy;
     791        pVM->hm.s.vmx.pfnSetupTaggedTlb = hmR0VmxSetupTLBDummy;
    792792
    793793vmx_end:
     
    24732473    PHMGLOBLCPUINFO pCpu;
    24742474
    2475     Assert(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID);
     2475    Assert(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid);
    24762476
    24772477    pCpu = HMR0GetCurrentCpu();
     
    24832483     * or the host Cpu is online after a suspend/resume, so we cannot reuse the current ASID anymore.
    24842484     */
    2485     bool fNewASID = false;
     2485    bool fNewAsid = false;
    24862486    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
    2487         || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     2487        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
    24882488    {
    24892489        pVCpu->hm.s.fForceTLBFlush = true;
    2490         fNewASID = true;
     2490        fNewAsid = true;
    24912491    }
    24922492
     
    25012501    if (pVCpu->hm.s.fForceTLBFlush)
    25022502    {
    2503         if (fNewASID)
    2504         {
    2505             ++pCpu->uCurrentASID;
    2506             if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
     2503        if (fNewAsid)
     2504        {
     2505            ++pCpu->uCurrentAsid;
     2506            if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
    25072507            {
    2508                 pCpu->uCurrentASID = 1;       /* start at 1; host uses 0 */
    2509                 pCpu->cTLBFlushes++;
    2510                 pCpu->fFlushASIDBeforeUse = true;
     2508                pCpu->uCurrentAsid = 1;       /* start at 1; host uses 0 */
     2509                pCpu->cTlbFlushes++;
     2510                pCpu->fFlushAsidBeforeUse = true;
    25112511            }
    25122512
    2513             pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID;
    2514             if (pCpu->fFlushASIDBeforeUse)
    2515                 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
     2513            pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
     2514            if (pCpu->fFlushAsidBeforeUse)
     2515                hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
    25162516        }
    25172517        else
     
    25202520                hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
    25212521            else
    2522                 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
    2523         }
    2524 
    2525         pVCpu->hm.s.cTLBFlushes    = pCpu->cTLBFlushes;
     2522                hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
     2523        }
     2524
     2525        pVCpu->hm.s.cTlbFlushes    = pCpu->cTlbFlushes;
    25262526        pVCpu->hm.s.fForceTLBFlush = false;
    25272527    }
    25282528    else
    25292529    {
    2530         AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID,
    2531                   ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
    2532                    pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes,
    2533                    pCpu->uCurrentASID, pCpu->cTLBFlushes));
     2530        AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
     2531                  ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
     2532                   pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
     2533                   pCpu->uCurrentAsid, pCpu->cTlbFlushes));
    25342534
    25352535        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
     
    25502550            }
    25512551            else
    2552                 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
     2552                hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
    25532553        }
    25542554        else
     
    25622562    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    25632563
    2564     AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
    2565               ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
    2566     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
    2567               ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    2568     AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
    2569               ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
     2564    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
     2565              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
     2566    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
     2567              ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
     2568    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
     2569              ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
    25702570
    25712571    /* Update VMCS with the VPID. */
    2572     int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID);
     2572    int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
    25732573    AssertRC(rc);
    25742574}
     
    25872587
    25882588    Assert(pVM->hm.s.fNestedPaging);
    2589     Assert(!pVM->hm.s.vmx.fVPID);
     2589    Assert(!pVM->hm.s.vmx.fVpid);
    25902590
    25912591    pCpu = HMR0GetCurrentCpu();
     
    25972597     */
    25982598    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
    2599         || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     2599        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
    26002600    {
    26012601        pVCpu->hm.s.fForceTLBFlush = true;
     
    26092609
    26102610    pVCpu->hm.s.idLastCpu   = pCpu->idCpu;
    2611     pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes;
     2611    pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
    26122612
    26132613    if (pVCpu->hm.s.fForceTLBFlush)
    2614         hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
     2614        hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
    26152615    else
    26162616    {
     
    26242624             */
    26252625            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
    2626             hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
     2626            hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
    26272627        }
    26282628    }
     
    26502650    PHMGLOBLCPUINFO pCpu;
    26512651
    2652     Assert(pVM->hm.s.vmx.fVPID);
     2652    Assert(pVM->hm.s.vmx.fVpid);
    26532653    Assert(!pVM->hm.s.fNestedPaging);
    26542654
     
    26622662     */
    26632663    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
    2664         || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     2664        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
    26652665    {
    26662666        /* Force a TLB flush on VM entry. */
     
    26782678    if (pVCpu->hm.s.fForceTLBFlush)
    26792679    {
    2680         ++pCpu->uCurrentASID;
    2681         if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
    2682         {
    2683             pCpu->uCurrentASID               = 1;       /* start at 1; host uses 0 */
    2684             pCpu->cTLBFlushes++;
    2685             pCpu->fFlushASIDBeforeUse        = true;
     2680        ++pCpu->uCurrentAsid;
     2681        if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
     2682        {
     2683            pCpu->uCurrentAsid               = 1;       /* start at 1; host uses 0 */
     2684            pCpu->cTlbFlushes++;
     2685            pCpu->fFlushAsidBeforeUse        = true;
    26862686        }
    26872687
    26882688        pVCpu->hm.s.fForceTLBFlush = false;
    2689         pVCpu->hm.s.cTLBFlushes    = pCpu->cTLBFlushes;
    2690         pVCpu->hm.s.uCurrentASID   = pCpu->uCurrentASID;
    2691         if (pCpu->fFlushASIDBeforeUse)
    2692             hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
     2689        pVCpu->hm.s.cTlbFlushes    = pCpu->cTlbFlushes;
     2690        pVCpu->hm.s.uCurrentAsid   = pCpu->uCurrentAsid;
     2691        if (pCpu->fFlushAsidBeforeUse)
     2692            hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
    26932693    }
    26942694    else
    26952695    {
    2696         AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID,
    2697                   ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
    2698                    pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes,
    2699                    pCpu->uCurrentASID, pCpu->cTLBFlushes));
     2696        AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
     2697                  ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
     2698                   pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
     2699                   pCpu->uCurrentAsid, pCpu->cTlbFlushes));
    27002700
    27012701        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
     
    27142714            }
    27152715            else
    2716                 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
     2716                hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
    27172717        }
    27182718    }
     
    27202720    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    27212721
    2722     AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
    2723               ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
    2724     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
    2725               ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    2726     AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
    2727               ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
    2728 
    2729     int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID);
     2722    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
     2723              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
     2724    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxASID,
     2725              ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
     2726    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxASID,
     2727              ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
     2728
     2729    int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
    27302730    AssertRC(rc);
    27312731
     
    30693069#ifdef LOG_ENABLED
    30703070    if (    pVM->hm.s.fNestedPaging
    3071         ||  pVM->hm.s.vmx.fVPID)
     3071        ||  pVM->hm.s.vmx.fVpid)
    30723072    {
    30733073        PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
     
    30773077                     pCpu->idCpu));
    30783078        }
    3079         else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
    3080         {
    3081             LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes,
    3082                      pCpu->cTLBFlushes));
     3079        else if (pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
     3080        {
     3081            LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTlbFlushes,
     3082                     pCpu->cTlbFlushes));
    30833083        }
    30843084        else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
     
    31503150    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
    31513151    /* Deal with tagged TLB setup and invalidation. */
    3152     pVM->hm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);
     3152    pVM->hm.s.vmx.pfnSetupTaggedTlb(pVM, pVCpu);
    31533153
    31543154    /*
     
    50175017    uint64_t descriptor[2];
    50185018
    5019     Assert(pVM->hm.s.vmx.fVPID);
     5019    Assert(pVM->hm.s.vmx.fVpid);
    50205020    if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
    50215021    {
     
    50265026    {
    50275027        AssertPtr(pVCpu);
    5028         AssertMsg(pVCpu->hm.s.uCurrentASID != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID));
    5029         AssertMsg(pVCpu->hm.s.uCurrentASID <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID));
    5030         descriptor[0] = pVCpu->hm.s.uCurrentASID;
     5028        AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
     5029        AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
     5030        descriptor[0] = pVCpu->hm.s.uCurrentAsid;
    50315031        descriptor[1] = GCPtr;
    50325032    }
    50335033    int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
    50345034    AssertMsg(rc == VINF_SUCCESS,
    5035               ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentASID : 0, GCPtr, rc));
     5035              ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
    50365036#ifdef VBOX_WITH_STATISTICS
    50375037    if (pVCpu)
     
    50655065         * function maybe called in a loop with individual addresses.
    50665066         */
    5067         if (pVM->hm.s.vmx.fVPID)
     5067        if (pVM->hm.s.vmx.fVpid)
    50685068        {
    50695069            /* If we can flush just this page do it, otherwise flush as little as possible. */
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette