Changeset 41277 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp: May 14, 2012 10:15:38 AM
Author: vboxsync
Message: VMM/VMMR0/HWSVMR0: AMD FlushByASID feature. Flush before reusing ASIDs when enabling SVM. Cleaned up code a bit.

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r41072)
+++ trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r41277)
@@ -97,4 +97,11 @@
     ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
 
+    /*
+     * Theoretically, other hypervisors may have used ASIDs; ideally we should flush all non-zero ASIDs
+     * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
+     * upon VMRUN). Therefore, just set the fFlushASIDBeforeUse flag, which instructs hmR0SvmSetupTLB()
+     * to flush the TLB before using a new ASID.
+     */
+    pCpu->fFlushASIDBeforeUse = true;
     return VINF_SUCCESS;
 }
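The fFlushASIDBeforeUse flag set above only matters together with the FlushByASID capability tested later in this changeset. As a point of reference, here is a minimal sketch (not VirtualBox code; the helper name is hypothetical) of how that capability is detected: CPUID leaf 0x8000000A reports the SVM feature bits in EDX, with FlushByASID at bit 6 per the AMD APM.

    #include <stdint.h>
    #include <cpuid.h>  /* GCC/clang helper header; an assumption, not VBox's IPRT API */

    #define AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID  (1u << 6)

    /* Returns nonzero when the CPU can flush TLB entries for a single ASID. */
    static int svmSupportsFlushByASID(void)
    {
        unsigned int uEAX, uEBX, uECX, uEDX;
        if (!__get_cpuid(0x8000000A, &uEAX, &uEBX, &uECX, &uEDX))
            return 0;  /* extended SVM feature leaf not available */
        return (uEDX & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID) != 0;
    }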
     
@@ -951,4 +958,139 @@
 }
 
+/**
+ * Sets up TLB control and the ASID in the VMCB.
+ *
+ * @param    pVM        The VM to operate on.
+ * @param    pVCpu      The VM CPU to operate on.
+ */
+static void hmR0SvmSetupTLB(PVM pVM, PVMCPU pVCpu)
+{
+    PHMGLOBLCPUINFO pCpu;
+
+    AssertPtr(pVM);
+    AssertPtr(pVCpu);
+
+    SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
+    pCpu = HWACCMR0GetCurrentCpu();
+
+    /*
+     * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
+     * This can happen both for start & resume due to long jumps back to ring-3.
+     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
+     * so we cannot reuse the ASIDs without flushing.
+     */
+    bool fNewASID = false;
+    if (    pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
+        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+    {
+        pVCpu->hwaccm.s.fForceTLBFlush = true;
+        fNewASID = true;
+    }
+
+    /*
+     * Set TLB flush state as checked until we return from the world switch.
+     */
+    ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, true);
+
+    /*
+     * Check for TLB shootdown flushes.
+     */
+    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
+        pVCpu->hwaccm.s.fForceTLBFlush = true;
+
+    pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
+    pCpu->fFlushTLB = false;
+    pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
+
+    if (RT_UNLIKELY(pVM->hwaccm.s.svm.fAlwaysFlushTLB))
+    {
+        /*
+         * This is AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
+         */
+        pCpu->uCurrentASID               = 1;
+        pVCpu->hwaccm.s.uCurrentASID     = 1;
+        pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
+    }
+    else if (pVCpu->hwaccm.s.fForceTLBFlush)
+    {
+        if (fNewASID)
+        {
+            ++pCpu->uCurrentASID;
+            bool fHitASIDLimit = false;
+            if (pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID)
+            {
+                pCpu->uCurrentASID        = 1;  /* start at 1; host uses 0 */
+                pCpu->cTLBFlushes++;
+                fHitASIDLimit             = true;
+
+                if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
+                {
+                    pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
+                    pCpu->fFlushASIDBeforeUse = true;
+                }
+                else
+                {
+                    pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
+                    pCpu->fFlushASIDBeforeUse = false;
+                }
+            }
+
+            if (   !fHitASIDLimit
+                && pCpu->fFlushASIDBeforeUse)
+            {
+                if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
+                    pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
+                else
+                {
+                    pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
+                    pCpu->fFlushASIDBeforeUse = false;
+                }
+            }
+
+            pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
+            pVCpu->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
+        }
+        else
+        {
+            if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
+                pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
+            else
+                pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
+        }
+
+        pVCpu->hwaccm.s.fForceTLBFlush = false;
+    }
+    else
+    {
+        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
+         *        not be executed. See hwaccmQueueInvlPage() where it is commented
+         *        out. Support individual entry flushing someday. */
+        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
+        {
+            /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
+            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
+            for (unsigned i = 0; i < pVCpu->hwaccm.s.TlbShootdown.cPages; i++)
+                SVMR0InvlpgA(pVCpu->hwaccm.s.TlbShootdown.aPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);
+        }
+    }
+
+    pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
+    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
+
+    /* Update VMCB with the ASID. */
+    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hwaccm.s.uCurrentASID;
+
+    AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
+    AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
+
+#ifdef VBOX_WITH_STATISTICS
+    if (pVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
+        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
+    else
+        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch);
+#endif
+}
+
 
 /**
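The new function picks one of three SVM_TLB_FLUSH_* values for the VMCB's u8TLBFlush field. A sketch of plausible definitions follows; the constant names match the diff, but the header itself is not part of this changeset, so treat the values as the AMD-documented TLB_CONTROL byte encodings rather than verbatim VirtualBox code.

    /* VMCB TLB_CONTROL encodings (AMD APM Vol. 2); consumed by the CPU on VMRUN. */
    #define SVM_TLB_FLUSH_NOTHING          0   /* keep all TLB entries */
    #define SVM_TLB_FLUSH_ENTIRE           1   /* flush all ASIDs, all entries */
    #define SVM_TLB_FLUSH_SINGLE_CONTEXT   3   /* flush only this guest's ASID (needs FlushByASID) */

With FlushByASID available, wrapping the ASID space costs only a single-context flush instead of wiping every guest's TLB entries on the host CPU, which is the point of this changeset.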
     
@@ -1206,80 +1348,8 @@
     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatEntry, &pVCpu->hwaccm.s.StatInGC, x);
 
-    pCpu = HWACCMR0GetCurrentCpu();
-    /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
-    /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
-    if (    pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
-            /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
-        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
-    {
-        /* Force a TLB flush on VM entry. */
-        pVCpu->hwaccm.s.fForceTLBFlush = true;
-    }
-    else
-        Assert(!pCpu->fFlushTLB || pVM->hwaccm.s.svm.fAlwaysFlushTLB);
-
-    pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
-
-    /* Set TLB flush state as checked until we return from the world switch. */
-    ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, true);
-
-    /* Check for tlb shootdown flushes. */
-    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
-        pVCpu->hwaccm.s.fForceTLBFlush = true;
-
-    /* Make sure we flush the TLB when required.  Switch ASID to achieve the
-       same thing, but without actually flushing the whole TLB (which is
-       expensive). */
-    if (    pVCpu->hwaccm.s.fForceTLBFlush
-        && !pVM->hwaccm.s.svm.fAlwaysFlushTLB)
-    {
-        if (    ++pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID
-            ||  pCpu->fFlushTLB)
-        {
-            pCpu->fFlushTLB                  = false;
-            pCpu->uCurrentASID               = 1;       /* start at 1; host uses 0 */
-            pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = 1;       /* wrap around; flush TLB */
-            pCpu->cTLBFlushes++;
-        }
-        else
-            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);
-
-        pVCpu->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
-        pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
-    }
-    else
-    {
-        Assert(!pCpu->fFlushTLB || pVM->hwaccm.s.svm.fAlwaysFlushTLB);
-
-        /* We never increase uCurrentASID in the fAlwaysFlushTLB (erratum 170) case. */
-        if (!pCpu->uCurrentASID || !pVCpu->hwaccm.s.uCurrentASID)
-            pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
-
-        Assert(!pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVCpu->hwaccm.s.fForceTLBFlush);
-        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVCpu->hwaccm.s.fForceTLBFlush;
-
-        if (    !pVM->hwaccm.s.svm.fAlwaysFlushTLB
-            &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
-        {
-            /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
-            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
-            for (unsigned i=0;i<pVCpu->hwaccm.s.TlbShootdown.cPages;i++)
-                SVMR0InvlpgA(pVCpu->hwaccm.s.TlbShootdown.aPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);
-        }
-    }
-    pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
-    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
-
-    AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
-    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
-    AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
-    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hwaccm.s.uCurrentASID;
-
-#ifdef VBOX_WITH_STATISTICS
-    if (pVMCB->ctrl.TLBCtrl.n.u1TLBFlush)
-        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch);
-    else
-        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
-#endif
+    /*
+     * Setup TLB control and ASID in the VMCB.
+     */
+    hmR0SvmSetupTLB(pVM, pVCpu);
 
     /* In case we execute a goto ResumeExecution later on. */
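Both the removed and the new shootdown paths invalidate individual pages with SVMR0InvlpgA(). As a rough sketch (an assumption, not the VirtualBox implementation), such a helper boils down to the INVLPGA instruction, which takes its operands implicitly in rAX (virtual address) and ECX (ASID) per the AMD APM:

    #include <stdint.h>

    /* Invalidate the TLB entry for one guest-virtual page within one ASID.
       Hypothetical wrapper; needs a binutils recent enough to know the
       invlpga mnemonic. */
    static inline void svmInvlpgA(uintptr_t uVirtAddr, uint32_t uASID)
    {
        __asm__ __volatile__("invlpga"          /* implicit operands: rAX, ECX */
                             : /* no outputs */
                             : "a" (uVirtAddr), "c" (uASID)
                             : "memory");
    }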
     
@@ -1340,5 +1410,5 @@
 
         Log(("ctrl.TLBCtrl.u32ASID              %x\n",      pVMCB->ctrl.TLBCtrl.n.u32ASID));
-        Log(("ctrl.TLBCtrl.u1TLBFlush           %x\n",      pVMCB->ctrl.TLBCtrl.n.u1TLBFlush));
+        Log(("ctrl.TLBCtrl.u8TLBFlush           %x\n",      pVMCB->ctrl.TLBCtrl.n.u8TLBFlush));
         Log(("ctrl.TLBCtrl.u7Reserved           %x\n",      pVMCB->ctrl.TLBCtrl.n.u7Reserved));
         Log(("ctrl.TLBCtrl.u24Reserved          %x\n",      pVMCB->ctrl.TLBCtrl.n.u24Reserved));
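This last hunk reflects the control-field rename: the single u1TLBFlush bit has grown into an 8-bit u8TLBFlush field so it can hold the richer TLB_CONTROL encodings (the remaining u7Reserved Log line appears to be a leftover of the rename). A sketch of the resulting bitfield follows; the real layout lives in VirtualBox's SVM headers, so the struct below is a reconstruction from the field names in the diff, not verbatim code.

    #include <stdint.h>

    /* VMCB TLB-control word: guest ASID plus the TLB_CONTROL byte. */
    typedef union
    {
        struct
        {
            uint64_t u32ASID     : 32;  /* ASID the guest runs under */
            uint64_t u8TLBFlush  :  8;  /* TLB_CONTROL (was u1TLBFlush + 7 reserved bits) */
            uint64_t u24Reserved : 24;  /* reserved, keep zero */
        } n;
        uint64_t au64;
    } SVMTLBCTRL;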
