VirtualBox

Changeset 57470 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp:
Aug 20, 2015 9:44:08 AM
Author:
vboxsync
Message:

VMM/HM: Purge the unused TLB shootdown code path.
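
The purged path was a per-page "TLB shootdown" mechanism that had long been disabled behind #if 0: nothing ever set VMCPU_FF_TLB_SHOOTDOWN (see the commented-out hmQueueInvlPage() referenced in the deleted todos), so the only action ever taken is the full flush requested through VMCPU_FF_TLB_FLUSH. That surviving path hinges on atomically testing and clearing a per-VCPU force flag. A minimal standalone sketch of the pattern follows; FF_TLB_FLUSH, ffTestAndClear and the bit value are illustrative stand-ins, not the real VMM definitions:

    // Sketch of the per-VCPU "force flag" pattern (illustrative, not VirtualBox code).
    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t FF_TLB_FLUSH = 1u << 0;   // hypothetical bit assignment

    struct VCpu
    {
        std::atomic<uint32_t> fForceFlags{0};    // pending-action bitmask
    };

    // Atomically test whether a flag is set and clear it in one step, so a
    // concurrent setter (another EMT requesting a flush) is never lost.
    static bool ffTestAndClear(VCpu &vcpu, uint32_t fFlag)
    {
        return (vcpu.fForceFlags.fetch_and(~fFlag) & fFlag) != 0;
    }

    int main()
    {
        VCpu vcpu;
        vcpu.fForceFlags.fetch_or(FF_TLB_FLUSH); // another thread requests a flush
        if (ffTestAndClear(vcpu, FF_TLB_FLUSH))
            std::puts("full TLB flush scheduled for next VM-entry");
        return 0;
    }

Because fetch_and returns the prior mask, the test and the clear are one atomic step, so a flush requested concurrently by another EMT cannot slip through unnoticed; that is the guarantee the real VMCPU_FF_TEST_AND_CLEAR macro relies on.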

Location:
trunk/src/VBox/VMM/VMMR0
Files:
2 edited

Legend:

    (no prefix)  Unmodified
    +            Added
    -            Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

(r57445 → r57470)

@@ -884,5 +884,5 @@
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);

-    /* Check for explicit TLB shootdowns. */
+    /* Check for explicit TLB flushes. */
     if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
     {
@@ -959,23 +959,4 @@
         pVCpu->hm.s.fForceTLBFlush = false;
     }
-    /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
-     *        not be executed. See hmQueueInvlPage() where it is commented
-     *        out. Support individual entry flushing someday. */
-#if 0
-    else
-    {
-        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
-        {
-            /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
-            for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
-                SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pVmcb->ctrl.TLBCtrl.n.u32ASID);
-
-            pVCpu->hm.s.TlbShootdown.cPages = 0;
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
-        }
-    }
-#endif
-

     /* Update VMCB with the ASID. */
@@ -3150,5 +3131,5 @@

     /* Flush the appropriate tagged-TLB entries. */
-    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB-shootdowns, set this across the world switch. */
+    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
     hmR0SvmFlushTaggedTlb(pVCpu);
     Assert(HMR0GetCurrentCpu()->idCpu == pVCpu->hm.s.idLastCpu);
@@ -3233,6 +3214,6 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));

-    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
-    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
+    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
+    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for EMT poking. */

     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
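
On the AMD-V side the dead block invalidated guest pages one at a time with SVMR0InvlpgA. The path that remains is coarser but cheap, because the TLB is tagged: a pending VMCPU_FF_TLB_FLUSH can be satisfied by moving the VCPU to a fresh ASID before the VMCB is updated ("Update VMCB with the ASID" above), so stale translations simply never match again. A rough sketch of that idea, using hypothetical structures rather than the real VMCB or HM state:

    // Illustrative sketch of ASID-based tagged-TLB flushing (not VirtualBox code).
    #include <cstdint>
    #include <cstdio>

    struct TlbState
    {
        uint32_t uCurrentAsid   = 1;   // ASID 0 is reserved for the host
        bool     fForceTLBFlush = false;
    };

    // World-switch helper: satisfy a pending full flush by stepping to a new
    // ASID so stale guest translations can no longer match, then report the
    // ASID to place in the VMCB.
    static uint32_t flushTaggedTlb(TlbState &s, uint32_t cMaxAsids)
    {
        if (s.fForceTLBFlush)
        {
            s.uCurrentAsid   = (s.uCurrentAsid % (cMaxAsids - 1)) + 1; // cycles 1..cMaxAsids-1
            s.fForceTLBFlush = false;  // real code must do a genuine flush on ASID wrap-around
        }
        return s.uCurrentAsid;
    }

    int main()
    {
        TlbState s;
        s.fForceTLBFlush = true;       // as if VMCPU_FF_TEST_AND_CLEAR had returned true
        std::printf("next ASID: %u\n", (unsigned)flushTaggedTlb(s, 16));
        return 0;
    }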
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

(r57445 → r57470)

@@ -1874,10 +1874,4 @@
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);

-    /** @todo TLB shootdown is currently not used. See hmQueueInvlPage(). */
-#if 0
-    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
-    pVCpu->hm.s.TlbShootdown.cPages = 0;
-#endif
-
     Assert(pCpu->idCpu != NIL_RTCPUID);
     pVCpu->hm.s.idLastCpu           = pCpu->idCpu;
@@ -1953,5 +1947,5 @@
     }

-    /* Check for explicit TLB shootdowns. */
+    /* Check for explicit TLB flushes. */
     if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
     {
@@ -1968,32 +1962,5 @@
     }

-    /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
-     *        where it is commented out. Support individual entry flushing
-     *        someday. */
-#if 0
-    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
-    {
-        STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
-
-        /*
-         * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
-         * as supported by the CPU.
-         */
-        if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
-        {
-            for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
-                hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
-        }
-        else
-            hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
-
-        HMVMX_SET_TAGGED_TLB_FLUSHED();
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
-        pVCpu->hm.s.TlbShootdown.cPages = 0;
-    }
-#endif
-
     pVCpu->hm.s.fForceTLBFlush = false;
-
     HMVMX_UPDATE_FLUSH_SKIPPED_STAT();

@@ -2046,5 +2013,5 @@
     }

-    /* Check for explicit TLB shootdown flushes. */
+    /* Check for explicit TLB flushes. */
     if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
     {
@@ -2061,23 +2028,4 @@
         pVCpu->hm.s.fForceTLBFlush = false;
     }
-    /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
-     *        where it is commented out. Support individual entry flushing
-     *        someday. */
-#if 0
-    else
-    {
-        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
-        {
-            /* We cannot flush individual entries without VPID support. Flush using EPT. */
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
-            hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
-        }
-        else
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
-
-        pVCpu->hm.s.TlbShootdown.cPages = 0;
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
-    }
-#endif
 }

@@ -2114,5 +2062,5 @@
     }

-    /* Check for explicit TLB shootdown flushes. */
+    /* Check for explicit TLB flushes. */
     if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
     {
@@ -2156,33 +2104,4 @@
         }
     }
-    /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere. See hmQueueInvlPage()
-     *        where it is commented out. Support individual entry flushing
-     *        someday. */
-#if 0
-    else
-    {
-        AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
-                  ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
-                   pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
-                   pCpu->uCurrentAsid, pCpu->cTlbFlushes));
-
-        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
-        {
-            /* Flush individual guest entries using VPID or as little as possible with EPT as supported by the CPU. */
-            if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
-            {
-                for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
-                    hmR0VmxFlushVpid(pVM, pVCpu, VMXFLUSHVPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
-            }
-            else
-                hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
-
-            pVCpu->hm.s.TlbShootdown.cPages = 0;
-            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
-        }
-        else
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
-    }
-#endif

     AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
@@ -2221,7 +2140,4 @@
             break;
     }
-
-    /* VMCPU_FF_TLB_SHOOTDOWN is unused. */
-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN));

     /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */
@@ -8625,5 +8541,5 @@
     }

-    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB-shootdowns, set this across the world switch. */
+    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
     hmR0VmxFlushTaggedTlb(pVCpu, pCpu);                         /* Invalidate the appropriate guest entries from the TLB. */
     Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
@@ -8698,6 +8614,6 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));

-    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
-    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
+    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);   /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */
+    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);            /* Initialized in vmR3CreateUVM(): used for EMT poking. */
     HMVMXCPU_GST_RESET_TO(pVCpu, 0);                            /* Exits/longjmps to ring-3 requires saving the guest state. */
     pVmxTransient->fVmcsFieldsRead     = 0;                     /* Transient fields need to be read from the VMCS. */
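
On the VT-x side, the deleted blocks picked the narrowest flush the hardware offers: a single-address INVVPID when the EPT/VPID capability MSR advertises individual-address invalidation, otherwise a whole-context EPT or VPID flush. A simplified sketch of that capability check; the names and the bit position are illustrative, not the real MSR_IA32_VMX_EPT_VPID_CAP layout:

    // Illustrative sketch of the capability-driven flush decision (not VirtualBox code).
    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t CAP_INVVPID_INDIV_ADDR = 1ull << 8;  // hypothetical bit position

    enum class FlushScope { IndividualAddress, WholeContext };

    // Flush a single guest-linear address only when the CPU says it can;
    // otherwise fall back to flushing the whole EPT/VPID context.
    static FlushScope chooseVpidFlush(uint64_t u64EptVpidCaps)
    {
        return (u64EptVpidCaps & CAP_INVVPID_INDIV_ADDR)
             ? FlushScope::IndividualAddress   // like hmR0VmxFlushVpid(..., INDIV_ADDR, page)
             : FlushScope::WholeContext;       // like hmR0VmxFlushEpt(...) in the dead code
    }

    int main()
    {
        uint64_t caps = CAP_INVVPID_INDIV_ADDR; // pretend the CPU supports it
        std::printf("scope=%d\n", (int)chooseVpidFlush(caps));
        return 0;
    }

Since VMCPU_FF_TLB_SHOOTDOWN was never raised, neither branch ever executed, which is why the commit can drop the whole decision along with the TlbShootdown page queue and its STAM counters.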