VirtualBox

Changeset 48218 in vbox for trunk/src/VBox


Timestamp: Sep 1, 2013 4:31:26 PM
Author: vboxsync
Message: VMM: Addressed a rare corner-case issue with stale TLB entries.
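The fix works by threading the host's VMX capability MSRs through the ring-0 enable-CPU callback as an opaque pointer: VMXR0EnableCpu can then check INVEPT support itself and flush all EPT tagged-TLB entries whenever VT-x is enabled on a CPU, instead of only when a nested-paging VM happened to trigger the call (the risk described by the @todo removed in HMVMXR0.cpp below). Here is a minimal, self-contained sketch of that pattern; the stand-in types and trimmed signatures are illustrative only (the real callback also takes PHMGLOBALCPUINFO, PVM, and the host CPU page), and the MSR bit follows the Intel SDM's IA32_VMX_EPT_VPID_CAP layout.

    /* Sketch only: stand-in types and trimmed signatures, not VirtualBox code. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct VMXMSRS { uint64_t u64EptVpidCaps; } VMXMSRS;

    /* Bit 26 of IA32_VMX_EPT_VPID_CAP: all-contexts INVEPT supported (Intel SDM). */
    #define MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS  (UINT64_C(1) << 26)

    /* r48218 appends an opaque 'void *pvArg' so capability data can reach the
       hardware-specific enable routine without widening the shared signature. */
    typedef int (*PFNENABLECPU)(int fEnabledByHost, void *pvArg);

    /* VT-x flavor: pvArg carries the VMX MSRs captured at module init. */
    static int vmxEnableCpu(int fEnabledByHost, void *pvArg)
    {
        const VMXMSRS *pMsrs = (const VMXMSRS *)pvArg;
        if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
            puts("VT-x: flush all EPT tagged-TLB entries now");  /* no stale entries survive */
        else
            puts("VT-x: no all-contexts INVEPT; flush ASIDs before first use instead");
        (void)fEnabledByHost;
        return 0;
    }

    /* AMD-V flavor: the new argument is simply ignored (NOREF in SVMR0EnableCpu). */
    static int svmEnableCpu(int fEnabledByHost, void *pvArg)
    {
        (void)fEnabledByHost; (void)pvArg;
        return 0;
    }

    int main(void)
    {
        VMXMSRS Msrs = { MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS };
        PFNENABLECPU pfnEnableCpu = vmxEnableCpu;  /* selected at init, like g_HvmR0 */
        pfnEnableCpu(0, &Msrs);                    /* VT-x path passes &g_HvmR0.vmx.Msrs */
        pfnEnableCpu = svmEnableCpu;
        pfnEnableCpu(0, NULL);                     /* AMD-V path passes NULL */
        return 0;
    }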

Location: trunk/src/VBox/VMM/VMMR0
Files: 5 edited

  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r48217 → r48218)

@@ -91,5 +91,5 @@
     DECLR0CALLBACKMEMBER(int,  pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx));
     DECLR0CALLBACKMEMBER(int,  pfnEnableCpu,(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
-                                             bool fEnabledByHost));
+                                             bool fEnabledByHost, void *pvArg));
     DECLR0CALLBACKMEMBER(int,  pfnDisableCpu,(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
     DECLR0CALLBACKMEMBER(int,  pfnInitVM,(PVM pVM));
@@ -243,7 +243,7 @@
 
 static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
-                                            bool fEnabledBySystem)
-{
-    NOREF(pCpu); NOREF(pVM); NOREF(pvCpuPage); NOREF(HCPhysCpuPage); NOREF(fEnabledBySystem);
+                                            bool fEnabledBySystem, void *pvArg)
+{
+    NOREF(pCpu); NOREF(pVM); NOREF(pvCpuPage); NOREF(HCPhysCpuPage); NOREF(fEnabledBySystem); NOREF(pvArg);
     return VINF_SUCCESS;
 }
@@ -908,5 +908,5 @@
     int rc;
     if (g_HvmR0.vmx.fSupported && g_HvmR0.vmx.fUsingSUPR0EnableVTx)
-        rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true);
+        rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true, &g_HvmR0.vmx.Msrs);
     else
     {
@@ -914,5 +914,9 @@
         void    *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
         RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
-        rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
+
+        if (g_HvmR0.vmx.fSupported)
+            rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HvmR0.vmx.Msrs);
+        else
+            rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, NULL /* pvArg */);
     }
     AssertRC(rc);
@@ -1767,5 +1771,5 @@
         void           *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
         RTHCPHYS        HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
-        VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
+        VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HvmR0.vmx.Msrs);
     }
 }
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r48206 → r48218)

@@ -301,6 +301,8 @@
  * @param   pvCpuPage       Pointer to the global CPU page.
  * @param   HCPhysCpuPage   Physical address of the global CPU page.
- */
-VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
+ * @param   pvArg           Unused on AMD-V.
+ */
+VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
+                              void *pvArg)
 {
     AssertReturn(!fEnabledByHost, VERR_INVALID_PARAMETER);
@@ -308,4 +310,5 @@
                  && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
+    NOREF(pvArg);
 
     /*
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.h (r48153 → r48218)

@@ -42,5 +42,6 @@
 VMMR0DECL(int)  SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);
 VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
-VMMR0DECL(int)  SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage, bool fEnabledBySystem);
+VMMR0DECL(int)  SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage, bool fEnabledBySystem,
+                               void *pvArg);
 VMMR0DECL(int)  SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 VMMR0DECL(int)  SVMR0InitVM(PVM pVM);
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r48216 → r48218)

@@ -314,5 +314,5 @@
 *   Internal Functions                                                         *
 *******************************************************************************/
-static void               hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
+static void               hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
 static void               hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
 static int                hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
@@ -1009,8 +1009,11 @@
  * @param   fEnabledByHost  Set if SUPR0EnableVTx() or similar was used to
  *                          enable VT-x on the host.
- */
-VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
+ * @param   pvMsrs          Opaque pointer to VMXMSRS struct.
+ */
+VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
+                              void *pvMsrs)
 {
     AssertReturn(pCpu, VERR_INVALID_PARAMETER);
+    AssertReturn(pvMsrs, VERR_INVALID_PARAMETER);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
@@ -1023,26 +1026,15 @@
 
     /*
-     * Flush all EPTP tagged-TLB entries (in case any other hypervisor have been using EPTPs) so that
-     * we can avoid an explicit flush while using new VPIDs. We would still need to flush
-     * each time while reusing a VPID after hitting the MaxASID limit once.
+     * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been using EPTPs) so
+     * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID.
      */
-    if (   pVM
-        && pVM->hm.s.fNestedPaging)
-    {
-        /* We require ALL_CONTEXT flush-type to be available on the CPU. See hmR0VmxSetupTaggedTlb(). */
-        Assert(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
-        hmR0VmxFlushEpt(pVM, NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
+    PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs;
+    if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
+    {
+        hmR0VmxFlushEpt(NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
         pCpu->fFlushAsidBeforeUse = false;
     }
     else
-    {
-        /** @todo This is still not perfect. If on host resume (pVM is NULL or a VM
-         *        without Nested Paging triggered this function) we still have the risk
-         *        of potentially running with stale TLB-entries from other hypervisors
-         *        when later we use a VM with NestedPaging. To fix this properly we will
-         *        have to pass '&g_HvmR0' (see HMR0.cpp) to this function and read
-         *        'u64EptVpidCaps' from it. Sigh. */
         pCpu->fFlushAsidBeforeUse = true;
-    }
 
     /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
@@ -1126,14 +1118,14 @@
  *
  * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU (can be NULL depending on @a
  *                      enmFlush).
 * @param   enmFlush    Type of flush.
- */
-static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
-{
-    AssertPtr(pVM);
-    Assert(pVM->hm.s.fNestedPaging);
-
+ *
+ * @remarks Caller is responsible for making sure this function is called only
+ *          when NestedPaging is supported and providing @a enmFlush that is
+ *          supported by the CPU.
+ */
+static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
+{
     uint64_t descriptor[2];
     if (enmFlush == VMX_FLUSH_EPT_ALL_CONTEXTS)
@@ -1351,5 +1343,5 @@
          * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
          */
-        hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+        hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
         HMVMX_SET_TAGGED_TLB_FLUSHED();
@@ -1367,5 +1359,5 @@
          * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
          */
-        hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+        hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
         STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
         HMVMX_SET_TAGGED_TLB_FLUSHED();
@@ -1389,5 +1381,5 @@
         }
         else
-            hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+            hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
 
         HMVMX_SET_TAGGED_TLB_FLUSHED();
@@ -1459,5 +1451,5 @@
     if (pVCpu->hm.s.fForceTLBFlush)
     {
-        hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+        hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
         pVCpu->hm.s.fForceTLBFlush = false;
     }
@@ -1471,5 +1463,5 @@
             /* We cannot flush individual entries without VPID support. Flush using EPT. */
             STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
-            hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+            hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt);
         }
         else
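For reference, this is why hmR0VmxFlushEpt no longer needs a pVM: an all-contexts INVEPT ignores the EPT pointer in its 128-bit descriptor entirely, so nothing VM-specific is required. A rough sketch of that call shape follows, where vmxInvEpt is a hypothetical stand-in for the VMXR0InvEPT assembly helper (the real code executes the INVEPT instruction in ring-0):

    /* Sketch only: vmxInvEpt is a hypothetical stand-in for VMXR0InvEPT. */
    #include <stdint.h>

    typedef enum VMX_FLUSH_EPT
    {
        VMX_FLUSH_EPT_SINGLE_CONTEXT = 1,   /* INVEPT type 1: one EPTP */
        VMX_FLUSH_EPT_ALL_CONTEXTS   = 2    /* INVEPT type 2: every EPTP */
    } VMX_FLUSH_EPT;

    static int vmxInvEpt(VMX_FLUSH_EPT enmFlush, uint64_t *pDescriptor)
    {
        /* Ring-0 code would execute INVEPT here (e.g. via inline assembly);
           stubbed out so the sketch compiles and runs in user space. */
        (void)enmFlush; (void)pDescriptor;
        return 0;
    }

    int flushEptAllContexts(void)
    {
        /* 128-bit INVEPT descriptor: qword 0 = EPTP (ignored by the
           all-contexts variant), qword 1 = reserved, must be zero. */
        uint64_t descriptor[2] = { 0, 0 };
        return vmxInvEpt(VMX_FLUSH_EPT_ALL_CONTEXTS, &descriptor[0]);
    }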
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.h (r48153 → r48218)

@@ -31,5 +31,6 @@
 VMMR0DECL(int)  VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu);
 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
-VMMR0DECL(int)  VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, bool fEnabledBySystem);
+VMMR0DECL(int)  VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, bool fEnabledBySystem,
+                               void *pvMsrs);
 VMMR0DECL(int)  VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 VMMR0DECL(int)  VMXR0GlobalInit(void);