VirtualBox

Changeset 49701 in vbox for trunk/src


Ignore:
Timestamp:
Nov 28, 2013 5:30:55 PM (11 years ago)
Author:
vboxsync
Message:

VMM/HMVMXR0: Simplified the MSR optimization code.

File:
1 file edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r49700 r49701  
    209209    uint32_t        u32Alignment0;
    210210#endif
    211     /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
    212     uint64_t        u64LStarMsr;
    213211    /** The guest's TPR value used for TPR shadowing. */
    214212    uint8_t         u8GuestTpr;
     
    12871285        && fUpdateHostMsr)
    12881286    {
     1287        Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     1288        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    12891289        pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
    12901290    }
     
    14121412    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    14131413
    1414 #define VMXLOCAL_SAVE_HOST_MSR(uMsr, a_HostMsrField, RestoreFlag) \
    1415     do { \
    1416         if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag))) \
    1417         { \
    1418             pVCpu->hm.s.vmx.u64Host##a_HostMsrField = ASMRdMsr(uMsr); \
    1419         } \
    1420     } while (0)
    1421 
    14221414    /*
    14231415     * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
    14241416     */
    1425     VMXLOCAL_SAVE_HOST_MSR(MSR_K8_LSTAR,          LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
    1426     VMXLOCAL_SAVE_HOST_MSR(MSR_K6_STAR,           StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
    1427     VMXLOCAL_SAVE_HOST_MSR(MSR_K8_SF_MASK,        SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
    1428     VMXLOCAL_SAVE_HOST_MSR(MSR_K8_KERNEL_GS_BASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
    1429 
    1430 #undef VMXLOCAL_SAVE_HOST_MSR
     1417    if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST))
     1418    {
     1419        pVCpu->hm.s.vmx.u64HostLStarMsr        = ASMRdMsr(MSR_K8_LSTAR);
     1420        pVCpu->hm.s.vmx.u64HostStarMsr         = ASMRdMsr(MSR_K6_STAR);
     1421        pVCpu->hm.s.vmx.u64HostSFMaskMsr       = ASMRdMsr(MSR_K8_SF_MASK);
     1422        pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
     1423        pVCpu->hm.s.vmx.fRestoreHostMsrs |= VMX_RESTORE_HOST_MSR_SAVED_HOST;
     1424    }
    14311425}
    14321426
     
    14701464    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    14711465
    1472 #define VMXLOCAL_SAVE_GUEST_MSR(uMsr, a_GuestMsrField, RestoreFlag) \
    1473     do { \
    1474         if (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
    1475         { \
    1476             pMixedCtx->msr##a_GuestMsrField = ASMRdMsr(uMsr); \
    1477             Log4(("hmR0VmxLazySaveGuestMsrs: uMsr=%#RX32 GuestValue=%#RX64\n", (uMsr), pMixedCtx->msr##a_GuestMsrField)); \
    1478         } \
    1479     } while (0)
    1480 
    1481     VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_LSTAR,          LSTAR,        VMX_RESTORE_HOST_MSR_LSTAR);
    1482     VMXLOCAL_SAVE_GUEST_MSR(MSR_K6_STAR,           STAR,         VMX_RESTORE_HOST_MSR_STAR);
    1483     VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_SF_MASK,        SFMASK,       VMX_RESTORE_HOST_MSR_SFMASK);
    1484     VMXLOCAL_SAVE_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
    1485 
    1486 #undef VMXLOCAL_SAVE_GUEST_MSR
     1466    if (pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST)
     1467    {
     1468        Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
     1469        pMixedCtx->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
     1470        pMixedCtx->msrSTAR         = ASMRdMsr(MSR_K6_STAR);
     1471        pMixedCtx->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
     1472        pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
     1473    }
    14871474}
    14881475
     
    15081495    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    15091496
    1510 #define VMXLOCAL_LOAD_GUEST_MSR(uMsr, a_GuestMsrField, a_HostMsrField, RestoreFlag) \
     1497#if 0   /* Disabled until issue with non-atomic flag updates is resolved. See @bugref{6398#c170}. */
     1498    Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
     1499    if (!(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST))
     1500    {
     1501#define VMXLOCAL_LAZY_LOAD_GUEST_MSR(uMsr, a_GuestMsr, a_HostMsr) \
    15111502    do { \
    1512         if (   (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
    1513             || pMixedCtx->msr##a_GuestMsrField != pVCpu->hm.s.vmx.u64Host##a_HostMsrField) \
    1514         { \
    1515             ASMWrMsr((uMsr), pMixedCtx->msr##a_GuestMsrField); \
    1516         } \
    1517         pVCpu->hm.s.vmx.fRestoreHostMsrs |= (RestoreFlag); \
    1518         Log4(("Load: MSRSWAP uMsr=%#RX32 GuestValue=%#RX64\n", (uMsr), pMixedCtx->msr##a_GuestMsrField)); \
     1503        if (pMixedCtx->msr##a_GuestMsr != pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr) \
     1504            ASMWrMsr(uMsr, pMixedCtx->msr##a_GuestMsr); \
     1505        else \
     1506            Assert(ASMRdMsr(uMsr) == pVCpu->hm.s.vmx.u64Host##a_HostMsr##Msr); \
    15191507    } while (0)
    15201508
    1521     VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_LSTAR,          LSTAR,        LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
    1522     VMXLOCAL_LOAD_GUEST_MSR(MSR_K6_STAR,           STAR,         StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
    1523     VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_SF_MASK,        SFMASK,       SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
    1524     VMXLOCAL_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
    1525 
    1526 #undef VMXLOCAL_LOAD_GUEST_MSR
     1509        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
     1510        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
     1511        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
     1512        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
     1513#undef VMXLOCAL_LAZY_LOAD_GUEST_MSR
     1514    }
     1515    else
     1516#endif
     1517    {
     1518        ASMWrMsr(MSR_K8_LSTAR,          pMixedCtx->msrLSTAR);
     1519        ASMWrMsr(MSR_K6_STAR,           pMixedCtx->msrSTAR);
     1520        ASMWrMsr(MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK);
     1521        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
     1522    }
     1523    pVCpu->hm.s.vmx.fRestoreHostMsrs |= VMX_RESTORE_HOST_MSR_LOADED_GUEST;
    15271524}
    15281525
     
    15431540    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    15441541
    1545 #define VMXLOCAL_RESTORE_HOST_MSR(uMsr, a_HostMsrField, RestoreFlag) \
    1546     do { \
    1547         if (pVCpu->hm.s.vmx.fRestoreHostMsrs & (RestoreFlag)) \
    1548         { \
    1549             ASMWrMsr((uMsr), pVCpu->hm.s.vmx.u64Host##a_HostMsrField); \
    1550             pVCpu->hm.s.vmx.fRestoreHostMsrs &= ~(RestoreFlag); \
    1551             Log4(("hmR0VmxLazyRestoreHostMsrs: uMsr=%#RX32 HostValue=%#RX64\n", (uMsr), \
    1552                  pVCpu->hm.s.vmx.u64Host##a_HostMsrField)); \
    1553         } \
    1554     } while (0)
    1555 
    1556     VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_LSTAR,          LStarMsr,        VMX_RESTORE_HOST_MSR_LSTAR);
    1557     VMXLOCAL_RESTORE_HOST_MSR(MSR_K6_STAR,           StarMsr,         VMX_RESTORE_HOST_MSR_STAR);
    1558     VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_SF_MASK,        SFMaskMsr,       VMX_RESTORE_HOST_MSR_SFMASK);
    1559     VMXLOCAL_RESTORE_HOST_MSR(MSR_K8_KERNEL_GS_BASE, KernelGSBaseMsr, VMX_RESTORE_HOST_MSR_KERNELGSBASE);
    1560 
    1561 #undef VMXLOCAL_RESTORE_HOST_MSR
     1542    if (pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_LOADED_GUEST)
     1543    {
     1544        Assert(pVCpu->hm.s.vmx.fRestoreHostMsrs & VMX_RESTORE_HOST_MSR_SAVED_HOST);
     1545        ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hm.s.vmx.u64HostLStarMsr);
     1546        ASMWrMsr(MSR_K6_STAR,           pVCpu->hm.s.vmx.u64HostStarMsr);
     1547        ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hm.s.vmx.u64HostSFMaskMsr);
     1548        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
     1549    }
     1550    pVCpu->hm.s.vmx.fRestoreHostMsrs &= ~(VMX_RESTORE_HOST_MSR_LOADED_GUEST | VMX_RESTORE_HOST_MSR_SAVED_HOST);
    15621551}
    15631552#endif  /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
     
    44114400
    44124401        VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);
    4413         Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n",  pMixedCtx->ldtr.u64Base));
     4402        Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
    44144403    }
    44154404
     
    44764465            PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    44774466            for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
    4478                 Log4(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
     4467                Log4(("Load: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
    44794468# endif
    44804469#endif
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette