VirtualBox

Timestamp: May 9, 2014 8:11:15 AM
Author: vboxsync
Message:

VMM/HMVMXR0: Do EFER swapping using auto-load/store area in the VMCS when required.
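In short: hmR0VmxLoadGuestMsrs() now writes the guest EFER via VMX_VMCS64_GUEST_EFER_FULL when the CPU exposes the dedicated VMCS EFER controls (fSupportsVmcsEfer), and otherwise swaps EFER through the VMCS auto-load/store MSR area with EFER reads and writes intercepted. A minimal standalone sketch of that decision follows; the names should_swap_efer() and load_guest_efer() are hypothetical placeholders, not VirtualBox APIs.

    /* Sketch only: illustrates the "VMCS EFER controls vs. auto-load/store area"
     * decision implemented by this changeset. Not VirtualBox code. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool should_swap_efer(uint64_t host_efer, uint64_t guest_efer, bool guest_long_mode)
    {
        const uint64_t EFER_SCE = UINT64_C(1) << 0;   /* SYSCALL enable bit */
        /* Swap when the guest is in long mode and the SCE bits differ, so the
         * guest's SYSCALL behaviour is preserved (mirrors hmR0VmxShouldSwapEferMsr()). */
        return guest_long_mode && (guest_efer & EFER_SCE) != (host_efer & EFER_SCE);
    }

    static void load_guest_efer(bool cpu_has_vmcs_efer_controls, bool swap_needed, uint64_t guest_efer)
    {
        if (cpu_has_vmcs_efer_controls)
        {
            /* Preferred path: let the hardware swap EFER on VM-entry/VM-exit. */
            if (swap_needed)
                printf("VMWRITE GUEST_EFER = %#llx\n", (unsigned long long)guest_efer);
        }
        else if (swap_needed)
            printf("Add EFER to the auto-load/store MSR area; intercept reads and writes\n");
        else
            printf("Remove EFER from the auto-load/store MSR area\n");
    }

    int main(void)
    {
        uint64_t host_efer  = 0xd01;  /* example: SCE | LME | LMA | NXE */
        uint64_t guest_efer = 0x500;  /* example: LME | LMA, SCE clear */
        bool     swap       = should_swap_efer(host_efer, guest_efer, true /* long mode */);
        load_guest_efer(false /* no VMCS EFER controls */, swap, guest_efer);
        return 0;
    }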

File:
1 edited

Legend:

    ' '  Unmodified
    '+'  Added
    '-'  Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp    (r51220)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp    (r51222)
@@ -45,4 +45,5 @@
 # define HMVMX_ALWAYS_SWAP_FPU_STATE
 # define HMVMX_ALWAYS_FLUSH_TLB
+# define HMVMX_ALWAYS_SWAP_EFER
 #endif
 
@@ -1303,7 +1304,4 @@
  * auto-load/store MSR area in the VMCS.
  *
- * Does not fail if the MSR in @a uMsr is not found in the auto-load/store MSR
- * area.
- *
  * @returns VBox status code.
  * @param   pVCpu       Pointer to the VMCPU.
@@ -1328,5 +1326,5 @@
             /* Remove it by swapping the last MSR in place of it, and reducing the count. */
             PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
-            pLastGuestMsr            += cMsrs;
+            pLastGuestMsr            += cMsrs - 1;
             pGuestMsr->u32Msr         = pLastGuestMsr->u32Msr;
             pGuestMsr->u64Value       = pLastGuestMsr->u64Value;
@@ -1334,5 +1332,5 @@
             PVMXAUTOMSR pHostMsr     = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
             PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
-            pLastHostMsr            += cMsrs;
+            pLastHostMsr            += cMsrs - 1;
             pHostMsr->u32Msr         = pLastHostMsr->u32Msr;
             pHostMsr->u64Value       = pLastHostMsr->u64Value;
@@ -1352,7 +1350,10 @@
         if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
             hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
-    }
-
-    return VINF_SUCCESS;
+
+        Log4(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));
+        return VINF_SUCCESS;
+    }
+
+    return VERR_NOT_FOUND;
 }
 
@@ -1397,5 +1398,13 @@
     {
         AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
-        pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
+
+        /*
+         * Performance hack for the host EFER MSR. We use the cached value rather than re-read it.
+         * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
+         */
+        if (pHostMsr->u32Msr == MSR_K6_EFER)
+            pHostMsr->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostEfer;
+        else
+            pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
     }
 
@@ -1646,10 +1655,10 @@
     {
         /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
-        AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32\n", pHostMsr->u32Msr,
-                                                                    pGuestMsr->u32Msr));
+        AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32 cMsrs=%u\n", pHostMsr->u32Msr,
+                                                                    pGuestMsr->u32Msr, cMsrs));
 
         uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
-        AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64\n", pHostMsr->u32Msr,
-                                                           pHostMsr->u64Value, u64Msr));
+        AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64 cMsrs=%u\n",
+                                                           pHostMsr->u32Msr, pHostMsr->u64Value, u64Msr, cMsrs));
 
         /* Verify that the permissions are as expected in the MSR bitmap. */
@@ -1660,8 +1669,16 @@
             rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
             AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission! failed. rc=%Rrc\n", rc));
-            AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 No passthru read permission!\n",
-                                                                      pGuestMsr->u32Msr));
-            AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 No passthru write permission!\n",
-                                                                        pGuestMsr->u32Msr));
+            if (pGuestMsr->u32Msr == MSR_K6_EFER)
+            {
+                AssertMsgReturnVoid(enmRead  == VMXMSREXIT_INTERCEPT_READ,  ("Passthru read for EFER!?\n"));
+                AssertMsgReturnVoid(enmWrite == VMXMSREXIT_INTERCEPT_WRITE, ("Passthru write for EFER!?\n"));
+            }
+            else
+            {
+                AssertMsgReturnVoid(enmRead  == VMXMSREXIT_PASSTHRU_READ,  ("u32Msr=%#RX32 cMsrs=%u No passthru read!\n",
+                                                                            pGuestMsr->u32Msr, cMsrs));
+                AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 cMsrs=%u No passthru write!\n",
+                                                                            pGuestMsr->u32Msr, cMsrs));
+            }
         }
     }
@@ -3155,5 +3172,7 @@
 
     /*
+     * Host EFER MSR.
      * If the CPU supports the newer VMCS controls for managing EFER, use it.
+     * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs().
      */
 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
@@ -3193,4 +3212,7 @@
 static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
+#ifdef HMVMX_ALWAYS_SWAP_EFER
+    return true;
+#endif
     PVM      pVM          = pVCpu->CTX_SUFF(pVM);
     uint64_t u64HostEfer  = pVM->hm.s.vmx.u64HostEfer;
@@ -3201,5 +3223,5 @@
      * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.
      */
-    if (   pVM->hm.s.fAllow64BitGuests
+    if (   CPUMIsGuestInLongMode(pVCpu)
         && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE))
     {
@@ -4655,4 +4677,5 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
     {
+        /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). */
 #if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
         if (pVM->hm.s.fAllow64BitGuests)
@@ -4698,11 +4721,34 @@
     {
 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-        if (   HMVMX_IS_64BIT_HOST_MODE()
-            && pVM->hm.s.vmx.fSupportsVmcsEfer
-            && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))  /* Not really needed here, but avoids a VM-write as a nested guest. */
-        {
-            int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
-            AssertRCReturn(rc,rc);
-            Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
+        if (HMVMX_IS_64BIT_HOST_MODE())
+        {
+            /*
+             * If the CPU supports VMCS controls for swapping EFER, use it. Otherwise, we have no option
+             * but to use the auto-load store MSR area in the VMCS for swapping EFER. See @bugref{7368}.
+             */
+            if (pVM->hm.s.vmx.fSupportsVmcsEfer)
+            {
+                /* Not strictly necessary to check hmR0VmxShouldSwapEferMsr() here, but it avoids
+                   one VM-write when we're a nested guest. */
+                if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+                {
+                    int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER);
+                    AssertRCReturn(rc,rc);
+                    Log4(("Load: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pMixedCtx->msrEFER));
+                }
+            }
+            else
+            {
+                if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx))
+                {
+                    hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_EFER, pMixedCtx->msrEFER, false /* fUpdateHostMsr */);
+                    /* We need to intercept reads too, see @bugref{7386} comment #16. */
+                    hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
+                    Log4(("Load: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER,
+                          pVCpu->hm.s.vmx.cMsrs));
+                }
+                else
+                    hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER);
+            }
         }
 #endif
@@ -6137,7 +6183,14 @@
             case MSR_K8_SF_MASK:        pMixedCtx->msrSFMASK       = pMsr->u64Value;             break;
             case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value;             break;
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+            case MSR_K6_EFER:
+            {
+                if (HMVMX_IS_64BIT_HOST_MODE())  /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */
+                    break;
+            }
+#endif
             default:
             {
-                AssertFailed();
+                AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
                 return VERR_HM_UNEXPECTED_LD_ST_MSR;
             }
@@ -7115,4 +7168,5 @@
         }
 #endif
+        /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
         pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
         VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
@@ -10291,5 +10345,6 @@
     if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     {
-        if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
+        if (   hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
+            && pMixedCtx->ecx != MSR_K6_EFER)
         {
             AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
@@ -10382,4 +10437,5 @@
                 case MSR_K8_FS_BASE:        /* no break */
                 case MSR_K8_GS_BASE:        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);     break;
+                case MSR_K6_EFER:           HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR);         break;
                 default:
                 {
@@ -10415,4 +10471,12 @@
                     if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
                     {
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+                        /* EFER writes are always intercepted, see hmR0VmxLoadGuestMsrs(). */
+                        if (   HMVMX_IS_64BIT_HOST_MODE()
+                            && pMixedCtx->ecx == MSR_K6_EFER)
+                        {
+                            break;
+                        }
+#endif
                         AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
                                          pMixedCtx->ecx));
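The hmR0VmxRemoveAutoLoadStoreMsr() hunks above also fix an off-by-one: the last entry of the auto-load/store area lives at index cMsrs - 1, not cMsrs. A generic standalone sketch of that swap-with-last removal idiom, using a hypothetical MSRENTRY type rather than the real VMX structures:

    /* Sketch only: swap-with-last removal from an unordered array, as used by
     * hmR0VmxRemoveAutoLoadStoreMsr(). Not VirtualBox code. */
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct MSRENTRY
    {
        uint32_t u32Msr;
        uint64_t u64Value;
    } MSRENTRY;

    static bool remove_msr(MSRENTRY *paEntries, uint32_t *pcEntries, uint32_t u32Msr)
    {
        for (uint32_t i = 0; i < *pcEntries; i++)
        {
            if (paEntries[i].u32Msr == u32Msr)
            {
                /* Copy the last entry (index count - 1) into the vacated slot... */
                paEntries[i] = paEntries[*pcEntries - 1];
                /* ...and shrink the count; ordering within the area is not significant. */
                (*pcEntries)--;
                return true;        /* analogous to VINF_SUCCESS */
            }
        }
        return false;               /* analogous to VERR_NOT_FOUND */
    }

    int main(void)
    {
        MSRENTRY aMsrs[2] = { { 0xC0000080 /* EFER */, 0 }, { 0xC0000102 /* KERNEL_GS_BASE */, 0 } };
        uint32_t cMsrs    = 2;
        remove_msr(aMsrs, &cMsrs, 0xC0000080);   /* cMsrs is now 1; slot 0 holds KERNEL_GS_BASE */
        return cMsrs == 1 ? 0 : 1;
    }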