VirtualBox

Changeset 61359 in vbox


Timestamp: Jun 1, 2016 7:51:27 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 107655
Message:

VMM/HMVMXR0: Fix lazy save/restore MSRs code structure to be more malleable if we need to add non-64-bit guest MSRs in the future.
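The gist of the restructuring, as a minimal compilable sketch: the #if HC_ARCH_BITS == 64 / fAllow64BitGuests guard moves from around the whole lazy-MSR helper (and around every call site) to around only the 64-bit-guest MSRs inside the helper, so a future non-64-bit guest MSR can be handled after the #endif without touching the callers. The names below (VMCPUSKETCH, SketchRdMsr, sketchLazySaveHostMsrs) are simplified stand-ins for illustration only, not the real VirtualBox definitions.

    /* Illustrative sketch only -- simplified stand-ins, not the actual HMVMXR0.cpp code. */
    #include <stdint.h>
    #include <stdio.h>

    #define HC_ARCH_BITS             64     /* stand-in for the real build define */
    #define VMX_LAZY_MSRS_SAVED_HOST 0x1u

    typedef struct VMCPUSKETCH
    {
        uint32_t fLazyMsrs;                 /* lazy-MSR state flags */
        int      fAllow64BitGuests;         /* VM allows 64-bit guests */
        uint64_t u64HostLStarMsr;           /* saved host MSR value */
    } VMCPUSKETCH;

    /* Stand-in for ASMRdMsr(); a real build would read the MSR in ring-0. */
    static uint64_t SketchRdMsr(uint32_t idMsr)
    {
        (void)idMsr;
        return 0;
    }

    /*
     * New shape: the helper is always built; only the 64-bit-guest MSRs sit behind
     * the architecture/guest-type guard, so future non-64-bit MSRs go after the
     * #endif and callers no longer need their own #if/fAllow64BitGuests checks.
     */
    static void sketchLazySaveHostMsrs(VMCPUSKETCH *pVCpu)
    {
        if (!(pVCpu->fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
        {
    #if HC_ARCH_BITS == 64
            if (pVCpu->fAllow64BitGuests)
                pVCpu->u64HostLStarMsr = SketchRdMsr(0xC0000082 /* MSR_K8_LSTAR */);
    #endif
            /* A future non-64-bit guest MSR would be saved here, unconditionally. */
            pVCpu->fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
        }
    }

    int main(void)
    {
        VMCPUSKETCH VCpu = { 0, 1, 0 };
        sketchLazySaveHostMsrs(&VCpu);      /* callers no longer guard the call */
        printf("fLazyMsrs=%#x\n", (unsigned)VCpu.fLazyMsrs);
        return 0;
    }

The same guard-inside-the-helper shape is applied in the diff below to hmR0VmxLazySaveHostMsrs, hmR0VmxLazySaveGuestMsrs, hmR0VmxLazyLoadGuestMsrs, hmR0VmxLazyRestoreHostMsrs and hmR0VmxIsLazyGuestMsr.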

File: 1 edited

Legend: unmodified lines are shown with a leading space, added lines with "+", removed lines with "-".
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (revision 61317)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (revision 61359)
@@ -1458,5 +1458,4 @@
 
 
-#if HC_ARCH_BITS == 64
 /**
  * Saves a set of host MSRs to allow read/write passthru access to the guest and
     
@@ -1474,10 +1473,16 @@
      * Note: If you're adding MSRs here, make sure to update the MSR-bitmap permissions in hmR0VmxSetupProcCtls().
      */
-    if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
-    {
-        pVCpu->hm.s.vmx.u64HostLStarMsr        = ASMRdMsr(MSR_K8_LSTAR);
-        pVCpu->hm.s.vmx.u64HostStarMsr         = ASMRdMsr(MSR_K6_STAR);
-        pVCpu->hm.s.vmx.u64HostSFMaskMsr       = ASMRdMsr(MSR_K8_SF_MASK);
-        pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+    Assert(!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST));
+    if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST))
+    {
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            pVCpu->hm.s.vmx.u64HostLStarMsr        = ASMRdMsr(MSR_K8_LSTAR);
+            pVCpu->hm.s.vmx.u64HostStarMsr         = ASMRdMsr(MSR_K6_STAR);
+            pVCpu->hm.s.vmx.u64HostSFMaskMsr       = ASMRdMsr(MSR_K8_SF_MASK);
+            pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+        }
+#endif
         pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_SAVED_HOST;
     }
     
@@ -1496,12 +1501,17 @@
 {
     NOREF(pVCpu);
-    switch (uMsr)
-    {
-        case MSR_K8_LSTAR:
-        case MSR_K6_STAR:
-        case MSR_K8_SF_MASK:
-        case MSR_K8_KERNEL_GS_BASE:
-            return true;
-    }
+#if HC_ARCH_BITS == 64
+    if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+    {
+        switch (uMsr)
+        {
+            case MSR_K8_LSTAR:
+            case MSR_K6_STAR:
+            case MSR_K8_SF_MASK:
+            case MSR_K8_KERNEL_GS_BASE:
+                return true;
+        }
+    }
+#endif
     return false;
 }
     
@@ -1526,8 +1536,13 @@
     {
         Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
-        pMixedCtx->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
-        pMixedCtx->msrSTAR         = ASMRdMsr(MSR_K6_STAR);
-        pMixedCtx->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
-        pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            pMixedCtx->msrLSTAR        = ASMRdMsr(MSR_K8_LSTAR);
+            pMixedCtx->msrSTAR         = ASMRdMsr(MSR_K6_STAR);
+            pMixedCtx->msrSFMASK       = ASMRdMsr(MSR_K8_SF_MASK);
+            pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);
+        }
+#endif
     }
 }
     
@@ -1565,16 +1580,14 @@
     if (!(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))
     {
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
-        VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_LSTAR, LSTAR, LStar);
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K6_STAR, STAR, Star);
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_SF_MASK, SFMASK, SFMask);
+            VMXLOCAL_LAZY_LOAD_GUEST_MSR(MSR_K8_KERNEL_GS_BASE, KERNELGSBASE, KernelGSBase);
+        }
+#endif
         pVCpu->hm.s.vmx.fLazyMsrs |= VMX_LAZY_MSRS_LOADED_GUEST;
-    }
-    else
-    {
-        ASMWrMsr(MSR_K8_LSTAR,          pMixedCtx->msrLSTAR);
-        ASMWrMsr(MSR_K6_STAR,           pMixedCtx->msrSTAR);
-        ASMWrMsr(MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK);
-        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE);
     }
 
     
@@ -1601,12 +1614,16 @@
     {
         Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);
-        ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hm.s.vmx.u64HostLStarMsr);
-        ASMWrMsr(MSR_K6_STAR,           pVCpu->hm.s.vmx.u64HostStarMsr);
-        ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hm.s.vmx.u64HostSFMaskMsr);
-        ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
+#if HC_ARCH_BITS == 64
+        if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        {
+            ASMWrMsr(MSR_K8_LSTAR,          pVCpu->hm.s.vmx.u64HostLStarMsr);
+            ASMWrMsr(MSR_K6_STAR,           pVCpu->hm.s.vmx.u64HostStarMsr);
+            ASMWrMsr(MSR_K8_SF_MASK,        pVCpu->hm.s.vmx.u64HostSFMaskMsr);
+            ASMWrMsr(MSR_K8_KERNEL_GS_BASE, pVCpu->hm.s.vmx.u64HostKernelGSBaseMsr);
+        }
+#endif
     }
     pVCpu->hm.s.vmx.fLazyMsrs &= ~(VMX_LAZY_MSRS_LOADED_GUEST | VMX_LAZY_MSRS_SAVED_HOST);
 }
-#endif  /* HC_ARCH_BITS == 64 */
 
 
     
@@ -2431,4 +2448,5 @@
         }
 #endif
+        /* Though MSR_IA32_PERF_GLOBAL_CTRL is saved/restored lazily, we want intercept reads/write to it for now. */
     }
 
     
@@ -3095,14 +3113,14 @@
     AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
 
-    int rc = VINF_SUCCESS;
-#if HC_ARCH_BITS == 64
-    if (pVM->hm.s.fAllow64BitGuests)
-        hmR0VmxLazySaveHostMsrs(pVCpu);
-#endif
+    /*
+     * Save MSRs that we restore lazily (due to preemption or transition to ring-3)
+     * rather than swapping them on every VM-entry.
+     */
+    hmR0VmxLazySaveHostMsrs(pVCpu);
 
     /*
      * Host Sysenter MSRs.
      */
-    rc  = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,       ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
+    int rc  = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,   ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
 #if HC_ARCH_BITS == 32
     rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP,        ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
     
@@ -6169,27 +6187,17 @@
 static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
 {
-#if HC_ARCH_BITS == 64
-    if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
-    {
-        /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
-        VMMRZCallRing3Disable(pVCpu);
-        HM_DISABLE_PREEMPT();
-
-        /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
-        if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
-        {
-            hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
-            HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
-        }
-
-        HM_RESTORE_PREEMPT();
-        VMMRZCallRing3Enable(pVCpu);
-    }
-    else
+    /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */
+    VMMRZCallRing3Disable(pVCpu);
+    HM_DISABLE_PREEMPT();
+
+    /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */
+    if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS))
+    {
+        hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx);
         HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
-#else
-    NOREF(pMixedCtx);
-    HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS);
-#endif
+    }
+
+    HM_RESTORE_PREEMPT();
+    VMMRZCallRing3Enable(pVCpu);
 
     return VINF_SUCCESS;
     
@@ -7047,8 +7055,6 @@
 #endif
 
-#if HC_ARCH_BITS == 64
     /* Restore the lazy host MSRs as we're leaving VT-x context. */
-    if (   pVM->hm.s.fAllow64BitGuests
-        && pVCpu->hm.s.vmx.fLazyMsrs)
+    if (pVCpu->hm.s.vmx.fLazyMsrs)
     {
         /* We shouldn't reload the guest MSRs without saving it first. */
     
@@ -7062,5 +7068,4 @@
         Assert(!pVCpu->hm.s.vmx.fLazyMsrs);
     }
-#endif
 
     /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
     
@@ -7288,10 +7293,9 @@
             VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
         pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
-
+#endif
         /* Restore the lazy host MSRs as we're leaving VT-x context. */
-        if (   pVCpu->hm.s.vmx.fLazyMsrs
-            && pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)
+        if (pVCpu->hm.s.vmx.fLazyMsrs)
             hmR0VmxLazyRestoreHostMsrs(pVCpu);
-#endif
+
         /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */
         pVCpu->hm.s.vmx.fUpdatedHostMsrs = false;
     
@@ -8182,17 +8186,18 @@
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
-    if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
-        return VINF_SUCCESS;
-
-    int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
-    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
-
-    HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
+    int rc = VINF_SUCCESS;
+    if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
+    {
+        rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
+        AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+        rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
+        AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+        rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
+        AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+        HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
+    }
     return rc;
 }
     
@@ -8367,8 +8372,5 @@
     if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS))
     {
-#if HC_ARCH_BITS == 64
-        if (pVM->hm.s.fAllow64BitGuests)
-            hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
-#endif
+        hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx);
         HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
     }
     
@@ -11884,12 +11886,16 @@
             HMVMX_RETURN_UNEXPECTED_EXIT();
         }
-# if HC_ARCH_BITS == 64
-        if (   pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests
-            && hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
-        {
-            AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
-            HMVMX_RETURN_UNEXPECTED_EXIT();
-        }
-# endif
+        if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+        {
+            VMXMSREXITREAD  enmRead;
+            VMXMSREXITWRITE enmWrite;
+            int rc2 = hmR0VmxGetMsrPermission(pVCpu, pMixedCtx->ecx, &enmRead, &enmWrite);
+            AssertRCReturn(rc2, rc2);
+            if (enmRead == VMXMSREXIT_PASSTHRU_READ)
+            {
+                AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
+                HMVMX_RETURN_UNEXPECTED_EXIT();
+            }
+        }
     }
 #endif
     
@@ -11974,8 +11980,6 @@
                     if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
                         HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
-#if HC_ARCH_BITS == 64
                     else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
                         HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_LAZY_MSRS);
-#endif
                     break;
                 }
     
@@ -12012,11 +12016,16 @@
                     }
 
-#if HC_ARCH_BITS == 64
                     if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
                     {
-                        AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
-                        HMVMX_RETURN_UNEXPECTED_EXIT();
+                        VMXMSREXITREAD  enmRead;
+                        VMXMSREXITWRITE enmWrite;
+                        int rc2 = hmR0VmxGetMsrPermission(pVCpu, pMixedCtx->ecx, &enmRead, &enmWrite);
+                        AssertRCReturn(rc2, rc2);
+                        if (enmWrite == VMXMSREXIT_PASSTHRU_WRITE)
+                        {
+                            AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
+                            HMVMX_RETURN_UNEXPECTED_EXIT();
+                        }
                     }
-#endif
                     break;
                 }