VirtualBox

Timestamp: Apr 25, 2019 8:16:53 AM
Author:    vboxsync
Message:

VMM/HMVMXR0: Nested VMX: bugref:9180 Use array addressing for the VMX MSR areas; it is easier to read and leaves fewer chances of a missed pointer increment. Also added some extra assertions.
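
To illustrate the pattern the message describes: the old code walked the MSR-area pages with a cursor pointer that had to be bumped by hand on every iteration, while the new code keeps a fixed base pointer and indexes it, with assertions that the area stays within its 4K page. The sketch below is illustrative only; it uses a simplified stand-in struct and the standard assert() rather than the tree's VMXAUTOMSR type and IPRT Assert macro.

    #include <stdint.h>
    #include <assert.h>

    /* Simplified stand-in for the 16-byte auto-load/store MSR entry. */
    struct AUTOMSR { uint32_t u32Msr; uint32_t u32Reserved; uint64_t u64Value; };

    /* Old style: pointer increment. Forgetting the pMsr++ makes the loop scan entry 0 forever. */
    static bool IsMsrPresentPtr(const AUTOMSR *pMsr, uint32_t cMsrs, uint32_t idMsr)
    {
        for (uint32_t i = 0; i < cMsrs; i++)
        {
            if (pMsr->u32Msr == idMsr)
                return true;
            pMsr++;                              /* easy to miss once the loop body grows */
        }
        return false;
    }

    /* New style: array addressing. The index i is the only iteration state. */
    static bool IsMsrPresentIdx(const AUTOMSR *paMsrs, uint32_t cMsrs, uint32_t idMsr)
    {
        assert(sizeof(*paMsrs) * cMsrs <= 4096); /* the whole area must fit in one 4K page */
        for (uint32_t i = 0; i < cMsrs; i++)
            if (paMsrs[i].u32Msr == idMsr)
                return true;
        return false;
    }

Both loops do the same work; the indexed form simply removes the separate cursor that has to be kept in sync with i.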

File: 1 edited

Legend:
  unchanged context lines are prefixed with a space
  '-' marks lines removed in r78287 (present in r78259)
  '+' marks lines added in r78287
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r78259)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r78287)
@@ -1256 +1256 @@
 static void hmR0VmxInitVmcsInfo(PVMXVMCSINFO pVmcsInfo)
 {
-    RT_ZERO(*pVmcsInfo);
+    memset(pVmcsInfo, 0, sizeof(*pVmcsInfo));
 
     Assert(pVmcsInfo->hMemObjVmcs          == NIL_RTR0MEMOBJ);
@@ -1551 +1551 @@
  *
  * @sa      HMGetVmxMsrPermission.
+ * @remarks Can be called with interrupts disabled.
  */
 static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm)
@@ -1644 +1645 @@
     /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
     uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
-    if (RT_UNLIKELY(cMsrs >= cMaxSupportedMsrs))
-    {
-        LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
-        pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
-        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-    }
-
-    /* Commit the MSR counts to the VMCS and update the cache. */
-    int rc = VINF_SUCCESS;
-    if (pVmcsInfo->cEntryMsrLoad != cMsrs)
-        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
-    if (pVmcsInfo->cExitMsrStore != cMsrs)
-        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
-    if (pVmcsInfo->cExitMsrLoad != cMsrs)
-        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);
-    AssertRCReturn(rc, rc);
-
-    pVmcsInfo->cEntryMsrLoad = cMsrs;
-    pVmcsInfo->cExitMsrStore = cMsrs;
-    pVmcsInfo->cExitMsrLoad  = cMsrs;
-
-    return VINF_SUCCESS;
+    if (RT_LIKELY(cMsrs < cMaxSupportedMsrs))
+    {
+        /* Commit the MSR counts to the VMCS and update the cache. */
+        if (pVmcsInfo->cEntryMsrLoad != cMsrs)
+        {
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
+            rc    |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
+            rc    |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);
+            AssertRCReturn(rc, rc);
+
+            pVmcsInfo->cEntryMsrLoad = cMsrs;
+            pVmcsInfo->cExitMsrStore = cMsrs;
+            pVmcsInfo->cExitMsrLoad  = cMsrs;
+        }
+        return VINF_SUCCESS;
+    }
+
+    LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs));
+    pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
+    return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
 }
 
@@ -1699 +1698 @@
     for (i = 0; i < cMsrs; i++)
     {
-        if (pGuestMsrLoad->u32Msr == idMsr)
+        if (pGuestMsrLoad[i].u32Msr == idMsr)
             break;
-        pGuestMsrLoad++;
     }
 
@@ -1707 +1705 @@
     if (i == cMsrs)
     {
-        /* The MSR does not exist, bump the MSR coun to make room for the new MSR. */
+        /* The MSR does not exist, bump the MSR count to make room for the new MSR. */
         ++cMsrs;
         int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);
@@ -1721 +1719 @@
 
     /* Update the MSR value for the newly added or already existing MSR. */
-    pGuestMsrLoad->u32Msr   = idMsr;
-    pGuestMsrLoad->u64Value = uGuestMsrValue;
+    pGuestMsrLoad[i].u32Msr   = idMsr;
+    pGuestMsrLoad[i].u64Value = uGuestMsrValue;
 
     /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */
@@ -1728 +1726 @@
     {
         PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
-        pGuestMsrStore += i;
-        pGuestMsrStore->u32Msr   = idMsr;
-        pGuestMsrStore->u64Value = 0;
+        pGuestMsrStore[i].u32Msr   = idMsr;
+        pGuestMsrStore[i].u64Value = uGuestMsrValue;
     }
 
     /* Update the corresponding slot in the host MSR area. */
     PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
-    Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad && pHostMsr != pVmcsInfo->pvGuestMsrStore);
-    pHostMsr += i;
-    pHostMsr->u32Msr = idMsr;
+    Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad);
+    Assert(pHostMsr != pVmcsInfo->pvGuestMsrStore);
+    pHostMsr[i].u32Msr = idMsr;
 
     /*
@@ -1751 +1748 @@
         Assert(!VMMRZCallRing3IsEnabled(pVCpu));
         Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-        pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
+        pHostMsr[i].u64Value = ASMRdMsr(idMsr);
     }
     return VINF_SUCCESS;
@@ -1773 +1770 @@
     uint32_t     cMsrs         = pVmcsInfo->cEntryMsrLoad;
 
-    bool const fSeparateExitMsrStorePage = hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo);
     for (uint32_t i = 0; i < cMsrs; i++)
     {
         /* Find the MSR. */
-        if (pGuestMsrLoad->u32Msr == idMsr)
-        {
-            /* If it's the last MSR, simply reduce the count. */
-            if (i == cMsrs - 1)
+        if (pGuestMsrLoad[i].u32Msr == idMsr)
+        {
+            /*
+             * If it's the last MSR, we only need to reduce the MSR count.
+             * If it's -not- the last MSR, copy the last MSR in place of it and reduce the MSR count.
+             */
+            if (i < cMsrs - 1)
             {
-                --cMsrs;
-                break;
+                /* Remove it from the VM-entry MSR-load area. */
+                pGuestMsrLoad[i].u32Msr   = pGuestMsrLoad[cMsrs - 1].u32Msr;
+                pGuestMsrLoad[i].u64Value = pGuestMsrLoad[cMsrs - 1].u64Value;
+
+                /* Remove it from the VM-exit MSR-store area if it's in a different page. */
+                if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
+                {
+                    PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
+                    Assert(pGuestMsrStore[i].u32Msr == idMsr);
+                    pGuestMsrStore[i].u32Msr   = pGuestMsrStore[cMsrs - 1].u32Msr;
+                    pGuestMsrStore[i].u64Value = pGuestMsrStore[cMsrs - 1].u64Value;
+                }
+
+                /* Remove it from the VM-exit MSR-load area. */
+                PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
+                Assert(pHostMsr[i].u32Msr == idMsr);
+                pHostMsr[i].u32Msr   = pHostMsr[cMsrs - 1].u32Msr;
+                pHostMsr[i].u64Value = pHostMsr[cMsrs - 1].u64Value;
             }
 
-            /* Remove it by copying the last MSR in place of it, and reducing the count. */
-            PVMXAUTOMSR pLastGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
-            pLastGuestMsrLoad            += cMsrs - 1;
-            pGuestMsrLoad->u32Msr         = pLastGuestMsrLoad->u32Msr;
-            pGuestMsrLoad->u64Value       = pLastGuestMsrLoad->u64Value;
-
-            /* Remove it from the VM-exit MSR-store area if we are using a different page. */
-            if (fSeparateExitMsrStorePage)
-            {
-                PVMXAUTOMSR pGuestMsrStore     = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
-                PVMXAUTOMSR pLastGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
-                pGuestMsrStore                += i;
-                pLastGuestMsrStore            += cMsrs - 1;
-                Assert(pGuestMsrStore->u32Msr == idMsr);
-                pGuestMsrStore->u32Msr         = pLastGuestMsrStore->u32Msr;
-                pGuestMsrStore->u64Value       = pLastGuestMsrStore->u64Value;
-            }
-
-            /* Remove it from the VM-exit MSR-load area. */
-            PVMXAUTOMSR pHostMsr      = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
-            PVMXAUTOMSR pLastHostMsr  = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
-            pHostMsr                 += i;
-            pLastHostMsr             += cMsrs - 1;
-            Assert(pHostMsr->u32Msr == idMsr);
-            pHostMsr->u32Msr          = pLastHostMsr->u32Msr;
-            pHostMsr->u64Value        = pLastHostMsr->u64Value;
+            /* Reduce the count to reflect the removed MSR and bail. */
             --cMsrs;
             break;
         }
-        pGuestMsrLoad++;
-    }
-
-    /* Update the VMCS if the count changed (meaning the MSR was found). */
+    }
+
+    /* Update the VMCS if the count changed (meaning the MSR was found and removed). */
     if (cMsrs != pVmcsInfo->cEntryMsrLoad)
     {
@@ -1845 +1834 @@
 static bool hmR0VmxIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr)
 {
-    PCVMXAUTOMSR   pGuestMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
-    uint32_t const cMsrs         = pVmcsInfo->cEntryMsrLoad;
+    PCVMXAUTOMSR   pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
+    uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad;
+    Assert(pMsrs);
+    Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
     for (uint32_t i = 0; i < cMsrs; i++)
     {
-        if (pGuestMsrLoad->u32Msr == idMsr)
+        if (pMsrs[i].u32Msr == idMsr)
             return true;
-        pGuestMsrLoad++;
     }
     return false;
@@ -1867 +1857 @@
 static void hmR0VmxUpdateAutoLoadHostMsrs(PCVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
 {
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
     PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
     uint32_t const cMsrs     = pVmcsInfo->cExitMsrLoad;
-
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     Assert(pHostMsrLoad);
-
-    for (uint32_t i = 0; i < cMsrs; i++, pHostMsrLoad++)
+    Assert(sizeof(*pHostMsrLoad) * cMsrs <= X86_PAGE_4K_SIZE);
+    for (uint32_t i = 0; i < cMsrs; i++)
     {
         /*
@@ -1879 +1869 @@
          * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
          */
-        if (pHostMsrLoad->u32Msr == MSR_K6_EFER)
-            pHostMsrLoad->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer;
+        if (pHostMsrLoad[i].u32Msr == MSR_K6_EFER)
+            pHostMsrLoad[i].u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer;
         else
-            pHostMsrLoad->u64Value = ASMRdMsr(pHostMsrLoad->u32Msr);
+            pHostMsrLoad[i].u64Value = ASMRdMsr(pHostMsrLoad[i].u32Msr);
     }
 }
@@ -2149 +2139 @@
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
 
-    /* Verify MSR counts in the VMCS are what we think it should be.  */
-    uint32_t cMsrs;
-    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs);
-    AssertRC(rc);
-    Assert(cMsrs == pVmcsInfo->cEntryMsrLoad);
-
-    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs);
-    AssertRC(rc);
-    Assert(cMsrs == pVmcsInfo->cExitMsrStore);
-
-    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs);
-    AssertRC(rc);
-    Assert(cMsrs == pVmcsInfo->cExitMsrLoad);
+    /* Read the various MSR-area counts from the VMCS. */
+    uint32_t cEntryLoadMsrs;
+    uint32_t cExitStoreMsrs;
+    uint32_t cExitLoadMsrs;
+    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cEntryLoadMsrs);  AssertRC(rc);
+    rc     = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cExitStoreMsrs);  AssertRC(rc);
+    rc     = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  &cExitLoadMsrs);   AssertRC(rc);
+
+    /* Verify all the MSR counts are the same. */
+    Assert(cEntryLoadMsrs == cExitStoreMsrs);
+    Assert(cExitStoreMsrs == cExitLoadMsrs);
+    uint32_t const cMsrs = cExitLoadMsrs;
 
     /* Verify the MSR counts do not exceed the maximum count supported by the hardware. */
     Assert(cMsrs < VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc));
 
+    /* Verify the MSR counts are within the allocated page size. */
+    Assert(sizeof(VMXAUTOMSR) * cMsrs <= X86_PAGE_4K_SIZE);
+
+    /* Verify the relevant contents of the MSR areas match. */
     PCVMXAUTOMSR pGuestMsrLoad  = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
     PCVMXAUTOMSR pGuestMsrStore = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
@@ -2192 +2185 @@
         if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
         {
-            uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, pGuestMsrLoad->u32Msr);
+            uint32_t const fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, pGuestMsrLoad->u32Msr);
             if (pGuestMsrLoad->u32Msr == MSR_K6_EFER)
             {
@@ -2827 +2820 @@
     int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, HCPhysGuestMsrLoad);
     rc    |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, HCPhysGuestMsrStore);
-    rc    |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, HCPhysHostMsrLoad);
+    rc    |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,  HCPhysHostMsrLoad);
     AssertRCReturn(rc, rc);
     return VINF_SUCCESS;
@@ -7482 +7475 @@
                 )
             {
-                PCVMXAUTOMSR   pMsr  = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
+                PCVMXAUTOMSR   pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
                 uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
-                Assert(cMsrs == 0 || pMsr != NULL);
+                Assert(pMsrs);
                 Assert(cMsrs <= VMX_MISC_MAX_MSRS(pVM->hm.s.vmx.Msrs.u64Misc));
-                for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
+                Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
+                for (uint32_t i = 0; i < cMsrs; i++)
                 {
-                    switch (pMsr->u32Msr)
+                    uint32_t const idMsr = pMsrs[i].u32Msr;
+                    switch (idMsr)
                     {
+                        case MSR_K8_TSC_AUX:        CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value);     break;
+                        case MSR_IA32_SPEC_CTRL:    CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value);   break;
+                        case MSR_K6_EFER:           /* Can't be changed without causing a VM-exit */  break;
 #if HC_ARCH_BITS == 32
-                        case MSR_K8_LSTAR:          pCtx->msrLSTAR        = pMsr->u64Value;          break;
-                        case MSR_K6_STAR:           pCtx->msrSTAR         = pMsr->u64Value;          break;
-                        case MSR_K8_SF_MASK:        pCtx->msrSFMASK       = pMsr->u64Value;          break;
-                        case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value;          break;
+                        case MSR_K8_LSTAR:          pCtx->msrLSTAR        = pMsrs[i].u64Value;        break;
+                        case MSR_K6_STAR:           pCtx->msrSTAR         = pMsrs[i].u64Value;        break;
+                        case MSR_K8_SF_MASK:        pCtx->msrSFMASK       = pMsrs[i].u64Value;        break;
+                        case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsrs[i].u64Value;        break;
 #endif
-                        case MSR_IA32_SPEC_CTRL:    CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value);     break;
-                        case MSR_K8_TSC_AUX:        CPUMSetGuestTscAux(pVCpu, pMsr->u64Value);       break;
-                        case MSR_K6_EFER:           /* Can't be changed without causing a VM-exit */ break;
-
                         default:
                         {
-                            pVCpu->hm.s.u32HMError = pMsr->u32Msr;
+                            pCtx->fExtrn = 0;
+                            pVCpu->hm.s.u32HMError = pMsrs->u32Msr;
                             ASMSetFlags(fEFlags);
-                            AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr,
-                                             cMsrs));
+                            AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
                             return VERR_HM_UNEXPECTED_LD_ST_MSR;
                         }
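
For reference, the rewritten hmR0VmxRemoveAutoLoadStoreMsr above removes an entry by copying the last element over the hole and shrinking the count, which keeps the MSR areas densely packed without shifting the tail. A minimal standalone sketch of that swap-with-last idiom follows; the helper name and simplified struct are hypothetical, not the VMM code itself.

    #include <stdint.h>

    /* Same simplified stand-in entry layout as in the sketch near the top. */
    struct AUTOMSR { uint32_t u32Msr; uint32_t u32Reserved; uint64_t u64Value; };

    /* Remove idMsr from a densely packed array by overwriting it with the last entry.
       Returns the new count; the relative order of the surviving entries is not preserved. */
    static uint32_t RemoveMsrSwapLast(AUTOMSR *paMsrs, uint32_t cMsrs, uint32_t idMsr)
    {
        for (uint32_t i = 0; i < cMsrs; i++)
        {
            if (paMsrs[i].u32Msr == idMsr)
            {
                if (i < cMsrs - 1)              /* not the last entry: pull the last one into the hole */
                    paMsrs[i] = paMsrs[cMsrs - 1];
                return cMsrs - 1;               /* the last slot is now unused */
            }
        }
        return cMsrs;                           /* not found, count unchanged */
    }

The real code repeats the copy for the VM-entry MSR-load, VM-exit MSR-store and VM-exit (host) MSR-load areas and then writes the reduced count back to the VMCS.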