Changeset 78287 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
Timestamp: Apr 25, 2019 8:16:53 AM (6 years ago)
File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r78259)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r78287)

@@ -1256 +1256 @@
 static void hmR0VmxInitVmcsInfo(PVMXVMCSINFO pVmcsInfo)
 {
-    RT_ZERO(*pVmcsInfo);
+    memset(pVmcsInfo, 0, sizeof(*pVmcsInfo));

     Assert(pVmcsInfo->hMemObjVmcs == NIL_RTR0MEMOBJ);

@@ -1551 +1551 @@
  *
  * @sa HMGetVmxMsrPermission.
+ * @remarks Can be called with interrupts disabled.
  */
 static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, PVMXVMCSINFO pVmcsInfo, bool fIsNstGstVmcs, uint32_t idMsr, uint32_t fMsrpm)

@@ -1644 +1645 @@ hmR0VmxSetAutoLoadStoreMsrCount()
     /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
     uint32_t const cMaxSupportedMsrs = VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
-    if (RT_UNLIKELY(cMsrs >= cMaxSupportedMsrs))
-    {
-        LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
-        pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
-        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-    }
-
-    /* Commit the MSR counts to the VMCS and update the cache. */
-    int rc = VINF_SUCCESS;
-    if (pVmcsInfo->cEntryMsrLoad != cMsrs)
-        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
-    if (pVmcsInfo->cExitMsrStore != cMsrs)
-        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
-    if (pVmcsInfo->cExitMsrLoad != cMsrs)
-        rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs);
-    AssertRCReturn(rc, rc);
-
-    pVmcsInfo->cEntryMsrLoad = cMsrs;
-    pVmcsInfo->cExitMsrStore = cMsrs;
-    pVmcsInfo->cExitMsrLoad  = cMsrs;
-
-    return VINF_SUCCESS;
+    if (RT_LIKELY(cMsrs < cMaxSupportedMsrs))
+    {
+        /* Commit the MSR counts to the VMCS and update the cache. */
+        if (pVmcsInfo->cEntryMsrLoad != cMsrs)
+        {
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);
+            rc    |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);
+            rc    |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);
+            AssertRCReturn(rc, rc);
+
+            pVmcsInfo->cEntryMsrLoad = cMsrs;
+            pVmcsInfo->cExitMsrStore = cMsrs;
+            pVmcsInfo->cExitMsrLoad  = cMsrs;
+        }
+        return VINF_SUCCESS;
+    }
+
+    LogRel(("Auto-load/store MSR count exceeded! cMsrs=%u MaxSupported=%u\n", cMsrs, cMaxSupportedMsrs));
+    pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
+    return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
 }
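All three MSR areas managed above hold 16-byte entries in the format the Intel SDM defines for the VMX auto-load/store lists. Below is a minimal compilable sketch of that layout, assuming VBox's VMXAUTOMSR mirrors the SDM format; the AUTOMSR name here is a stand-in, not the VBox type:

    #include <assert.h>
    #include <stdint.h>

    /* 128-bit entry format of the VMX auto-load/store MSR areas per the
       Intel SDM; field names follow VBox's VMXAUTOMSR. */
    typedef struct AUTOMSR
    {
        uint32_t u32Msr;       /* MSR index to load/store. */
        uint32_t u32Reserved;  /* Reserved, must be zero. */
        uint64_t u64Value;     /* Value loaded on VM-entry / stored on VM-exit. */
    } AUTOMSR;

    int main(void)
    {
        /* 16 bytes per entry: one 4K page holds exactly 256 entries. This is
           the bound behind the sizeof(*p) * cMsrs <= X86_PAGE_4K_SIZE
           assertions this changeset adds. */
        assert(sizeof(AUTOMSR) == 16);
        assert(4096 / sizeof(AUTOMSR) == 256);
        return 0;
    }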
@@ -1699 +1698 @@ hmR0VmxAddAutoLoadStoreMsr()
     for (i = 0; i < cMsrs; i++)
     {
-        if (pGuestMsrLoad->u32Msr == idMsr)
+        if (pGuestMsrLoad[i].u32Msr == idMsr)
             break;
-        pGuestMsrLoad++;
     }

@@ -1707 +1705 @@
     if (i == cMsrs)
     {
-        /* The MSR does not exist, bump the MSR coun to make room for the new MSR. */
+        /* The MSR does not exist, bump the MSR count to make room for the new MSR. */
         ++cMsrs;
         int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, pVmcsInfo, cMsrs);

@@ -1721 +1719 @@

     /* Update the MSR value for the newly added or already existing MSR. */
-    pGuestMsrLoad->u32Msr   = idMsr;
-    pGuestMsrLoad->u64Value = uGuestMsrValue;
+    pGuestMsrLoad[i].u32Msr   = idMsr;
+    pGuestMsrLoad[i].u64Value = uGuestMsrValue;

     /* Create the corresponding slot in the VM-exit MSR-store area if we use a different page. */

@@ -1728 +1726 @@
     {
         PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
-        pGuestMsrStore += i;
-        pGuestMsrStore->u32Msr   = idMsr;
-        pGuestMsrStore->u64Value = 0;
+        pGuestMsrStore[i].u32Msr   = idMsr;
+        pGuestMsrStore[i].u64Value = uGuestMsrValue;
     }

@@ -1735 +1732 @@
     /* Update the corresponding slot in the host MSR area. */
     PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
-    Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad && pHostMsr != pVmcsInfo->pvGuestMsrStore);
-    pHostMsr += i;
-    pHostMsr->u32Msr = idMsr;
+    Assert(pHostMsr != pVmcsInfo->pvGuestMsrLoad);
+    Assert(pHostMsr != pVmcsInfo->pvGuestMsrStore);
+    pHostMsr[i].u32Msr = idMsr;

@@ -1751 +1748 @@
         Assert(!VMMRZCallRing3IsEnabled(pVCpu));
         Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-        pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
+        pHostMsr[i].u64Value = ASMRdMsr(idMsr);
     }
     return VINF_SUCCESS;
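The add path above is a linear find-or-append over the guest MSR-load array, with the VM-exit store and host load arrays kept in lockstep at the same index. A condensed sketch of that pattern on a single array; findOrAdd and Entry are hypothetical names, not VBox APIs:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct Entry { uint32_t id; uint64_t value; } Entry;

    /* Linear find-or-append over a fixed-capacity array: returns the slot
       index used, or -1 when the area is full (the VERR_... path above). */
    static int findOrAdd(Entry *paEntries, uint32_t *pcEntries, uint32_t cMax,
                         uint32_t id, uint64_t value)
    {
        uint32_t i;
        for (i = 0; i < *pcEntries; i++)   /* Indexed scan, as in r78287. */
            if (paEntries[i].id == id)
                break;
        if (i == *pcEntries)               /* Not found: bump the count. */
        {
            if (*pcEntries >= cMax)
                return -1;
            ++*pcEntries;
        }
        paEntries[i].id    = id;           /* Update the new or existing slot. */
        paEntries[i].value = value;
        return (int)i;
    }

    int main(void)
    {
        Entry    aMsrs[4] = { { 0, 0 } };
        uint32_t cMsrs    = 0;
        int iSlot = findOrAdd(aMsrs, &cMsrs, 4, UINT32_C(0xC0000103) /* TSC_AUX */, 7);
        printf("slot=%d cMsrs=%u\n", iSlot, cMsrs);   /* slot=0 cMsrs=1 */
        iSlot = findOrAdd(aMsrs, &cMsrs, 4, UINT32_C(0xC0000103), 9);
        printf("slot=%d cMsrs=%u\n", iSlot, cMsrs);   /* same slot, count unchanged */
        return 0;
    }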
@@ -1773 +1770 @@ hmR0VmxRemoveAutoLoadStoreMsr()
     uint32_t cMsrs = pVmcsInfo->cEntryMsrLoad;

-    bool const fSeparateExitMsrStorePage = hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo);
     for (uint32_t i = 0; i < cMsrs; i++)
     {
         /* Find the MSR. */
-        if (pGuestMsrLoad->u32Msr == idMsr)
-        {
-            /* If it's the last MSR, simply reduce the count. */
-            if (i == cMsrs - 1)
+        if (pGuestMsrLoad[i].u32Msr == idMsr)
+        {
+            /*
+             * If it's the last MSR, we only need to reduce the MSR count.
+             * If it's -not- the last MSR, copy the last MSR in place of it and reduce the MSR count.
+             */
+            if (i < cMsrs - 1)
             {
-                --cMsrs;
-                break;
+                /* Remove it from the VM-entry MSR-load area. */
+                pGuestMsrLoad[i].u32Msr   = pGuestMsrLoad[cMsrs - 1].u32Msr;
+                pGuestMsrLoad[i].u64Value = pGuestMsrLoad[cMsrs - 1].u64Value;
+
+                /* Remove it from the VM-exit MSR-store area if it's in a different page. */
+                if (hmR0VmxIsSeparateExitMsrStoreAreaVmcs(pVmcsInfo))
+                {
+                    PVMXAUTOMSR pGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
+                    Assert(pGuestMsrStore[i].u32Msr == idMsr);
+                    pGuestMsrStore[i].u32Msr   = pGuestMsrStore[cMsrs - 1].u32Msr;
+                    pGuestMsrStore[i].u64Value = pGuestMsrStore[cMsrs - 1].u64Value;
+                }
+
+                /* Remove it from the VM-exit MSR-load area. */
+                PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
+                Assert(pHostMsr[i].u32Msr == idMsr);
+                pHostMsr[i].u32Msr   = pHostMsr[cMsrs - 1].u32Msr;
+                pHostMsr[i].u64Value = pHostMsr[cMsrs - 1].u64Value;
             }

-            /* Remove it by copying the last MSR in place of it, and reducing the count. */
-            PVMXAUTOMSR pLastGuestMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
-            pLastGuestMsrLoad += cMsrs - 1;
-            pGuestMsrLoad->u32Msr   = pLastGuestMsrLoad->u32Msr;
-            pGuestMsrLoad->u64Value = pLastGuestMsrLoad->u64Value;
-
-            /* Remove it from the VM-exit MSR-store area if we are using a different page. */
-            if (fSeparateExitMsrStorePage)
-            {
-                PVMXAUTOMSR pGuestMsrStore     = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
-                PVMXAUTOMSR pLastGuestMsrStore = (PVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
-                pGuestMsrStore     += i;
-                pLastGuestMsrStore += cMsrs - 1;
-                Assert(pGuestMsrStore->u32Msr == idMsr);
-                pGuestMsrStore->u32Msr   = pLastGuestMsrStore->u32Msr;
-                pGuestMsrStore->u64Value = pLastGuestMsrStore->u64Value;
-            }
-
-            /* Remove it from the VM-exit MSR-load area. */
-            PVMXAUTOMSR pHostMsr     = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
-            PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
-            pHostMsr     += i;
-            pLastHostMsr += cMsrs - 1;
-            Assert(pHostMsr->u32Msr == idMsr);
-            pHostMsr->u32Msr   = pLastHostMsr->u32Msr;
-            pHostMsr->u64Value = pLastHostMsr->u64Value;
+            /* Reduce the count to reflect the removed MSR and bail. */
             --cMsrs;
             break;
         }
-        pGuestMsrLoad++;
     }

-    /* Update the VMCS if the count changed (meaning the MSR was found). */
+    /* Update the VMCS if the count changed (meaning the MSR was found and removed). */
     if (cMsrs != pVmcsInfo->cEntryMsrLoad)
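The rewritten removal keeps the arrays dense: the victim slot is overwritten by the last entry and the count shrinks, an order-destroying O(1) delete that leaves no holes for the CPU to process on entry/exit. A standalone sketch of the pattern, again with hypothetical names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct Entry { uint32_t id; uint64_t value; } Entry;

    /* Order-destroying O(1) removal: copy the last entry over the victim
       and shrink the count; the last entry itself needs only the shrink. */
    static bool removeById(Entry *paEntries, uint32_t *pcEntries, uint32_t id)
    {
        for (uint32_t i = 0; i < *pcEntries; i++)
        {
            if (paEntries[i].id == id)
            {
                if (i < *pcEntries - 1)
                    paEntries[i] = paEntries[*pcEntries - 1];
                --*pcEntries;
                return true;
            }
        }
        return false;  /* Not found: caller skips the VMCS count update. */
    }

    int main(void)
    {
        Entry    aMsrs[3] = { { 1, 10 }, { 2, 20 }, { 3, 30 } };
        uint32_t cMsrs    = 3;
        removeById(aMsrs, &cMsrs, 1);
        printf("cMsrs=%u first.id=%u\n", cMsrs, aMsrs[0].id); /* cMsrs=2 first.id=3 */
        return 0;
    }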
@@ -1845 +1834 @@
 static bool hmR0VmxIsAutoLoadGuestMsr(PCVMXVMCSINFO pVmcsInfo, uint32_t idMsr)
 {
-    PCVMXAUTOMSR pGuestMsrLoad = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
-    uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad;
+    PCVMXAUTOMSR   pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
+    uint32_t const cMsrs = pVmcsInfo->cEntryMsrLoad;
+    Assert(pMsrs);
+    Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
     for (uint32_t i = 0; i < cMsrs; i++)
     {
-        if (pGuestMsrLoad->u32Msr == idMsr)
+        if (pMsrs[i].u32Msr == idMsr)
             return true;
-        pGuestMsrLoad++;
     }
     return false;

@@ -1867 +1857 @@
 static void hmR0VmxUpdateAutoLoadHostMsrs(PCVMCPU pVCpu, PCVMXVMCSINFO pVmcsInfo)
 {
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
     PVMXAUTOMSR pHostMsrLoad = (PVMXAUTOMSR)pVmcsInfo->pvHostMsrLoad;
     uint32_t const cMsrs = pVmcsInfo->cExitMsrLoad;
-
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     Assert(pHostMsrLoad);
-
-    for (uint32_t i = 0; i < cMsrs; i++, pHostMsrLoad++)
+    Assert(sizeof(*pHostMsrLoad) * cMsrs <= X86_PAGE_4K_SIZE);
+    for (uint32_t i = 0; i < cMsrs; i++)
     {

@@ -1879 +1869 @@
          * Strict builds will catch mismatches in hmR0VmxCheckAutoLoadStoreMsrs(). See @bugref{7368}.
          */
-        if (pHostMsrLoad->u32Msr == MSR_K6_EFER)
-            pHostMsrLoad->u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer;
+        if (pHostMsrLoad[i].u32Msr == MSR_K6_EFER)
+            pHostMsrLoad[i].u64Value = pVCpu->CTX_SUFF(pVM)->hm.s.vmx.u64HostMsrEfer;
         else
-            pHostMsrLoad->u64Value = ASMRdMsr(pHostMsrLoad->u32Msr);
+            pHostMsrLoad[i].u64Value = ASMRdMsr(pHostMsrLoad[i].u32Msr);
     }
 }

@@ -2149 +2139 @@ hmR0VmxCheckAutoLoadStoreMsrs()
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

-    /* Verify MSR counts in the VMCS are what we think it should be. */
-    uint32_t cMsrs;
-    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs);
-    AssertRC(rc);
-    Assert(cMsrs == pVmcsInfo->cEntryMsrLoad);
-
-    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs);
-    AssertRC(rc);
-    Assert(cMsrs == pVmcsInfo->cExitMsrStore);
-
-    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs);
-    AssertRC(rc);
-    Assert(cMsrs == pVmcsInfo->cExitMsrLoad);
+    /* Read the various MSR-area counts from the VMCS. */
+    uint32_t cEntryLoadMsrs;
+    uint32_t cExitStoreMsrs;
+    uint32_t cExitLoadMsrs;
+    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cEntryLoadMsrs);  AssertRC(rc);
+    rc     = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cExitStoreMsrs);  AssertRC(rc);
+    rc     = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  &cExitLoadMsrs);   AssertRC(rc);
+
+    /* Verify all the MSR counts are the same. */
+    Assert(cEntryLoadMsrs == cExitStoreMsrs);
+    Assert(cExitStoreMsrs == cExitLoadMsrs);
+    uint32_t const cMsrs = cExitLoadMsrs;

     /* Verify the MSR counts do not exceed the maximum count supported by the hardware. */
     Assert(cMsrs < VMX_MISC_MAX_MSRS(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc));

+    /* Verify the MSR counts are within the allocated page size. */
+    Assert(sizeof(VMXAUTOMSR) * cMsrs <= X86_PAGE_4K_SIZE);
+
+    /* Verify the relevant contents of the MSR areas match. */
     PCVMXAUTOMSR pGuestMsrLoad  = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrLoad;
     PCVMXAUTOMSR pGuestMsrStore = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;

@@ -2192 +2185 @@
     if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS)
     {
-        uint32_t fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, pGuestMsrLoad->u32Msr);
+        uint32_t const fMsrpm = HMGetVmxMsrPermission(pVmcsInfo->pvMsrBitmap, pGuestMsrLoad->u32Msr);
         if (pGuestMsrLoad->u32Msr == MSR_K6_EFER)
         {

@@ -2827 +2820 @@
     int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, HCPhysGuestMsrLoad);
     rc    |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, HCPhysGuestMsrStore);
-    rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, HCPhysHostMsrLoad);
+    rc    |= VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL,  HCPhysHostMsrLoad);
     AssertRCReturn(rc, rc);
     return VINF_SUCCESS;
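The cMaxSupportedMsrs / VMX_MISC_MAX_MSRS() bound asserted above comes from the IA32_VMX_MISC MSR: per the Intel SDM, bits 27:25 encode N and the recommended maximum list length is 512 * (N + 1), hence the "recommended 512" remark for the common N=0 case. A sketch of the decode; maxAutoMsrs is a hypothetical helper standing in for the VMX_MISC_MAX_MSRS() macro:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the recommended auto-load/store list limit from IA32_VMX_MISC:
       bits 27:25 hold N, and the limit is 512 * (N + 1). */
    static uint32_t maxAutoMsrs(uint64_t uVmxMisc)
    {
        return 512 * (uint32_t)(((uVmxMisc >> 25) & 7) + 1);
    }

    int main(void)
    {
        printf("N=0 -> %u MSRs\n", maxAutoMsrs(0));             /* 512 */
        printf("N=7 -> %u MSRs\n", maxAutoMsrs(7ull << 25));    /* 4096 */
        return 0;
    }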
@@ -7482 +7475 @@ hmR0VmxImportGuestState()
        )
     {
-        PCVMXAUTOMSR pMsr = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
+        PCVMXAUTOMSR pMsrs = (PCVMXAUTOMSR)pVmcsInfo->pvGuestMsrStore;
         uint32_t const cMsrs = pVmcsInfo->cExitMsrStore;
-        Assert(cMsrs == 0 || pMsr != NULL);
+        Assert(pMsrs);
         Assert(cMsrs <= VMX_MISC_MAX_MSRS(pVM->hm.s.vmx.Msrs.u64Misc));
-        for (uint32_t i = 0; i < cMsrs; i++, pMsr++)
+        Assert(sizeof(*pMsrs) * cMsrs <= X86_PAGE_4K_SIZE);
+        for (uint32_t i = 0; i < cMsrs; i++)
         {
-            switch (pMsr->u32Msr)
+            uint32_t const idMsr = pMsrs[i].u32Msr;
+            switch (idMsr)
             {
+                case MSR_K8_TSC_AUX:        CPUMSetGuestTscAux(pVCpu, pMsrs[i].u64Value);     break;
+                case MSR_IA32_SPEC_CTRL:    CPUMSetGuestSpecCtrl(pVCpu, pMsrs[i].u64Value);   break;
+                case MSR_K6_EFER:           /* Can't be changed without causing a VM-exit */  break;
 #if HC_ARCH_BITS == 32
-                case MSR_K8_LSTAR:          pCtx->msrLSTAR        = pMsr->u64Value;     break;
-                case MSR_K6_STAR:           pCtx->msrSTAR         = pMsr->u64Value;     break;
-                case MSR_K8_SF_MASK:        pCtx->msrSFMASK       = pMsr->u64Value;     break;
-                case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value;     break;
+                case MSR_K8_LSTAR:          pCtx->msrLSTAR        = pMsrs[i].u64Value;        break;
+                case MSR_K6_STAR:           pCtx->msrSTAR         = pMsrs[i].u64Value;        break;
+                case MSR_K8_SF_MASK:        pCtx->msrSFMASK       = pMsrs[i].u64Value;        break;
+                case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsrs[i].u64Value;        break;
 #endif
-                case MSR_IA32_SPEC_CTRL:    CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break;
-                case MSR_K8_TSC_AUX:        CPUMSetGuestTscAux(pVCpu, pMsr->u64Value);   break;
-                case MSR_K6_EFER:           /* Can't be changed without causing a VM-exit */ break;
-
                 default:
                 {
-                    pVCpu->hm.s.u32HMError = pMsr->u32Msr;
+                    pCtx->fExtrn = 0;
+                    pVCpu->hm.s.u32HMError = pMsrs->u32Msr;
                     ASMSetFlags(fEFlags);
-                    AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr,
-                                     cMsrs));
+                    AssertMsgFailed(("Unexpected MSR in auto-load/store area. idMsr=%#RX32 cMsrs=%u\n", idMsr, cMsrs));
                     return VERR_HM_UNEXPECTED_LD_ST_MSR;
                 }
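The import loop above walks the VM-exit MSR-store area once and routes each stored value to its CPUM setter, treating any unrecognized MSR as fatal. A self-contained sketch of that dispatch shape; the printf handlers stand in for the CPUMSetGuest* calls, and only two architectural MSR numbers are shown:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct Entry { uint32_t id; uint64_t value; } Entry;

    #define MSR_TSC_AUX   UINT32_C(0xC0000103)  /* Architectural IA32_TSC_AUX. */
    #define MSR_SPEC_CTRL UINT32_C(0x00000048)  /* Architectural IA32_SPEC_CTRL. */

    /* Route each value the CPU stored on VM-exit to its save slot; an
       unexpected MSR in the area is a hard error, as in the code above. */
    static int importStoredMsrs(const Entry *paMsrs, uint32_t cMsrs)
    {
        for (uint32_t i = 0; i < cMsrs; i++)
        {
            switch (paMsrs[i].id)
            {
                case MSR_TSC_AUX:   printf("TSC_AUX   <- %llu\n", (unsigned long long)paMsrs[i].value); break;
                case MSR_SPEC_CTRL: printf("SPEC_CTRL <- %llu\n", (unsigned long long)paMsrs[i].value); break;
                default:            return -1;  /* Unknown MSR: fail loudly. */
            }
        }
        return 0;
    }

    int main(void)
    {
        Entry const aMsrs[] = { { MSR_TSC_AUX, 1 }, { MSR_SPEC_CTRL, 0 } };
        return importStoredMsrs(aMsrs, 2);
    }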