Changeset 72358 in vbox
- Timestamp: May 28, 2018 2:47:51 PM (7 years ago)
- Location: trunk
- Files: 8 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/vmm/cpum.h
r72065 r72358 1173 1173 uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx); 1174 1174 VMMDECL(uint64_t) CPUMGetGuestEFER(PVMCPU pVCpu); 1175 VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PVMCPU pVCpu); 1176 VMM_INT_DECL(uint64_t) CPUMGetGuestIa32FeatureControl(PVMCPU pVCpu); 1175 1177 VMMDECL(VBOXSTRICTRC) CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue); 1176 1178 VMMDECL(VBOXSTRICTRC) CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue); … … 1679 1681 VMMDECL(PCCPUMCTXCORE) CPUMGetHyperCtxCore(PVMCPU pVCpu); 1680 1682 VMMDECL(PCPUMCTX) CPUMQueryGuestCtxPtr(PVMCPU pVCpu); 1683 VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu); 1681 1684 VMMDECL(PCCPUMCTXCORE) CPUMGetGuestCtxCore(PVMCPU pVCpu); 1682 1685 VMM_INT_DECL(int) CPUMRawEnter(PVMCPU pVCpu); -
trunk/include/iprt/nt/hyperv.h
r72307 r72358 849 849 AssertCompile(HvX64RegisterGdtr == 0x00070001); 850 850 AssertCompile(HvX64RegisterInitialApicId == 0x0008000c); 851 AssertCompile(HvX64RegisterMtrrCap == 0x0008000d); 851 852 AssertCompile(HvX64RegisterMtrrDefType == 0x0008000e); 852 853 AssertCompile(HvX64RegisterMtrrPhysBaseF == 0x0008001f); -
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
r72208 r72358 232 232 233 233 234 /** 235 * Get fixed IA32_FEATURE_CONTROL value for NEM and cpumMsrRd_Ia32FeatureControl. 236 * 237 * @returns Fixed IA32_FEATURE_CONTROL value. 238 * @param pVCpu The cross context per CPU structure. 239 */ 240 VMM_INT_DECL(uint64_t) CPUMGetGuestIa32FeatureControl(PVMCPU pVCpu) 241 { 242 RT_NOREF_PV(pVCpu); 243 return 1; /* Locked, no VT-X, no SYSENTER micromanagement. */ 244 } 245 234 246 /** @callback_method_impl{FNCPUMRDMSR} */ 235 247 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32FeatureControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) 236 248 { 237 RT_NOREF_PV( pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);238 *puValue = 1; /* Locked, no VT-X, no SYSENTER micromanagement. */249 RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); 250 *puValue = CPUMGetGuestIa32FeatureControl(pVCpu); 239 251 return VINF_SUCCESS; 240 252 } … … 384 396 385 397 386 /** @callback_method_impl{FNCPUMRDMSR} */ 387 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32MtrrCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) 388 { 389 RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); 398 /** 399 * Get fixed IA32_MTRR_CAP value for NEM and cpumMsrRd_Ia32MtrrCap. 400 * 401 * @returns Fixed IA32_MTRR_CAP value. 402 * @param pVCpu The cross context per CPU structure. 403 */ 404 VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PVMCPU pVCpu) 405 { 406 RT_NOREF_PV(pVCpu); 390 407 391 408 /* This is currently a bit weird. :-) */ … … 394 411 bool const fFixedRangeRegisters = false; 395 412 bool const fWriteCombiningType = false; 396 *puValue = cVariableRangeRegs 397 | (fFixedRangeRegisters ? RT_BIT_64(8) : 0) 398 | (fWriteCombiningType ? RT_BIT_64(10) : 0) 399 | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0); 413 return cVariableRangeRegs 414 | (fFixedRangeRegisters ? RT_BIT_64(8) : 0) 415 | (fWriteCombiningType ? RT_BIT_64(10) : 0) 416 | (fSystemManagementRangeRegisters ? 
RT_BIT_64(11) : 0); 417 } 418 419 /** @callback_method_impl{FNCPUMRDMSR} */ 420 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32MtrrCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue) 421 { 422 RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); 423 *puValue = CPUMGetGuestIa32MtrrCap(pVCpu); 400 424 return VINF_SUCCESS; 401 425 } -
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r72129 r72358 577 577 } 578 578 579 580 /** 581 * Queries the pointer to the internal CPUMCTXMSRS structure. 582 * 583 * This is for NEM only. 584 * 585 * @returns The CPUMCTX pointer. 586 * @param pVCpu The cross context virtual CPU structure. 587 */ 588 VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu) 589 { 590 return &pVCpu->cpum.s.GuestMsrs; 591 } 592 593 579 594 VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit) 580 595 { … … 589 604 } 590 605 606 591 607 VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit) 592 608 { … … 601 617 } 602 618 619 603 620 VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr) 604 621 { … … 611 628 return VINF_SUCCESS; /* formality, consider it void. */ 612 629 } 630 613 631 614 632 VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr) -
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
r72308 r72358 1709 1709 * Deals with unrecoverable exception (triple fault). 1710 1710 * 1711 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up 1712 * here too. So we'll leave it to IEM to decide. 1713 * 1711 1714 * @returns Strict VBox status code. 1712 1715 * @param pVCpu The cross context per CPU structure. 1713 1716 * @param pMsgHdr The message header. 1714 1717 * @param pCtx The register context. 1718 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3). 1715 1719 */ 1716 1720 NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageUnrecoverableException(PVMCPU pVCpu, 1717 1721 HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr, 1718 PCPUMCTX pCtx )1722 PCPUMCTX pCtx, PGVMCPU pGVCpu) 1719 1723 { 1720 1724 /* … … 1726 1730 AssertMsg(pMsgHdr->InstructionLength < 0x10, ("%#x\n", pMsgHdr->InstructionLength)); 1727 1731 1732 #if 0 1728 1733 /* 1729 1734 * Just copy the state we've got and handle it in the loop for now. … … 1733 1738 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, pMsgHdr->Rflags)); 1734 1739 return VINF_EM_TRIPLE_FAULT; 1740 #else 1741 /* 1742 * Let IEM decide whether this is really it. 1743 */ 1744 /** @todo check if this happens becaused of incorrectly pending interrupts of smth. 
*/ 1745 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr); 1746 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit"); 1747 if (rcStrict == VINF_SUCCESS) 1748 { 1749 rcStrict = IEMExecOne(pVCpu); 1750 if (rcStrict == VINF_SUCCESS) 1751 { 1752 Log(("UnrecovExit/%u: %04x:%08RX64: RFL=%#RX64 -> VINF_SUCCESS\n", 1753 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, pMsgHdr->Rflags )); 1754 return VINF_SUCCESS; 1755 } 1756 if (rcStrict == VINF_EM_TRIPLE_FAULT) 1757 Log(("UnrecovExit/%u: %04x:%08RX64: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", 1758 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) )); 1759 else 1760 Log(("UnrecovExit/%u: %04x:%08RX64: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", 1761 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) )); 1762 } 1763 else 1764 Log(("UnrecovExit/%u: %04x:%08RX64: RFL=%#RX64 -> %Rrc (state import)\n", 1765 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) )); 1766 return rcStrict; 1767 #endif 1735 1768 } 1736 1769 … … 1792 1825 case HvMessageTypeUnrecoverableException: 1793 1826 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader)); 1794 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pCtx); 1827 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable); 1828 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pCtx, pGVCpu); 1795 1829 1796 1830 case HvMessageTypeInvalidVpRegisterValue: -
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
r72343 r72358 1093 1093 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT; 1094 1094 iReg++; 1095 #if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */ 1096 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1097 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap; 1098 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pVCpu); 1099 iReg++; 1100 #endif 1101 1102 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu); 1103 1104 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1105 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType; 1106 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType; 1107 iReg++; 1108 1109 /** @todo we dont keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */ 1110 1111 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1112 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000; 1113 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000; 1114 iReg++; 1115 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1116 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000; 1117 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000; 1118 iReg++; 1119 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1120 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000; 1121 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000; 1122 iReg++; 1123 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1124 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000; 1125 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000; 1126 iReg++; 1127 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1128 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000; 1129 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000; 1130 iReg++; 1131 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1132 
pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000; 1133 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000; 1134 iReg++; 1135 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1136 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000; 1137 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000; 1138 iReg++; 1139 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1140 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000; 1141 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000; 1142 iReg++; 1143 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1144 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000; 1145 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000; 1146 iReg++; 1147 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1148 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000; 1149 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000; 1150 iReg++; 1151 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1152 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000; 1153 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000; 1154 iReg++; 1155 1156 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM); 1157 if (enmCpuVendor != CPUMCPUVENDOR_AMD) 1158 { 1159 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1160 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable; 1161 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable; 1162 iReg++; 1163 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]); 1164 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl; 1165 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pVCpu); 1166 iReg++; 1167 } 1095 1168 } 1096 1169 … … 1383 1456 } 1384 1457 1458 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM); 1385 1459 if (fWhat & 
CPUMCTX_EXTRN_OTHER_MSRS) 1386 1460 { 1387 1461 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE 1388 1462 pInput->Names[iReg++] = HvX64RegisterPat; 1463 #if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */ 1464 pInput->Names[iReg++] = HvX64RegisterMtrrCap; 1465 #endif 1466 pInput->Names[iReg++] = HvX64RegisterMtrrDefType; 1467 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000; 1468 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000; 1469 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000; 1470 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000; 1471 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000; 1472 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000; 1473 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000; 1474 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000; 1475 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000; 1476 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000; 1477 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000; 1478 if (enmCpuVendor != CPUMCPUVENDOR_AMD) 1479 { 1480 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable; 1481 #ifdef LOG_ENABLED 1482 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl; 1483 #endif 1484 } 1389 1485 } 1390 1486 … … 1784 1880 if (paValues[iReg].Reg64 != pCtx->msrEFER) 1785 1881 { 1882 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64)); 1786 1883 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE) 1787 1884 PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE)); … … 1794 1891 { 1795 1892 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase); 1893 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64) 1894 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64)); 1796 1895 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64; 1797 1896 iReg++; … … 1800 1899 { 1801 1900 Assert(pInput->Names[iReg] == 
HvX64RegisterSysenterCs); 1901 if (pCtx->SysEnter.cs != paValues[iReg].Reg64) 1902 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64)); 1802 1903 pCtx->SysEnter.cs = paValues[iReg].Reg64; 1803 1904 iReg++; 1905 1804 1906 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip); 1907 if (pCtx->SysEnter.eip != paValues[iReg].Reg64) 1908 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64)); 1805 1909 pCtx->SysEnter.eip = paValues[iReg].Reg64; 1806 1910 iReg++; 1911 1807 1912 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp); 1913 if (pCtx->SysEnter.esp != paValues[iReg].Reg64) 1914 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64)); 1808 1915 pCtx->SysEnter.esp = paValues[iReg].Reg64; 1809 1916 iReg++; … … 1812 1919 { 1813 1920 Assert(pInput->Names[iReg] == HvX64RegisterStar); 1921 if (pCtx->msrSTAR != paValues[iReg].Reg64) 1922 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64)); 1814 1923 pCtx->msrSTAR = paValues[iReg].Reg64; 1815 1924 iReg++; 1925 1816 1926 Assert(pInput->Names[iReg] == HvX64RegisterLstar); 1927 if (pCtx->msrLSTAR != paValues[iReg].Reg64) 1928 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64)); 1817 1929 pCtx->msrLSTAR = paValues[iReg].Reg64; 1818 1930 iReg++; 1931 1819 1932 Assert(pInput->Names[iReg] == HvX64RegisterCstar); 1933 if (pCtx->msrCSTAR != paValues[iReg].Reg64) 1934 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64)); 1820 1935 pCtx->msrCSTAR = paValues[iReg].Reg64; 1821 1936 iReg++; 1937 1822 1938 Assert(pInput->Names[iReg] == HvX64RegisterSfmask); 1939 if (pCtx->msrSFMASK != paValues[iReg].Reg64) 1940 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, 
paValues[iReg].Reg64)); 1823 1941 pCtx->msrSFMASK = paValues[iReg].Reg64; 1824 1942 iReg++; … … 1827 1945 { 1828 1946 Assert(pInput->Names[iReg] == HvX64RegisterApicBase); 1829 if (paValues[iReg].Reg64 != APICGetBaseMsrNoCheck(pVCpu)) 1830 { 1947 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu); 1948 if (paValues[iReg].Reg64 != uOldBase) 1949 { 1950 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n", 1951 pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase)); 1831 1952 VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64); 1832 1953 Assert(rc2 == VINF_SUCCESS); NOREF(rc2); … … 1835 1956 1836 1957 Assert(pInput->Names[iReg] == HvX64RegisterPat); 1958 if (pCtx->msrPAT != paValues[iReg].Reg64) 1959 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64)); 1837 1960 pCtx->msrPAT = paValues[iReg].Reg64; 1838 1961 iReg++; 1962 1963 #if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? 
(AMD) */ 1964 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap); 1965 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu)) 1966 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64)); 1967 iReg++; 1968 #endif 1969 1970 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu); 1971 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType); 1972 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType ) 1973 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64)); 1974 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64; 1975 iReg++; 1976 1977 /** @todo we dont keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */ 1978 1979 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000); 1980 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 ) 1981 Log7(("NEM/%u: MSR MTRR_FIX16K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64)); 1982 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64; 1983 iReg++; 1984 1985 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000); 1986 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 ) 1987 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64)); 1988 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64; 1989 iReg++; 1990 1991 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000); 1992 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 ) 1993 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64)); 1994 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64; 1995 iReg++; 1996 1997 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000); 1998 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 ) 1999 Log7(("NEM/%u: MSR 
MTRR_FIX16K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64)); 2000 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64; 2001 iReg++; 2002 2003 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000); 2004 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 ) 2005 Log7(("NEM/%u: MSR MTRR_FIX16K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64)); 2006 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64; 2007 iReg++; 2008 2009 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000); 2010 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 ) 2011 Log7(("NEM/%u: MSR MTRR_FIX16K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64)); 2012 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64; 2013 iReg++; 2014 2015 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000); 2016 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 ) 2017 Log7(("NEM/%u: MSR MTRR_FIX16K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64)); 2018 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64; 2019 iReg++; 2020 2021 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000); 2022 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 ) 2023 Log7(("NEM/%u: MSR MTRR_FIX16K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64)); 2024 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64; 2025 iReg++; 2026 2027 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000); 2028 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 ) 2029 Log7(("NEM/%u: MSR MTRR_FIX16K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64)); 2030 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64; 2031 iReg++; 2032 2033 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000); 2034 if 
(paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 ) 2035 Log7(("NEM/%u: MSR MTRR_FIX16K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64)); 2036 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64; 2037 iReg++; 2038 2039 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000); 2040 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 ) 2041 Log7(("NEM/%u: MSR MTRR_FIX16K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64)); 2042 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64; 2043 iReg++; 2044 2045 if (enmCpuVendor != CPUMCPUVENDOR_AMD) 2046 { 2047 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable); 2048 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable) 2049 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64)); 2050 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64; 2051 iReg++; 2052 #ifdef LOG_ENABLED 2053 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl); 2054 if (paValues[iReg].Reg64 != CPUMGetGuestIa32FeatureControl(pVCpu)) 2055 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32FeatureControl(pVCpu), paValues[iReg].Reg64)); 2056 iReg++; 2057 #endif 2058 } 2059 2060 /** @todo we don't save state for HvX64RegisterIa32FeatureControl */ 1839 2061 } 1840 2062 -
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
r72308 r72358 1147 1147 STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits", "/NEM/CPU%u/ExitCpuId", iCpu); 1148 1148 STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits", "/NEM/CPU%u/ExitMsr", iCpu); 1149 STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits", "/NEM/CPU%u/ExitUnrecoverable", iCpu); 1149 1150 STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts", "/NEM/CPU%u/GetMsgTimeout", iCpu); 1150 1151 STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops", "/NEM/CPU%u/StopCpuSuccess", iCpu); … … 1344 1345 */ 1345 1346 /** @todo HvCallMapStatsPage */ 1347 1348 /* 1349 * Adjust features. 1350 */ 1351 /** @todo Figure out how to get X2APIC working on AMD (and possible 1352 * intel), but first figure how to disable it dynamically. */ 1353 /*CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_X2APIC);*/ 1354 1346 1355 return VINF_SUCCESS; 1347 1356 } … … 2384 2393 * Here are some observations (mostly against build 17101): 2385 2394 * 2386 * - The VMEXIT performance is dismal (build 171 01).2395 * - The VMEXIT performance is dismal (build 17134). 2387 2396 * 2388 2397 * Our proof of concept implementation with a kernel runloop (i.e. not using … … 2413 2422 * there will only be real gains if the exitting instructions are tightly 2414 2423 * packed. 2424 * 2425 * 2426 * - Unable to access WHvX64RegisterMsrMtrrCap on AMD Ryzen (build 17134). 2427 * 2428 * 2429 * - On AMD Ryzen grub/debian 9.0 ends up with a unrecoverable exception 2430 * when IA32_MTRR_PHYSMASK0 is written. 
2431 * 2432 * 2433 * - Need to figure out how to emulate X2APIC (AMD Ryzen), doesn't work with 2434 * debian 9.0/64. 2415 2435 * 2416 2436 * -
trunk/src/VBox/VMM/include/NEMInternal.h
r72343 r72358 245 245 STAMCOUNTER StatExitCpuId; 246 246 STAMCOUNTER StatExitMsr; 247 STAMCOUNTER StatExitUnrecoverable; 247 248 STAMCOUNTER StatGetMsgTimeout; 248 249 STAMCOUNTER StatStopCpuSuccess;
Note: See TracChangeset for help on using the changeset viewer.