VirtualBox

Changeset 72358 in vbox


Timestamp: May 28, 2018 2:47:51 PM
Author: vboxsync
Message: NEM: Sync more MSR state; don't treat unrecoverable exceptions as triple faults right away, but let IEM have a look first (needs more checking). bugref:9044
Location: trunk
Files: 8 edited

Legend: context lines are unmarked, added lines are prefixed with "+", removed lines with "-", and "…" marks elided lines between hunks.
  • trunk/include/VBox/vmm/cpum.h (r72065 → r72358)

                                        uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
    VMMDECL(uint64_t)       CPUMGetGuestEFER(PVMCPU pVCpu);
  + VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32MtrrCap(PVMCPU pVCpu);
  + VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32FeatureControl(PVMCPU pVCpu);
    VMMDECL(VBOXSTRICTRC)   CPUMQueryGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t *puValue);
    VMMDECL(VBOXSTRICTRC)   CPUMSetGuestMsr(PVMCPU pVCpu, uint32_t idMsr, uint64_t uValue);
    …
    VMMDECL(PCCPUMCTXCORE)  CPUMGetHyperCtxCore(PVMCPU pVCpu);
    VMMDECL(PCPUMCTX)       CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
  + VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu);
    VMMDECL(PCCPUMCTXCORE)  CPUMGetGuestCtxCore(PVMCPU pVCpu);
    VMM_INT_DECL(int)       CPUMRawEnter(PVMCPU pVCpu);
  • trunk/include/iprt/nt/hyperv.h (r72307 → r72358)

    AssertCompile(HvX64RegisterGdtr == 0x00070001);
    AssertCompile(HvX64RegisterInitialApicId == 0x0008000c);
  + AssertCompile(HvX64RegisterMtrrCap == 0x0008000d);
    AssertCompile(HvX64RegisterMtrrDefType == 0x0008000e);
    AssertCompile(HvX64RegisterMtrrPhysBaseF == 0x0008001f);
  • trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp (r72208 → r72358)

  + /**
  +  * Get fixed IA32_FEATURE_CONTROL value for NEM and cpumMsrRd_Ia32FeatureControl.
  +  *
  +  * @returns Fixed IA32_FEATURE_CONTROL value.
  +  * @param   pVCpu           The cross context per CPU structure.
  +  */
  + VMM_INT_DECL(uint64_t) CPUMGetGuestIa32FeatureControl(PVMCPU pVCpu)
  + {
  +     RT_NOREF_PV(pVCpu);
  +     return 1; /* Locked, no VT-X, no SYSENTER micromanagement. */
  + }
  +
    /** @callback_method_impl{FNCPUMRDMSR} */
    static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32FeatureControl(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
    {
  -     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
  -     *puValue = 1; /* Locked, no VT-X, no SYSENTER micromanagement. */
  +     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
  +     *puValue = CPUMGetGuestIa32FeatureControl(pVCpu);
        return VINF_SUCCESS;
    }
    …
  - /** @callback_method_impl{FNCPUMRDMSR} */
  - static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32MtrrCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
  - {
  -     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
  + /**
  +  * Get fixed IA32_MTRR_CAP value for NEM and cpumMsrRd_Ia32MtrrCap.
  +  *
  +  * @returns Fixed IA32_MTRR_CAP value.
  +  * @param   pVCpu           The cross context per CPU structure.
  +  */
  + VMM_INT_DECL(uint64_t) CPUMGetGuestIa32MtrrCap(PVMCPU pVCpu)
  + {
  +     RT_NOREF_PV(pVCpu);

        /* This is currently a bit weird. :-) */
    …
        bool const      fFixedRangeRegisters            = false;
        bool const      fWriteCombiningType             = false;
  -     *puValue = cVariableRangeRegs
  -              | (fFixedRangeRegisters            ? RT_BIT_64(8)  : 0)
  -              | (fWriteCombiningType             ? RT_BIT_64(10) : 0)
  -              | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
  +     return cVariableRangeRegs
  +          | (fFixedRangeRegisters            ? RT_BIT_64(8)  : 0)
  +          | (fWriteCombiningType             ? RT_BIT_64(10) : 0)
  +          | (fSystemManagementRangeRegisters ? RT_BIT_64(11) : 0);
  + }
  +
  + /** @callback_method_impl{FNCPUMRDMSR} */
  + static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_Ia32MtrrCap(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
  + {
  +     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
  +     *puValue = CPUMGetGuestIa32MtrrCap(pVCpu);
        return VINF_SUCCESS;
    }
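For reference, the value CPUMGetGuestIa32MtrrCap assembles follows the standard IA32_MTRR_CAP layout: VCNT in bits 7:0, FIX in bit 8, WC in bit 10 and SMRR in bit 11. A minimal stand-alone decoding sketch (not part of the changeset; the function name and output format are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode an IA32_MTRR_CAP value such as the fixed one returned by
       CPUMGetGuestIa32MtrrCap().  Purely illustrative helper. */
    static void DecodeIa32MtrrCap(uint64_t uMtrrCap)
    {
        unsigned cVariableRanges = (unsigned)(uMtrrCap & 0xff);  /* VCNT, bits 7:0 */
        printf("VCNT=%u FIX=%u WC=%u SMRR=%u\n",
               cVariableRanges,
               (unsigned)((uMtrrCap >>  8) & 1),   /* fixed-range MTRRs supported     */
               (unsigned)((uMtrrCap >> 10) & 1),   /* write-combining type supported  */
               (unsigned)((uMtrrCap >> 11) & 1));  /* SMRR interface supported        */
    }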
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r72129 → r72358)

    }

  +
  + /**
  +  * Queries the pointer to the internal CPUMCTXMSRS structure.
  +  *
  +  * This is for NEM only.
  +  *
  +  * @returns The CPUMCTXMSRS pointer.
  +  * @param   pVCpu       The cross context virtual CPU structure.
  +  */
  + VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu)
  + {
  +     return &pVCpu->cpum.s.GuestMsrs;
  + }
  +
  +
    VMMDECL(int) CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
    {
    …
    }

  +
    VMMDECL(int) CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit)
    {
    …
    }

  +
    VMMDECL(int) CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr)
    {
    …
        return VINF_SUCCESS; /* formality, consider it void. */
    }
  +

    VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
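The new accessor gives NEM code direct access to the MSR block kept alongside the guest context; the ring-0 exporter later in this changeset uses it in exactly this way. A trimmed usage sketch (assuming a valid pVCpu inside VMM code; error handling omitted):

    /* Sketch: read and update one of the MTRR MSRs kept in CPUMCTXMSRS. */
    PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
    uint64_t uDefType = pCtxMsrs->msr.MtrrDefType;          /* current IA32_MTRR_DEF_TYPE  */
    pCtxMsrs->msr.MtrrDefType = uDefType | RT_BIT_64(11);   /* e.g. set the MTRR enable bit */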
  • trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h (r72308 → r72358)

     * Deals with unrecoverable exception (triple fault).
     *
  +  * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
  +  * here too.  So we'll leave it to IEM to decide.
  +  *
     * @returns Strict VBox status code.
     * @param   pVCpu           The cross context per CPU structure.
     * @param   pMsgHdr         The message header.
     * @param   pCtx            The register context.
  +  * @param   pGVCpu          The global (ring-0) per CPU structure (NULL in r3).
     */
    NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageUnrecoverableException(PVMCPU pVCpu,
                                                                             HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr,
  -                                                                          PCPUMCTX pCtx)
  +                                                                          PCPUMCTX pCtx, PGVMCPU pGVCpu)
    {
        /*
    …
        AssertMsg(pMsgHdr->InstructionLength < 0x10, ("%#x\n", pMsgHdr->InstructionLength));

  + #if 0
        /*
         * Just copy the state we've got and handle it in the loop for now.
    …
             pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, pMsgHdr->Rflags));
        return VINF_EM_TRIPLE_FAULT;
  + #else
  +     /*
  +      * Let IEM decide whether this is really it.
  +      */
  + /** @todo check if this happens because of incorrectly pending interrupts or something. */
  +     nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr);
  +     VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
  +     if (rcStrict == VINF_SUCCESS)
  +     {
  +         rcStrict = IEMExecOne(pVCpu);
  +         if (rcStrict == VINF_SUCCESS)
  +         {
  +             Log(("UnrecovExit/%u: %04x:%08RX64: RFL=%#RX64 -> VINF_SUCCESS\n",
  +                  pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, pMsgHdr->Rflags ));
  +             return VINF_SUCCESS;
  +         }
  +         if (rcStrict == VINF_EM_TRIPLE_FAULT)
  +             Log(("UnrecovExit/%u: %04x:%08RX64: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n",
  +                  pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, pMsgHdr->Rflags ));
  +         else
  +             Log(("UnrecovExit/%u: %04x:%08RX64: RFL=%#RX64 -> %Rrc (IEMExecOne)\n",
  +                  pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
  +     }
  +     else
  +         Log(("UnrecovExit/%u: %04x:%08RX64: RFL=%#RX64 -> %Rrc (state import)\n",
  +              pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
  +     return rcStrict;
  + #endif
    }

    …
                case HvMessageTypeUnrecoverableException:
                    Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
  -                 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pCtx);
  +                 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
  +                 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pCtx, pGVCpu);

                case HvMessageTypeInvalidVpRegisterValue:
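The WRMSR 0x201 mentioned in the doc comment above is an ordinary guest-side MTRR programming step. Roughly what a boot loader does looks like the sketch below; this is purely illustrative (the base/mask values are made up, and real code also disables caching and the MTRRs around the update):

    #include <stdint.h>

    /* Guest-side sketch of the kind of access that ends up in the handler above. */
    static inline void GuestWrMsr(uint32_t idMsr, uint64_t uValue)
    {
        __asm__ __volatile__("wrmsr" : : "c" (idMsr), "a" ((uint32_t)uValue), "d" ((uint32_t)(uValue >> 32)));
    }

    void GuestProgramVariableMtrr0(void)
    {
        GuestWrMsr(0x200 /* IA32_MTRR_PHYSBASE0 */, 0x0000000000000006ULL);            /* base 0, type WB   */
        GuestWrMsr(0x201 /* IA32_MTRR_PHYSMASK0 */, 0x0000ffffc0000000ULL | (1 << 11)); /* mask + valid bit */
    }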
  • trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp (r72343 → r72358)

            pInput->Elements[iReg].Value.Reg64          = pCtx->msrPAT;
            iReg++;
  + #if 0 /** @todo HvX64RegisterMtrrCap is read only?  Seems it's not even readable. */
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrCap;
  +         pInput->Elements[iReg].Value.Reg64          = CPUMGetGuestIa32MtrrCap(pVCpu);
  +         iReg++;
  + #endif
  +
  +         PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
  +
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrDefType;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrDefType;
  +         iReg++;
  +
  +         /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
  +
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrFix64k00000;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrFix64K_00000;
  +         iReg++;
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrFix16k80000;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrFix16K_80000;
  +         iReg++;
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrFix16kA0000;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrFix16K_A0000;
  +         iReg++;
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrFix4kC0000;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrFix4K_C0000;
  +         iReg++;
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrFix4kC8000;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrFix4K_C8000;
  +         iReg++;
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrFix4kD0000;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrFix4K_D0000;
  +         iReg++;
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrFix4kD8000;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrFix4K_D8000;
  +         iReg++;
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrFix4kE0000;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrFix4K_E0000;
  +         iReg++;
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrFix4kE8000;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrFix4K_E8000;
  +         iReg++;
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrFix4kF0000;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrFix4K_F0000;
  +         iReg++;
  +         HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +         pInput->Elements[iReg].Name                 = HvX64RegisterMtrrFix4kF8000;
  +         pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MtrrFix4K_F8000;
  +         iReg++;
  +
  +         const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
  +         if (enmCpuVendor != CPUMCPUVENDOR_AMD)
  +         {
  +             HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +             pInput->Elements[iReg].Name                 = HvX64RegisterIa32MiscEnable;
  +             pInput->Elements[iReg].Value.Reg64          = pCtxMsrs->msr.MiscEnable;
  +             iReg++;
  +             HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
  +             pInput->Elements[iReg].Name                 = HvX64RegisterIa32FeatureControl;
  +             pInput->Elements[iReg].Value.Reg64          = CPUMGetGuestIa32FeatureControl(pVCpu);
  +             iReg++;
  +         }
        }

    …
        }

  +     const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
        if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
        {
            pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
            pInput->Names[iReg++] = HvX64RegisterPat;
  + #if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
  +         pInput->Names[iReg++] = HvX64RegisterMtrrCap;
  + #endif
  +         pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
  +         pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
  +         pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
  +         pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
  +         pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
  +         pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
  +         pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
  +         pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
  +         pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
  +         pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
  +         pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
  +         pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
  +         if (enmCpuVendor != CPUMCPUVENDOR_AMD)
  +         {
  +             pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
  + #ifdef LOG_ENABLED
  +             pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
  + #endif
  +         }
        }

    …
            if (paValues[iReg].Reg64 != pCtx->msrEFER)
            {
  +             Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
                if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
                    PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
    …
        {
            Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
  +         if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
  +             Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
            pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
            iReg++;
    …
        {
            Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
  +         if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
  +             Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
            pCtx->SysEnter.cs = paValues[iReg].Reg64;
            iReg++;
  +
            Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
  +         if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
  +             Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
            pCtx->SysEnter.eip = paValues[iReg].Reg64;
            iReg++;
  +
            Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
  +         if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
  +             Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
            pCtx->SysEnter.esp = paValues[iReg].Reg64;
            iReg++;
    …
        {
            Assert(pInput->Names[iReg] == HvX64RegisterStar);
  +         if (pCtx->msrSTAR != paValues[iReg].Reg64)
  +             Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
            pCtx->msrSTAR   = paValues[iReg].Reg64;
            iReg++;
  +
            Assert(pInput->Names[iReg] == HvX64RegisterLstar);
  +         if (pCtx->msrLSTAR != paValues[iReg].Reg64)
  +             Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
            pCtx->msrLSTAR  = paValues[iReg].Reg64;
            iReg++;
  +
            Assert(pInput->Names[iReg] == HvX64RegisterCstar);
  +         if (pCtx->msrCSTAR != paValues[iReg].Reg64)
  +             Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
            pCtx->msrCSTAR  = paValues[iReg].Reg64;
            iReg++;
  +
            Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
  +         if (pCtx->msrSFMASK != paValues[iReg].Reg64)
  +             Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
            pCtx->msrSFMASK = paValues[iReg].Reg64;
            iReg++;
    …
        {
            Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
  -         if (paValues[iReg].Reg64 != APICGetBaseMsrNoCheck(pVCpu))
  -         {
  +         const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
  +         if (paValues[iReg].Reg64 != uOldBase)
  +         {
  +             Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
  +                   pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
                VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
                Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
    …

            Assert(pInput->Names[iReg] == HvX64RegisterPat);
  +         if (pCtx->msrPAT != paValues[iReg].Reg64)
  +             Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
            pCtx->msrPAT    = paValues[iReg].Reg64;
            iReg++;
  +
  + #if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
  +         if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
  +             Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
  +         iReg++;
  + #endif
  +
  +         PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType)
  +             Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
  +
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000)
  +             Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000)
  +             Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000)
  +             Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000)
  +             Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000)
  +             Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000)
  +             Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000)
  +             Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000)
  +             Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000)
  +             Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000)
  +             Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
  +         if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000)
  +             Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
  +         pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
  +         iReg++;
  +
  +         if (enmCpuVendor != CPUMCPUVENDOR_AMD)
  +         {
  +             Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
  +             if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
  +                 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
  +             pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
  +             iReg++;
  + #ifdef LOG_ENABLED
  +             Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
  +             if (paValues[iReg].Reg64 != CPUMGetGuestIa32FeatureControl(pVCpu))
  +                 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32FeatureControl(pVCpu), paValues[iReg].Reg64));
  +             iReg++;
  + #endif
  +         }
  +
  +         /** @todo we don't save state for HvX64RegisterIa32FeatureControl */
        }

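For orientation, the fixed-range Hyper-V register names synced above map one-to-one onto the architectural MTRR MSRs. A small lookup table, using the standard Intel SDM MSR indices (illustrative; not part of the changeset):

    #include <stdint.h>

    /* Architectural MSR indices for the MTRR state synced above. */
    static const struct { uint32_t idMsr; const char *pszName; } g_aMtrrMsrs[] =
    {
        { 0x0fe, "IA32_MTRRCAP"           },  /* HvX64RegisterMtrrCap         */
        { 0x2ff, "IA32_MTRR_DEF_TYPE"     },  /* HvX64RegisterMtrrDefType     */
        { 0x250, "IA32_MTRR_FIX64K_00000" },  /* HvX64RegisterMtrrFix64k00000 */
        { 0x258, "IA32_MTRR_FIX16K_80000" },  /* HvX64RegisterMtrrFix16k80000 */
        { 0x259, "IA32_MTRR_FIX16K_A0000" },  /* HvX64RegisterMtrrFix16kA0000 */
        { 0x268, "IA32_MTRR_FIX4K_C0000"  },  /* HvX64RegisterMtrrFix4kC0000  */
        { 0x269, "IA32_MTRR_FIX4K_C8000"  },  /* HvX64RegisterMtrrFix4kC8000  */
        { 0x26a, "IA32_MTRR_FIX4K_D0000"  },  /* HvX64RegisterMtrrFix4kD0000  */
        { 0x26b, "IA32_MTRR_FIX4K_D8000"  },  /* HvX64RegisterMtrrFix4kD8000  */
        { 0x26c, "IA32_MTRR_FIX4K_E0000"  },  /* HvX64RegisterMtrrFix4kE0000  */
        { 0x26d, "IA32_MTRR_FIX4K_E8000"  },  /* HvX64RegisterMtrrFix4kE8000  */
        { 0x26e, "IA32_MTRR_FIX4K_F0000"  },  /* HvX64RegisterMtrrFix4kF0000  */
        { 0x26f, "IA32_MTRR_FIX4K_F8000"  },  /* HvX64RegisterMtrrFix4kF8000  */
    };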
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp (r72308 → r72358)

                                STAMR3RegisterF(pVM, &pNemCpu->StatExitCpuId,           STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of CPUID exits",                  "/NEM/CPU%u/ExitCpuId", iCpu);
                                STAMR3RegisterF(pVM, &pNemCpu->StatExitMsr,             STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of MSR access exits",             "/NEM/CPU%u/ExitMsr", iCpu);
  +                             STAMR3RegisterF(pVM, &pNemCpu->StatExitUnrecoverable,   STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of unrecoverable exits",          "/NEM/CPU%u/ExitUnrecoverable", iCpu);
                                STAMR3RegisterF(pVM, &pNemCpu->StatGetMsgTimeout,       STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of get message timeouts/alerts",  "/NEM/CPU%u/GetMsgTimeout", iCpu);
                                STAMR3RegisterF(pVM, &pNemCpu->StatStopCpuSuccess,      STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of successful CPU stops",         "/NEM/CPU%u/StopCpuSuccess", iCpu);
    …
             */
            /** @todo HvCallMapStatsPage */
  +
  +         /*
  +          * Adjust features.
  +          */
  +         /** @todo Figure out how to get X2APIC working on AMD (and possibly
  +          * Intel), but first figure out how to disable it dynamically. */
  +         /*CPUMR3ClearGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_X2APIC);*/
  +
            return VINF_SUCCESS;
        }
    …
     * Here are some observations (mostly against build 17101):
     *
  -  * - The VMEXIT performance is dismal (build 17101).
  +  * - The VMEXIT performance is dismal (build 17134).
     *
     *   Our proof of concept implementation with a kernel runloop (i.e. not using
    …
     *   there will only be real gains if the exiting instructions are tightly
     *   packed.
  +  *
  +  *
  +  * - Unable to access WHvX64RegisterMsrMtrrCap on AMD Ryzen (build 17134).
  +  *
  +  *
  +  * - On AMD Ryzen, grub/debian 9.0 ends up with an unrecoverable exception
  +  *   when IA32_MTRR_PHYSMASK0 is written.
  +  *
  +  *
  +  * - Need to figure out how to emulate X2APIC (AMD Ryzen); doesn't work with
  +  *   debian 9.0/64.
     *
     *
  • trunk/src/VBox/VMM/include/NEMInternal.h (r72343 → r72358)

        STAMCOUNTER                 StatExitCpuId;
        STAMCOUNTER                 StatExitMsr;
  +     STAMCOUNTER                 StatExitUnrecoverable;
        STAMCOUNTER                 StatGetMsgTimeout;
        STAMCOUNTER                 StatStopCpuSuccess;