Changeset 72877 in vbox for trunk/src/VBox
- Timestamp: Jul 4, 2018 2:27:12 PM
- svn:sync-xref-src-repo-rev: 123422
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/EMAll.cpp (r72674 → r72877)

 /**
- * Interpret WRMSR
- *
- * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pRegFrame   The register frame.
- */
-VMM_INT_DECL(int) EMInterpretWrmsr(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame)
-{
-    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
-
-    /* Check the current privilege level, this instruction is supervisor only. */
-    if (CPUMGetGuestCPL(pVCpu) != 0)
-    {
-        Log4(("EM: Refuse WRMSR: CPL != 0\n"));
-        return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
-    }
-
-    VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
-    if (rcStrict != VINF_SUCCESS)
-    {
-        Log4(("EM: Refuse WRMSR: CPUMSetGuestMsr returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
-        Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_WRITE);
-        return VERR_EM_INTERPRETER;
-    }
-    LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
-             RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
-    NOREF(pVM);
-    return VINF_SUCCESS;
-}
-
-
-/**
  * Interpret DRx write.
  *
…
 {
     NOREF(pDis); NOREF(pvFault); NOREF(pcbSize);
-    return EMInterpretWrmsr(pVM, pVCpu, pRegFrame);
+    Assert(pRegFrame == CPUMGetGuestCtxCore(pVCpu));
+
+    /* Check the current privilege level, this instruction is supervisor only. */
+    if (CPUMGetGuestCPL(pVCpu) != 0)
+    {
+        Log4(("EM: Refuse WRMSR: CPL != 0\n"));
+        return VERR_EM_INTERPRETER; /** @todo raise \#GP(0) */
+    }
+
+    VBOXSTRICTRC rcStrict = CPUMSetGuestMsr(pVCpu, pRegFrame->ecx, RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx));
+    if (rcStrict != VINF_SUCCESS)
+    {
+        Log4(("EM: Refuse WRMSR: CPUMSetGuestMsr returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+        Assert(rcStrict == VERR_CPUM_RAISE_GP_0 || rcStrict == VERR_EM_INTERPRETER || rcStrict == VINF_CPUM_R3_MSR_WRITE);
+        return VERR_EM_INTERPRETER;
+    }
+    LogFlow(("EMInterpretWrmsr %s (%x) val=%RX64\n", emMSRtoString(pRegFrame->ecx), pRegFrame->ecx,
+             RT_MAKE_U64(pRegFrame->eax, pRegFrame->edx)));
+    NOREF(pVM);
+    return VINF_SUCCESS;
 }
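The hunk above drops the exported EMInterpretWrmsr() and folds its body into the static interpreter callback; the WRMSR semantics are unchanged: ECX selects the MSR and EDX:EAX carry the high and low halves of the value. A minimal standalone sketch (plain C, not VBox code; ComposeWrmsrValue is a hypothetical name) of what RT_MAKE_U64(eax, edx) computes:

    #include <stdint.h>

    /* EDX holds bits 63:32 of the MSR value, EAX holds bits 31:0. */
    static inline uint64_t ComposeWrmsrValue(uint32_t eax, uint32_t edx)
    {
        return ((uint64_t)edx << 32) | eax;
    }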
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r72876 → r72877)

 DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
 {
-    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
+    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
…
  *
  * @returns Strict VBox status code.
- * @retval  VINF_EM_RESCHEDULE (VINF_IEM_RAISED_XCPT) if exception is raised.
+ * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
  *
  * @param   pVCpu   The cross context virtual CPU structure.
…
  *
  * @returns Strict VBox status code.
- * @retval  VINF_EM_RESCHEDULE (VINF_IEM_RAISED_XCPT) if exception is raised.
+ * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
  *
  * @param   pVCpu   The cross context virtual CPU structure.
…
     iemInitExec(pVCpu, false /*fBypassHandlers*/);
     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
+    Assert(!pVCpu->iem.s.cActiveMappings);
+    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
+}
+
+
+/**
+ * Interface for HM and EM to emulate the WRMSR instruction.
+ *
+ * @returns Strict VBox status code.
+ * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   cbInstr     The instruction length in bytes.
+ *
+ * @remarks Not all of the state needs to be synced in.  Recommended
+ *          to include CPUMCTX_EXTRN_TSC_AUX, to avoid extra fetch call.
+ */
+VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
+{
+    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
+    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
+                        | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
+    //CPUMCTX_EXTRN_RSP
+
+    iemInitExec(pVCpu, false /*fBypassHandlers*/);
+    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
     Assert(!pVCpu->iem.s.cActiveMappings);
     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
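The new IEMExecDecodedWrmsr() follows IEM's decoded-instruction pattern: the VM-exit handler already knows which instruction trapped, so it syncs in only the registers WRMSR consumes (RCX, RAX, RDX plus the MSR state) and passes the instruction length, letting IEM advance RIP without re-fetching and decoding guest memory. A toy model of that calling convention (standalone C; TOYCPU and ToyExecDecodedWrmsr are made-up names, not the VBox API):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct TOYCPU { uint64_t rip; uint32_t eax, ecx, edx; } TOYCPU;

    /* The caller supplies cbInstr so RIP can be advanced without
       decoding the guest instruction stream again. */
    static int ToyExecDecodedWrmsr(TOYCPU *pCpu, uint8_t cbInstr)
    {
        uint64_t uValue = ((uint64_t)pCpu->edx << 32) | pCpu->eax;
        printf("WRMSR msr=%#x val=%#llx\n", pCpu->ecx, (unsigned long long)uValue);
        pCpu->rip += cbInstr; /* skip past the trapped WRMSR */
        return 0;
    }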
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r72820 → r72877)

 {
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+    uint32_t const idMsr = pCtx->ecx;
+    /** @todo Optimize this: We don't need to get much of the MSR state here
+     *        since we're only updating.  CPUMAllMsrs.cpp can ask for what it needs and
+     *        clear the applicable extern flags. */
     HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0
                                     | CPUMCTX_EXTRN_RFLAGS
                                     | CPUMCTX_EXTRN_SS
-                                    | CPUMCTX_EXTRN_ALL_MSRS);
+                                    | CPUMCTX_EXTRN_ALL_MSRS
+                                    | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);

     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
-    Log4Func(("idMsr=%#RX32\n", pCtx->ecx));
+    Log4Func(("idMsr=%#RX32\n", idMsr));
…
     if (   pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive
-        && pCtx->ecx == MSR_K8_LSTAR)
+        && idMsr == MSR_K8_LSTAR)
     {
         if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr)
…
      * Handle regular MSR writes.
      */
-    int rc;
+    VBOXSTRICTRC rcStrict;
     bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
     if (fSupportsNextRipSave)
     {
-        rc = EMInterpretWrmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
-        if (RT_LIKELY(rc == VINF_SUCCESS))
-        {
-            pCtx->rip = pVmcb->ctrl.u64NextRIP;
-            HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
-        }
+        rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmcb->ctrl.u64NextRIP - pCtx->rip);
+        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
         else
-            AssertMsg(   rc == VERR_EM_INTERPRETER
-                      || rc == VINF_CPUM_R3_MSR_WRITE, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc));
+            AssertMsg(   rcStrict == VINF_IEM_RAISED_XCPT
+                      || rcStrict == VINF_CPUM_R3_MSR_WRITE,
+                      ("Unexpected IEMExecDecodedWrmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     }
     else
     {
-        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
-        rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */));
-        if (RT_LIKELY(rc == VINF_SUCCESS))
-            HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);     /* RIP updated by EMInterpretInstruction(). */
+        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
+        rcStrict = IEMExecOne(pVCpu);
+        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);   /* RIP updated by EMInterpretInstruction(). */
         else
-            AssertMsg(   rc == VERR_EM_INTERPRETER
-                      || rc == VINF_CPUM_R3_MSR_WRITE, ("hmR0SvmExitMsr: WrMsr. EMInterpretInstruction failed rc=%Rrc\n", rc));
-    }
-
-    if (rc == VINF_SUCCESS)
+            AssertMsg(   rcStrict == VINF_IEM_RAISED_XCPT
+                      || rcStrict == VINF_CPUM_R3_MSR_WRITE, ("Unexpected IEMExecOne status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+    }
+
+    if (rcStrict == VINF_SUCCESS)
     {
         /* If this is an X2APIC WRMSR access, update the APIC TPR state. */
-        if (   pCtx->ecx >= MSR_IA32_X2APIC_START
-            && pCtx->ecx <= MSR_IA32_X2APIC_END)
+        if (   idMsr >= MSR_IA32_X2APIC_START
+            && idMsr <= MSR_IA32_X2APIC_END)
         {
             /*
…
         else
         {
-            switch (pCtx->ecx)
+            switch (idMsr)
             {
                 case MSR_IA32_TSC: pSvmTransient->fUpdateTscOffsetting = true; break;
…
     /* RIP has been updated by above after EMInterpretWrmsr() or by EMInterpretInstruction(). */
-    return rc;
+    return VBOXSTRICTRC_TODO(rcStrict);
 }
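The two branches above differ in how the instruction length is obtained. With the AMD-V NRIP-save feature the VMCB records the next sequential RIP, so the handler derives the WRMSR length by subtraction and calls IEMExecDecodedWrmsr(); without it, IEMExecOne() fetches and decodes the instruction itself. A sketch of that subtraction (standalone C with a hypothetical helper name; WRMSR itself always encodes as the two bytes 0F 30):

    #include <assert.h>
    #include <stdint.h>

    /* Derive the intercepted instruction's length from the VMCB's
       next-RIP field, as the NRIP-save branch above does. */
    static uint8_t LenFromNextRip(uint64_t u64NextRIP, uint64_t rip)
    {
        uint64_t cb = u64NextRIP - rip;
        assert(cb >= 1 && cb <= 15); /* x86 instructions are at most 15 bytes */
        return (uint8_t)cb;
    }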
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72874 → r72877)

 {
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    int rc = VINF_SUCCESS;
-
-    /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. FS, GS (base) can be accessed by MSR writes. */
-    rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR0
-                                        | CPUMCTX_EXTRN_RFLAGS
-                                        | CPUMCTX_EXTRN_SS
-                                        | CPUMCTX_EXTRN_FS
-                                        | CPUMCTX_EXTRN_GS);
-    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
-        rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
+
+    /** @todo Optimize this: We currently drag in in the whole MSR state
+     * (CPUMCTX_EXTRN_ALL_MSRS) here.  We should optimize this to only get
+     * MSRs required.  That would require changes to IEM and possibly CPUM too.
+     * (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
+    uint32_t const idMsr = pMixedCtx->ecx; /* Save it. */
+    int rc  = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc     |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
     AssertRCReturn(rc, rc);
-    Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax));
-
-    rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
-    AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
+
+    Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pMixedCtx->edx, pMixedCtx->eax));
+
+    VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbInstr);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);

-    if (RT_SUCCESS(rc))
-    {
-        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
-
+    if (rcStrict == VINF_SUCCESS)
+    {
         /* If this is an X2APIC WRMSR access, update the APIC state as well. */
-        if (   pMixedCtx->ecx == MSR_IA32_APICBASE
-            || (   pMixedCtx->ecx >= MSR_IA32_X2APIC_START
-                && pMixedCtx->ecx <= MSR_IA32_X2APIC_END))
+        if (   idMsr == MSR_IA32_APICBASE
+            || (   idMsr >= MSR_IA32_X2APIC_START
+                && idMsr <= MSR_IA32_X2APIC_END))
         {
             /*
…
             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
         }
-        else if (pMixedCtx->ecx == MSR_IA32_TSC)    /* Windows 7 does this during bootup. See @bugref{6398}. */
+        else if (idMsr == MSR_IA32_TSC)             /* Windows 7 does this during bootup. See @bugref{6398}. */
             pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
-        else if (pMixedCtx->ecx == MSR_K6_EFER)
+        else if (idMsr == MSR_K6_EFER)
         {
             /*
…
         if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
         {
-            switch (pMixedCtx->ecx)
+            switch (idMsr)
             {
                 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
…
                 default:
                 {
-                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
+                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
                         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
-                    else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+                    else if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
                         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
                     break;
…
         {
             /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
-            switch (pMixedCtx->ecx)
+            switch (idMsr)
             {
                 case MSR_IA32_SYSENTER_CS:
…
                 case MSR_K8_GS_BASE:
                 {
-                    AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
+                    AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
                     HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
                 }
…
                 default:
                 {
-                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
+                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
                     {
                         /* EFER writes are always intercepted, see hmR0VmxExportGuestMsrs(). */
-                        if (pMixedCtx->ecx != MSR_K6_EFER)
+                        if (idMsr != MSR_K6_EFER)
                         {
                             AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
-                                             pMixedCtx->ecx));
+                                             idMsr));
                             HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
                         }
                     }

-                    if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+                    if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
                     {
                         VMXMSREXITREAD  enmRead;
                         VMXMSREXITWRITE enmWrite;
-                        int rc2 = hmR0VmxGetMsrPermission(pVCpu, pMixedCtx->ecx, &enmRead, &enmWrite);
+                        int rc2 = hmR0VmxGetMsrPermission(pVCpu, idMsr, &enmRead, &enmWrite);
                         AssertRCReturn(rc2, rc2);
                         if (enmWrite == VMXMSREXIT_PASSTHRU_WRITE)
                         {
-                            AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
+                            AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
                             HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
                         }
…
 #endif /* VBOX_STRICT */
     }
-    return rc;
+    else
+        AssertMsg(   rcStrict == VINF_CPUM_R3_MSR_WRITE
+                  || rcStrict == VINF_IEM_RAISED_XCPT,
+                  ("Unexpected IEMExecDecodedWrmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+
+    return rcStrict;
 }
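After a successful write, both the SVM and VMX handlers test whether the MSR index lies in the x2APIC range before forcing an APIC-state resync; idMsr is latched from ECX up front since emulation may refresh the guest context. A minimal version of the range check (standalone C; the 0x800..0x8FF bounds are the architectural x2APIC MSR space and are assumed here to match VBox's MSR_IA32_X2APIC_START/END):

    #include <stdbool.h>
    #include <stdint.h>

    #define X2APIC_MSR_START UINT32_C(0x800) /* assumed MSR_IA32_X2APIC_START */
    #define X2APIC_MSR_END   UINT32_C(0x8ff) /* assumed MSR_IA32_X2APIC_END */

    /* True when a WRMSR targets an x2APIC register, in which case the
       virtual APIC state must be re-synced, as in the handlers above. */
    static bool IsX2ApicMsr(uint32_t idMsr)
    {
        return idMsr >= X2APIC_MSR_START && idMsr <= X2APIC_MSR_END;
    }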