Changeset 72877 in vbox for trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
- Timestamp: Jul 4, 2018 2:27:12 PM
- File: 1 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with "+"
- Removed: prefixed with "-"
- Elided context: "…"
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r72874 → r72877 (hmR0VmxExitWrmsr, around line 11895):

 {
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    int rc = VINF_SUCCESS;
-
-    /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. FS, GS (base) can be accessed by MSR writes. */
-    rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR0
-                                        | CPUMCTX_EXTRN_RFLAGS
-                                        | CPUMCTX_EXTRN_SS
-                                        | CPUMCTX_EXTRN_FS
-                                        | CPUMCTX_EXTRN_GS);
-    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
-        rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
+
+    /** @todo Optimize this: We currently drag in the whole MSR state
+     *        (CPUMCTX_EXTRN_ALL_MSRS) here.  We should optimize this to only get
+     *        the MSRs required.  That would require changes to IEM and possibly
+     *        CPUM too.  (Should probably do it in a lazy fashion from
+     *        CPUMAllMsrs.cpp.) */
+    uint32_t const idMsr = pMixedCtx->ecx; /* Save it. */
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
     AssertRCReturn(rc, rc);
-    Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax));
-
-    rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
-    AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
+
+    Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", idMsr, pMixedCtx->edx, pMixedCtx->eax));
+
+    VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmxTransient->cbInstr);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
 
-    if (RT_SUCCESS(rc))
-    {
-        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
-
+    if (rcStrict == VINF_SUCCESS)
+    {
         /* If this is an X2APIC WRMSR access, update the APIC state as well. */
-        if (   pMixedCtx->ecx == MSR_IA32_APICBASE
-            || (   pMixedCtx->ecx >= MSR_IA32_X2APIC_START
-                && pMixedCtx->ecx <= MSR_IA32_X2APIC_END))
+        if (   idMsr == MSR_IA32_APICBASE
+            || (   idMsr >= MSR_IA32_X2APIC_START
+                && idMsr <= MSR_IA32_X2APIC_END))
         {
             /*
…
             ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
         }
-        else if (pMixedCtx->ecx == MSR_IA32_TSC)    /* Windows 7 does this during bootup. See @bugref{6398}. */
+        else if (idMsr == MSR_IA32_TSC)             /* Windows 7 does this during bootup. See @bugref{6398}. */
             pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
-        else if (pMixedCtx->ecx == MSR_K6_EFER)
+        else if (idMsr == MSR_K6_EFER)
         {
             /*
…
         if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
         {
-            switch (pMixedCtx->ecx)
+            switch (idMsr)
             {
                 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break;
…
                 default:
                 {
-                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
+                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
                         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
-                    else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+                    else if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
                         ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS);
                     break;
…
         {
             /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
-            switch (pMixedCtx->ecx)
+            switch (idMsr)
             {
                 case MSR_IA32_SYSENTER_CS:
…
                 case MSR_K8_GS_BASE:
                 {
-                    AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
+                    AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", idMsr));
                     HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
                 }
…
                 default:
                 {
-                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
+                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr))
                     {
                         /* EFER writes are always intercepted, see hmR0VmxExportGuestMsrs(). */
-                        if (pMixedCtx->ecx != MSR_K6_EFER)
+                        if (idMsr != MSR_K6_EFER)
                         {
                             AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
-                                             pMixedCtx->ecx));
+                                             idMsr));
                             HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
                         }
                     }
 
-                    if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+                    if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
                     {
                         VMXMSREXITREAD  enmRead;
                         VMXMSREXITWRITE enmWrite;
-                        int rc2 = hmR0VmxGetMsrPermission(pVCpu, pMixedCtx->ecx, &enmRead, &enmWrite);
+                        int rc2 = hmR0VmxGetMsrPermission(pVCpu, idMsr, &enmRead, &enmWrite);
                         AssertRCReturn(rc2, rc2);
                         if (enmWrite == VMXMSREXIT_PASSTHRU_WRITE)
                         {
-                            AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
+                            AssertMsgFailed(("Unexpected WRMSR for passthru, lazy-restore MSR. ecx=%#RX32\n", idMsr));
                             HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
                         }
…
 #endif /* VBOX_STRICT */
     }
-    return rc;
+    else
+        AssertMsg(   rcStrict == VINF_CPUM_R3_MSR_WRITE
+                  || rcStrict == VINF_IEM_RAISED_XCPT,
+                  ("Unexpected IEMExecDecodedWrmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+
+    return rcStrict;
 }
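What the change does: WRMSR emulation moves from EMInterpretWrmsr() to IEMExecDecodedWrmsr(). The handler now reads the exit instruction length from the VMCS and passes it to IEM as cbInstr; the IEMExecDecoded path advances the guest RIP as part of the emulation (note the dropped hmR0VmxAdvanceGuestRip() call), and non-success cases surface as VBOXSTRICTRC codes (VINF_CPUM_R3_MSR_WRITE, VINF_IEM_RAISED_XCPT) instead of VERR_EM_INTERPRETER. The post-write bookkeeping survives unchanged, apart from caching pMixedCtx->ecx in idMsr up front (the diff's "/* Save it. */"). As a reading aid, here is a minimal standalone sketch of just that classification step, using the architectural MSR numbers from the Intel SDM; WRMSRSIDEEFFECT and ClassifyWrmsr() are illustrative names invented for this sketch, not VirtualBox identifiers.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Architectural MSR numbers (Intel SDM vol. 4 / AMD APM). */
#define MSR_IA32_TSC          UINT32_C(0x00000010)
#define MSR_IA32_APICBASE     UINT32_C(0x0000001b)
#define MSR_IA32_X2APIC_START UINT32_C(0x00000800)
#define MSR_IA32_X2APIC_END   UINT32_C(0x000008ff)
#define MSR_K6_EFER           UINT32_C(0xc0000080)

/* Illustrative stand-ins for the HM_CHANGED_* / transient flags the real
   handler sets; the enum and its values are invented for this sketch. */
typedef enum WRMSRSIDEEFFECT
{
    WRMSR_EFFECT_NONE = 0,          /* no extra bookkeeping needed            */
    WRMSR_EFFECT_APIC_STATE,        /* re-sync virtual-APIC state (TPR)       */
    WRMSR_EFFECT_TSC_OFFSETTING,    /* redo TSC offsetting + preemption timer */
    WRMSR_EFFECT_EFER               /* EFER write: may affect exec controls   */
} WRMSRSIDEEFFECT;

/* Mirrors the idMsr classification performed after IEMExecDecodedWrmsr()
   returns VINF_SUCCESS in the new hmR0VmxExitWrmsr(). */
static WRMSRSIDEEFFECT ClassifyWrmsr(uint32_t idMsr)
{
    if (   idMsr == MSR_IA32_APICBASE
        || (idMsr >= MSR_IA32_X2APIC_START && idMsr <= MSR_IA32_X2APIC_END))
        return WRMSR_EFFECT_APIC_STATE;
    if (idMsr == MSR_IA32_TSC)      /* Windows 7 writes this during bootup. */
        return WRMSR_EFFECT_TSC_OFFSETTING;
    if (idMsr == MSR_K6_EFER)
        return WRMSR_EFFECT_EFER;
    return WRMSR_EFFECT_NONE;
}

int main(void)
{
    /* 0x808 is the x2APIC TPR MSR; 0x174 is IA32_SYSENTER_CS. */
    uint32_t const aTests[] = { 0x0000001b, 0x00000808, 0x00000010, 0xc0000080, 0x00000174 };
    for (size_t i = 0; i < sizeof(aTests) / sizeof(aTests[0]); i++)
        printf("MSR 0x%08" PRIx32 " -> effect %d\n", aTests[i], ClassifyWrmsr(aTests[i]));
    return 0;
}

The single compare pair covers all x2APIC registers because they occupy the contiguous MSR block 0x800–0x8ff, so the APIC-state invalidation needs no per-register switch.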