Changeset 72881 in vbox
- Timestamp: Jul 4, 2018 3:19:39 PM
- svn:sync-xref-src-repo-rev: 123426
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r72878 → r72881

@@ -6672 +6672 @@
  * \#VMEXIT helper for read MSRs, see hmR0SvmExitMsr.
  *
- * @returns VBox status code.
+ * @returns Strict VBox status code.
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pVmcb   Pointer to the VM control block.
  */
-static int hmR0SvmExitReadMsr(PVMCPU pVCpu, PSVMVMCB pVmcb)
+static VBOXSTRICTRC hmR0SvmExitReadMsr(PVMCPU pVCpu, PSVMVMCB pVmcb)
 {
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu,   CPUMCTX_EXTRN_CR0
-                                      | CPUMCTX_EXTRN_RFLAGS
-                                      | CPUMCTX_EXTRN_SS
-                                      | CPUMCTX_EXTRN_ALL_MSRS);
-
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
     Log4Func(("idMsr=%#RX32\n", pCtx->ecx));
 
+    VBOXSTRICTRC rcStrict;
     bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
     if (fSupportsNextRipSave)
     {
-        int rc = EMInterpretRdmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
-        if (RT_LIKELY(rc == VINF_SUCCESS))
-        {
-            pCtx->rip = pVmcb->ctrl.u64NextRIP;
-            HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
-            return VINF_SUCCESS;
-        }
-
-        AssertMsg(   rc == VERR_EM_INTERPRETER
-                  || rc == VINF_CPUM_R3_MSR_READ, ("EMInterpretRdmsr failed rc=%Rrc\n", rc));
-        return rc;
-    }
-
-    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
-    int rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0));
-    if (RT_UNLIKELY(rc != VINF_SUCCESS))
-    {
-        AssertMsg(   rc == VERR_EM_INTERPRETER
-                  || rc == VINF_CPUM_R3_MSR_READ, ("EMInterpretInstruction failed rc=%Rrc\n", rc));
-    }
-    /* RIP updated by EMInterpretInstruction(). */
-    HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
-    return rc;
+        /** @todo Optimize this: Only retrieve the MSR bits we need here.  CPUMAllMsrs.cpp
+         *        can ask for what it needs instead of using CPUMCTX_EXTRN_ALL_MSRS. */
+        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
+        rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmcb->ctrl.u64NextRIP - pCtx->rip);
+        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);   /* RIP updated by IEMExecDecodedRdmsr(). */
+        else
+            AssertMsg(   rcStrict == VINF_IEM_RAISED_XCPT
+                      || rcStrict == VINF_CPUM_R3_MSR_WRITE,
+                      ("Unexpected IEMExecDecodedWrmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+    }
+    else
+    {
+        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
+        rcStrict = IEMExecOne(pVCpu);
+        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);   /* RIP updated by IEMExecOne(). */
+        else
+            AssertMsg(   rcStrict == VINF_IEM_RAISED_XCPT
+                      || rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecOne status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+    }
+    return rcStrict;
 }

@@ -6719 +6714 @@
  * \#VMEXIT helper for write MSRs, see hmR0SvmExitMsr.
  *
- * @returns VBox status code.
+ * @returns Strict VBox status code.
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pVmcb           Pointer to the VM control block.
  * @param   pSvmTransient   Pointer to the SVM-transient structure.
  */
-static int hmR0SvmExitWriteMsr(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMTRANSIENT pSvmTransient)
+static VBOXSTRICTRC hmR0SvmExitWriteMsr(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMTRANSIENT pSvmTransient)
 {
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     uint32_t const idMsr = pCtx->ecx;
-    /** @todo Optimize this: We don't need to get much of the MSR state here
-     *        since we're only updating.  CPUMAllMsrs.cpp can ask for what it needs and
-     *        clear the applicable extern flags. */
-    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu,   CPUMCTX_EXTRN_CR0
-                                      | CPUMCTX_EXTRN_RFLAGS
-                                      | CPUMCTX_EXTRN_SS
-                                      | CPUMCTX_EXTRN_ALL_MSRS
-                                      | IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
-
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
     Log4Func(("idMsr=%#RX32\n", idMsr));

@@ -6768 +6754 @@
     if (fSupportsNextRipSave)
     {
+        /** @todo Optimize this: We don't need to get much of the MSR state here
+         *        since we're only updating.  CPUMAllMsrs.cpp can ask for what it needs and
+         *        clear the applicable extern flags. */
+        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
         rcStrict = IEMExecDecodedWrmsr(pVCpu, pVmcb->ctrl.u64NextRIP - pCtx->rip);
         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
+            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);   /* RIP updated by IEMExecDecodedWrmsr(). */
         else
             AssertMsg(   rcStrict == VINF_IEM_RAISED_XCPT

@@ -6778 +6768 @@
     else
     {
-        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
+        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_ALL_MSRS);
         rcStrict = IEMExecOne(pVCpu);
         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
-            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);   /* RIP updated by EMInterpretInstruction(). */
+            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);   /* RIP updated by IEMExecOne(). */
         else
             AssertMsg(   rcStrict == VINF_IEM_RAISED_XCPT

@@ -6815 +6805 @@
     }
 
-    return VBOXSTRICTRC_TODO(rcStrict);
+    return rcStrict;
 }

@@ -6829 +6819 @@
     PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ)
-        return hmR0SvmExitReadMsr(pVCpu, pVmcb);
+        return VBOXSTRICTRC_TODO(hmR0SvmExitReadMsr(pVCpu, pVmcb));
 
     Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE);
-    return hmR0SvmExitWriteMsr(pVCpu, pVmcb, pSvmTransient);
+    return VBOXSTRICTRC_TODO(hmR0SvmExitWriteMsr(pVCpu, pVmcb, pSvmTransient));
 }
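The recurring theme in the HMSVMR0.cpp hunks is the return-type change from int to VBOXSTRICTRC: the read and write helpers now preserve informational statuses such as VINF_CPUM_R3_MSR_READ and VINF_IEM_RAISED_XCPT, and the common hmR0SvmExitMsr dispatcher flattens to a plain status exactly once via VBOXSTRICTRC_TODO. The standalone C sketch below illustrates that boundary-conversion pattern under stated assumptions; it is a toy model, not VirtualBox code, and every TOY*/toy* name in it is invented for illustration.

/* Toy model of the strict-status-code pattern applied in this changeset.
 * The helper returns the "strict" type so informational statuses survive,
 * and the dispatcher converts exactly once at the exit-handler boundary,
 * mirroring the move of VBOXSTRICTRC_TODO into hmR0SvmExitMsr. */
#include <stdio.h>

typedef int TOYSTRICTRC;                      /* stand-in for VBOXSTRICTRC */
#define TOY_VINF_SUCCESS          0
#define TOY_VINF_R3_MSR_READ   1001           /* informational: defer to ring-3 */
#define TOYSTRICTRC_TODO(a_rc) ((int)(a_rc))  /* stand-in for VBOXSTRICTRC_TODO */

/* Helper in the style of hmR0SvmExitReadMsr: returns the strict type. */
static TOYSTRICTRC toyExitReadMsr(unsigned idMsr)
{
    if (idMsr == 0x10)               /* pretend this MSR must go to ring-3 */
        return TOY_VINF_R3_MSR_READ;
    return TOY_VINF_SUCCESS;
}

/* Dispatcher in the style of hmR0SvmExitMsr: converts exactly once. */
static int toyExitMsr(unsigned idMsr)
{
    return TOYSTRICTRC_TODO(toyExitReadMsr(idMsr));
}

int main(void)
{
    printf("rc=%d rc=%d\n", toyExitMsr(0x1b), toyExitMsr(0x10)); /* 0, 1001 */
    return 0;
}

The point of converting at the boundary rather than in each helper is that an informational code is never accidentally collapsed before the caller has had a chance to act on it.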
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r72878 → r72881

@@ -11839 +11839 @@
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
 
-    /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. FS, GS (base) can be accessed by MSR reads. */
-    int rc = hmR0VmxImportGuestState(pVCpu,   CPUMCTX_EXTRN_CR0
-                                            | CPUMCTX_EXTRN_RFLAGS
-                                            | CPUMCTX_EXTRN_SS
-                                            | CPUMCTX_EXTRN_FS
-                                            | CPUMCTX_EXTRN_GS);
-    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
-        rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS);
+    /** @todo Optimize this: We currently drag in in the whole MSR state
+     *        (CPUMCTX_EXTRN_ALL_MSRS) here.  We should optimize this to only get
+     *        MSRs required.  That would require changes to IEM and possibly CPUM too.
+     *        (Should probably do it lazy fashion from CPUMAllMsrs.cpp). */
+    uint32_t const idMsr = pMixedCtx->ecx;  NOREF(idMsr); /* Save it. */
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc    |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_ALL_MSRS);
     AssertRCReturn(rc, rc);
-    Log4Func(("ecx=%#RX32\n", pMixedCtx->ecx));
+
+    Log4Func(("ecx=%#RX32\n", idMsr));
 
 #ifdef VBOX_STRICT
     if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     {
-        if (   hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)
-            && pMixedCtx->ecx != MSR_K6_EFER)
-        {
-            AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
-                             pMixedCtx->ecx));
+        if (   hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, idMsr)
+            && idMsr != MSR_K6_EFER)
+        {
+            AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", idMsr));
             HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
         }
-        if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx))
+        if (hmR0VmxIsLazyGuestMsr(pVCpu, idMsr))
         {
             VMXMSREXITREAD  enmRead;
             VMXMSREXITWRITE enmWrite;
-            int rc2 = hmR0VmxGetMsrPermission(pVCpu, pMixedCtx->ecx, &enmRead, &enmWrite);
+            int rc2 = hmR0VmxGetMsrPermission(pVCpu, idMsr, &enmRead, &enmWrite);
             AssertRCReturn(rc2, rc2);
             if (enmRead == VMXMSREXIT_PASSTHRU_READ)
             {
-                AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", pMixedCtx->ecx));
+                AssertMsgFailed(("Unexpected RDMSR for a passthru lazy-restore MSR. ecx=%#RX32\n", idMsr));
                 HMVMX_UNEXPECTED_EXIT_RET(pVCpu, pVmxTransient);
             }

@@ -11875 +11874 @@
 #endif
 
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
-    AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
-              ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
+    VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, pVmxTransient->cbInstr);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
-    if (RT_SUCCESS(rc))
-    {
-        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
-        Assert(pVmxTransient->cbInstr == 2);
-    }
-    return rc;
+    AssertMsg(   rcStrict == VINF_SUCCESS
+              || rcStrict == VINF_CPUM_R3_MSR_READ
+              || rcStrict == VINF_IEM_RAISED_XCPT,
+              ("Unexpected IEMExecDecodedRdmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+
+    return rcStrict;
 }
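On the VMX side the handler now reads the exit instruction length from the VMCS and hands it to IEMExecDecodedRdmsr(), which advances the guest RIP itself on success; the manual hmR0VmxAdvanceGuestRip() call and the Assert(pVmxTransient->cbInstr == 2) drop out of the exit handler. A minimal sketch of that division of labour follows; it is a toy model using invented toy* names, not the real VMM APIs.

/* Toy model, not VirtualBox code: the decoded-instruction emulator owns the
 * RIP update, so the exit handler only supplies the instruction length it
 * read from the VMCS and inspects the returned status. */
#include <stdint.h>
#include <stdio.h>

typedef struct TOYCPU { uint64_t rip; uint64_t msrValue; } TOYCPU;

/* Emulate RDMSR for an already-decoded instruction of cbInstr bytes. */
static int toyExecDecodedRdmsr(TOYCPU *pCpu, uint8_t cbInstr)
{
    pCpu->msrValue = 0xdeadbeefu;   /* pretend to read the MSR */
    pCpu->rip     += cbInstr;       /* the emulator advances RIP, not the caller */
    return 0;                       /* success */
}

int main(void)
{
    TOYCPU cpu = { 0x1000, 0 };
    uint8_t const cbInstr = 2;      /* RDMSR encodes as two bytes: 0F 32 */
    int rc = toyExecDecodedRdmsr(&cpu, cbInstr);
    printf("rc=%d rip=%#llx\n", rc, (unsigned long long)cpu.rip); /* rip=0x1002 */
    return 0;
}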