Changeset 72606 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Jun 18, 2018 7:03:15 PM (7 years ago)
- svn:sync-xref-src-repo-rev: 123103
- File: 1 edited
Legend: unchanged context lines carry no prefix; added lines are prefixed with "+", removed lines with "-"; "…" marks elided unchanged code.
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r72600 → r72606

 *********************************************************************************************************************************/
 #define LOG_GROUP LOG_GROUP_HM
+#define VMCPU_INCL_CPUM_GST_CTX
 #include <iprt/x86.h>
 #include <iprt/asm-amd64-x86.h>

…

+/**
+ * Saves the guest CS register from the current VMCS into the guest-CPU context.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @remarks No-long-jump zone!!!
+ */
+static int hmR0VmxSaveGuestCs(PVMCPU pVCpu)
+{
+    /** @todo optimize this? */
+    return hmR0VmxSaveGuestSegmentRegs(pVCpu, &pVCpu->cpum.GstCtx);
+}
+
+
 /**
  * Saves the guest descriptor table registers and task register from the current
  * VMCS into the guest-CPU context.

…

     rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     AssertRCReturn(rc, rc);
+    return rc;
+}
+
+
+/**
+ * Saves guest registers needed for IEM instruction interpretation.
+ *
+ * @returns VBox status code (OR-able).
+ * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
+ */
+static int hmR0VmxSaveGuestRegsForIemInterpreting(PVMCPU pVCpu)
+{
+    /*
+     * Our goal here is IEM_CPUMCTX_EXTRN_MUST_MASK.
+     *
+     * Note! Before IEM dispatches an exception, it will call us to sync in everything.
+     */
+#if 0 /* later with CPUMCTX_EXTRN_XXX */
+    int rc = hmR0VmxSaveGuestRip(pVCpu, &pVCpu->cpum.GstCtx);
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, &pVCpu->cpum.GstCtx);
+    rc |= hmR0VmxSaveGuestRsp(pVCpu, &pVCpu->cpum.GstCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, &pVCpu->cpum.GstCtx); /** @todo Only CS and SS are strictly required here. */
+    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, &pVCpu->cpum.GstCtx); /** @todo We don't need CR2 here. */
+    rc |= hmR0VmxSaveGuestApicState(pVCpu, &pVCpu->cpum.GstCtx);   /** @todo Only TPR is needed here. */
+    rc |= hmR0VmxSaveGuestDR7(pVCpu, &pVCpu->cpum.GstCtx);
+    /* EFER is always up to date. */
+    AssertRCReturn(rc, rc);
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST - fixme); /** @todo fix me */
+#else
+    int rc = hmR0VmxSaveGuestState(pVCpu, &pVCpu->cpum.GstCtx);
+    AssertRCReturn(rc, rc);
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
+#endif
+    return rc;
+}
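A pattern worth noting in the helper above: the @returns tag says "VBox status code (OR-able)", and callers throughout this changeset chain several hmR0VmxSaveGuest* calls with |= before a single AssertRCReturn. This works because VINF_SUCCESS is 0 and failure statuses are negative, so the accumulated value stays 0 only while every call succeeds; once any call fails it is nonzero (though no longer a specific status, which is why it is only asserted on, not used to identify the error). A minimal standalone sketch of the idiom, using made-up status values in place of the real iprt/VBox definitions:

    #include <stdio.h>

    #define VINF_SUCCESS        0     /* real value in iprt/err.h */
    #define VERR_EXAMPLE_ONE   -101   /* stand-ins for real negative VERR_* codes */
    #define VERR_EXAMPLE_TWO   -102

    static int saveRip(void)    { return VINF_SUCCESS; }
    static int saveRflags(void) { return VERR_EXAMPLE_ONE; } /* pretend this one fails */
    static int saveCs(void)     { return VINF_SUCCESS; }

    int main(void)
    {
        /* The OR-able accumulation used throughout the changeset: */
        int rc = saveRip();
        rc |= saveRflags();
        rc |= saveCs();

        /* Stand-in for AssertRCReturn(rc, rc): any failure leaves rc nonzero. */
        if (rc != VINF_SUCCESS)
            printf("at least one save failed, accumulated rc=%d\n", rc);
        return 0;
    }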
…

 {
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
-    if (RT_LIKELY(rc == VINF_SUCCESS))
-    {
-        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
-        Assert(pVmxTransient->cbInstr == 2);
-    }
-    else
-    {
-        AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
-        rc = VERR_EM_INTERPRETER;
-    }
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
-    return rc;
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
+    Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
+
+    /*
+     * Get the state we need and update the exit history entry.
+     */
+    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
+    rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestCs(pVCpu);
+    AssertRCReturn(rc, rc);
+
+    VBOXSTRICTRC rcStrict;
+    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
+                                                            EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
+                                                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+    if (!pExitRec)
+    {
+        /*
+         * Regular CPUID instruction execution.
+         */
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
+        rcStrict = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
+        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+        {
+            rcStrict = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
+            Assert(pVmxTransient->cbInstr == 2);
+        }
+        else
+        {
+            AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+            rcStrict = VERR_EM_INTERPRETER;
+        }
+    }
+    else
+    {
+        /*
+         * Frequent exit or something needing probing. Get state and call EMHistoryExec.
+         */
+        Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
+        int rc2 = hmR0VmxSaveGuestRegsForIemInterpreting(pVCpu);
+        AssertRCReturn(rc2, rc2);
+
+        Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
+              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
+              pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
+
+        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
+
+        Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
+              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
+              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
+    }
+    return VBOXSTRICTRC_TODO(rcStrict);
 }
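The rewritten CPUID handler shows the pattern this changeset applies to several exits: classify the exit with EMHistoryUpdateFlagsAndTypeAndPC, keyed by exit type and flat PC; if no exit record comes back, take the cheap single-instruction path, otherwise sync the full guest state and hand the hot spot to EMHistoryExec for probing. A self-contained toy model of that record-or-NULL lookup (not the real EM API; a plain hash-indexed counter that starts returning a record once the same PC exits frequently; the table size and threshold are made-up values):

    #include <stdint.h>
    #include <stdio.h>

    #define HISTORY_SLOTS   256u    /* toy table size (assumption, not VBox's) */
    #define HOT_THRESHOLD   16u     /* exits at one PC before we call it "frequent" */

    typedef struct EXITREC { uint64_t uFlatPc; uint32_t cHits; } EXITREC;
    static EXITREC g_aRecs[HISTORY_SLOTS];

    /* Toy stand-in for EMHistoryUpdateFlagsAndTypeAndPC: count the exit and
       return a record only when this PC has become frequent. */
    static EXITREC *historyUpdate(uint64_t uFlatPc)
    {
        EXITREC *pRec = &g_aRecs[(uFlatPc >> 1) % HISTORY_SLOTS];
        if (pRec->uFlatPc != uFlatPc) { pRec->uFlatPc = uFlatPc; pRec->cHits = 0; }
        return ++pRec->cHits >= HOT_THRESHOLD ? pRec : NULL;
    }

    int main(void)
    {
        uint64_t const uFlatPc = 0x1000; /* rip + cs.u64Base in the real handlers */
        for (unsigned i = 0; i < 20; i++)
        {
            EXITREC *pRec = historyUpdate(uFlatPc);
            if (!pRec)
                printf("exit %2u: cheap path (interpret one instruction)\n", i);
            else
                printf("exit %2u: hot, would sync state and call EMHistoryExec\n", i);
        }
        return 0;
    }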
…

 {
     HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
+    Assert(pMixedCtx == &pVCpu->cpum.GstCtx);

     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);      /* Eflag checks in EMInterpretDisasCurrent(). */
-    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
+    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
+    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
     AssertRCReturn(rc, rc);

…

     AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_VMX_IPE_1);

+    /*
+     * Update exit history to see if this exit can be optimized.
+     */
+    VBOXSTRICTRC rcStrict;
+    PCEMEXITREC pExitRec = NULL;
+    if (   !fGstStepping
+        && !fDbgStepping)
+        pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
+                                                    !fIOString
+                                                    ? !fIOWrite
+                                                      ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ)
+                                                      : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
+                                                    : !fIOWrite
+                                                      ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)
+                                                      : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE),
+                                                    pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+    if (!pExitRec)
+    {
(the pre-existing body below is unchanged, re-indented one level into the new if-block)
        /* I/O operation lookup arrays. */
        static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 };                  /* Size of the I/O accesses. */
        static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */

        uint32_t const cbValue = s_aIOSizes[uIOWidth];
        uint32_t const cbInstr = pVmxTransient->cbInstr;
        bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (fIOString)
        {
            /*
             * INS/OUTS - I/O String instruction.
             *
             * Use instruction-information if available, otherwise fall back on
             * interpreting the instruction.
             */
            Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue,
                  fIOWrite ? 'w' : 'r'));
            AssertReturn(pMixedCtx->dx == uIOPort, VERR_VMX_IPE_2);
            if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
            {
                int rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
                /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
                rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
                AssertRCReturn(rc2, rc2);
                AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3);
                AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
                IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
                bool    fRep        = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
                if (fIOWrite)
                    rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
                                                    pVmxTransient->ExitInstrInfo.StrIo.iSegReg, true /*fIoChecked*/);
                else
                {
                    /*
                     * The segment prefix for INS cannot be overridden and is always ES. We can safely assume X86_SREG_ES.
                     * Hence "iSegReg" field is undefined in the instruction-information field in VT-x for INS.
                     * See Intel Instruction spec. for "INS".
                     * See Intel spec. Table 27-8 "Format of the VM-Exit Instruction-Information Field as Used for INS and OUTS".
                     */
                    rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, true /*fIoChecked*/);
                }
            }
            else
            {
                /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
                int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
                AssertRCReturn(rc2, rc2);
                rcStrict = IEMExecOne(pVCpu);
            }
            /** @todo IEM needs to be setting these flags somehow. */
            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
            fUpdateRipAlready = true;
        }
        else
        {
            /*
             * IN/OUT - I/O instruction.
             */
            Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
            uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];
            Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
            if (fIOWrite)
            {
                rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
            }
            else
            {
                uint32_t u32Result = 0;
                rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
                if (IOM_SUCCESS(rcStrict))
                {
                    /* Save result of I/O IN instr. in AL/AX/EAX. */
                    pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
                }
                else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
                    HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
            }
        }
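The s_aIOSizes/s_aIOOpAnd tables above turn the 2-bit width field from the exit qualification into an access size and a merge mask (index 2 is invalid, hence the 0 entries and the earlier uIOWidth != 2 assertion). A standalone sketch of that lookup and the AL/AX/EAX merge the IN path performs, with hypothetical register values:

    #include <stdint.h>
    #include <stdio.h>

    /* Same tables as the handler: index = I/O width field from the exit qualification. */
    static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 };                  /* bytes accessed */
    static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AL/AX/EAX merge masks */

    int main(void)
    {
        uint32_t eax       = 0xAABBCCDD; /* guest EAX before the IN (made-up value) */
        uint32_t u32Result = 0x42;       /* pretend IOMIOPortRead returned this */
        uint32_t uIOWidth  = 0;          /* 0 = byte access, e.g. "in al, dx" */

        uint32_t const cbValue = s_aIOSizes[uIOWidth];
        uint32_t const uAndVal = s_aIOOpAnd[uIOWidth];

        /* Merge exactly as the handler does: keep the untouched high bytes of EAX. */
        eax = (eax & ~uAndVal) | (u32Result & uAndVal);
        printf("%u-byte IN -> eax=%#010x\n", cbValue, eax); /* prints 0xaabbcc42 */
        return 0;
    }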
        if (IOM_SUCCESS(rcStrict))
        {
            if (!fUpdateRipAlready)
            {
                hmR0VmxAdvanceGuestRipBy(pVCpu, pMixedCtx, cbInstr);
                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);
            }

            /*
             * INS/OUTS with REP prefix updates RFLAGS, can be observed with triple-fault guru while booting Fedora 17 64-bit guest.
             * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
             */
            if (fIOString)
            {
                /** @todo Single-step for INS/OUTS with REP prefix? */
                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
            }
            else if (   !fDbgStepping
                     && fGstStepping)
            {
                hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
            }

            /*
             * If any I/O breakpoints are armed, we need to check if one triggered
             * and take appropriate action.
             * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
             */
            int rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
            AssertRCReturn(rc2, rc2);

            /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
             *  execution engines about whether hyper BPs and such are pending. */
            uint32_t const uDr7 = pMixedCtx->dr[7];
            if (RT_UNLIKELY(   (   (uDr7 & X86_DR7_ENABLED_MASK)
                                && X86_DR7_ANY_RW_IO(uDr7)
                                && (pMixedCtx->cr4 & X86_CR4_DE))
                            || DBGFBpIsHwIoArmed(pVM)))
            {
                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);

                /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
                VMMRZCallRing3Disable(pVCpu);
                HM_DISABLE_PREEMPT();

                bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /* fDr6 */);

                VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
                if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
                {
                    /* Raise #DB. */
                    if (fIsGuestDbgActive)
                        ASMSetDR6(pMixedCtx->dr[6]);
                    if (pMixedCtx->dr[7] != uDr7)
                        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);

                    hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
                }
                /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
                   however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
                else if (   rcStrict2 != VINF_SUCCESS
                         && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
                    rcStrict = rcStrict2;
                AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);

                HM_RESTORE_PREEMPT();
                VMMRZCallRing3Enable(pVCpu);
            }
        }

#ifdef VBOX_STRICT
        if (rcStrict == VINF_IOM_R3_IOPORT_READ)
            Assert(!fIOWrite);
        else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE)
            Assert(fIOWrite);
        else
        {
# if 0 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
       *  statuses, that the VMM device and some others may return. See
       *  IOM_SUCCESS() for guidance. */
            AssertMsg(   RT_FAILURE(rcStrict)
                      || rcStrict == VINF_SUCCESS
                      || rcStrict == VINF_EM_RAW_EMULATE_INSTR
                      || rcStrict == VINF_EM_DBG_BREAKPOINT
                      || rcStrict == VINF_EM_RAW_GUEST_TRAP
                      || rcStrict == VINF_EM_RAW_TO_R3
                      || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
# endif
        }
#endif
+        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
+    }
+    else
+    {
+        /*
+         * Frequent exit or something needing probing. Get state and call EMHistoryExec.
+         */
+        int rc2 = hmR0VmxSaveGuestRegsForIemInterpreting(pVCpu);
+        AssertRCReturn(rc2, rc2);
+
+        Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
+              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
+              VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "",
+              fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth));
+
+        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
+
+        Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
+              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
+              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
+    }
     return rcStrict;
 }
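The rcStrict2 merge in the breakpoint check above relies on the ordering of VBox informational statuses: when both the I/O access and DBGFBpCheckIo return informational codes, the numerically smaller VINF value is treated as higher priority and wins (the AssertCompile guards one assumption of that scheme). A standalone sketch of the merge rule, with made-up stand-in status values rather than the real VINF_* constants:

    #include <stdio.h>

    #define VINF_SUCCESS 0
    /* Stand-in informational statuses; smaller positive values take priority. */
    #define VINF_EXAMPLE_HIGH_PRIO  1100
    #define VINF_EXAMPLE_LOW_PRIO   2300

    /* Same shape as the handler's merge: keep rcStrict unless rcStrict2 is a
       "more urgent" (numerically smaller) informational status. */
    static int merge(int rcStrict, int rcStrict2)
    {
        if (   rcStrict2 != VINF_SUCCESS
            && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
            rcStrict = rcStrict2;
        return rcStrict;
    }

    int main(void)
    {
        printf("%d\n", merge(VINF_SUCCESS, VINF_EXAMPLE_HIGH_PRIO));          /* 1100 */
        printf("%d\n", merge(VINF_EXAMPLE_LOW_PRIO, VINF_EXAMPLE_HIGH_PRIO)); /* 1100 */
        printf("%d\n", merge(VINF_EXAMPLE_HIGH_PRIO, VINF_EXAMPLE_LOW_PRIO)); /* 1100 */
        return 0;
    }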
…

     }

+    /*
+     * Get sufficient state and update the exit history entry.
+     */
     RTGCPHYS GCPhys = 0;
     int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);

…

     AssertRCReturn(rc, rc);

+    VBOXSTRICTRC rcStrict;
+    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
+                                                            EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO),
+                                                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+    if (!pExitRec)
+    {
         /*
          * If we succeed, resume guest execution.
          * If we fail in interpreting the instruction because we couldn't get the guest physical address
          * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
          * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
          * weird case. See @bugref{6043}.
          */
         PVM pVM = pVCpu->CTX_SUFF(pVM);
-        VBOXSTRICTRC rcStrict2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
-        Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pMixedCtx->rip, VBOXSTRICTRC_VAL(rcStrict2)));
-        if (   rcStrict2 == VINF_SUCCESS
-            || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT
-            || rcStrict2 == VERR_PAGE_NOT_PRESENT)
+        rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
+        Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%Rrc\n", GCPhys, pMixedCtx->rip, VBOXSTRICTRC_VAL(rcStrict)));
+        if (   rcStrict == VINF_SUCCESS
+            || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
+            || rcStrict == VERR_PAGE_NOT_PRESENT)
         {
             /* Successfully handled MMIO operation. */
             HMCPU_CF_SET(pVCpu,   HM_CHANGED_GUEST_RIP
                                 | HM_CHANGED_GUEST_RSP
                                 | HM_CHANGED_GUEST_RFLAGS
                                 | HM_CHANGED_GUEST_APIC_STATE);
-            return VINF_SUCCESS;
+            rcStrict = VINF_SUCCESS;
         }
-        return rcStrict2;
+    }
+    else
+    {
+        /*
+         * Frequent exit or something needing probing. Get state and call EMHistoryExec.
+         */
+        Assert(pMixedCtx == &pVCpu->cpum.GstCtx);
+        int rc2 = hmR0VmxSaveGuestRegsForIemInterpreting(pVCpu);
+        AssertRCReturn(rc2, rc2);
+
+        Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
+              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhys));
+
+        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
+
+        Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
+              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
+              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
+    }
+    return VBOXSTRICTRC_TODO(rcStrict);
 }
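All three rewritten handlers key the exit history on pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, i.e. the flat (linear) program counter rather than the raw RIP, so the same offset reached through different CS bases hashes to different history entries. A minimal illustration of that keying, using plain arithmetic and hypothetical values rather than the VBox types:

    #include <stdint.h>
    #include <stdio.h>

    /* Flat PC as used for the exit-history key: segment base plus offset. */
    static uint64_t flatPc(uint64_t u64CsBase, uint64_t rip)
    {
        return rip + u64CsBase;
    }

    int main(void)
    {
        /* Hypothetical real-mode-ish CS base 0xF0000 with RIP 0x100. */
        printf("key=%#llx\n", (unsigned long long)flatPc(0xF0000, 0x100)); /* 0xf0100 */
        /* In 64-bit flat mode the CS base is 0, so the key equals RIP. */
        printf("key=%#llx\n", (unsigned long long)flatPc(0, 0x401000));
        return 0;
    }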