Changeset 72655 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Jun 22, 2018 10:05:53 AM
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r72646)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r72655)
(whitespace-only re-indentation of otherwise unchanged lines is shown as context)

 #define HMSVM_CPUMCTX_IMPORT_STATE(a_pVCpu, a_pCtx, a_fWhat) \
     do { \
-        hmR0SvmImportGuestState((a_pVCpu), (a_pCtx), (a_fWhat)); \
+        if ((a_pCtx)->fExtrn & (a_fWhat)) \
+            hmR0SvmImportGuestState((a_pVCpu), (a_pCtx), (a_fWhat)); \
     } while (0)
…
 
     HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
-    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_SVM, pSvmTransient->u64ExitCode & EMEXIT_F_TYPE_MASK),
+    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_SVM, pSvmTransient->u64ExitCode & EMEXIT_F_TYPE_MASK),
                      pCtx->cs.u64Base + pCtx->rip, uHostTsc);
 }
…
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
-    if (RT_LIKELY(rc == VINF_SUCCESS))
-    {
-        hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2);
-        HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
+    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS);
+    VBOXSTRICTRC rcStrict;
+    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
+                                                            EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
+                                                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+    if (!pExitRec)
+    {
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
+        rcStrict = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
+        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
+        {
+            hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2);
+            HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
+        }
+        else
+        {
+            AssertMsgFailed(("hmR0SvmExitCpuid: EMInterpretCpuId failed with %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+            rcStrict = VERR_EM_INTERPRETER;
+        }
     }
     else
     {
-        AssertMsgFailed(("hmR0SvmExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
-        rc = VERR_EM_INTERPRETER;
+        /*
+         * Frequent exit or something needing probing. Get state and call EMHistoryExec.
+         */
+        Assert(pCtx == &pVCpu->cpum.GstCtx);
+        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
+
+        Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n",
+              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ecx));
+
+        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
+
+        Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
+              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
+              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
     }
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
-    return rc;
+    return VBOXSTRICTRC_TODO(rcStrict);
 }
…
     }
 
+    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS);
     VBOXSTRICTRC rcStrict;
+    PCEMEXITREC pExitRec = NULL;
+    if (   !pVCpu->hm.s.fSingleInstruction
+        && !pVCpu->cpum.GstCtx.eflags.Bits.u1TF)
+        pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
+                                                    !IoExitInfo.n.u1Str
+                                                    ? IoExitInfo.n.u1Type == SVM_IOIO_READ
+                                                      ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
+                                                      : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
+                                                    : IoExitInfo.n.u1Type == SVM_IOIO_READ
+                                                      ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
+                                                      : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
+                                                    pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+    if (!pExitRec)
+    {
         bool fUpdateRipAlready = false;
         if (IoExitInfo.n.u1Str)
         {
             /* INS/OUTS - I/O String instruction. */
             /** @todo Huh? why can't we use the segment prefix information given by AMD-V
              *        in EXITINFO1? Investigate once this thing is up and running. */
             Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, IoExitInfo.n.u16Port, cbValue,
                   IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? 'w' : 'r'));
             AssertReturn(pCtx->dx == IoExitInfo.n.u16Port, VERR_SVM_IPE_2);
             static IEMMODE const s_aenmAddrMode[8] =
             {
                 (IEMMODE)-1, IEMMODE_16BIT, IEMMODE_32BIT, (IEMMODE)-1, IEMMODE_64BIT, (IEMMODE)-1, (IEMMODE)-1, (IEMMODE)-1
             };
             IEMMODE enmAddrMode = s_aenmAddrMode[(IoExitInfo.u >> 7) & 0x7];
             if (enmAddrMode != (IEMMODE)-1)
             {
                 uint64_t cbInstr = pVmcb->ctrl.u64ExitInfo2 - pCtx->rip;
                 if (cbInstr <= 15 && cbInstr >= 1)
                 {
                     Assert(cbInstr >= 1U + IoExitInfo.n.u1Rep);
                     if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
                     {
                         /* Don't know exactly how to detect whether u3Seg is valid, currently
                            only enabling it for Bulldozer and later with NRIP.  OS/2 broke on
                            2384 Opterons when only checking NRIP. */
                         bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx);
                         if (   fSupportsNextRipSave
                             && pVM->cpum.ro.GuestFeatures.enmMicroarch >= kCpumMicroarch_AMD_15h_First)
                         {
                             AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_DS || cbInstr > 1U + IoExitInfo.n.u1Rep,
                                       ("u32Seg=%d cbInstr=%d u1REP=%d", IoExitInfo.n.u3Seg, cbInstr, IoExitInfo.n.u1Rep));
                             rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
                                                             IoExitInfo.n.u3Seg, true /*fIoChecked*/);
                         }
                         else if (cbInstr == 1U + IoExitInfo.n.u1Rep)
                             rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
                                                             X86_SREG_DS, true /*fIoChecked*/);
                         else
                             rcStrict = IEMExecOne(pVCpu);
                         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
                     }
                     else
                     {
                         AssertMsg(IoExitInfo.n.u3Seg == X86_SREG_ES /*=0*/, ("%#x\n", IoExitInfo.n.u3Seg));
                         rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, IoExitInfo.n.u1Rep, (uint8_t)cbInstr,
                                                        true /*fIoChecked*/);
                         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
                     }
                 }
                 else
                 {
                     AssertMsgFailed(("rip=%RX64 nrip=%#RX64 cbInstr=%#RX64\n", pCtx->rip, pVmcb->ctrl.u64ExitInfo2, cbInstr));
                     rcStrict = IEMExecOne(pVCpu);
                 }
             }
             else
             {
                 AssertMsgFailed(("IoExitInfo=%RX64\n", IoExitInfo.u));
                 rcStrict = IEMExecOne(pVCpu);
             }
             fUpdateRipAlready = true;
         }
         else
         {
             /* IN/OUT - I/O instruction. */
             Assert(!IoExitInfo.n.u1Rep);
 
             if (IoExitInfo.n.u1Type == SVM_IOIO_WRITE)
             {
                 rcStrict = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, cbValue);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
             }
             else
             {
                 uint32_t u32Val = 0;
                 rcStrict = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, cbValue);
                 if (IOM_SUCCESS(rcStrict))
                 {
                     /* Save result of I/O IN instr. in AL/AX/EAX. */
                     /** @todo r=bird: 32-bit op size should clear high bits of rax! */
                     pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
                 }
                 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
                     HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, cbValue);
 
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
             }
         }
 
         if (IOM_SUCCESS(rcStrict))
         {
             /* AMD-V saves the RIP of the instruction following the IO instruction in EXITINFO2. */
             if (!fUpdateRipAlready)
                 pCtx->rip = pVmcb->ctrl.u64ExitInfo2;
 
             /*
              * If any I/O breakpoints are armed, we need to check if one triggered
              * and take appropriate action.
              * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
              */
             /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
              *        execution engines about whether hyper BPs and such are pending. */
             HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_DR7);
             uint32_t const uDr7 = pCtx->dr[7];
             if (RT_UNLIKELY(   (   (uDr7 & X86_DR7_ENABLED_MASK)
                                 && X86_DR7_ANY_RW_IO(uDr7)
                                 && (pCtx->cr4 & X86_CR4_DE))
                             || DBGFBpIsHwIoArmed(pVM)))
             {
                 /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
                 VMMRZCallRing3Disable(pVCpu);
                 HM_DISABLE_PREEMPT();
 
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
                 CPUMR0DebugStateMaybeSaveGuest(pVCpu, false /*fDr6*/);
 
                 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, IoExitInfo.n.u16Port, cbValue);
                 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
                 {
                     /* Raise #DB. */
                     pVmcb->guest.u64DR6 = pCtx->dr[6];
                     pVmcb->guest.u64DR7 = pCtx->dr[7];
                     pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
                     hmR0SvmSetPendingXcptDB(pVCpu);
                 }
                 /* rcStrict is VINF_SUCCESS, VINF_IOM_R3_IOPORT_COMMIT_WRITE, or in [VINF_EM_FIRST..VINF_EM_LAST],
                    however we can ditch VINF_IOM_R3_IOPORT_COMMIT_WRITE as it has VMCPU_FF_IOM as backup. */
                 else if (   rcStrict2 != VINF_SUCCESS
                          && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
                     rcStrict = rcStrict2;
                 AssertCompile(VINF_EM_LAST < VINF_IOM_R3_IOPORT_COMMIT_WRITE);
 
                 HM_RESTORE_PREEMPT();
                 VMMRZCallRing3Enable(pVCpu);
             }
 
             HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict);
         }
 
 #ifdef VBOX_STRICT
         if (rcStrict == VINF_IOM_R3_IOPORT_READ)
             Assert(IoExitInfo.n.u1Type == SVM_IOIO_READ);
         else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE)
             Assert(IoExitInfo.n.u1Type == SVM_IOIO_WRITE);
         else
         {
             /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
              *        statuses, that the VMM device and some others may return. See
              *        IOM_SUCCESS() for guidance. */
             AssertMsg(   RT_FAILURE(rcStrict)
                       || rcStrict == VINF_SUCCESS
                       || rcStrict == VINF_EM_RAW_EMULATE_INSTR
                       || rcStrict == VINF_EM_DBG_BREAKPOINT
                       || rcStrict == VINF_EM_RAW_GUEST_TRAP
                       || rcStrict == VINF_EM_RAW_TO_R3
                       || rcStrict == VINF_TRPM_XCPT_DISPATCHED
                       || rcStrict == VINF_EM_TRIPLE_FAULT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
         }
 #endif
+    }
+    else
+    {
+        /*
+         * Frequent exit or something needing probing. Get state and call EMHistoryExec.
+         */
+        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
+        STAM_COUNTER_INC(!IoExitInfo.n.u1Str
+                         ? IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
+                         : IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
+        Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
+              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IoExitInfo.n.u1Rep ? "REP " : "",
+              IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? "OUT" : "IN", IoExitInfo.n.u1Str ? "S" : "", IoExitInfo.n.u16Port, uIOWidth));
+
+        rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
+
+        Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
+              pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
+              VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
+    }
     return VBOXSTRICTRC_TODO(rcStrict);
 }
…
      * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages.
      */
-    int rc;
     Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD);
     if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
…
             return VINF_EM_RAW_INJECT_TRPM_EVENT;
 
-        VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
-                                                         u32ErrCode);
-        rc = VBOXSTRICTRC_VAL(rc2);
-
-        /*
-         * If we succeed, resume guest execution.
-         * If we fail in interpreting the instruction because we couldn't get the guest physical address
-         * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
-         * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
-         * weird case. See @bugref{6043}.
-         */
-        if (   rc == VINF_SUCCESS
-            || rc == VERR_PAGE_TABLE_NOT_PRESENT
-            || rc == VERR_PAGE_NOT_PRESENT)
-        {
-            /* Successfully handled MMIO operation. */
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
-            rc = VINF_SUCCESS;
-        }
-        return rc;
+        HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS);
+        VBOXSTRICTRC rcStrict;
+        PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
+                                                                EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
+                                                                pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+        if (!pExitRec)
+        {
+
+            rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
+                                                     u32ErrCode);
+
+            /*
+             * If we succeed, resume guest execution.
+             * If we fail in interpreting the instruction because we couldn't get the guest physical address
+             * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
+             * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this
+             * weird case. See @bugref{6043}.
+             */
+            if (   rcStrict == VINF_SUCCESS
+                || rcStrict == VERR_PAGE_TABLE_NOT_PRESENT
+                || rcStrict == VERR_PAGE_NOT_PRESENT)
+            {
+                /* Successfully handled MMIO operation. */
+                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);
+                rcStrict = VINF_SUCCESS;
+            }
+        }
+        else
+        {
+            /*
+             * Frequent exit or something needing probing. Get state and call EMHistoryExec.
+             */
+            Assert(pCtx == &pVCpu->cpum.GstCtx);
+            HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
+            Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n",
+                  pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhysFaultAddr));
+
+            rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
+            HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
+
+            Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
+                  pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
+                  VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
+        }
+        return VBOXSTRICTRC_TODO(rcStrict);
     }
 
     TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode);
-    rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr);
+    int rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr);
     TRPMResetTrap(pVCpu);
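Every hunk in HMSVMR0.cpp above follows the same shape: on a CPUID, port-I/O or nested-paging/MMIO exit, first ask the exit history whether this guest PC has become a frequent exit; if not, take the existing cheap handling, otherwise import the full guest state (IEM_CPUMCTX_EXTRN_MUST_MASK) and hand the spot to EMHistoryExec for probing. Below is a minimal, self-contained C sketch of that record-lookup shape; ExitRecord, historyUpdateAndQuery, the direct-mapped table and the hit threshold are illustrative stand-ins, not the real EMEXITREC/EMHistoryUpdateFlagsAndTypeAndPC implementation.

    #include <stdint.h>
    #include <stddef.h>

    typedef struct ExitRecord
    {
        uint64_t uFlatPC;   /* cs.u64Base + rip of the exiting instruction */
        uint32_t cHits;     /* how often this exit has been seen recently */
    } ExitRecord;

    /* Hypothetical stand-in for EMHistoryUpdateFlagsAndTypeAndPC: returns NULL
       while an exit at this PC is rare (caller takes its cheap inline path) and
       a record once it looks frequent (caller imports full state and probes). */
    static ExitRecord *historyUpdateAndQuery(ExitRecord *paRecords, size_t cRecords, uint64_t uFlatPC)
    {
        ExitRecord *pRec = &paRecords[uFlatPC % cRecords]; /* trivial direct-mapped table */
        if (pRec->uFlatPC != uFlatPC)
        {
            pRec->uFlatPC = uFlatPC;   /* a new PC evicts the previous entry */
            pRec->cHits   = 1;
            return NULL;
        }
        return ++pRec->cHits >= 64 ? pRec : NULL; /* threshold value is illustrative */
    }

The payoff of this shape is that the cheap path stays untouched for one-off exits, while instruction streams that hammer the same exit point (polling loops on an I/O port, MMIO-heavy drivers) get batched through the interpreter.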
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72644)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72655)

      * by amending the history entry added here.
      */
-    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_VMX, pVmxTransient->uExitReason & EMEXIT_F_TYPE_MASK),
+    EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_VMX, pVmxTransient->uExitReason & EMEXIT_F_TYPE_MASK),
                      UINT64_MAX, uHostTsc);
 
…
 
     VBOXSTRICTRC rcStrict;
-    PCEMEXITREC pExitRec;
-    pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
-                                                EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
-                                                                           EMEXITTYPE_CPUID),
-                                                pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
+                                                            EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_CPUID),
+                                                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
     if (!pExitRec)
     {
…
 
         rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
 
         Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
…
                                                 !fIOString
                                                 ? !fIOWrite
-                                                ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
-                                                                             EMEXITTYPE_IO_PORT_READ)
-                                                : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
-                                                                             EMEXITTYPE_IO_PORT_WRITE)
+                                                  ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_READ)
+                                                  : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_WRITE)
                                                 : !fIOWrite
-                                                ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
-                                                                             EMEXITTYPE_IO_PORT_STR_READ)
-                                                : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
-                                                                             EMEXITTYPE_IO_PORT_STR_WRITE),
+                                                  ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_READ)
+                                                  : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_IO_PORT_STR_WRITE),
                                                 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
     if (!pExitRec)
…
         int rc2 = hmR0VmxSaveGuestRegsForIemInterpreting(pVCpu);
         AssertRCReturn(rc2, rc2);
+        STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead
+                                    : fIOWrite ? &pVCpu->hm.s.StatExitIOStringWrite : &pVCpu->hm.s.StatExitIOStringRead);
         Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n",
               pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
…
 
         rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
 
         Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
…
 
     VBOXSTRICTRC rcStrict;
-    PCEMEXITREC pExitRec;
-    pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
-                                                EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
-                                                                           EMEXITTYPE_MMIO),
-                                                pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
+                                                            EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, EMEXITTYPE_MMIO),
+                                                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
     if (!pExitRec)
     {
…
 
         rcStrict = EMHistoryExec(pVCpu, pExitRec, 0);
+        HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
 
         Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n",
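Both files also rename EMEXIT_MAKE_FLAGS_AND_TYPE to EMEXIT_MAKE_FT and reclassify the EM-handled exits from EMEXIT_F_PREEMPT_DISABLED to EMEXIT_F_HM. Call sites such as EMEXIT_MAKE_FT(EMEXIT_F_KIND_SVM, pSvmTransient->u64ExitCode & EMEXIT_F_TYPE_MASK) suggest the macro simply ORs a kind/flags part with a type part kept below EMEXIT_F_TYPE_MASK; the sketch below shows that packing under assumed bit values, which may differ from the real definitions in the EM headers.

    #include <stdint.h>

    /* Assumed layout: low bits carry the exit type, higher bits the kind/flags. */
    #define EMEXIT_F_TYPE_MASK  UINT32_C(0x00000fff)  /* assumption, not the real value */
    #define EMEXIT_F_KIND_EM    UINT32_C(0x00001000)  /* assumption, not the real value */
    #define EMEXIT_F_HM         UINT32_C(0x00010000)  /* assumption, not the real value */

    /* Combine flags and a (pre-masked) type into one classification key. */
    #define EMEXIT_MAKE_FT(a_fFlags, a_uType)  ((a_fFlags) | (a_uType))

    /* Example: classify a port-I/O read handled under HM. */
    static inline uint32_t classifyIoRead(void)
    {
        uint32_t const uTypeIoPortRead = 1; /* stand-in for EMEXITTYPE_IO_PORT_READ */
        return EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM | EMEXIT_F_HM, uTypeIoPortRead & EMEXIT_F_TYPE_MASK);
    }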