Changeset 47681 in vbox for trunk/src/VBox
Timestamp: Aug 12, 2013 10:51:55 PM
Location:  trunk/src/VBox/VMM
Files:     4 edited
trunk/src/VBox/VMM/VMMAll/DBGFAll.cpp (r47671 → r47681)

 #include "DBGFInternal.h"
 #include <VBox/vmm/vm.h>
+#include <VBox/err.h>
 #include <iprt/assert.h>
 
…
 
 /**
+ * Checks if any of the hardware I/O breakpoints are armed.
+ *
+ * @returns true if armed, false if not.
+ * @param   pVM     The cross context VM structure.
+ */
+VMM_INT_DECL(bool) DBGFBpIsHwIoArmed(PVM pVM)
+{
+    Assert(RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints) == 4);
+    /** @todo cache this! */
+    return (   pVM->dbgf.s.aHwBreakpoints[0].u.Reg.fType == X86_DR7_RW_IO
+            && pVM->dbgf.s.aHwBreakpoints[0].fEnabled
+            && pVM->dbgf.s.aHwBreakpoints[0].enmType == DBGFBPTYPE_REG
+           )
+        || (   pVM->dbgf.s.aHwBreakpoints[1].u.Reg.fType == X86_DR7_RW_IO
+            && pVM->dbgf.s.aHwBreakpoints[1].fEnabled
+            && pVM->dbgf.s.aHwBreakpoints[1].enmType == DBGFBPTYPE_REG
+           )
+        || (   pVM->dbgf.s.aHwBreakpoints[2].u.Reg.fType == X86_DR7_RW_IO
+            && pVM->dbgf.s.aHwBreakpoints[2].fEnabled
+            && pVM->dbgf.s.aHwBreakpoints[2].enmType == DBGFBPTYPE_REG
+           )
+        || (   pVM->dbgf.s.aHwBreakpoints[3].u.Reg.fType == X86_DR7_RW_IO
+            && pVM->dbgf.s.aHwBreakpoints[3].fEnabled
+            && pVM->dbgf.s.aHwBreakpoints[3].enmType == DBGFBPTYPE_REG
+           );
+}
+
+
+/**
+ * Checks I/O access for guest or hypervisor breakpoints.
+ *
+ * @returns Strict VBox status code
+ * @retval  VINF_SUCCESS no breakpoint.
+ * @retval  VINF_EM_DBG_BREAKPOINT hypervisor breakpoint triggered.
+ * @retval  VINF_EM_RAW_GUEST_TRAP guest breakpoint triggered, DR6 and DR7 have
+ *          been updated appropriately.
+ *
+ * @param   pVM         The cross context VM structure.
+ * @param   pVCpu       The cross context CPU structure for the calling EMT.
+ * @param   pCtx        The CPU context for the calling EMT.
+ * @param   uIoPort     The I/O port being accessed.
+ * @param   cbValue     The size/width of the access, in bytes.
+ */
+VMM_INT_DECL(VBOXSTRICTRC) DBGFBpCheckIo(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTIOPORT uIoPort, uint8_t cbValue)
+{
+    static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 };
+    uint32_t const uIoPortFirst = uIoPort;
+    uint32_t const uIoPortLast  = uIoPortFirst + cbValue - 1;
+
+
+    /*
+     * Check hyper breakpoints first as the VMM debugger has priority over
+     * the guest.
+     */
+    for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
+    {
+        if (   pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.fType == X86_DR7_RW_IO
+            && pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled
+            && pVM->dbgf.s.aHwBreakpoints[iBp].enmType == DBGFBPTYPE_REG )
+        {
+            uint8_t  cbReg      = pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.cb; Assert(RT_IS_POWER_OF_TWO(cbReg));
+            uint64_t uDrXFirst  = pVM->dbgf.s.aHwBreakpoints[iBp].GCPtr & ~(uint64_t)(cbReg - 1);
+            uint64_t uDrXLast   = uDrXFirst + cbReg - 1;
+            if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
+            {
+                /* (See also DBGFRZTrap01Handler.) */
+                pVCpu->dbgf.s.iActiveBp = pVM->dbgf.s.aHwBreakpoints[iBp].iBp;
+                pVCpu->dbgf.s.fSingleSteppingRaw = false;
+
+                LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
+                         pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
+                return VINF_EM_DBG_BREAKPOINT;
+            }
+        }
+    }
+
+    /*
+     * Check the guest.
+     */
+    uint32_t const uDr7 = pCtx->dr[7];
+    if (   (uDr7 & X86_DR7_ENABLED_MASK)
+        && X86_DR7_ANY_RW_IO(uDr7)
+        && (pCtx->cr4 & X86_CR4_DE) )
+    {
+        for (unsigned iBp = 0; iBp < 4; iBp++)
+        {
+            if (   (uDr7 & X86_DR7_L_G(iBp))
+                && X86_DR7_GET_RW(uDr7, iBp) == X86_DR7_RW_IO)
+            {
+                /* ASSUME the breakpoint and the I/O width qualifier uses the same encoding (1 2 x 4). */
+                static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 };
+                uint8_t  cbInvAlign = s_abInvAlign[X86_DR7_GET_LEN(uDr7, iBp)];
+                uint64_t uDrXFirst  = pCtx->dr[iBp] & ~(uint64_t)cbInvAlign;
+                uint64_t uDrXLast   = uDrXFirst + cbInvAlign;
+
+                if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
+                {
+                    /*
+                     * Update DR6 and DR7.
+                     *
+                     * See "AMD64 Architecture Programmer's Manual Volume 2",
+                     * chapter 13.1.1.3 for details on DR6 bits.  The basics is
+                     * that the B0..B3 bits are always cleared while the others
+                     * must be cleared by software.
+                     *
+                     * The following section says the GD bit is always cleared
+                     * when generating a #DB so the handler can safely access
+                     * the debug registers.
+                     */
+                    pCtx->dr[6] &= ~X86_DR6_B_MASK;
+                    pCtx->dr[6] |= X86_DR6_B(iBp);
+                    pCtx->dr[7] &= ~X86_DR7_GD;
+                    LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
+                             pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
+                    return VINF_EM_RAW_GUEST_TRAP;
+                }
+            }
+        }
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Returns the single stepping state for a virtual CPU.
  *
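The range test DBGFBpCheckIo applies per breakpoint deserves a note: the DR7 LEN field encodings 00/01/10/11 select access widths of 1/2/8/4 bytes (10 is undefined for I/O breakpoints and treated as 8 bytes here), which is where the { 0, 1, 7, 3 } invalid-alignment table comes from. The DRx value is aligned down by that mask and the breakpoint hits when the aligned range overlaps the accessed port range. A minimal standalone sketch of just that test (plain C with illustrative names, not VBox code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* DR7 LEN encodings 00/01/10/11 -> widths 1/2/8/4 -> low-bit masks. */
    static uint8_t const g_abInvAlign[4] = { 0, 1, 7, 3 };

    static bool ioBpHits(uint64_t uDrX, unsigned uLenField, uint16_t uPort, uint8_t cbAccess)
    {
        uint8_t  cbInvAlign   = g_abInvAlign[uLenField & 3];
        uint64_t uDrXFirst    = uDrX & ~(uint64_t)cbInvAlign;   /* align down to the width */
        uint64_t uDrXLast     = uDrXFirst + cbInvAlign;
        uint32_t uIoPortFirst = uPort;
        uint32_t uIoPortLast  = uIoPortFirst + cbAccess - 1;
        return uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst;  /* interval overlap */
    }

    int main(void)
    {
        /* A 2-byte breakpoint on port 0x61 covers the aligned pair 0x60..0x61,
           so a byte access to 0x60 triggers it while one to 0x62 does not. */
        printf("%d\n", ioBpHits(0x61, 1 /*LEN=01, 2 bytes*/, 0x60, 1)); /* prints 1 */
        printf("%d\n", ioBpHits(0x61, 1, 0x62, 1));                     /* prints 0 */
        return 0;
    }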
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp (r47663 → r47681)

  * XXX Host-CPU hot-plugging?
  */
-VMMR0DECL(int) CPUMR0ModuleInit(void)
+VMMR0_INT_DECL(int) CPUMR0ModuleInit(void)
 {
     int rc = VINF_SUCCESS;
…
  * Terminate the module.
  */
-VMMR0DECL(int) CPUMR0ModuleTerm(void)
+VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void)
 {
 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
…
  * @param   pVM     Pointer to the VM.
  */
-VMMR0DECL(int) CPUMR0Init(PVM pVM)
+VMMR0_INT_DECL(int) CPUMR0Init(PVM pVM)
 {
     LogFlow(("CPUMR0Init: %p\n", pVM));
…
  * @param   pCtx    Pointer to the guest CPU context.
  */
-VMMR0DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
…
  * @param   pCtx    Pointer to the guest CPU context.
  */
-VMMR0DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+VMMR0_INT_DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
…
  * @returns true if either guest or hypervisor debug registers were loaded.
  * @param   pVCpu   The cross context CPU structure for the calling EMT.
- * @param   fDR6    Whether to include DR6 or not.
+ * @param   fDr6    Whether to include DR6 or not.
  * @thread  EMT(pVCpu)
  */
-VMMR0DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDR6)
+VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6)
 {
     bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));
…
     /*
      * Do we need to save the guest DRx registered loaded into host registers?
-     * (DR7 and DR6 (if fDR6 is true) are left to the caller.)
+     * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
      */
     if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
…
             uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
             HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
-            if (!fDR6)
+            if (!fDr6)
                 pVCpu->cpum.s.Guest.dr[6] = uDr6;
         }
…
             pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
 #endif
-            if (fDR6)
+            if (fDr6)
                 pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
         }
…
 
 /**
+ * Saves the guest DRx state if it resides host registers.
+ *
+ * This does NOT clear any use flags, so the host registers remains loaded with
+ * the guest DRx state upon return.  The purpose is only to make sure the values
+ * in the CPU context structure is up to date.
+ *
+ * @returns true if the host registers contains guest values, false if not.
+ * @param   pVCpu   The cross context CPU structure for the calling EMT.
+ * @param   fDr6    Whether to include DR6 or not.
+ * @thread  EMT(pVCpu)
+ */
+VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6)
+{
+    /*
+     * Do we need to save the guest DRx registered loaded into host registers?
+     * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
+     */
+    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
+    {
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
+        {
+            uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
+            HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
+            if (!fDr6)
+                pVCpu->cpum.s.Guest.dr[6] = uDr6;
+        }
+        else
+#endif
+        {
+#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+            cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
+#else
+            pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
+            pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
+            pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
+            pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
+#endif
+            if (fDr6)
+                pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
+        }
+        return true;
+    }
+    return false;
+}
+
+
+/**
  * Lazily sync in the debug state.
  *
  * @param   pVCpu   The cross context CPU structure for the calling EMT.
- * @param   fDR6    Whether to include DR6 or not.
+ * @param   fDr6    Whether to include DR6 or not.
  * @thread  EMT(pVCpu)
  */
-VMMR0DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDR6)
+VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6)
 {
     /*
…
     /*
      * Activate the guest state DR0-3.
-     * DR7 and DR6 (if fDR6 is true) are left to the caller.
+     * DR7 and DR6 (if fDr6 is true) are left to the caller.
      */
 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
…
     ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
 #endif
-    if (fDR6)
+    if (fDr6)
         ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);
 
…
  * @returns VBox status code.
  * @param   pVCpu   The cross context CPU structure for the calling EMT.
- * @param   fDR6    Whether to include DR6 or not.
+ * @param   fDr6    Whether to include DR6 or not.
  * @thread  EMT(pVCpu)
  */
-VMMR0DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDR6)
+VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6)
 {
     /*
…
     /*
      * Activate the guest state DR0-3.
-     * DR7 and DR6 (if fDR6 is true) are left to the caller.
+     * DR7 and DR6 (if fDr6 is true) are left to the caller.
      */
 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
…
     ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
 #endif
-    if (fDR6)
+    if (fDr6)
         ASMSetDR6(X86_DR6_INIT_VAL);
 
…
  * @param   idHostCpu   The ID of the current host CPU.
  */
-VMMR0DECL(void) CPUMR0SetLApic(PVM pVM, RTCPUID idHostCpu)
+VMMR0_INT_DECL(void) CPUMR0SetLApic(PVM pVM, RTCPUID idHostCpu)
 {
     pVM->cpum.s.pvApicBase = g_aLApics[RTMpCpuIdToSetIndex(idHostCpu)].pv;
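The new CPUMR0DebugStateMaybeSaveGuest exists because the guest's DR0-3 (and DR6) may be live in the host's debug registers while the guest runs, so pCtx->dr[] can be stale — exactly what the removed "r=bird" todo in HMVMXR0.cpp below complains about. A hedged sketch of the intended ring-0 calling pattern (it mirrors the HMVMXR0.cpp hunk that follows; the enclosing function and the pVM/pVCpu/pMixedCtx/uIOPort/cbValue variables are assumed context, not part of this changeset):

    /* Sync possibly-live guest DRx values into the context first, without
       unloading them, so DBGFBpCheckIo sees current data. */
    bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /*fDr6*/);

    VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
    if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
    {
        /* DBGFBpCheckIo updated pMixedCtx->dr[6] and dr[7]; if the guest debug
           state is still loaded, push the new DR6 back into the live register. */
        if (fIsGuestDbgActive)
            ASMSetDR6(pMixedCtx->dr[6]);
    }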
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r47680 → r47681)

     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
 
-    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
-    rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-    rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
-    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);         /* Eflag checks in EMInterpretDisasCurrent(). */
-    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);    /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
-    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);    /* SELM checks in EMInterpretDisasCurrent(). */
+    int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
+    rc2    |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
+    rc2    |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
+    rc2    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);         /* Eflag checks in EMInterpretDisasCurrent(). */
+    rc2    |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);    /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
+    rc2    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);    /* SELM checks in EMInterpretDisasCurrent(). */
     /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
-    AssertRCReturn(rc, rc);
+    AssertRCReturn(rc2, rc2);
 
     /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
…
     static const uint32_t s_aIOOpAnd[4]  = { 0xff, 0xffff, 0, 0xffffffff };   /* AND masks for saving the result (in AL/AX/EAX). */
 
+    VBOXSTRICTRC rcStrict;
     const uint32_t cbValue  = s_aIOSizes[uIOWidth];
     const uint32_t cbInstr  = pVmxTransient->cbInstr;
…
         if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.vmx_basic_info))
         {
-            rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
+            rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
             /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
-            rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
-            AssertRCReturn(rc, rc);
+            rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
+            AssertRCReturn(rc2, rc2);
             AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_HMVMX_IPE_3);
             AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
…
             if (fIOWrite)
             {
-                rc = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
-                                          pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
-                //if (rc == VINF_IOM_R3_IOPORT_WRITE)
+                rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
+                                                pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
+                //if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
                 //    hmR0SavePendingIOPortWriteStr(pVCpu, pMixedCtx->rip, cbValue, enmAddrMode, fRep, cbInstr,
                 //                                  pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
…
                           ("%#x (%#llx)\n", pVmxTransient->ExitInstrInfo.StrIo.iSegReg, pVmxTransient->ExitInstrInfo.u),
                           VERR_HMVMX_IPE_4);
-                rc = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
-                //if (rc == VINF_IOM_R3_IOPORT_READ)
+                rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
+                //if (rcStrict == VINF_IOM_R3_IOPORT_READ)
                 //    hmR0SavePendingIOPortReadStr(pVCpu, pMixedCtx->rip, cbValue, enmAddrMode, fRep, cbInstr);
             }
…
         {
             /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
-            rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
-            AssertRCReturn(rc, rc);
-            rc = IEMExecOne(pVCpu);
+            rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
+            AssertRCReturn(rc2, rc2);
+            rcStrict = IEMExecOne(pVCpu);
         }
         /** @todo IEM needs to be setting these flags somehow. */
…
 #else
         PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
-        rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
-        if (RT_SUCCESS(rc))
+        rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
+        if (RT_SUCCESS(rcStrict))
         {
             if (fIOWrite)
             {
-                VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
-                                                      (DISCPUMODE)pDis->uAddrMode, cbValue);
-                rc = VBOXSTRICTRC_VAL(rc2);
+                rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
+                                              (DISCPUMODE)pDis->uAddrMode, cbValue);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
             }
             else
             {
-                VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
-                                                     (DISCPUMODE)pDis->uAddrMode, cbValue);
-                rc = VBOXSTRICTRC_VAL(rc2);
+                rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
+                                             (DISCPUMODE)pDis->uAddrMode, cbValue);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
             }
…
         else
         {
-            AssertMsg(rc == VERR_EM_INTERPRETER, ("rc=%Rrc RIP %#RX64\n", rc, pMixedCtx->rip));
-            rc = VINF_EM_RAW_EMULATE_INSTR;
+            AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP %#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->rip));
+            rcStrict = VINF_EM_RAW_EMULATE_INSTR;
         }
 #endif
…
         if (fIOWrite)
         {
-            VBOXSTRICTRC rc2 = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
-            rc = VBOXSTRICTRC_VAL(rc2);
-            if (rc == VINF_IOM_R3_IOPORT_WRITE)
+            rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
+            if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
                 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
…
         {
             uint32_t u32Result = 0;
-            VBOXSTRICTRC rc2 = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
-            rc = VBOXSTRICTRC_VAL(rc2);
-            if (IOM_SUCCESS(rc))
+            rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
+            if (IOM_SUCCESS(rcStrict))
             {
                 /* Save result of I/O IN instr. in AL/AX/EAX. */
                 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
             }
-            else if (rc == VINF_IOM_R3_IOPORT_READ)
+            else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
                 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
…
     }
 
-    if (IOM_SUCCESS(rc))
+    if (IOM_SUCCESS(rcStrict))
     {
         if (!fUpdateRipAlready)
…
             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
         }
-        if (RT_LIKELY(rc == VINF_SUCCESS))
-        {
-            /*
-             * If any I/O breakpoints are armed, then we should check if a debug trap needs to be generated.
-             * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
-             */
-            /** @todo We're not honoring I/O BPs if informational status code is returned.
-             *        We're also ignoring our own debugger's attempt at using I/O
-             *        breakpoints.  The whole host & guest debugger stuff needs to be
-             *        looked over at some point.  For now, it's just best effort. */
-            rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
-            AssertRCReturn(rc, rc);
-            uint32_t const uDr7 = pMixedCtx->dr[7];
-            if (   (uDr7 & X86_DR7_ENABLED_MASK)
-                && X86_DR7_ANY_RW_IO(uDr7)
-                && (pMixedCtx->cr4 & X86_CR4_DE) )
-            {
-                /** @todo We're a little late here if we're doing string I/O, as we're supposed
-                 *        to break after the each repetition.  Not sooo important, just for a
-                 *        rainy day.  (Should probably refactor some of this code; after the uDr7
-                 *        detection let someone else handle it.) */
-                /** @todo The AMD is mumbling something that sounds like cbValue == cbBp.  The
-                 *        Intel manual describes it differently, data and I/O breakpoints are to
-                 *        be matched in the same way, probably.  Bochs does it that way.  We've
-                 *        implemented it that way too, but it would be worth having a
-                 *        bootsector testcase for asserting the correct behavior (as well as
-                 *        correctness of this code). */
-                /** @todo r=bird: DR0-3 are normally in host registers when the guest is using
-                 *        them, so we're testing against potentially stale values here! */
-                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
-                uint32_t uIOPortLast = uIOPort + cbValue - 1;
-                for (unsigned iBp = 0; iBp < 4; iBp++)
-                {
-                    if (   (uDr7 & X86_DR7_L_G(iBp))
-                        && X86_DR7_GET_RW(uDr7, iBp) == X86_DR7_RW_IO)
-                    {
-                        /* ASSUME the breakpoint and the I/O width qualifier uses the same encoding (1 2 x 4). */
-                        static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 };
-                        uint8_t  cbInvAlign = s_abInvAlign[X86_DR7_GET_LEN(uDr7, iBp)];
-                        uint64_t uDrXFirst  = pMixedCtx->dr[iBp] & ~(uint64_t)cbInvAlign;
-                        uint64_t uDrXLast   = uDrXFirst + cbInvAlign;
-
-                        if (uDrXFirst <= uIOPortLast && uDrXLast >= uIOPort)
-                        {
-                            Assert(CPUMIsGuestDebugStateActive(pVCpu));
-                            uint64_t uDR6 = ASMGetDR6();
-
-                            /* Clear all breakpoint status flags and set the one we just hit. */
-                            uDR6 &= ~X86_DR6_B_MASK;
-                            uDR6 |= X86_DR6_B(iBp);
-
-                            /*
-                             * Note: AMD64 Architecture Programmer's Manual 13.1:
-                             * Bits 15:13 of the DR6 register is never cleared by the processor and must
-                             * be cleared by software after the contents have been read.
-                             */
-                            ASMSetDR6(uDR6);
-
-                            /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
-                            pMixedCtx->dr[7] &= ~X86_DR7_GD;
-
-                            /* Paranoia. */
-                            pMixedCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
-                            pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
-
-                            /* Resync DR7 */
-                            /** @todo probably cheaper to just reload DR7, nothing else needs changing. */
-                            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
-
-                            /* Set #DB to be injected into the VM and continue guest execution. */
-                            hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
-                            break;
-                        }
-                    }
-                }
-            }
-        }
+
+        /*
+         * If any I/O breakpoints are armed, we need to check if one triggered
+         * and take appropriate action.
+         * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
+         */
+        /** @todo We're not honoring I/O BPs if informational status code is returned.
+         *        We're also ignoring our own debugger's attempt at using I/O
+         *        breakpoints.  The whole host & guest debugger stuff needs to be
+         *        looked over at some point.  For now, it's just best effort. */
+        int rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
+        AssertRCReturn(rc2, rc2);
+
+        uint32_t const uDr7 = pMixedCtx->dr[7];
+        if (   (   (uDr7 & X86_DR7_ENABLED_MASK)
+                && X86_DR7_ANY_RW_IO(uDr7)
+                && (pMixedCtx->cr4 & X86_CR4_DE))
+            || DBGFBpIsHwIoArmed(pVM))
+        {
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
+            bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /*fDr6*/);
+
+            VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
+            if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
+            {
+                /* Raise #DB. */
+                if (fIsGuestDbgActive)
+                    ASMSetDR6(pMixedCtx->dr[6]);
+                if (pMixedCtx->dr[7] != uDr7)
+                    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+
+                hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
+            }
+            /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
+            else if (   rcStrict2 != VINF_SUCCESS
+                     && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
+                rcStrict = rcStrict2;
+        }
     }
 
 #ifdef DEBUG
-    if (rc == VINF_IOM_R3_IOPORT_READ)
+    if (rcStrict == VINF_IOM_R3_IOPORT_READ)
         Assert(!fIOWrite);
-    else if (rc == VINF_IOM_R3_IOPORT_WRITE)
+    else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
         Assert(fIOWrite);
     else
     {
-        AssertMsg(   RT_FAILURE(rc)
-                  || rc == VINF_SUCCESS
-                  || rc == VINF_EM_RAW_EMULATE_INSTR
-                  || rc == VINF_EM_RAW_GUEST_TRAP
-                  || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", rc));
+        /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
+         *        statuses, that the VMM device and some others may return. See
+         *        IOM_SUCCESS() for guidance. */
+        AssertMsg(   RT_FAILURE(rcStrict)
+                  || rcStrict == VINF_SUCCESS
+                  || rcStrict == VINF_EM_RAW_EMULATE_INSTR
+                  || rcStrict == VINF_EM_DBG_BREAKPOINT
+                  || rcStrict == VINF_EM_RAW_GUEST_TRAP
+                  || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     }
 #endif
 
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
-    return rc;
+    return VBOXSTRICTRC_TODO(rcStrict);
 }
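The else-if merging rcStrict2 into rcStrict relies on VBox's convention that informational statuses in the VINF_EM range are priority-ordered, with numerically smaller values more urgent. A standalone sketch of that merge rule (the constants are illustrative stand-ins, not the real VBox/err.h values):

    #include <stdio.h>

    /* Illustrative stand-ins; the real constants live in VBox/err.h. */
    #define DEMO_VINF_SUCCESS        0
    #define DEMO_VINF_EM_DBG_BP      1100   /* pretend: higher priority */
    #define DEMO_VINF_EM_RESCHEDULE  1115   /* pretend: lower priority  */

    /* Keep the primary status unless the secondary is a real (non-success)
       status that outranks it: primary is success, or secondary < primary. */
    static int mergeStrictStatus(int rcPrimary, int rcSecondary)
    {
        if (   rcSecondary != DEMO_VINF_SUCCESS
            && (rcPrimary == DEMO_VINF_SUCCESS || rcSecondary < rcPrimary))
            return rcSecondary;
        return rcPrimary;
    }

    int main(void)
    {
        printf("%d\n", mergeStrictStatus(DEMO_VINF_SUCCESS, DEMO_VINF_EM_DBG_BP));       /* 1100 */
        printf("%d\n", mergeStrictStatus(DEMO_VINF_EM_RESCHEDULE, DEMO_VINF_EM_DBG_BP)); /* 1100 */
        printf("%d\n", mergeStrictStatus(DEMO_VINF_EM_DBG_BP, DEMO_VINF_EM_RESCHEDULE)); /* 1100 */
        return 0;
    }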
trunk/src/VBox/VMM/VMMR3/HM.cpp (r47655 → r47681)

     }
 
+    if (IOM_SUCCESS(rcStrict))
+    {
+        /*
+         * Check for I/O breakpoints.
+         */
+        uint32_t const uDr7 = pCtx->dr[7];
+        if (   (   (uDr7 & X86_DR7_ENABLED_MASK)
+                && X86_DR7_ANY_RW_IO(uDr7)
+                && (pCtx->cr4 & X86_CR4_DE))
+            || DBGFBpIsHwIoArmed(pVM))
+        {
+            VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, pVCpu->hm.s.PendingIO.s.Port.uPort,
+                                                   pVCpu->hm.s.PendingIO.s.Port.cbSize);
+            if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
+                rcStrict2 = TRPMAssertTrap(pVCpu, X86_XCPT_DB, TRPM_TRAP);
+            /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
+            else if (rcStrict2 != VINF_SUCCESS && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
+                rcStrict = rcStrict2;
+        }
+    }
     return rcStrict;
 }
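This hunk and the HMVMXR0.cpp one gate the DBGFBpCheckIo call on the same cheap test: some DR7 breakpoint enabled with an I/O R/W type while CR4.DE is set (without CR4.DE the I/O breakpoint type is undefined), or a hardware I/O breakpoint armed on the VMM side. A standalone sketch of the DR7 part, with local stand-ins for the X86_DR7_* macros:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DR7_L_G(iBp)          (UINT32_C(3) << ((iBp) * 2))        /* L<n>/G<n> enable bits */
    #define DR7_GET_RW(uDr7, iBp) (((uDr7) >> (16 + (iBp) * 4)) & 3)  /* R/W<n> field          */
    #define DR7_RW_IO             2                                   /* break on I/O access   */
    #define CR4_DE                UINT32_C(0x00000008)                /* debugging extensions  */

    static bool anyIoBpArmed(uint32_t uDr7, uint32_t uCr4)
    {
        if (!(uCr4 & CR4_DE))   /* I/O breakpoint type is undefined without CR4.DE */
            return false;
        for (unsigned iBp = 0; iBp < 4; iBp++)
            if ((uDr7 & DR7_L_G(iBp)) && DR7_GET_RW(uDr7, iBp) == DR7_RW_IO)
                return true;
        return false;
    }

    int main(void)
    {
        uint32_t uDr7 = UINT32_C(0x00020001);       /* L0 set, R/W0 = 10b (I/O) */
        printf("%d\n", anyIoBpArmed(uDr7, CR4_DE)); /* prints 1 */
        printf("%d\n", anyIoBpArmed(uDr7, 0));      /* prints 0 */
        return 0;
    }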