VirtualBox

Changeset 47681 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Aug 12, 2013 10:51:55 PM (11 years ago)
Author:
vboxsync
Message:

VMM: I/O breakpoints.

Location:
trunk/src/VBox/VMM
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/DBGFAll.cpp

    r47671 r47681  
    2424#include "DBGFInternal.h"
    2525#include <VBox/vmm/vm.h>
     26#include <VBox/err.h>
    2627#include <iprt/assert.h>
    2728
     
    131132
    132133/**
     134 * Checks if any of the hardware I/O breakpoints are armed.
     135 *
     136 * @returns true if armed, false if not.
     137 * @param   pVM         The cross context VM structure.
     138 */
     139VMM_INT_DECL(bool) DBGFBpIsHwIoArmed(PVM pVM)
     140{
     141    Assert(RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints) == 4);
     142    /** @todo cache this! */
     143    return (   pVM->dbgf.s.aHwBreakpoints[0].u.Reg.fType == X86_DR7_RW_IO
     144            && pVM->dbgf.s.aHwBreakpoints[0].fEnabled
     145            && pVM->dbgf.s.aHwBreakpoints[0].enmType     == DBGFBPTYPE_REG
     146           )
     147        || (   pVM->dbgf.s.aHwBreakpoints[1].u.Reg.fType == X86_DR7_RW_IO
     148            && pVM->dbgf.s.aHwBreakpoints[1].fEnabled
     149            && pVM->dbgf.s.aHwBreakpoints[1].enmType     == DBGFBPTYPE_REG
     150           )
     151        || (   pVM->dbgf.s.aHwBreakpoints[2].u.Reg.fType == X86_DR7_RW_IO
     152            && pVM->dbgf.s.aHwBreakpoints[2].fEnabled
     153            && pVM->dbgf.s.aHwBreakpoints[2].enmType     == DBGFBPTYPE_REG
     154           )
     155        || (   pVM->dbgf.s.aHwBreakpoints[3].u.Reg.fType == X86_DR7_RW_IO
     156            && pVM->dbgf.s.aHwBreakpoints[3].fEnabled
     157            && pVM->dbgf.s.aHwBreakpoints[3].enmType     == DBGFBPTYPE_REG
     158           );
     159}
     160
     161
     162/**
     163 * Checks I/O access for guest or hypervisor breakpoints.
     164 *
     165 * @returns Strict VBox status code
     166 * @retval  VINF_SUCCESS no breakpoint.
     167 * @retval  VINF_EM_DBG_BREAKPOINT hypervisor breakpoint triggered.
     168 * @retval  VINF_EM_RAW_GUEST_TRAP guest breakpoint triggered, DR6 and DR7 have
     169 *          been updated appropriately.
     170 *
     171 * @param   pVM         The cross context VM structure.
     172 * @param   pVCpu       The cross context CPU structure for the calling EMT.
     173 * @param   pCtx        The CPU context for the calling EMT.
     174 * @param   uIoPort     The I/O port being accessed.
     175 * @param   cbValue     The size/width of the access, in bytes.
     176 */
     177VMM_INT_DECL(VBOXSTRICTRC)  DBGFBpCheckIo(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTIOPORT uIoPort, uint8_t cbValue)
     178{
     179    static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 };
     180    uint32_t const uIoPortFirst = uIoPort;
     181    uint32_t const uIoPortLast  = uIoPortFirst + cbValue - 1;
     182
     183
     184    /*
     185     * Check hyper breakpoints first as the VMM debugger has priority over
     186     * the guest.
     187     */
     188    for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
     189    {
     190        if (   pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.fType == X86_DR7_RW_IO
     191            && pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled
     192            && pVM->dbgf.s.aHwBreakpoints[iBp].enmType     == DBGFBPTYPE_REG )
     193        {
     194            uint8_t  cbReg      = pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.cb; Assert(RT_IS_POWER_OF_TWO(cbReg));
     195            uint64_t uDrXFirst  = pVM->dbgf.s.aHwBreakpoints[iBp].GCPtr & ~(uint64_t)(cbReg - 1);
     196            uint64_t uDrXLast   = uDrXFirst + cbReg - 1;
     197            if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
     198            {
     199                /* (See also DBGFRZTrap01Handler.) */
     200                pVCpu->dbgf.s.iActiveBp = pVM->dbgf.s.aHwBreakpoints[iBp].iBp;
     201                pVCpu->dbgf.s.fSingleSteppingRaw = false;
     202
     203                LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
     204                         pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
     205                return VINF_EM_DBG_BREAKPOINT;
     206            }
     207        }
     208    }
     209
     210    /*
     211     * Check the guest.
     212     */
     213    uint32_t const uDr7 = pCtx->dr[7];
     214    if (   (uDr7 & X86_DR7_ENABLED_MASK)
     215        && X86_DR7_ANY_RW_IO(uDr7)
     216        && (pCtx->cr4 & X86_CR4_DE) )
     217    {
     218        for (unsigned iBp = 0; iBp < 4; iBp++)
     219        {
     220            if (   (uDr7 & X86_DR7_L_G(iBp))
     221                && X86_DR7_GET_RW(uDr7, iBp) == X86_DR7_RW_IO)
     222            {
     223                /* ASSUME the breakpoint and the I/O width qualifier uses the same encoding (1 2 x 4). */
     224                static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 };
     225                uint8_t  cbInvAlign = s_abInvAlign[X86_DR7_GET_LEN(uDr7, iBp)];
     226                uint64_t uDrXFirst  = pCtx->dr[iBp] & ~(uint64_t)cbInvAlign;
     227                uint64_t uDrXLast   = uDrXFirst + cbInvAlign;
     228
     229                if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
     230                {
     231                    /*
     232                     * Update DR6 and DR7.
     233                     *
     234                     * See "AMD64 Architecture Programmer's Manual Volume 2",
     235                     * chapter 13.1.1.3 for details on DR6 bits.  The basics is
     236                     * that the B0..B3 bits are always cleared while the others
     237                     * must be cleared by software.
     238                     *
     239                     * The following section says the GD bit is always cleared
     240                     * when generating a #DB so the handler can safely access
     241                     * the debug registers.
     242                     */
     243                    pCtx->dr[6] &= ~X86_DR6_B_MASK;
     244                    pCtx->dr[6] |= X86_DR6_B(iBp);
     245                    pCtx->dr[7] &= ~X86_DR7_GD;
     246                    LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
     247                             pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
     248                    return VINF_EM_RAW_GUEST_TRAP;
     249                }
     250            }
     251        }
     252    }
     253    return VINF_SUCCESS;
     254}
     255
     256
     257/**
    133258 * Returns the single stepping state for a virtual CPU.
    134259 *
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r47663 r47681  
    8787 * XXX Host-CPU hot-plugging?
    8888 */
    89 VMMR0DECL(int) CPUMR0ModuleInit(void)
     89VMMR0_INT_DECL(int) CPUMR0ModuleInit(void)
    9090{
    9191    int rc = VINF_SUCCESS;
     
    100100 * Terminate the module.
    101101 */
    102 VMMR0DECL(int) CPUMR0ModuleTerm(void)
     102VMMR0_INT_DECL(int) CPUMR0ModuleTerm(void)
    103103{
    104104#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
     
    166166 * @param   pVM         Pointer to the VM.
    167167 */
    168 VMMR0DECL(int) CPUMR0Init(PVM pVM)
     168VMMR0_INT_DECL(int) CPUMR0Init(PVM pVM)
    169169{
    170170    LogFlow(("CPUMR0Init: %p\n", pVM));
     
    283283 * @param   pCtx        Pointer to the guest CPU context.
    284284 */
    285 VMMR0DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     285VMMR0_INT_DECL(int) CPUMR0LoadGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    286286{
    287287    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
     
    446446 * @param   pCtx        Pointer to the guest CPU context.
    447447 */
    448 VMMR0DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     448VMMR0_INT_DECL(int) CPUMR0SaveGuestFPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    449449{
    450450    Assert(pVM->cpum.s.CPUFeatures.edx.u1FXSR);
     
    573573 * @returns true if either guest or hypervisor debug registers were loaded.
    574574 * @param   pVCpu       The cross context CPU structure for the calling EMT.
    575  * @param   fDR6        Whether to include DR6 or not.
     575 * @param   fDr6        Whether to include DR6 or not.
    576576 * @thread  EMT(pVCpu)
    577577 */
    578 VMMR0DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDR6)
     578VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDr6)
    579579{
    580580    bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));
     
    582582    /*
    583583     * Do we need to save the guest DRx registers loaded into host registers?
    584      * (DR7 and DR6 (if fDR6 is true) are left to the caller.)
     584     * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
    585585     */
    586586    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
     
    591591            uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
    592592            HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
    593             if (!fDR6)
     593            if (!fDr6)
    594594                pVCpu->cpum.s.Guest.dr[6] = uDr6;
    595595        }
     
    605605            pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
    606606#endif
    607             if (fDR6)
     607            if (fDr6)
    608608                pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
    609609        }
     
    644644
    645645/**
     646 * Saves the guest DRx state if it resides in host registers.
     647 *
     648 * This does NOT clear any use flags, so the host registers remain loaded with
     649 * the guest DRx state upon return.  The purpose is only to make sure the values
     650 * in the CPU context structure are up to date.
     651 *
     652 * @returns true if the host registers contains guest values, false if not.
     653 * @param   pVCpu       The cross context CPU structure for the calling EMT.
     654 * @param   fDr6        Whether to include DR6 or not.
     655 * @thread  EMT(pVCpu)
     656 */
     657VMMR0_INT_DECL(bool) CPUMR0DebugStateMaybeSaveGuest(PVMCPU pVCpu, bool fDr6)
     658{
     659    /*
     660     * Do we need to save the guest DRx registers loaded into host registers?
     661     * (DR7 and DR6 (if fDr6 is true) are left to the caller.)
     662     */
     663    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
     664    {
     665#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     666        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
     667        {
     668            uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
     669            HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
     670            if (!fDr6)
     671                pVCpu->cpum.s.Guest.dr[6] = uDr6;
     672        }
     673        else
     674#endif
     675        {
     676#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     677            cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
     678#else
     679            pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
     680            pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
     681            pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
     682            pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
     683#endif
     684            if (fDr6)
     685                pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
     686        }
     687        return true;
     688    }
     689    return false;
     690}
     691
     692
     693/**
    646694 * Lazily sync in the debug state.
    647695 *
    648696 * @param   pVCpu       The cross context CPU structure for the calling EMT.
    649  * @param   fDR6        Whether to include DR6 or not.
     697 * @param   fDr6        Whether to include DR6 or not.
    650698 * @thread  EMT(pVCpu)
    651699 */
    652 VMMR0DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDR6)
     700VMMR0_INT_DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDr6)
    653701{
    654702    /*
     
    660708    /*
    661709     * Activate the guest state DR0-3.
    662      * DR7 and DR6 (if fDR6 is true) are left to the caller.
     710     * DR7 and DR6 (if fDr6 is true) are left to the caller.
    663711     */
    664712#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     
    676724        ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
    677725#endif
    678         if (fDR6)
     726        if (fDr6)
    679727            ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);
    680728
     
    689737 * @returns VBox status code.
    690738 * @param   pVCpu       The cross context CPU structure for the calling EMT.
    691  * @param   fDR6        Whether to include DR6 or not.
     739 * @param   fDr6        Whether to include DR6 or not.
    692740 * @thread  EMT(pVCpu)
    693741 */
    694 VMMR0DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDR6)
     742VMMR0_INT_DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDr6)
    695743{
    696744    /*
     
    707755    /*
    708756     * Activate the hypervisor state DR0-3.
    709      * DR7 and DR6 (if fDR6 is true) are left to the caller.
     757     * DR7 and DR6 (if fDr6 is true) are left to the caller.
    710758     */
    711759#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     
    723771        ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
    724772#endif
    725         if (fDR6)
     773        if (fDr6)
    726774            ASMSetDR6(X86_DR6_INIT_VAL);
    727775
     
    884932 * @param   idHostCpu   The ID of the current host CPU.
    885933 */
    886 VMMR0DECL(void) CPUMR0SetLApic(PVM pVM, RTCPUID idHostCpu)
     934VMMR0_INT_DECL(void) CPUMR0SetLApic(PVM pVM, RTCPUID idHostCpu)
    887935{
    888936    pVM->cpum.s.pvApicBase = g_aLApics[RTMpCpuIdToSetIndex(idHostCpu)].pv;
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r47680 r47681  
    90119011    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
    90129012
    9013     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    9014     rc    |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    9015     rc    |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    9016     rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);         /* Eflag checks in EMInterpretDisasCurrent(). */
    9017     rc    |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);    /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
    9018     rc    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);    /* SELM checks in EMInterpretDisasCurrent(). */
     9013    int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     9014    rc2    |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
     9015    rc2    |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
     9016    rc2    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);         /* Eflag checks in EMInterpretDisasCurrent(). */
     9017    rc2    |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);    /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
     9018    rc2    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);    /* SELM checks in EMInterpretDisasCurrent(). */
    90199019    /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
    9020     AssertRCReturn(rc, rc);
     9020    AssertRCReturn(rc2, rc2);
    90219021
    90229022    /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
     
    90329032    static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff };  /* AND masks for saving the result (in AL/AX/EAX). */
    90339033
     9034    VBOXSTRICTRC   rcStrict;
    90349035    const uint32_t cbValue  = s_aIOSizes[uIOWidth];
    90359036    const uint32_t cbInstr  = pVmxTransient->cbInstr;
     
    90499050        if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.vmx_basic_info))
    90509051        {
    9051             rc  = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
     9052            rc2  = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
    90529053            /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
    9053             rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    9054             AssertRCReturn(rc, rc);
     9054            rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     9055            AssertRCReturn(rc2, rc2);
    90559056            AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_HMVMX_IPE_3);
    90569057            AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
     
    90599060            if (fIOWrite)
    90609061            {
    9061                 rc = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
    9062                                           pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
    9063                 //if (rc == VINF_IOM_R3_IOPORT_WRITE)
     9062                rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
     9063                                                pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
     9064                //if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
    90649065                //    hmR0SavePendingIOPortWriteStr(pVCpu, pMixedCtx->rip, cbValue, enmAddrMode, fRep, cbInstr,
    90659066                //                                  pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
     
    90709071                                ("%#x (%#llx)\n", pVmxTransient->ExitInstrInfo.StrIo.iSegReg, pVmxTransient->ExitInstrInfo.u),
    90719072                                VERR_HMVMX_IPE_4);
    9072                 rc = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
    9073                 //if (rc == VINF_IOM_R3_IOPORT_READ)
     9073                rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
     9074                //if (rcStrict == VINF_IOM_R3_IOPORT_READ)
    90749075                //    hmR0SavePendingIOPortReadStr(pVCpu, pMixedCtx->rip, cbValue, enmAddrMode, fRep, cbInstr);
    90759076            }
     
    90789079        {
    90799080            /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
    9080             rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    9081             AssertRCReturn(rc, rc);
    9082             rc = IEMExecOne(pVCpu);
     9081            rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     9082            AssertRCReturn(rc2, rc2);
     9083            rcStrict = IEMExecOne(pVCpu);
    90839084        }
    90849085        /** @todo IEM needs to be setting these flags somehow. */
     
    90889089#else
    90899090        PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
    9090         rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
    9091         if (RT_SUCCESS(rc))
     9091        rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
     9092        if (RT_SUCCESS(rcStrict))
    90929093        {
    90939094            if (fIOWrite)
    90949095            {
    9095                 VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
    9096                                                       (DISCPUMODE)pDis->uAddrMode, cbValue);
    9097                 rc = VBOXSTRICTRC_VAL(rc2);
     9096                rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
     9097                                              (DISCPUMODE)pDis->uAddrMode, cbValue);
    90989098                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
    90999099            }
    91009100            else
    91019101            {
    9102                 VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
    9103                                                      (DISCPUMODE)pDis->uAddrMode, cbValue);
    9104                 rc = VBOXSTRICTRC_VAL(rc2);
     9102                rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
     9103                                             (DISCPUMODE)pDis->uAddrMode, cbValue);
    91059104                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
    91069105            }
     
    91089107        else
    91099108        {
    9110             AssertMsg(rc == VERR_EM_INTERPRETER, ("rc=%Rrc RIP %#RX64\n", rc, pMixedCtx->rip));
    9111             rc = VINF_EM_RAW_EMULATE_INSTR;
     9109            AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP %#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->rip));
     9110            rcStrict = VINF_EM_RAW_EMULATE_INSTR;
    91129111        }
    91139112#endif
     
    91239122        if (fIOWrite)
    91249123        {
    9125             VBOXSTRICTRC rc2 = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
    9126             rc = VBOXSTRICTRC_VAL(rc2);
    9127             if (rc == VINF_IOM_R3_IOPORT_WRITE)
     9124            rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
     9125            if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
    91289126                HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
    91299127            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
     
    91329130        {
    91339131            uint32_t u32Result = 0;
    9134             VBOXSTRICTRC rc2 = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
    9135             rc = VBOXSTRICTRC_VAL(rc2);
    9136             if (IOM_SUCCESS(rc))
     9132            rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
     9133            if (IOM_SUCCESS(rcStrict))
    91379134            {
    91389135                /* Save result of I/O IN instr. in AL/AX/EAX. */
    91399136                pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
    91409137            }
    9141             else if (rc == VINF_IOM_R3_IOPORT_READ)
     9138            else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
    91429139                HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
    91439140            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
     
    91459142    }
    91469143
    9147     if (IOM_SUCCESS(rc))
     9144    if (IOM_SUCCESS(rcStrict))
    91489145    {
    91499146        if (!fUpdateRipAlready)
     
    91529149            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    91539150        }
    9154         if (RT_LIKELY(rc == VINF_SUCCESS))
    9155         {
    9156             /*
    9157              * If any I/O breakpoints are armed, then we should check if a debug trap needs to be generated.
    9158              * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
    9159              */
    9160             /** @todo We're not honoring I/O BPs if informational status code is returned.
    9161              *        We're also ignoring our own debugger's attempt at using I/O
    9162              *        breakpoints.  The whole host & guest debugger stuff needs to be
    9163              *        looked over at some point.  For now, it's just best effort. */
    9164             rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
    9165             AssertRCReturn(rc, rc);
    9166             uint32_t const uDr7 = pMixedCtx->dr[7];
    9167             if (   (uDr7 & X86_DR7_ENABLED_MASK)
     9151
     9152        /*
     9153         * If any I/O breakpoints are armed, we need to check if one triggered
     9154         * and take appropriate action.
     9155         * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
     9156         */
     9157        /** @todo We're not honoring I/O BPs if informational status code is returned.
     9158         *        We're also ignoring our own debugger's attempt at using I/O
     9159         *        breakpoints.  The whole host & guest debugger stuff needs to be
     9160         *        looked over at some point.  For now, it's just best effort. */
     9161        int rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
     9162        AssertRCReturn(rc2, rc2);
     9163
     9164        uint32_t const uDr7 = pMixedCtx->dr[7];
     9165        if (   (   (uDr7 & X86_DR7_ENABLED_MASK)
    91689166                && X86_DR7_ANY_RW_IO(uDr7)
    9169                 && (pMixedCtx->cr4 & X86_CR4_DE) )
     9167                && (pMixedCtx->cr4 & X86_CR4_DE))
     9168            || DBGFBpIsHwIoArmed(pVM))
     9169        {
     9170            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
     9171            bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /*fDr6*/);
     9172
     9173            VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
     9174            if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
    91709175            {
    9171                 /** @todo We're a little late here if we're doing string I/O, as we're supposed
    9172                  *        to break after the each repetition.  Not sooo important, just for a
    9173                  *        rainy day. (Should probably refactor some of this code; after the uDr7
    9174                  *        detection let someone else handle it.) */
    9175                 /** @todo The AMD is mumbling something that sounds like cbValue == cbBp.  The
    9176                  *        Intel manual describes it differently, data and I/O breakpoints are to
    9177                  *        be matched in the same way, probably.  Bochs does it that way. We've
    9178                  *        implemented it that way too, but it would be worth having a
    9179                  *        bootsector testcase for asserting the correct behavior (as well as
    9180                  *        correctness of this code). */
    9181                 /** @todo r=bird: DR0-3 are normally in host registers when the guest is using
    9182                  *        them, so we're testing against potentially stale values here! */
    9183                 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
    9184                 uint32_t uIOPortLast = uIOPort + cbValue - 1;
    9185                 for (unsigned iBp = 0; iBp < 4; iBp++)
    9186                 {
    9187                     if (   (uDr7 & X86_DR7_L_G(iBp))
    9188                         && X86_DR7_GET_RW(uDr7, iBp) == X86_DR7_RW_IO)
    9189                     {
    9190                         /* ASSUME the breakpoint and the I/O width qualifier uses the same encoding (1 2 x 4). */
    9191                         static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 };
    9192                         uint8_t  cbInvAlign = s_abInvAlign[X86_DR7_GET_LEN(uDr7, iBp)];
    9193                         uint64_t uDrXFirst  = pMixedCtx->dr[iBp] & ~(uint64_t)cbInvAlign;
    9194                         uint64_t uDrXLast   = uDrXFirst + cbInvAlign;
    9195 
    9196                         if (uDrXFirst <= uIOPortLast && uDrXLast >= uIOPort)
    9197                         {
    9198                             Assert(CPUMIsGuestDebugStateActive(pVCpu));
    9199                             uint64_t uDR6 = ASMGetDR6();
    9200 
    9201                             /* Clear all breakpoint status flags and set the one we just hit. */
    9202                             uDR6 &= ~X86_DR6_B_MASK;
    9203                             uDR6 |= X86_DR6_B(iBp);
    9204 
    9205                             /*
    9206                              * Note: AMD64 Architecture Programmer's Manual 13.1:
    9207                              * Bits 15:13 of the DR6 register is never cleared by the processor and must
    9208                              * be cleared by software after the contents have been read.
    9209                              */
    9210                             ASMSetDR6(uDR6);
    9211 
    9212                             /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
    9213                             pMixedCtx->dr[7] &= ~X86_DR7_GD;
    9214 
    9215                             /* Paranoia. */
    9216                             pMixedCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
    9217                             pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
    9218 
    9219                             /* Resync DR7 */
    9220                             /** @todo probably cheaper to just reload DR7, nothing else needs changing. */
    9221                             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    9222 
    9223                             /* Set #DB to be injected into the VM and continue guest execution. */
    9224                             hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
    9225                             break;
    9226                         }
    9227                     }
    9228                 }
     9176                /* Raise #DB. */
     9177                if (fIsGuestDbgActive)
     9178                    ASMSetDR6(pMixedCtx->dr[6]);
     9179                if (pMixedCtx->dr[7] != uDr7)
     9180                    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     9181
     9182                hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
    92299183            }
     9184            /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
     9185            else if (   rcStrict2 != VINF_SUCCESS
     9186                     && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
     9187                rcStrict = rcStrict2;
    92309188        }
    92319189    }
    92329190
    92339191#ifdef DEBUG
    9234     if (rc == VINF_IOM_R3_IOPORT_READ)
     9192    if (rcStrict == VINF_IOM_R3_IOPORT_READ)
    92359193        Assert(!fIOWrite);
    9236     else if (rc == VINF_IOM_R3_IOPORT_WRITE)
     9194    else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
    92379195        Assert(fIOWrite);
    92389196    else
    92399197    {
    9240         AssertMsg(   RT_FAILURE(rc)
    9241                   || rc == VINF_SUCCESS
    9242                   || rc == VINF_EM_RAW_EMULATE_INSTR
    9243                   || rc == VINF_EM_RAW_GUEST_TRAP
    9244                   || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", rc));
     9198        /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
     9199         *        statuses, that the VMM device and some others may return. See
     9200         *        IOM_SUCCESS() for guidance. */
     9201        AssertMsg(   RT_FAILURE(rcStrict)
     9202                  || rcStrict == VINF_SUCCESS
     9203                  || rcStrict == VINF_EM_RAW_EMULATE_INSTR
     9204                  || rcStrict == VINF_EM_DBG_BREAKPOINT
     9205                  || rcStrict == VINF_EM_RAW_GUEST_TRAP
     9206                  || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    92459207    }
    92469208#endif
    92479209
    92489210    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
    9249     return rc;
     9211    return VBOXSTRICTRC_TODO(rcStrict);
    92509212}
    92519213
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r47655 r47681  
    28062806    }
    28072807
     2808    if (IOM_SUCCESS(rcStrict))
     2809    {
     2810        /*
     2811         * Check for I/O breakpoints.
     2812         */
     2813        uint32_t const uDr7 = pCtx->dr[7];
     2814        if (   (   (uDr7 & X86_DR7_ENABLED_MASK)
     2815                && X86_DR7_ANY_RW_IO(uDr7)
     2816                && (pCtx->cr4 & X86_CR4_DE))
     2817            || DBGFBpIsHwIoArmed(pVM))
     2818        {
     2819            VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pCtx, pVCpu->hm.s.PendingIO.s.Port.uPort,
     2820                                                   pVCpu->hm.s.PendingIO.s.Port.cbSize);
     2821            if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
     2822                rcStrict2 = TRPMAssertTrap(pVCpu, X86_XCPT_DB, TRPM_TRAP);
     2823            /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
     2824            else if (rcStrict2 != VINF_SUCCESS && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
     2825                rcStrict = rcStrict2;
     2826        }
     2827    }
    28082828    return rcStrict;
    28092829}
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette