VirtualBox

Changeset 105072 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp:
Jun 28, 2024 12:03:20 PM
Author:
vboxsync
Message:

VMM/IEM,DBGF,bs3-cpu-weird-1: Early data breakpoint support, mostly untested except for the ring transition tests in bs3-cpu-weird-1. bugref:10715
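
Editorial note (not part of the changeset): DBGFBpCheckInstruction below decides an execution-breakpoint hit by building two bit masks from DR7, one for breakpoints that match the PC and one for breakpoints that are enabled. A minimal standalone sketch of that decode, assuming the architectural DR7 layout; the MY_* macros are hypothetical stand-ins for the X86_DR7_* macros used in the diff:

    #include <stdint.h>

    #define MY_DR7_L_G(iBp)       (UINT32_C(3) << ((iBp) * 2))        /* Lx/Gx enable bits */
    #define MY_DR7_RW(fDr7, iBp)  (((fDr7) >> (16 + (iBp) * 4)) & 3)  /* RWx configuration field */
    #define MY_DR7_RW_EO          0                                   /* execute-only breakpoint */

    /* Returns a B0..B3-style mask of execution breakpoints that both match the
       given PC and are enabled; a non-zero result means #DB must be raised. */
    static uint32_t checkExecBps(uint32_t fDr7, uint64_t const aDr[4], uint64_t uPc)
    {
        uint32_t fMatched = 0;
        uint32_t fEnabled = 0;
        for (unsigned iBp = 0; iBp < 4; iBp++)
            if (MY_DR7_RW(fDr7, iBp) == MY_DR7_RW_EO)   /* configured for execution */
            {
                if (fDr7 & MY_DR7_L_G(iBp))
                    fEnabled |= UINT32_C(1) << iBp;
                if (aDr[iBp] == uPc)
                    fMatched |= UINT32_C(1) << iBp;
            }
        return fEnabled & fMatched;
    }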

Location:
trunk/src/VBox/VMM/VMMAll
Files:
5 edited

Legend:

  (no prefix)      unmodified context line
  +                added line
  -                removed line
  @@ -a,n +b,m @@  hunk header: n lines starting at line a in the old revision, m lines starting at line b in the new one
  • trunk/src/VBox/VMM/VMMAll/DBGFAll.cpp

--- r99051
+++ r105072
@@ -52,6 +52,7 @@
 AssertCompileMembersSameSizeAndOffset(VM, dbgf.s.cSelectedEvents,       VM, dbgf.ro.cSelectedEvents);
 
-
 #if !defined(VBOX_VMM_TARGET_ARMV8)
+
+
 /**
  * Gets the hardware breakpoint configuration as DR7.
     
@@ -184,6 +185,7 @@
  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
  * @param   GCPtrPC     The unsegmented PC address.
- */
-VMM_INT_DECL(VBOXSTRICTRC)  DBGFBpCheckInstruction(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrPC)
+ * @param   fCheckGuest Whether to include guest breakpoints or not.
+ */
+VMM_INT_DECL(VBOXSTRICTRC)  DBGFBpCheckInstruction(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrPC, bool fCheckGuest)
 {
     CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
     
@@ -218,52 +220,197 @@
      * Check the guest.
      */
+    if (fCheckGuest)
+    {
+        uint32_t const fDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
+        if (X86_DR7_ANY_EO_ENABLED(fDr7) && !pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
+        {
+            /*
+             * The CPU (10980XE & 6700K at least) will set the DR6.BPx bits for any
+             * DRx that matches the current PC and is configured as an execution
+             * breakpoint (RWx=EO, LENx=1byte).  They don't have to be enabled,
+             * however one that is enabled must match for the #DB to be raised and
+             * DR6 to be modified, of course.
+             */
+            CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
+            uint32_t fMatched = 0;
+            uint32_t fEnabled = 0;
+            for (unsigned iBp = 0, uBpMask = 1; iBp < 4; iBp++, uBpMask <<= 1)
+                if (X86_DR7_IS_EO_CFG(fDr7, iBp))
+                {
+                    if (fDr7 & X86_DR7_L_G(iBp))
+                        fEnabled |= uBpMask;
+                    if (pVCpu->cpum.GstCtx.dr[iBp] == GCPtrPC)
+                        fMatched |= uBpMask;
+                }
+            if (!(fEnabled & fMatched))
+            { /*likely*/ }
+            else
+            {
+                /*
+                 * Update DR6 and DR7.
+                 *
+                 * See "AMD64 Architecture Programmer's Manual Volume 2", chapter
+                 * 13.1.1.3 for details on DR6 bits.  The basics are that the B0..B3
+                 * bits are always cleared while the others must be cleared by software.
+                 *
+                 * The following subchapters say the GD bit is always cleared when
+                 * generating a #DB so the handler can safely access the debug registers.
+                 */
+                CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_DR6);
+                pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
+                if (pVM->cpum.ro.GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INTEL)
+                    pVCpu->cpum.GstCtx.dr[6] |= fMatched & fEnabled;
+                else
+                    pVCpu->cpum.GstCtx.dr[6] |= fMatched;    /* Intel: all matched, regardless of whether they're enabled or not */
+                pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
+                LogFlow(("DBGFBpCheckInstruction: hit hw breakpoints %#x at %04x:%RGv (%RGv)\n",
+                         fMatched, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPtrPC));
+                return VINF_EM_RAW_GUEST_TRAP;
+            }
+        }
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Common worker for DBGFBpCheckDataRead and DBGFBpCheckDataWrite.
+ */
+template<bool const a_fRead>
+DECL_FORCE_INLINE(uint32_t) dbgfBpCheckData(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrAccess, uint32_t cbAccess, bool fSysAccess)
+{
+    AssertCompile((X86_DR7_RW_RW & 1) && (X86_DR7_RW_WO & 1));
+    CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_DR7);
+
+    uint32_t      fRet           = 0;
+    RTGCPTR const GCPtrAccessPfn = GCPtrAccess >> GUEST_PAGE_SHIFT;
+    Assert(((GCPtrAccess + cbAccess - 1) >> GUEST_PAGE_SHIFT) == GCPtrAccessPfn); /* No page crossing expected here! */
+
+    /*
+     * Check hyper breakpoints first as the VMM debugger has priority over
+     * the guest.
+     */
+    if (pVM->dbgf.s.cEnabledHwBreakpoints > 0)
+        for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
+        {
+            if (   (pVM->dbgf.s.aHwBreakpoints[iBp].GCPtr >> GUEST_PAGE_SHIFT) != GCPtrAccessPfn
+                || (  a_fRead
+                    ? pVM->dbgf.s.aHwBreakpoints[iBp].fType != X86_DR7_RW_RW
+                    : !(pVM->dbgf.s.aHwBreakpoints[iBp].fType & 1))
+                || pVM->dbgf.s.aHwBreakpoints[iBp].cb    == 0
+                || !pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled
+                || pVM->dbgf.s.aHwBreakpoints[iBp].hBp   == NIL_DBGFBP)
+            { /*likely*/ }
+            else
+            {
+                /* The page is of interest. */
+                AssertCompile(!((CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK) & UINT32_C(1)));
+                fRet |= UINT32_C(1);
+
+                /* If the access overlaps the breakpoint area, we have a hit. */
+                if (   GCPtrAccess            < pVM->dbgf.s.aHwBreakpoints[iBp].GCPtr + pVM->dbgf.s.aHwBreakpoints[iBp].cb
+                    && GCPtrAccess + cbAccess > pVM->dbgf.s.aHwBreakpoints[iBp].GCPtr)
+                {
+                    pVCpu->dbgf.s.hBpActive          = pVM->dbgf.s.aHwBreakpoints[iBp].hBp; /* ? */
+                    pVCpu->dbgf.s.fSingleSteppingRaw = false;
+                    LogFlow(("DBGFBpCheckData%s: hit hw breakpoint %u when accessing %RGv LB %#x\n",
+                             a_fRead ? "Read" : "Write", iBp, GCPtrAccess, cbAccess));
+                    fRet |= CPUMCTX_DBG_DBGF_BP;
+                }
+            }
+        }
+
+    /*
+     * Check the guest.
+     */
     uint32_t const fDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
-    if (X86_DR7_ANY_EO_ENABLED(fDr7) && !pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
+    if (    (a_fRead ? X86_DR7_ANY_RW_ENABLED(fDr7) : X86_DR7_ANY_W_ENABLED(fDr7))
+        && !pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
     {
-        /*
-         * The CPU (10980XE & 6700K at least) will set the DR6.BPx bits for any
-         * DRx that matches the current PC and is configured as an execution
-         * breakpoint (RWx=EO, LENx=1byte).  They don't have to be enabled,
-         * however one that is enabled must match for the #DB to be raised and
-         * DR6 to be modified, of course.
-         */
-        CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
+        /* This is a bit suboptimal... Need a NORET variant. */
+        int rcIgn = VINF_SUCCESS;
+        CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_DR0_DR3, rcIgn);
+        RT_NOREF(rcIgn);
+
+        /** @todo Not sure what exactly intel and amd CPUs do here wrt disabled
+         *        breakpoint configurations.  We need a testcase for this.  Following
+         *        the guidelines of the execution breakpoints for now and making
+         *        intel CPUs set status flags regardless of enabled or not. */
         uint32_t fMatched = 0;
         uint32_t fEnabled = 0;
-        for (unsigned iBp = 0, uBpMask = 1; iBp < 4; iBp++, uBpMask <<= 1)
-            if (X86_DR7_IS_EO_CFG(fDr7, iBp))
-            {
-                if (fDr7 & X86_DR7_L_G(iBp))
-                    fEnabled |= uBpMask;
-                if (pVCpu->cpum.GstCtx.dr[iBp] == GCPtrPC)
-                    fMatched |= uBpMask;
+        for (uint32_t iBp = 0, fBpMask = CPUMCTX_DBG_HIT_DR0, fDr7Cfg = fDr7 >> 16, fDr7En = fDr7;
+             iBp < 4;
+             iBp++, fBpMask <<= 1, fDr7Cfg >>= 4, fDr7En >>= 2)
+            if (   (a_fRead ? (fDr7Cfg & 3) == X86_DR7_RW_RW : (fDr7Cfg & 1) != 0)
+                && (pVCpu->cpum.GstCtx.dr[iBp] >> GUEST_PAGE_SHIFT) == GCPtrAccessPfn)
+            {
+                if (fDr7En & 3)
+                {
+                    fEnabled |= fBpMask;
+                    fRet     |= UINT32_C(1);
+                }
+                static uint8_t const s_acbBp[] = { 1, 2, 8, 4 };
+                uint8_t const        cbBp      = s_acbBp[(fDr7Cfg >> 2) & 3];
+                if (   GCPtrAccess            < pVCpu->cpum.GstCtx.dr[iBp] + cbBp
+                    && GCPtrAccess + cbAccess > pVCpu->cpum.GstCtx.dr[iBp])
+                    fMatched |= fBpMask;
             }
         if (!(fEnabled & fMatched))
         { /*likely*/ }
-        else if (fEnabled & fMatched)
-        {
-            /*
-             * Update DR6 and DR7.
-             *
-             * See "AMD64 Architecture Programmer's Manual Volume 2", chapter
-             * 13.1.1.3 for details on DR6 bits.  The basics are that the B0..B3
-             * bits are always cleared while the others must be cleared by software.
-             *
-             * The following subchapters say the GD bit is always cleared when
-             * generating a #DB so the handler can safely access the debug registers.
-             */
-            CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_DR6);
-            pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
+        else
+        {
             if (pVM->cpum.ro.GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INTEL)
-                pVCpu->cpum.GstCtx.dr[6] |= fMatched & fEnabled;
+                fRet |= fMatched & fEnabled;
+            else if (!fSysAccess)
+                fRet |= fMatched;
             else
-                pVCpu->cpum.GstCtx.dr[6] |= fMatched;    /* Intel: all matched, regardless of whether they're enabled or not */
-            pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
-            LogFlow(("DBGFBpCheckInstruction: hit hw breakpoints %#x at %04x:%RGv (%RGv)\n",
-                     fMatched, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPtrPC));
-            return VINF_EM_RAW_GUEST_TRAP;
+                fRet |= CPUMCTX_DBG_HIT_DRX_SILENT; /* see bs3-cpu-weird-1 for special intel behaviour */
+            LogFlow(("DBGFBpCheckData%s: hit hw breakpoints %#x (fRet=%#x) when accessing %RGv LB %#x\n",
+                     a_fRead ? "Read" : "Write", fMatched, fRet, GCPtrAccess, cbAccess));
         }
     }
-    return VINF_SUCCESS;
+
+    return fRet;
+}
+
+
+/**
+ * Checks read data access for guest or hypervisor hardware breakpoints.
+ *
+ * @returns Anything in CPUMCTX_DBG_HIT_DRX_MASK and CPUMCTX_DBG_DBGF_MASK if
+ *          there is a hit, zero or one if no hit.  Bit 0 is set if the page
+ *          being accessed has a data breakpoint associated with it and needs
+ *          special handling.
+ *
+ * @param   pVM         The cross context VM structure.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   GCPtrAccess The address being accessed.
+ * @param   cbAccess    The size of the access.  Must not cross a page
+ *                      boundary.
+ * @param   fSysAccess  Set if a system access, like GDT, LDT or IDT.
+ */
+VMM_INT_DECL(uint32_t) DBGFBpCheckDataRead(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrAccess, uint32_t cbAccess, bool fSysAccess)
+{
+    return dbgfBpCheckData<true /*a_fRead*/>(pVM, pVCpu, GCPtrAccess, cbAccess, fSysAccess);
+}
+
+
+/**
+ * Checks write data access for guest or hypervisor hardware breakpoints.
+ *
+ * @returns Anything in CPUMCTX_DBG_DBGF_MASK if there is a hit, zero or one if
+ *          no hit.  Bit 0 is set if the page being accessed has a data
+ *          breakpoint associated with it and needs special handling.
+ *
+ * @param   pVM         The cross context VM structure.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
+ * @param   GCPtrAccess The address being accessed.
+ * @param   cbAccess    The size of the access.  Must not cross a page
+ *                      boundary.
+ * @param   fSysAccess  Set if a system access, like GDT, LDT or IDT.
+ */
+VMM_INT_DECL(uint32_t) DBGFBpCheckDataWrite(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrAccess, uint32_t cbAccess, bool fSysAccess)
+{
+    return dbgfBpCheckData<false /*a_fRead*/>(pVM, pVCpu, GCPtrAccess, cbAccess, fSysAccess);
 }
 
     
@@ -445,6 +592,6 @@
     return 0;
 }
-#endif /* VBOX_VMM_TARGET_ARMV8 */
-
+
+#endif /* !VBOX_VMM_TARGET_ARMV8 */
 
 /**
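
Editorial note (not part of the changeset): the dbgfBpCheckData worker above reduces a data-breakpoint hit to an interval-overlap test between the access and the area covered by the breakpoint's DR7 LEN field. A minimal standalone sketch of just that test, with the table mirroring s_acbBp from the diff:

    #include <stdbool.h>
    #include <stdint.h>

    /* DR7 LENx encoding: 00 = 1 byte, 01 = 2 bytes, 10 = 8 bytes (64-bit only), 11 = 4 bytes. */
    static uint8_t const g_acbBp[4] = { 1, 2, 8, 4 };

    static bool dataBpHits(uint64_t uBpAddr, unsigned uLenCfg, uint64_t uAccess, uint32_t cbAccess)
    {
        uint8_t const cbBp = g_acbBp[uLenCfg & 3];
        return uAccess < uBpAddr + cbBp        /* access starts before the breakpoint area ends */
            && uAccess + cbAccess > uBpAddr;   /* and ends after the breakpoint area starts */
    }

For example, a 4-byte write to 0x1000 hits a 2-byte breakpoint at 0x1002, while a 1-byte write to 0x1004 does not.
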
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

--- r105036
+++ r105072
@@ -199,4 +199,7 @@
  * path.
  *
+ * This will also invalidate TLB entries for any pages with active data
+ * breakpoints on them.
+ *
  * @returns IEM_F_BRK_PENDING_XXX or zero.
  * @param   pVCpu               The cross context virtual CPU structure of the
     
@@ -210,7 +213,27 @@
 
     /*
+     * Helper for invalidating the data TLB for breakpoint addresses.
+     *
+     * This is to make sure any access to the page will always trigger a TLB
+     * load for as long as the breakpoint is enabled.
+     */
+#ifdef IEM_WITH_DATA_TLB
+# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
+        RTGCPTR uTagNoRev = (a_uValue); \
+        uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
+        uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
+        if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
+            pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
+        if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
+            pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
+    } while (0)
+#else
+# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
+#endif
+
+    /*
      * Process guest breakpoints.
      */
-#define PROCESS_ONE_BP(a_fDr7, a_iBp) do { \
+#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
         if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
         { \
     
@@ -223,4 +246,5 @@
                 case X86_DR7_RW_RW: \
                     fExec |= IEM_F_PENDING_BRK_DATA; \
+                    INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
                     break; \
                 case X86_DR7_RW_IO: \
     
@@ -234,8 +258,12 @@
     if (fGstDr7 & X86_DR7_ENABLED_MASK)
     {
-        PROCESS_ONE_BP(fGstDr7, 0);
-        PROCESS_ONE_BP(fGstDr7, 1);
-        PROCESS_ONE_BP(fGstDr7, 2);
-        PROCESS_ONE_BP(fGstDr7, 3);
+/** @todo extract more details here to simplify matching later. */
+#ifdef IEM_WITH_DATA_TLB
+        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
+#endif
+        PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
+        PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
+        PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
+        PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
     }
 
     
@@ -243,11 +271,13 @@
      * Process hypervisor breakpoints.
      */
-    uint32_t const fHyperDr7 = DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM));
+    PVMCC const    pVM       = pVCpu->CTX_SUFF(pVM);
+    uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
     if (fHyperDr7 & X86_DR7_ENABLED_MASK)
     {
-        PROCESS_ONE_BP(fHyperDr7, 0);
-        PROCESS_ONE_BP(fHyperDr7, 1);
-        PROCESS_ONE_BP(fHyperDr7, 2);
-        PROCESS_ONE_BP(fHyperDr7, 3);
+/** @todo extract more details here to simplify matching later. */
+        PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
+        PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
+        PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
+        PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
     }
 
     
@@ -2241,4 +2271,21 @@
         iemRaiseXcptAdjustState(pVCpu, u8Vector);
 
+    /*
+     * Deal with debug events that follow the exception and clear inhibit flags.
+     */
+    if (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
+        || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
+        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
+    else
+    {
+        Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
+             u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
+        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
+        pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
+                                  >> CPUMCTX_DBG_HIT_DRX_SHIFT;
+        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
+        return iemRaiseDebugException(pVCpu);
+    }
+
     /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK doesn't really change here,
        so best leave them alone in case we're in a weird kind of real mode... */
     
@@ -3207,4 +3254,20 @@
 
     /*
+     * Hack alert! Convert incoming debug events to silent on Intel.
+     * See bs3-cpu-weird-1.
+     */
+    if (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
+        || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
+        || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
+    { /* ignore */ }
+    else
+    {
+        Log(("iemRaiseXcptOrIntInProtMode: Converting pending debug events to a silent one after %#x; pending=%#x (intel hack)\n",
+             u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
+        pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
+                                        | CPUMCTX_DBG_HIT_DRX_SILENT;
+    }
+
+    /*
      * Read the IDT entry.
      */
     
@@ -3664,4 +3727,21 @@
     Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
 
+    /*
+     * Deal with debug events that follow the exception and clear inhibit flags.
+     */
+    if (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
+        || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
+        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
+    else
+    {
+        Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
+             u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
+        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
+        pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
+                                  >> CPUMCTX_DBG_HIT_DRX_SHIFT;
+        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
+        return iemRaiseDebugException(pVCpu);
+    }
+
     return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
 }
     
@@ -3689,4 +3769,20 @@
 {
     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
+
+    /*
+     * Hack alert! Convert incoming debug events to silent on Intel.
+     * See bs3-cpu-weird-1.
+     */
+    if (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
+        || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
+        || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
+    { /* ignore */ }
+    else
+    {
+        Log(("iemRaiseXcptOrIntInLongMode: Converting pending debug events to a silent one after %#x; pending=%#x (intel hack)\n",
+             u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
+        pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
+                                        | CPUMCTX_DBG_HIT_DRX_SILENT;
+    }
 
     /*
     
@@ -3920,4 +4016,21 @@
 
     iemRecalcExecModeAndCplAndAcFlags(pVCpu);
+
+    /*
+     * Deal with debug events that follow the exception and clear inhibit flags.
+     */
+    if (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
+        || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
+        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
+    else
+    {
+        Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
+             u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
+        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
+        pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
+                                  >> CPUMCTX_DBG_HIT_DRX_SHIFT;
+        pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
+        return iemRaiseDebugException(pVCpu);
+    }
 
     return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
     
@@ -6045,4 +6158,17 @@
 
 /**
+ * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
+ */
+DECL_FORCE_INLINE(uint32_t)
+iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
+{
+    bool const  fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
+    if (fAccess & IEM_ACCESS_TYPE_WRITE)
+        return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
+    return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
+}
+
+
+/**
  * iemMemMap worker that deals with a request crossing pages.
  */
     
@@ -6074,4 +6200,20 @@
 
     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
+
+    /*
+     * Check for data breakpoints.
+     */
+    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
+    { /* likely */ }
+    else
+    {
+        uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
+        fDataBps         |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
+                                                      cbSecondPage, fAccess);
+        pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
+        if (fDataBps > 1)
+            LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
+                                  fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
+    }
 
     /*
     
@@ -6504,14 +6646,30 @@
         }
 
-        if (   !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
-            || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
-        {
-            pTlbe--;
-            pTlbe->uTag         = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
+        uint32_t fDataBps;
+        if (   RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
+            || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
+        {
+            if (   !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
+                || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
+            {
+                pTlbe--;
+                pTlbe->uTag         = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
+            }
+            else
+            {
+                pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
+                pTlbe->uTag         = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
+            }
         }
         else
         {
-            pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
-            pTlbe->uTag         = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
+            /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
+               to the page with the data access breakpoint armed on it to pass thru here. */
+            if (fDataBps > 1)
+                LogEx(LOG_GROUP_IEM, ("iemMemMap: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
+                                      fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
+            pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
+            pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
+            pTlbe->uTag = uTagNoRev;
         }
         pTlbe->fFlagsAndPhysRev = ~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
     
@@ -6879,17 +7037,33 @@
         }
 
-        if (   !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
-            || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
-        {
-            pTlbe--;
-            pTlbe->uTag         = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
+        uint32_t fDataBps;
+        if (   RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA))
+            || RT_LIKELY(!(fDataBps = iemMemCheckDataBreakpoint(pVCpu->CTX_SUFF(pVM), pVCpu, GCPtrMem, cbMem, fAccess))))
+        {
+            if (   !(WalkFast.fEffective & PGM_PTATTRS_G_MASK)
+                || IEM_GET_CPL(pVCpu) != 0) /* optimization: Only use the PTE.G=1 entries in ring-0. */
+            {
+                pTlbe--;
+                pTlbe->uTag         = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision;
+            }
+            else
+            {
+                if (a_fSafeCall)
+                    pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
+                else
+                    pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
+                pTlbe->uTag         = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
+            }
         }
         else
         {
-            if (a_fSafeCall)
-                pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads++;
-            else
-                pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads++;
-            pTlbe->uTag         = uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal;
+            /* If we hit a data breakpoint, we use a dummy TLBE to force all accesses
+               to the page with the data access breakpoint armed on it to pass thru here. */
+            if (fDataBps > 1)
+                LogEx(LOG_GROUP_IEM, ("iemMemMapJmp<%d>: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
+                                      a_fSafeCall, fDataBps, GCPtrMem, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
+            pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
+            pTlbe = &pVCpu->iem.s.DataBreakpointTlbe;
+            pTlbe->uTag = uTagNoRev;
        }
         pTlbe->fFlagsAndPhysRev = ~WalkFast.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
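
Editorial note (not part of the changeset): the INVALID_TLB_ENTRY_FOR_BP macro added above keeps breakpointed pages out of the IEM data TLB, so every access to them takes the slow path where iemMemCheckDataBreakpoint runs. A rough standalone sketch of the idea, assuming the diff's two-slot layout (non-global entry at the even index, global entry at the odd one); the simplified entry type is an editorial assumption:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct MYTLBENTRY { uint64_t uTag; } MYTLBENTRY;

    static void invalidateTlbEntryForBp(MYTLBENTRY *paEntries, size_t idxEven, uint64_t uTagNoRev,
                                        uint64_t uTlbRevision, uint64_t uTlbRevisionGlobal)
    {
        if (paEntries[idxEven].uTag == (uTagNoRev | uTlbRevision))           /* non-global slot */
            paEntries[idxEven].uTag = 0;                                     /* zero tag => guaranteed miss */
        if (paEntries[idxEven + 1].uTag == (uTagNoRev | uTlbRevisionGlobal)) /* global slot */
            paEntries[idxEven + 1].uTag = 0;
    }
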
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp

--- r104984
+++ r105072
@@ -6643,6 +6643,15 @@
     /*
      * Re-init hardware breakpoint summary if it was DR7 that got changed.
-     */
-    if (iDrReg == 7)
+     *
+     * We also do this when an active data breakpoint is updated so that the
+     * TLB entry can be correctly invalidated.
+     */
+    if (   iDrReg == 7
+#ifdef IEM_WITH_DATA_TLB
+        || (   iDrReg <= 3
+            && (X86_DR7_L_G(iDrReg) & pVCpu->cpum.GstCtx.dr[7])
+            && X86_DR7_IS_W_CFG(pVCpu->cpum.GstCtx.dr[7], iDrReg) )
+#endif
+       )
         iemRecalcExecDbgFlags(pVCpu);
 
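
Editorial note (not part of the changeset): the extended condition above means a MOV to DR0..DR3 now also refreshes the breakpoint summary when the written register is armed as a write breakpoint, since the TLB entry covering its old address must be re-armed. A standalone sketch of the predicate, following the architectural DR7 bit layout; the helper name is hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    static bool needDbgFlagRecalc(unsigned iDrReg, uint64_t uDr7)
    {
        if (iDrReg == 7)
            return true;                /* DR7 writes always trigger a recalc */
        if (iDrReg > 3)
            return false;
        bool const fEnabled = (uDr7 & (UINT64_C(3) << (iDrReg * 2))) != 0;  /* Lx/Gx set */
        unsigned const uRw  = (unsigned)(uDr7 >> (16 + iDrReg * 4)) & 3;    /* RWx field */
        return fEnabled && (uRw & 1);   /* RW=01 (write-only) or RW=11 (read/write) */
    }
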
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h

--- r104984
+++ r105072
@@ -9617,4 +9617,5 @@
     IEMOP_MNEMONIC(into, "into");
     IEMOP_HLP_NO_64BIT();
+/** @todo INTO instruction is completely wrong.   */
     IEM_MC_DEFER_TO_CIMPL_2_RET(IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_STACK_FAR
                                 | IEM_CIMPL_F_BRANCH_CONDITIONAL | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_RFLAGS,
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp

--- r104468
+++ r105072
@@ -206,5 +206,7 @@
 {
     VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
-                                                   pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+                                                   pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,
+                                                      !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)
+                                                   || IEM_IS_GUEST_CPU_AMD(pVCpu));
     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
         return VINF_SUCCESS;
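
Editorial note (not part of the changeset): the new second argument computed at this call site captures one of the behaviours the bs3-cpu-weird-1 ring-transition tests probe: in the one-instruction shadow after MOV SS/POP SS, Intel CPUs suppress guest instruction breakpoints while AMD CPUs still deliver them (DBGF's own breakpoints are checked either way). Reduced to a standalone predicate with editorial names:

    #include <stdbool.h>

    /* Should guest (DR7) instruction breakpoints be checked for this instruction? */
    static bool checkGuestInstrBps(bool fInInhibitShadowSs, bool fIsAmdCpu)
    {
        return !fInInhibitShadowSs   /* not in a MOV SS/POP SS shadow: always check */
            || fIsAmdCpu;            /* AMD: breakpoints fire even inside the shadow */
    }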
