VirtualBox

Changeset 47660 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Aug 12, 2013 12:37:34 AM (11 years ago)
Author:
vboxsync
Message:

VMM: Debug register handling redo. (only partly tested on AMD-V so far.)

Location:
trunk/src/VBox/VMM
Files:
13 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r47652 r47660  
    317317
    318318
     319/** @def MAYBE_LOAD_DRx
     320 * Macro for updating DRx values in raw-mode and ring-0 contexts.
     321 */
     322#ifdef IN_RING0
     323# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
     324#  ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
     325#   define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
     326    do { \
     327        if (!CPUMIsGuestInLongModeEx(&(a_pVCpu)->cpum.s.Guest)) \
     328            a_fnLoad(a_uValue); \
     329        else \
     330            (a_pVCpu)->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_REGS_HYPER; \
     331    } while (0)
     332#  else
     333#   define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
     334    do { \
     335        /** @todo we're not loading the correct guest value here! */ \
     336        a_fnLoad(a_uValue); \
     337    } while (0)
     338#  endif
     339# else
     340# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
     341    do { \
     342        a_fnLoad(a_uValue); \
     343    } while (0)
     344# endif
     345
     346#elif defined(IN_RC)
     347# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) \
     348    do { \
     349        if ((a_pVCpu)->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER) \
     350        { a_fnLoad(a_uValue); } \
     351    } while (0)
     352
     353#else
     354# define MAYBE_LOAD_DRx(a_pVCpu, a_fnLoad, a_uValue) do { } while (0)
     355#endif
     356
    319357VMMDECL(void) CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0)
    320358{
    321359    pVCpu->cpum.s.Hyper.dr[0] = uDr0;
    322     /** @todo in GC we must load it! */
     360    MAYBE_LOAD_DRx(pVCpu, ASMSetDR0, uDr0);
    323361}
    324362
     
    327365{
    328366    pVCpu->cpum.s.Hyper.dr[1] = uDr1;
    329     /** @todo in GC we must load it! */
     367    MAYBE_LOAD_DRx(pVCpu, ASMSetDR1, uDr1);
    330368}
    331369
     
    334372{
    335373    pVCpu->cpum.s.Hyper.dr[2] = uDr2;
    336     /** @todo in GC we must load it! */
     374    MAYBE_LOAD_DRx(pVCpu, ASMSetDR2, uDr2);
    337375}
    338376
     
    341379{
    342380    pVCpu->cpum.s.Hyper.dr[3] = uDr3;
    343     /** @todo in GC we must load it! */
     381    MAYBE_LOAD_DRx(pVCpu, ASMSetDR3, uDr3);
    344382}
    345383
     
    348386{
    349387    pVCpu->cpum.s.Hyper.dr[6] = uDr6;
    350     /** @todo in GC we must load it! */
    351388}
    352389
     
    355392{
    356393    pVCpu->cpum.s.Hyper.dr[7] = uDr7;
    357     /** @todo in GC we must load it! */
     394#ifdef IN_RC
     395    MAYBE_LOAD_DRx(pVCpu, ASMSetDR7, uDr7);
     396#endif
    358397}
    359398
     
    21182157{
    21192158    pVCpu->cpum.s.Guest.dr[0] = uDr0;
    2120     return CPUMRecalcHyperDRx(pVCpu);
     2159    return CPUMRecalcHyperDRx(pVCpu, 0);
    21212160}
    21222161
     
    21252164{
    21262165    pVCpu->cpum.s.Guest.dr[1] = uDr1;
    2127     return CPUMRecalcHyperDRx(pVCpu);
     2166    return CPUMRecalcHyperDRx(pVCpu, 1);
    21282167}
    21292168
     
    21322171{
    21332172    pVCpu->cpum.s.Guest.dr[2] = uDr2;
    2134     return CPUMRecalcHyperDRx(pVCpu);
     2173    return CPUMRecalcHyperDRx(pVCpu, 2);
    21352174}
    21362175
     
    21392178{
    21402179    pVCpu->cpum.s.Guest.dr[3] = uDr3;
    2141     return CPUMRecalcHyperDRx(pVCpu);
     2180    return CPUMRecalcHyperDRx(pVCpu, 3);
    21422181}
    21432182
     
    21462185{
    21472186    pVCpu->cpum.s.Guest.dr[6] = uDr6;
    2148     return CPUMRecalcHyperDRx(pVCpu);
     2187    return VINF_SUCCESS; /* No need to recalc. */
    21492188}
    21502189
     
    21532192{
    21542193    pVCpu->cpum.s.Guest.dr[7] = uDr7;
    2155     return CPUMRecalcHyperDRx(pVCpu);
     2194    return CPUMRecalcHyperDRx(pVCpu, 7);
    21562195}
    21572196
     
    21642203        iReg += 2;
    21652204    pVCpu->cpum.s.Guest.dr[iReg] = Value;
    2166     return CPUMRecalcHyperDRx(pVCpu);
    2167 }
    2168 
    2169 
    2170 /**
    2171  * Recalculates the hypervisor DRx register values based on
    2172  * current guest registers and DBGF breakpoints.
    2173  *
    2174  * This is called whenever a guest DRx register is modified and when DBGF
    2175  * sets a hardware breakpoint. In guest context this function will reload
    2176  * any (hyper) DRx registers which comes out with a different value.
     2205    return CPUMRecalcHyperDRx(pVCpu, iReg);
     2206}
     2207
     2208
     2209/**
     2210 * Recalculates the hypervisor DRx register values based on current guest
     2211 * registers and DBGF breakpoints, updating changed registers depending on the
     2212 * context.
     2213 *
     2214 * This is called whenever a guest DRx register is modified (any context) and
     2215 * when DBGF sets a hardware breakpoint (ring-3 only, rendezvous).
     2216 *
     2217 * In raw-mode context this function will reload any (hyper) DRx registers which
     2218 * comes out with a different value.  It may also have to save the host debug
     2219 * registers if that haven't been done already.  In this context though, we'll
     2220 * be intercepting and emulating all DRx accesses, so the hypervisor DRx values
     2221 * are only important when breakpoints are actually enabled.
     2222 *
     2223 * In ring-0 (HM) context DR0-3 will be relocated by us, while DR7 will be
     2224 * reloaded by the HM code if it changes.  Further more, we will only use the
     2225 * combined register set when the VBox debugger is actually using hardware BPs,
     2226 * when it isn't we'll keep the guest DR0-3 + (maybe) DR6 loaded (DR6 doesn't
     2227 * concern us here).
     2228 *
     2229 * In ring-3 we won't be loading anything, so we'll calculate hypervisor values
     2230 * all the time.
    21772231 *
    21782232 * @returns VINF_SUCCESS.
    21792233 * @param   pVCpu       Pointer to the VMCPU.
    2180  */
    2181 VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu)
     2234 * @param   iGstReg     The guest debug register number that was modified.
     2235 *                      UINT8_MAX if not guest register.
     2236 */
     2237VMMDECL(int) CPUMRecalcHyperDRx(PVMCPU pVCpu, uint8_t iGstReg)
    21822238{
    21832239    PVM pVM = pVCpu->CTX_SUFF(pVM);
     
    21862242     * Compare the DR7s first.
    21872243     *
    2188      * We only care about the enabled flags. The GE and LE flags are always
    2189      * set and we don't care if the guest doesn't set them. GD is virtualized
    2190      * when we dispatch #DB, we never enable it.
     2244     * We only care about the enabled flags.  GD is virtualized when we
     2245     * dispatch the #DB, we never enable it.  The DBGF DR7 value will
     2246     * always have the LE and GE bits set, so no need to check and disable
     2247     * stuff if they're cleared like we have to for the guest DR7.
    21912248     */
     2249    RTGCUINTREG uGstDr7 = CPUMGetGuestDR7(pVCpu);
     2250    if (!(uGstDr7 & (X86_DR7_LE | X86_DR7_GE)))
     2251        uGstDr7 = 0;
     2252    else if (!(uGstDr7 & X86_DR7_LE))
     2253        uGstDr7 &= ~X86_DR7_LE_ALL;
     2254    else if (!(uGstDr7 & X86_DR7_GE))
     2255        uGstDr7 &= ~X86_DR7_GE_ALL;
     2256
    21922257    const RTGCUINTREG uDbgfDr7 = DBGFBpGetDR7(pVM);
    2193 #ifdef CPUM_VIRTUALIZE_DRX
    2194     const RTGCUINTREG uGstDr7  = CPUMGetGuestDR7(pVCpu);
    2195 #else
    2196     const RTGCUINTREG uGstDr7  = 0;
    2197 #endif
    2198     if ((uGstDr7 | uDbgfDr7) & X86_DR7_ENABLED_MASK)
     2258    if ((HMIsEnabled(pVCpu->CTX_SUFF(pVM)) ? uDbgfDr7 : (uGstDr7 | uDbgfDr7)) & X86_DR7_ENABLED_MASK)
    21992259    {
     2260        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
     2261
    22002262        /*
    2201          * Ok, something is enabled. Recalc each of the breakpoints.
    2202          * Straight forward code, not optimized/minimized in any way.
     2263         * Ok, something is enabled.  Recalc each of the breakpoints, taking
     2264         * the VM debugger ones over the guest ones.  In raw-mode context we will
     2265         * not allow breakpoints with values inside the hypervisor area.
    22032266         */
    22042267        RTGCUINTREG uNewDr7 = X86_DR7_GE | X86_DR7_LE | X86_DR7_RA1_MASK;
     
    22132276        else if (uGstDr7 & (X86_DR7_L0 | X86_DR7_G0))
    22142277        {
    2215             uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
    22162278            uNewDr0 = CPUMGetGuestDR0(pVCpu);
     2279#ifdef IN_RC
     2280            if (MMHyperIsInsideArea(pVM, uNewDr0))
     2281                uNewDr0 = 0;
     2282            else
     2283#endif
     2284                uNewDr7 |= uGstDr7 & (X86_DR7_L0 | X86_DR7_G0 | X86_DR7_RW0_MASK | X86_DR7_LEN0_MASK);
    22172285        }
    22182286        else
    2219             uNewDr0 = pVCpu->cpum.s.Hyper.dr[0];
     2287            uNewDr0 = 0;
    22202288
    22212289        /* bp 1 */
     
    22282296        else if (uGstDr7 & (X86_DR7_L1 | X86_DR7_G1))
    22292297        {
    2230             uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
    22312298            uNewDr1 = CPUMGetGuestDR1(pVCpu);
     2299#ifdef IN_RC
     2300            if (MMHyperIsInsideArea(pVM, uNewDr1))
     2301                uNewDr1 = 0;
     2302            else
     2303#endif
     2304                uNewDr7 |= uGstDr7 & (X86_DR7_L1 | X86_DR7_G1 | X86_DR7_RW1_MASK | X86_DR7_LEN1_MASK);
    22322305        }
    22332306        else
    2234             uNewDr1 = pVCpu->cpum.s.Hyper.dr[1];
     2307            uNewDr1 = 0;
    22352308
    22362309        /* bp 2 */
     
    22432316        else if (uGstDr7 & (X86_DR7_L2 | X86_DR7_G2))
    22442317        {
    2245             uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
    22462318            uNewDr2 = CPUMGetGuestDR2(pVCpu);
     2319#ifdef IN_RC
     2320            if (MMHyperIsInsideArea(pVM, uNewDr2))
     2321                uNewDr2 = 0;
     2322            else
     2323#endif
     2324                uNewDr7 |= uGstDr7 & (X86_DR7_L2 | X86_DR7_G2 | X86_DR7_RW2_MASK | X86_DR7_LEN2_MASK);
    22472325        }
    22482326        else
    2249             uNewDr2 = pVCpu->cpum.s.Hyper.dr[2];
     2327            uNewDr2 = 0;
    22502328
    22512329        /* bp 3 */
     
    22582336        else if (uGstDr7 & (X86_DR7_L3 | X86_DR7_G3))
    22592337        {
    2260             uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
    22612338            uNewDr3 = CPUMGetGuestDR3(pVCpu);
     2339#ifdef IN_RC
     2340            if (MMHyperIsInsideArea(pVM, uNewDr3))
     2341                uNewDr3 = 0;
     2342            else
     2343#endif
     2344                uNewDr7 |= uGstDr7 & (X86_DR7_L3 | X86_DR7_G3 | X86_DR7_RW3_MASK | X86_DR7_LEN3_MASK);
    22622345        }
    22632346        else
    2264             uNewDr3 = pVCpu->cpum.s.Hyper.dr[3];
     2347            uNewDr3 = 0;
    22652348
    22662349        /*
     
    22682351         */
    22692352#ifdef IN_RC
    2270         if (!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS))
    2271         {
    2272             /** @todo save host DBx registers. */
    2273         }
     2353        /* Make sure to save host registers first. */
     2354        if (!(pVCpu->cpum.s.fUseFlags & (CPUM_USE_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER)))
     2355        {
     2356            Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST));
     2357            pVCpu->cpum.s.Host.dr6 = ASMGetDR6();
     2358            pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
     2359            pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
     2360            ASMSetDR6(X86_DR6_INIT_VAL);
     2361        }
     2362        if (!(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST))
     2363        {
     2364            pVCpu->cpum.s.Host.dr0 = ASMGetDR0();
     2365            pVCpu->cpum.s.Host.dr1 = ASMGetDR1();
     2366            pVCpu->cpum.s.Host.dr2 = ASMGetDR2();
     2367            pVCpu->cpum.s.Host.dr3 = ASMGetDR3();
     2368            pVCpu->cpum.s.fUseFlags |= CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HYPER;
     2369
     2370            /* We haven't loaded any hyper DRxes yet, so we'll have to load them all now. */
     2371            pVCpu->cpum.s.Hyper.dr[0] = uNewDr0;
     2372            ASMSetDR0(uNewDr0);
     2373            pVCpu->cpum.s.Hyper.dr[1] = uNewDr1;
     2374            ASMSetDR1(uNewDr1);
     2375            pVCpu->cpum.s.Hyper.dr[2] = uNewDr2;
     2376            ASMSetDR2(uNewDr2);
     2377            pVCpu->cpum.s.Hyper.dr[3] = uNewDr3;
     2378            ASMSetDR3(uNewDr3);
     2379            pVCpu->cpum.s.Hyper.dr[7] = uNewDr7;
     2380            ASMSetDR7(uNewDr7);
     2381        }
     2382        else
    22742383#endif
    2275         /** @todo Should this not be setting CPUM_USE_DEBUG_REGS_HYPER?
    2276          *        (CPUM_VIRTUALIZE_DRX is never defined). */
    2277         pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
    2278         if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
    2279             CPUMSetHyperDR3(pVCpu, uNewDr3);
    2280         if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
    2281             CPUMSetHyperDR2(pVCpu, uNewDr2);
    2282         if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
    2283             CPUMSetHyperDR1(pVCpu, uNewDr1);
    2284         if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
    2285             CPUMSetHyperDR0(pVCpu, uNewDr0);
    2286         if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
    2287             CPUMSetHyperDR7(pVCpu, uNewDr7);
     2384        {
     2385            pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
     2386            if (uNewDr3 != pVCpu->cpum.s.Hyper.dr[3])
     2387                CPUMSetHyperDR3(pVCpu, uNewDr3);
     2388            if (uNewDr2 != pVCpu->cpum.s.Hyper.dr[2])
     2389                CPUMSetHyperDR2(pVCpu, uNewDr2);
     2390            if (uNewDr1 != pVCpu->cpum.s.Hyper.dr[1])
     2391                CPUMSetHyperDR1(pVCpu, uNewDr1);
     2392            if (uNewDr0 != pVCpu->cpum.s.Hyper.dr[0])
     2393                CPUMSetHyperDR0(pVCpu, uNewDr0);
     2394            if (uNewDr7 != pVCpu->cpum.s.Hyper.dr[7])
     2395                CPUMSetHyperDR7(pVCpu, uNewDr7);
     2396        }
    22882397    }
     2398#ifdef IN_RING0
     2399    else if (CPUMIsGuestDebugStateActive(pVCpu))
     2400    {
     2401        /*
     2402         * Reload the register that was modified.  Normally this won't happen
     2403         * as we won't intercept DRx writes when not having the hyper debug
     2404         * state loaded, but in case we do for some reason we'll simply deal
     2405         * with it.
     2406         */
     2407        switch (iGstReg)
     2408        {
     2409            case 0: ASMSetDR0(CPUMGetGuestDR0(pVCpu)); break;
     2410            case 1: ASMSetDR1(CPUMGetGuestDR1(pVCpu)); break;
     2411            case 2: ASMSetDR2(CPUMGetGuestDR2(pVCpu)); break;
     2412            case 3: ASMSetDR3(CPUMGetGuestDR3(pVCpu)); break;
     2413            default:
     2414                AssertReturn(iGstReg != UINT8_MAX, VERR_INTERNAL_ERROR_3);
     2415        }
     2416    }
     2417#endif
    22892418    else
    22902419    {
    2291 #ifdef IN_RC
    2292         if (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS)
    2293         {
    2294             /** @todo restore host DBx registers. */
    2295         }
     2420        /*
     2421         * No active debug state any more.  In raw-mode this means we have to
     2422         * make sure DR7 has everything disabled now, if we armed it already.
     2423         *
     2424         * In the ring-0 this only happens when we decided to lazy load the
     2425         * debug state because it wasn't active, and that didn't change with
     2426         * the latest changes, so nothing to do here.
     2427         */
     2428#if defined(IN_RC)
     2429        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER)
     2430        {
     2431            ASMSetDR7(X86_DR7_INIT_VAL);
     2432            if (pVCpu->cpum.s.Hyper.dr[0])
     2433                ASMSetDR0(0);
     2434            if (pVCpu->cpum.s.Hyper.dr[1])
     2435                ASMSetDR1(0);
     2436            if (pVCpu->cpum.s.Hyper.dr[2])
     2437                ASMSetDR2(0);
     2438            if (pVCpu->cpum.s.Hyper.dr[3])
     2439                ASMSetDR3(0);
     2440            pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_DEBUG_REGS_HYPER;
     2441        }
     2442
     2443#elif defined(IN_RING0)
     2444        Assert(!CPUMIsHyperDebugStateActive(pVCpu)); /* (can only change while in ring-3) */
    22962445#endif
    2297         pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
     2446        pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
     2447
     2448        /* Clear all the registers. */
     2449        pVCpu->cpum.s.Hyper.dr[7] = X86_DR7_RA1_MASK;
     2450        pVCpu->cpum.s.Hyper.dr[3] = 0;
     2451        pVCpu->cpum.s.Hyper.dr[2] = 0;
     2452        pVCpu->cpum.s.Hyper.dr[1] = 0;
     2453        pVCpu->cpum.s.Hyper.dr[0] = 0;
     2454
    22982455    }
    22992456    Log2(("CPUMRecalcHyperDRx: fUseFlags=%#x %RGr %RGr %RGr %RGr  %RGr %RGr\n",
    23002457          pVCpu->cpum.s.fUseFlags, pVCpu->cpum.s.Hyper.dr[0], pVCpu->cpum.s.Hyper.dr[1],
    2301          pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
    2302          pVCpu->cpum.s.Hyper.dr[7]));
     2458          pVCpu->cpum.s.Hyper.dr[2], pVCpu->cpum.s.Hyper.dr[3], pVCpu->cpum.s.Hyper.dr[6],
     2459          pVCpu->cpum.s.Hyper.dr[7]));
    23032460
    23042461    return VINF_SUCCESS;
     
    27212878VMMDECL(bool) CPUMIsHostUsingSysEnter(PVM pVM)
    27222879{
    2723     return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER) != 0;
     2880    return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSENTER);
    27242881}
    27252882
     
    27332890VMMDECL(bool) CPUMIsHostUsingSysCall(PVM pVM)
    27342891{
    2735     return (pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL) != 0;
     2892    return RT_BOOL(pVM->cpum.s.fHostUseFlags & CPUM_USE_SYSCALL);
    27362893}
    27372894
     
    27592916VMMDECL(bool) CPUMIsGuestFPUStateActive(PVMCPU pVCpu)
    27602917{
    2761     return (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU) != 0;
     2918    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU);
    27622919}
    27632920
     
    27662923 * Deactivate the FPU/XMM state of the guest OS.
    27672924 * @param   pVCpu       Pointer to the VMCPU.
     2925 *
     2926 * @todo    r=bird: Why is this needed? Looks like a workaround for mishandled
     2927 *          FPU state management.
    27682928 */
    27692929VMMDECL(void) CPUMDeactivateGuestFPUState(PVMCPU pVCpu)
    27702930{
     2931    Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU));
    27712932    pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU;
    27722933}
     
    27812942VMMDECL(bool) CPUMIsGuestDebugStateActive(PVMCPU pVCpu)
    27822943{
    2783     return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS) != 0;
     2944    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST);
    27842945}
    27852946
     
    27922953VMMDECL(bool) CPUMIsHyperDebugStateActive(PVMCPU pVCpu)
    27932954{
    2794     return (pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS_HYPER) != 0;
     2955    return RT_BOOL(pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HYPER);
    27952956}
    27962957
     
    28012962 * @returns boolean
    28022963 * @param   pVM         Pointer to the VM.
     2964 * @todo    This API doesn't make sense any more.
    28032965 */
    28042966VMMDECL(void) CPUMDeactivateGuestDebugState(PVMCPU pVCpu)
    28052967{
    2806     pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS;
    2807 }
    2808 
    2809 
    2810 /**
    2811  * Mark the hypervisor's debug state as inactive.
    2812  *
    2813  * @returns boolean
    2814  * @param   pVM         Pointer to the VM.
    2815  */
    2816 VMMDECL(void) CPUMDeactivateHyperDebugState(PVMCPU pVCpu)
    2817 {
    2818     pVCpu->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HYPER;
     2968    Assert(!(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER | CPUM_USED_DEBUG_REGS_HOST)));
    28192969}
    28202970
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r47064 r47660  
    8080static void cpumR0UnmapLocalApics(void);
    8181#endif
     82static int  cpumR0SaveHostDebugState(PVMCPU pVCpu);
    8283
    8384
     
    345346#ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
    346347# if defined(VBOX_WITH_HYBRID_32BIT_KERNEL) || defined(VBOX_WITH_KERNEL_USING_XMM) /** @todo remove the #else here and move cpumHandleLazyFPUAsm back to VMMGC after branching out 3.0!!. */
    347         Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE));
     348        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
    348349        /** @todo Move the FFXR handling down into
    349350         *        cpumR0SaveHostRestoreguestFPUState to optimize the
     
    357358            {
    358359                ASMWrMsr(MSR_K6_EFER, SavedEFER & ~MSR_K6_EFER_FFXSR);
    359                 pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
     360                pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
    360361            }
    361362        }
     
    365366
    366367        /* Restore EFER. */
    367         if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
     368        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
    368369            ASMWrMsr(MSR_K6_EFER, SavedEFER);
    369370
     
    382383            {
    383384                ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
    384                 pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
     385                pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
    385386            }
    386387        }
     
    392393
    393394        /* Restore EFER MSR */
    394         if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
     395        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
    395396            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost);
    396397
     
    425426                /* fxrstor doesn't restore the XMM state! */
    426427                cpumR0LoadXMM(pCtx);
    427                 pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
     428                pVCpu->cpum.s.fUseFlags |= CPUM_USED_MANUAL_XMM_RESTORE;
    428429            }
    429430        }
     
    481482        /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */
    482483        uint64_t oldMsrEFERHost = 0;
    483         if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
     484        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
    484485        {
    485486            oldMsrEFERHost = ASMRdMsr(MSR_K6_EFER);
     
    489490
    490491        /* Restore EFER MSR */
    491         if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
     492        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
    492493            ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost | MSR_K6_EFER_FFXSR);
    493494
     
    501502# endif
    502503        cpumR0SaveFPU(pCtx);
    503         if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
     504        if (pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE)
    504505        {
    505506            /* fxsave doesn't save the XMM state! */
     
    517518    }
    518519
    519     pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_MANUAL_XMM_RESTORE);
     520    pVCpu->cpum.s.fUseFlags &= ~(CPUM_USED_FPU | CPUM_SYNC_FPU_STATE | CPUM_USED_MANUAL_XMM_RESTORE);
    520521    return VINF_SUCCESS;
    521522}
     
    523524
    524525/**
    525  * Save guest debug state
     526 * Saves the host debug state, setting CPUM_USED_HOST_DEBUG_STATE and loading
     527 * DR7 with safe values.
    526528 *
    527529 * @returns VBox status code.
    528  * @param   pVM         Pointer to the VM.
    529530 * @param   pVCpu       Pointer to the VMCPU.
    530  * @param   pCtx        Pointer to the guest CPU context.
    531  * @param   fDR6        Whether to include DR6 or not.
    532  */
    533 VMMR0DECL(int) CPUMR0SaveGuestDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
    534 {
    535     Assert(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS);
    536 
    537     /* Save the guest's debug state. The caller is responsible for DR7. */
    538 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    539     if (CPUMIsGuestInLongModeEx(pCtx))
    540     {
    541         if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_DEBUG_STATE))
    542         {
    543             uint64_t dr6 = pCtx->dr[6];
    544 
    545             HMR0SaveDebugState(pVM, pVCpu, pCtx);
    546             if (!fDR6) /* dr6 was already up-to-date */
    547                 pCtx->dr[6] = dr6;
    548         }
    549     }
    550     else
    551 #endif
    552     {
    553 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    554         cpumR0SaveDRx(&pCtx->dr[0]);
    555 #else
    556         pCtx->dr[0] = ASMGetDR0();
    557         pCtx->dr[1] = ASMGetDR1();
    558         pCtx->dr[2] = ASMGetDR2();
    559         pCtx->dr[3] = ASMGetDR3();
    560 #endif
    561         if (fDR6)
    562             pCtx->dr[6] = ASMGetDR6();
    563     }
    564 
    565     /*
    566      * Restore the host's debug state. DR0-3, DR6 and only then DR7!
    567      * DR7 contains 0x400 right now.
    568      */
    569     CPUMR0LoadHostDebugState(pVM, pVCpu);
    570     Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USE_DEBUG_REGS));
    571     return VINF_SUCCESS;
    572 }
    573 
    574 
    575 /**
    576  * Lazily sync in the debug state
    577  *
    578  * @returns VBox status code.
    579  * @param   pVM         Pointer to the VM.
    580  * @param   pVCpu       Pointer to the VMCPU.
    581  * @param   pCtx        Pointer to the guest CPU context.
    582  * @param   fDR6        Whether to include DR6 or not.
    583  */
    584 VMMR0DECL(int) CPUMR0LoadGuestDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
    585 {
    586     /* Save the host state. */
    587     CPUMR0SaveHostDebugState(pVM, pVCpu);
    588     Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
    589 
    590     /* Activate the guest state DR0-3; DR7 is left to the caller. */
    591 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    592     if (CPUMIsGuestInLongModeEx(pCtx))
    593     {
    594         /* Restore the state on entry as we need to be in 64 bits mode to access the full state. */
    595         pVCpu->cpum.s.fUseFlags |= CPUM_SYNC_DEBUG_STATE;
    596     }
    597     else
    598 #endif
    599     {
    600 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    601         cpumR0LoadDRx(&pCtx->dr[0]);
    602 #else
    603         ASMSetDR0(pCtx->dr[0]);
    604         ASMSetDR1(pCtx->dr[1]);
    605         ASMSetDR2(pCtx->dr[2]);
    606         ASMSetDR3(pCtx->dr[3]);
    607 #endif
    608         if (fDR6)
    609             ASMSetDR6(pCtx->dr[6]);
    610     }
    611 
    612     pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS;
    613     return VINF_SUCCESS;
    614 }
    615 
    616 /**
    617  * Save the host debug state
    618  *
    619  * @returns VBox status code.
    620  * @param   pVM         Pointer to the VM.
    621  * @param   pVCpu       Pointer to the VMCPU.
    622  */
    623 VMMR0DECL(int) CPUMR0SaveHostDebugState(PVM pVM, PVMCPU pVCpu)
    624 {
    625     NOREF(pVM);
    626 
    627     /* Save the host state. */
     531 */
     532static int cpumR0SaveHostDebugState(PVMCPU pVCpu)
     533{
     534    /*
     535     * Save the host state.
     536     */
    628537#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    629538    AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
     
    638547    /** @todo dr7 might already have been changed to 0x400; don't care right now as it's harmless. */
    639548    pVCpu->cpum.s.Host.dr7 = ASMGetDR7();
    640     /* Make sure DR7 is harmless or else we could trigger breakpoints when restoring dr0-3 (!) */
    641     ASMSetDR7(X86_DR7_INIT_VAL);
     549
     550    /* Preemption paranoia. */
     551    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HOST);
     552
     553    /*
     554     * Make sure DR7 is harmless or else we could trigger breakpoints when
     555     * load guest or hypervisor DRx values later.
     556     */
     557    if (pVCpu->cpum.s.Host.dr7 != X86_DR7_INIT_VAL)
     558        ASMSetDR7(X86_DR7_INIT_VAL);
    642559
    643560    return VINF_SUCCESS;
    644561}
    645562
    646 /**
    647  * Load the host debug state
     563
     564/**
     565 * Saves the guest DRx state residing in host registers and restore the host
     566 * register values.
     567 *
     568 * The guest DRx state is only saved if CPUMR0LoadGuestDebugState was called,
     569 * since it's assumed that we're shadowing the guest DRx register values
     570 * accurately when using the combined hypervisor debug register values
     571 * (CPUMR0LoadHyperDebugState).
     572 *
     573 * @returns true if either guest or hypervisor debug registers were loaded.
     574 * @param   pVCpu       The cross context CPU structure for the calling EMT.
     575 * @param   fDR6        Whether to include DR6 or not.
     576 * @thread  EMT(pVCpu)
     577 */
     578VMMR0DECL(bool) CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPU pVCpu, bool fDR6)
     579{
     580    bool const fDrXLoaded = RT_BOOL(pVCpu->cpum.s.fUseFlags & (CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER));
     581
     582    /*
     583     * Do we need to save the guest DRx registers loaded into host registers?
     584     * (DR7 and DR6 (if fDR6 is true) are left to the caller.)
     585     */
     586    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_GUEST)
     587    {
     588#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     589        if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
     590        {
     591            uint64_t uDr6 = pVCpu->cpum.s.Guest.dr[6];
     592            HMR0SaveDebugState(pVCpu->CTX_SUFF(pVM), pVCpu, &pVCpu->cpum.s.Guest);
     593            if (!fDR6)
     594                pVCpu->cpum.s.Guest.dr[6] = uDr6;
     595        }
     596        else
     597#endif
     598        {
     599#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     600            cpumR0SaveDRx(&pVCpu->cpum.s.Guest.dr[0]);
     601#else
     602            pVCpu->cpum.s.Guest.dr[0] = ASMGetDR0();
     603            pVCpu->cpum.s.Guest.dr[1] = ASMGetDR1();
     604            pVCpu->cpum.s.Guest.dr[2] = ASMGetDR2();
     605            pVCpu->cpum.s.Guest.dr[3] = ASMGetDR3();
     606#endif
     607            if (fDR6)
     608                pVCpu->cpum.s.Guest.dr[6] = ASMGetDR6();
     609        }
     610    }
     611    ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~(  CPUM_USED_DEBUG_REGS_GUEST | CPUM_USED_DEBUG_REGS_HYPER
     612                                                | CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER));
     613
     614    /*
     615     * Restore the host's debug state. DR0-3, DR6 and only then DR7!
     616     */
     617    if (pVCpu->cpum.s.fUseFlags & CPUM_USED_DEBUG_REGS_HOST)
     618    {
     619        /* A bit of paranoia first... */
     620        uint64_t uCurDR7 = ASMGetDR7();
     621        if (uCurDR7 != X86_DR7_INIT_VAL)
     622            ASMSetDR7(X86_DR7_INIT_VAL);
     623
     624#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     625        AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
     626        cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
     627#else
     628        ASMSetDR0(pVCpu->cpum.s.Host.dr0);
     629        ASMSetDR1(pVCpu->cpum.s.Host.dr1);
     630        ASMSetDR2(pVCpu->cpum.s.Host.dr2);
     631        ASMSetDR3(pVCpu->cpum.s.Host.dr3);
     632#endif
     633        /** @todo consider only updating if they differ, esp. DR6. Need to figure how
     634         *        expensive DRx reads are over DRx writes.  */
     635        ASMSetDR6(pVCpu->cpum.s.Host.dr6);
     636        ASMSetDR7(pVCpu->cpum.s.Host.dr7);
     637
     638        ASMAtomicAndU32(&pVCpu->cpum.s.fUseFlags, ~CPUM_USED_DEBUG_REGS_HOST);
     639    }
     640
     641    return fDrXLoaded;
     642}
     643
     644
     645/**
     646 * Lazily sync in the debug state.
      *
      * Saves the host debug registers and disarms all host breakpoints, then
      * loads the guest DR0-3 (and DR6 when fDR6 is true) into the hardware
      * registers.  DR7 (and DR6 when fDR6 is false) are left to the caller.
      * On 32-bit hosts running 64-bit guests the actual load is postponed to
      * the world switch via the CPUM_SYNC_DEBUG_REGS_GUEST flag.
     647 *
     648 * @param   pVCpu       The cross context CPU structure for the calling EMT.
     649 * @param   fDR6        Whether to include DR6 or not.
     650 * @thread  EMT(pVCpu)
     651 */
     652VMMR0DECL(void) CPUMR0LoadGuestDebugState(PVMCPU pVCpu, bool fDR6)
     653{
     654    /*
     655     * Save the host state and disarm all host BPs.
     656     */
     657    cpumR0SaveHostDebugState(pVCpu);
     658    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
     659
     660    /*
     661     * Activate the guest state DR0-3.
     662     * DR7 and DR6 (if fDR6 is true) are left to the caller.
     663     */
     664#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     665    if (CPUMIsGuestInLongModeEx(&pVCpu->cpum.s.Guest))
     666        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_GUEST); /* Postpone it to the world switch. */
     667    else
     668#endif
     669    {
     670#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
     671        cpumR0LoadDRx(&pVCpu->cpum.s.Guest.dr[0]);
     672#else
     673        ASMSetDR0(pVCpu->cpum.s.Guest.dr[0]);
     674        ASMSetDR1(pVCpu->cpum.s.Guest.dr[1]);
     675        ASMSetDR2(pVCpu->cpum.s.Guest.dr[2]);
     676        ASMSetDR3(pVCpu->cpum.s.Guest.dr[3]);
     677#endif
     678        if (fDR6)
     679            ASMSetDR6(pVCpu->cpum.s.Guest.dr[6]);
     680
            /* Flag that guest DRx values now occupy the hardware registers. */
     681        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_GUEST);
     682    }
     683}
     684
     685
     686/**
     687 * Lazily sync in the hypervisor debug state
    648688 *
    649689 * @returns VBox status code.
    650  * @param   pVM         Pointer to the VM.
    651  * @param   pVCpu       Pointer to the VMCPU.
    652  */
    653 VMMR0DECL(int) CPUMR0LoadHostDebugState(PVM pVM, PVMCPU pVCpu)
    654 {
    655     Assert(pVCpu->cpum.s.fUseFlags & (CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HYPER));
    656     NOREF(pVM);
    657 
    658     /*
    659      * Restore the host's debug state. DR0-3, DR6 and only then DR7!
    660      * DR7 contains 0x400 right now.
    661      */
    662 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    663     AssertCompile((uintptr_t)&pVCpu->cpum.s.Host.dr3 - (uintptr_t)&pVCpu->cpum.s.Host.dr0 == sizeof(uint64_t) * 3);
    664     cpumR0LoadDRx(&pVCpu->cpum.s.Host.dr0);
    665 #else
    666     ASMSetDR0(pVCpu->cpum.s.Host.dr0);
    667     ASMSetDR1(pVCpu->cpum.s.Host.dr1);
    668     ASMSetDR2(pVCpu->cpum.s.Host.dr2);
    669     ASMSetDR3(pVCpu->cpum.s.Host.dr3);
    670 #endif
    671     ASMSetDR6(pVCpu->cpum.s.Host.dr6);
    672     ASMSetDR7(pVCpu->cpum.s.Host.dr7);
    673 
    674     pVCpu->cpum.s.fUseFlags &= ~(CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HYPER);
    675     return VINF_SUCCESS;
    676 }
    677 
    678 
    679 /**
    680  * Lazily sync in the hypervisor debug state
    681  *
    682  * @returns VBox status code.
    683  * @param   pVM         Pointer to the VM.
    684  * @param   pVCpu       Pointer to the VMCPU.
    685  * @param   pCtx        Pointer to the guest CPU context.
     690 * @param   pVCpu       The cross context CPU structure for the calling EMT.
    686691 * @param   fDR6        Whether to include DR6 or not.
    687  */
    688 VMMR0DECL(int) CPUMR0LoadHyperDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, bool fDR6)
    689 {
    690     NOREF(pCtx);
    691 
    692     /* Save the host state. */
    693     CPUMR0SaveHostDebugState(pVM, pVCpu);
     692 * @thread  EMT(pVCpu)
     693 */
     694VMMR0DECL(void) CPUMR0LoadHyperDebugState(PVMCPU pVCpu, bool fDR6)
     695{
     696    /*
     697     * Save the host state and disarm all host BPs.
     698     */
     699    cpumR0SaveHostDebugState(pVCpu);
    694700    Assert(ASMGetDR7() == X86_DR7_INIT_VAL);
    695701
    696     /* Activate the guest state DR0-3; DR7 is left to the caller. */
     702    /*
     703     * Make sure the hypervisor values are up to date.
     704     */
     705    CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */);
     706
     707    /*
     708     * Activate the guest state DR0-3.
     709     * DR7 and DR6 (if fDR6 is true) are left to the caller.
     710     */
    697711#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    698712    if (CPUMIsGuestInLongModeEx(pCtx))
    699     {
    700         AssertFailed();
    701         return VERR_NOT_IMPLEMENTED;
    702     }
     713        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_SYNC_DEBUG_REGS_HYPER); /* Postpone it. */
    703714    else
    704715#endif
    705716    {
    706717#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    707         AssertFailed();
    708         return VERR_NOT_IMPLEMENTED;
     718        cpumR0LoadDRx(&pVCpu->cpum.s.Hyper.dr[0]);
    709719#else
    710         ASMSetDR0(CPUMGetHyperDR0(pVCpu));
    711         ASMSetDR1(CPUMGetHyperDR1(pVCpu));
    712         ASMSetDR2(CPUMGetHyperDR2(pVCpu));
    713         ASMSetDR3(CPUMGetHyperDR3(pVCpu));
     720        ASMSetDR0(pVCpu->cpum.s.Hyper.dr[0]);
     721        ASMSetDR1(pVCpu->cpum.s.Hyper.dr[1]);
     722        ASMSetDR2(pVCpu->cpum.s.Hyper.dr[2]);
     723        ASMSetDR3(pVCpu->cpum.s.Hyper.dr[3]);
    714724#endif
    715725        if (fDR6)
    716             ASMSetDR6(CPUMGetHyperDR6(pVCpu));
    717     }
    718 
    719     pVCpu->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HYPER;
    720     return VINF_SUCCESS;
     726            ASMSetDR6(X86_DR6_INIT_VAL);
     727
     728        ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
     729    }
    721730}
    722731
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r47652 r47660  
    13211321    if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
    13221322        return;
    1323 
    1324     /** @todo Turn these into assertions if possible. */
    1325     pCtx->dr[6] |= X86_DR6_INIT_VAL;                                          /* Set reserved bits to 1. */
    1326     pCtx->dr[6] &= ~RT_BIT(12);                                               /* MBZ. */
    1327 
    1328     pCtx->dr[7] &= 0xffffffff;                                                /* Upper 32 bits MBZ. */
    1329     pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));      /* MBZ. */
    1330     pCtx->dr[7] |= X86_DR7_INIT_VAL;                                          /* MB1. */
    1331 
    1332     /* Update DR6, DR7 with the guest values. */
    1333     pVmcb->guest.u64DR7 = pCtx->dr[7];
    1334     pVmcb->guest.u64DR6 = pCtx->dr[6];
    1335     pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
     1323    Assert((pCtx->dr[6] & X86_DR6_RA1_MASK) == X86_DR6_RA1_MASK); Assert((pCtx->dr[6] & X86_DR6_RAZ_MASK) == 0);
     1324    Assert((pCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); Assert((pCtx->dr[7] & X86_DR7_RAZ_MASK) == 0);
    13361325
    13371326    bool fInterceptDB     = false;
    13381327    bool fInterceptMovDRx = false;
    1339     if (DBGFIsStepping(pVCpu))
    1340     {
    1341         /* AMD-V doesn't have any monitor-trap flag equivalent. Instead, enable tracing in the guest and trap #DB. */
     1328
     1329    /*
     1330     * Anyone single stepping on the host side? If so, we'll have to use the
     1331     * trap flag in the guest EFLAGS since AMD-V doesn't have a trap flag on
     1332     * the VMM level like VT-x implementations do.
     1333     */
     1334    bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu);
     1335    if (fStepping)
     1336    {
    13421337        pVmcb->guest.u64RFlags |= X86_EFL_TF;
    13431338        fInterceptDB = true;
     1339        fInterceptMovDRx = true; /* Need clean DR6, no guest mess. */
    13441340    }
    13451341
    13461342    PVM pVM = pVCpu->CTX_SUFF(pVM);
    1347     if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
    1348     {
     1343    if (fStepping || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
     1344    {
     1345        /*
     1346         * Use the combined guest and host DRx values found in the hypervisor
     1347         * register set because the debugger has breakpoints active or someone
     1348         * is single stepping on the host side.
     1349         *
     1350         * Note! DBGF expects a clean DR6 state before executing guest code.
     1351         */
    13491352        if (!CPUMIsHyperDebugStateActive(pVCpu))
    1350         {
    1351             int rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
    1352             AssertRC(rc);
    1353 
    1354             /* Update DR6, DR7 with the hypervisor values. */
     1353            CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
     1354        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
     1355        Assert(CPUMIsHyperDebugStateActive(pVCpu));
     1356
     1357        /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
     1358        if (   pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
     1359            || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu) )
     1360        {
    13551361            pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
    1356             pVmcb->guest.u64DR6 = CPUMGetHyperDR6(pVCpu);
     1362            pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
    13571363            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
    13581364        }
    1359         Assert(CPUMIsHyperDebugStateActive(pVCpu));
     1365
     1366        /** @todo If we cared, we could optimize to allow the guest to read registers
     1367         *        with the same values. */
     1368        fInterceptDB = true;
    13601369        fInterceptMovDRx = true;
    1361     }
    1362     else if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
    1363     {
    1364         if (!CPUMIsGuestDebugStateActive(pVCpu))
    1365         {
    1366             int rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
    1367             AssertRC(rc);
    1368             STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
    1369         }
    1370         Assert(CPUMIsGuestDebugStateActive(pVCpu));
    1371         Assert(fInterceptMovDRx == false);
    1372     }
    1373     else if (!CPUMIsGuestDebugStateActive(pVCpu))
    1374     {
    1375         /* For the first time we would need to intercept MOV DRx accesses even when the guest debug registers aren't loaded. */
    1376         fInterceptMovDRx = true;
    1377     }
    1378 
     1370        Log5(("hm: Loaded hyper DRx\n"));
     1371    }
     1372    else
     1373    {
     1374        /*
     1375         * Update DR6, DR7 with the guest values if necessary.
     1376         */
     1377        if (   pVmcb->guest.u64DR7 != pCtx->dr[7]
     1378            || pVmcb->guest.u64DR6 != pCtx->dr[6])
     1379        {
     1380            pVmcb->guest.u64DR7 = pCtx->dr[7];
     1381            pVmcb->guest.u64DR6 = pCtx->dr[6];
     1382            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
     1383        }
     1384
     1385        /*
     1386         * If the guest has enabled debug registers, we need to load them prior to
     1387         * executing guest code so they'll trigger at the right time.
     1388         */
     1389        if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
     1390        {
     1391            if (!CPUMIsGuestDebugStateActive(pVCpu))
     1392            {
     1393                CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
     1394                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
     1395            }
     1396            Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     1397            Assert(CPUMIsGuestDebugStateActive(pVCpu));
     1398            Log5(("hm: Loaded guest DRx\n"));
     1399        }
     1400        /*
     1401         * If no debugging enabled, we'll lazy load DR0-3.
     1402         */
     1403        else if (!CPUMIsGuestDebugStateActive(pVCpu))
     1404            fInterceptMovDRx = true;
     1405    }
     1406
     1407    /*
     1408     * Set up the intercepts.
     1409     */
    13791410    if (fInterceptDB)
    13801411        hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_DB);
     
    17601791     * Guest Debug registers.
    17611792     */
    1762     pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
    1763     pMixedCtx->dr[7] = pVmcb->guest.u64DR7;
     1793    if (!CPUMIsHyperDebugStateActive(pVCpu))
     1794    {
     1795        pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
     1796        pMixedCtx->dr[7] = pVmcb->guest.u64DR7;
     1797    }
     1798    else
     1799    {
     1800        Assert(pVmcb->guest.u64DR7 == CPUMGetHyperDR7(pVCpu));
     1801        CPUMSetHyperDR6(pVCpu, pVmcb->guest.u64DR6);
     1802    }
    17641803
    17651804    /*
     
    18001839    }
    18011840
    1802     /* Restore host debug registers if necessary and resync on next R0 reentry. */
    1803     if (CPUMIsGuestDebugStateActive(pVCpu))
    1804     {
    1805         CPUMR0SaveGuestDebugState(pVM, pVCpu, pCtx, true /* save DR6 */);
    1806         Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    1807         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    1808     }
    1809     else if (CPUMIsHyperDebugStateActive(pVCpu))
    1810     {
    1811         CPUMR0LoadHostDebugState(pVM, pVCpu);
    1812         Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     1841    /*
     1842     * Restore host debug registers if necessary and resync on next R0 reentry.
     1843     */
    18131844#ifdef VBOX_STRICT
     1845    if (CPUMIsHyperDebugStateActive(pVCpu))
     1846    {
    18141847        PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    18151848        Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
    18161849        Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
     1850    }
    18171851#endif
    1818     }
     1852    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */))
     1853        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
     1854    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     1855    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    18191856
    18201857    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
     
    30943131
    30953132# define HMSVM_ASSERT_PREEMPT_CPUID() \
    3096    do \
    3097    { \
    3098         RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
    3099         AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
    3100    } while (0)
     3133    do \
     3134    { \
     3135         RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
     3136         AssertMsg(idAssertCpu == idAssertCpuNow, ("SVM %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
     3137    } while (0)
    31013138
    31023139# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() \
    3103             do { \
    3104                 AssertPtr(pVCpu); \
    3105                 AssertPtr(pCtx); \
    3106                 AssertPtr(pSvmTransient); \
    3107                 Assert(ASMIntAreEnabled()); \
    3108                 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
    3109                 HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
    3110                 Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
    3111                 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
    3112                 if (VMMR0IsLogFlushDisabled(pVCpu)) \
    3113                     HMSVM_ASSERT_PREEMPT_CPUID(); \
    3114             } while (0)
     3140    do { \
     3141        AssertPtr(pVCpu); \
     3142        AssertPtr(pCtx); \
     3143        AssertPtr(pSvmTransient); \
     3144        Assert(ASMIntAreEnabled()); \
     3145        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
     3146        HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
     3147        Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
     3148        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
     3149        if (VMMR0IsLogFlushDisabled(pVCpu)) \
     3150            HMSVM_ASSERT_PREEMPT_CPUID(); \
     3151    } while (0)
    31153152#else   /* Release builds */
    31163153# define HMSVM_VALIDATE_EXIT_HANDLER_PARAMS() do { } while(0)
     
    39533990
    39543991    /* We should -not- get this VM-exit if the guest is debugging. */
    3955     if (CPUMIsGuestDebugStateActive(pVCpu))
    3956     {
    3957         AssertMsgFailed(("hmR0SvmExitReadDRx: Unexpected exit. pVCpu=%p pCtx=%p\n", pVCpu, pCtx));
    3958         return VERR_SVM_UNEXPECTED_EXIT;
    3959     }
    3960 
    3961     if (   !DBGFIsStepping(pVCpu)
    3962         && !CPUMIsHyperDebugStateActive(pVCpu))
    3963     {
     3992    AssertMsgReturn(!CPUMIsGuestDebugStateActive(pVCpu),
     3993                    ("hmR0SvmExitReadDRx: Unexpected exit. pVCpu=%p pCtx=%p\n", pVCpu, pCtx),
     3994                    VERR_SVM_UNEXPECTED_EXIT);
     3995
     3996    /*
     3997     * Lazy DR0-3 loading?
     3998     */
     3999    if (!CPUMIsHyperDebugStateActive(pVCpu))
     4000    {
     4001        Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
     4002        Log5(("hmR0SvmExitReadDRx: Lazy loading guest debug registers\n"));
     4003
    39644004        /* Don't intercept DRx read and writes. */
    39654005        PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     
    39694009
    39704010        /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
    3971         PVM pVM = pVCpu->CTX_SUFF(pVM);
    3972         int rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
    3973         AssertRC(rc);
     4011        CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
    39744012        Assert(CPUMIsGuestDebugStateActive(pVCpu));
    39754013
    39764014        STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
    3977         return rc;
    3978     }
    3979 
     4015        return VINF_SUCCESS;
     4016    }
     4017
     4018    /*
     4019     * Interpret the read/writing of DRx.
     4020     */
    39804021    /** @todo Decode assist.  */
    3981     VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
    3982     int rc = VBOXSTRICTRC_VAL(rc2);
     4022    VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
     4023    Log5(("hmR0SvmExitReadDRx: Emulated DRx access: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
    39834024    if (RT_LIKELY(rc == VINF_SUCCESS))
    39844025    {
    39854026        /* Not necessary for read accesses but whatever doesn't hurt for now, will be fixed with decode assist. */
     4027        /** @todo CPUM should set this flag! */
    39864028        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    39874029    }
    39884030    else
    39894031        Assert(rc == VERR_EM_INTERPRETER);
    3990     return rc;
     4032    return VBOXSTRICTRC_TODO(rc);
    39914033}
    39924034
     
    45334575    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
    45344576
     4577    /* If we set the trap flag above, we have to clear it. */ /** @todo HM should remember what it does and possibly do this elsewhere! */
     4578    if (pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu))
     4579        pCtx->eflags.Bits.u1TF = 0;
     4580
    45354581    /* This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data breakpoint). However, for both cases
    45364582       DR6 and DR7 are updated to what the exception handler expects. See AMD spec. 15.12.2 "#DB (Debug)". */
    4537     PVM pVM = pVCpu->CTX_SUFF(pVM);
    4538     int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pCtx->dr[6]);
     4583    PSVMVMCB    pVmcb   = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     4584    PVM         pVM     = pVCpu->CTX_SUFF(pVM);
     4585    int rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVmcb->guest.u64DR6);
    45394586    if (rc == VINF_EM_RAW_GUEST_TRAP)
    45404587    {
     4588        Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> guest trap\n", pVmcb->guest.u64DR6));
     4589        if (CPUMIsHyperDebugStateActive(pVCpu))
     4590            CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | pVmcb->guest.u64DR6);
     4591
    45414592        /* Reflect the exception back to the guest. */
    45424593        hmR0SvmSetPendingXcptDB(pVCpu);
     
    45444595    }
    45454596
     4597    /*
     4598     * Update DR6.
     4599     */
     4600    if (CPUMIsHyperDebugStateActive(pVCpu))
     4601    {
     4602        Log5(("hmR0SvmExitXcptDB: DR6=%#RX64 -> %Rrc\n", pVmcb->guest.u64DR6, rc));
     4603        pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL;
     4604        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX;
     4605    }
     4606    else
     4607    {
     4608        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
     4609        Assert(!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu));
     4610    }
     4611
    45464612    return rc;
    45474613}
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r47653 r47660  
    33203320#endif
    33213321
    3322     int rc                = VERR_INTERNAL_ERROR_5;
    3323     PVM pVM               = pVCpu->CTX_SUFF(pVM);
     3322    int  rc;
     3323    PVM  pVM              = pVCpu->CTX_SUFF(pVM);
    33243324    bool fInterceptDB     = false;
    33253325    bool fInterceptMovDRx = false;
    3326     if (DBGFIsStepping(pVCpu) || pVCpu->hm.s.fSingleInstruction)
     3326    if (pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu))
    33273327    {
    33283328        /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
     
    33423342    }
    33433343
    3344     if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
    3345     {
     3344    if (fInterceptDB || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
     3345    {
     3346        /*
     3347         * Use the combined guest and host DRx values found in the hypervisor
     3348         * register set because the debugger has breakpoints active or someone
     3349         * is single stepping on the host side without a monitor trap flag.
     3350         *
     3351         * Note! DBGF expects a clean DR6 state before executing guest code.
     3352         */
    33463353        if (!CPUMIsHyperDebugStateActive(pVCpu))
    3347         {
    3348             rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
    3349             AssertRC(rc);
    3350         }
     3354            CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
     3355        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    33513356        Assert(CPUMIsHyperDebugStateActive(pVCpu));
     3357
     3358        /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
     3359        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
     3360        AssertRCReturn(rc, rc);
     3361
     3362        fInterceptDB = true;
    33523363        fInterceptMovDRx = true;
    33533364    }
    3354     else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
    3355     {
    3356         if (!CPUMIsGuestDebugStateActive(pVCpu))
    3357         {
    3358             rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
    3359             AssertRC(rc);
    3360             STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
    3361         }
    3362         Assert(CPUMIsGuestDebugStateActive(pVCpu));
    3363         Assert(fInterceptMovDRx == false);
    3364     }
    3365     else if (!CPUMIsGuestDebugStateActive(pVCpu))
    3366     {
    3367         /* For the first time we would need to intercept MOV DRx accesses even when the guest debug registers aren't loaded. */
    3368         fInterceptMovDRx = true;
    3369     }
    3370 
    3371     /* Update the exception bitmap regarding intercepting #DB generated by the guest. */
     3365    else
     3366    {
     3367        /*
     3368         * If the guest has enabled debug registers, we need to load them prior to
     3369         * executing guest code so they'll trigger at the right time.
     3370         */
     3371        if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
     3372        {
     3373            if (!CPUMIsGuestDebugStateActive(pVCpu))
     3374            {
     3375                CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
     3376                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
     3377            }
     3378            Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     3379            Assert(CPUMIsGuestDebugStateActive(pVCpu));
     3380        }
     3381        /*
     3382         * If no debugging enabled, we'll lazy load DR0-3.
     3383         */
     3384        else if (!CPUMIsGuestDebugStateActive(pVCpu))
     3385            fInterceptMovDRx = true;
     3386
     3387        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
     3388        AssertRCReturn(rc, rc);
     3389    }
     3390
     3391    /*
     3392     * Update the exception bitmap regarding intercepting #DB generated by the guest.
     3393     */
    33723394    if (fInterceptDB)
    33733395        pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
     
    33783400#endif
    33793401    }
    3380 
    3381     /* Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions. */
     3402    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
     3403    AssertRCReturn(rc, rc);
     3404
     3405    /*
     3406     * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
     3407     */
    33823408    if (fInterceptMovDRx)
    33833409        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
    33843410    else
    33853411        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
    3386 
    3387     rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
    3388     AssertRCReturn(rc, rc);
    33893412    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    33903413    AssertRCReturn(rc, rc);
    33913414
    3392     /* The guest's view of its DR7 is unblemished. Use 32-bit write as upper 32-bits MBZ as asserted above. */
    3393     rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
    3394     AssertRCReturn(rc, rc);
    3395 
    33963415    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
    3397     return rc;
     3416    return VINF_SUCCESS;
    33983417}
    33993418
     
    56445663    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
    56455664    {
    5646         /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
    5647         uint32_t u32Val;
    5648         int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);    AssertRCReturn(rc, rc);
    5649         pMixedCtx->dr[7] = u32Val;
     5665        if (!CPUMIsHyperDebugStateActive(pVCpu))
     5666        {
     5667            /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
     5668            uint32_t u32Val;
     5669            int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);    AssertRCReturn(rc, rc);
     5670            pMixedCtx->dr[7] = u32Val;
     5671        }
    56505672
    56515673        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
     
    60166038    if (CPUMIsGuestDebugStateActive(pVCpu))
    60176039    {
    6018         CPUMR0SaveGuestDebugState(pVM, pVCpu, pMixedCtx, true /* save DR6 */);
    6019         Assert(!CPUMIsGuestDebugStateActive(pVCpu));
     6040        CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
    60206041        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    60216042    }
    60226043    else if (CPUMIsHyperDebugStateActive(pVCpu))
    60236044    {
    6024         CPUMR0LoadHostDebugState(pVM, pVCpu);
    6025         Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     6045        CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */);
    60266046        Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
    60276047    }
     6048    Assert(!CPUMIsGuestDebugStateActive(pVCpu));
     6049    Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     6050
    60286051
    60296052    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
     
    91169139                 *        bootsector testcase for asserting the correct behavior (as well as
    91179140                 *        correctness of this code). */
     9141                /** @todo r=bird: DR0-3 are normally in host registers when the guest is using
     9142                 *        them, so we're testing against potentially stale values here! */
    91189143                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
    91199144                uint32_t uIOPortLast = uIOPort + cbValue - 1;
     
    93569381        /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
    93579382        PVM pVM = pVCpu->CTX_SUFF(pVM);
    9358         rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
    9359         AssertRC(rc);
     9383        CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
    93609384        Assert(CPUMIsGuestDebugStateActive(pVCpu));
    93619385
     
    95999623    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
    96009624
     9625    /*
     9626     * Get the DR6-like values from the exit qualification and pass it to DBGF
     9627     * for processing.
     9628     */
    96019629    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    96029630    AssertRCReturn(rc, rc);
     
    96049632    /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
    96059633    uint64_t uDR6 = X86_DR6_INIT_VAL;
    9606     uDR6         |= (pVmxTransient->uExitQualification
     9634    uDR6         |= (  pVmxTransient->uExitQualification
    96079635                     & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
    9608     PVM pVM = pVCpu->CTX_SUFF(pVM);
    9609     rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
     9636
     9637    rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
    96109638    if (rc == VINF_EM_RAW_GUEST_TRAP)
    96119639    {
    9612         /* DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet. See Intel spec. 27.1 "Architectural State before a VM-Exit". */
    9613         pMixedCtx->dr[6] = uDR6;
    9614 
     9640        /*
     9641         * The exception was for the guest.  Update DR6, DR7.GD and
     9642         * IA32_DEBUGCTL.LBR before forwarding it.
     9643         * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
     9644         */
     9645        pMixedCtx->dr[6] |= uDR6;
    96159646        if (CPUMIsGuestDebugStateActive(pVCpu))
    96169647            ASMSetDR6(pMixedCtx->dr[6]);
    96179648
    96189649        rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
     9650        AssertRCReturn(rc, rc);
    96199651
    96209652        /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
     
    96229654
    96239655        /* Paranoia. */
    9624         pMixedCtx->dr[7] &= 0xffffffff;                                              /* Upper 32 bits MBZ. */
    9625         pMixedCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));    /* MBZ. */
    9626         pMixedCtx->dr[7] |= 0x400;                                                   /* MB1. */
    9627 
    9628         rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
    9629         AssertRCReturn(rc,rc);
    9630 
    9631         int rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
    9632         rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    9633         rc2 |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
    9634         AssertRCReturn(rc2, rc2);
     9656        pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
     9657        pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
     9658
     9659        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
     9660        AssertRCReturn(rc, rc);
     9661
     9662        /*
     9663         * Raise #DB in the guest.
     9664         */
     9665        int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
     9666        rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
     9667        rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
     9668        AssertRCReturn(rc, rc);
    96359669        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
    96369670                               pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
    9637         rc = VINF_SUCCESS;
    9638     }
     9671        return VINF_SUCCESS;
     9672    }
     9673
     9674    /*
     9675     * Not a guest trap, must be a hypervisor related debug event then.
     9676     * Update DR6 in case someone is interested in it.
     9677     */
     9678    AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
     9679    AssertReturn(CPUMIsHyperDebugStateActive(pVCpu), VERR_HM_IPE_5);
     9680    CPUMSetHyperDR6(pVCpu, uDR6);
    96399681
    96409682    return rc;
  • trunk/src/VBox/VMM/VMMR3/DBGFBp.cpp

    r44528 r47660  
    566566{
    567567    NOREF(pVM); NOREF(pvUser);
    568     return CPUMRecalcHyperDRx(pVCpu);
     568    return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
    569569}
    570570
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r47619 r47660  
    872872
    873873            default: /** @todo don't use default for guru, but make special errors code! */
     874            {
     875                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
    874876                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
    875877                break;
     878            }
    876879        }
    877880
     
    24642467                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
    24652468                    TMR3NotifyResume(pVM, pVCpu);
    2466                     Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
     2469                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
    24672470                    break;
    24682471
     
    24762479
    24772480                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
    2478                     Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
     2481                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
    24792482                    if (rc != VINF_SUCCESS)
    24802483                    {
  • trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp

    r46420 r47660  
    323323    int rc = DBGFRZTrap01Handler(pVM, pVCpu, pRegFrame, uDr6);
    324324    if (rc == VINF_EM_RAW_GUEST_TRAP)
    325         CPUMSetGuestDR6(pVCpu, uDr6);
     325        CPUMSetGuestDR6(pVCpu, CPUMGetGuestDR6(pVCpu) | uDr6);
    326326
    327327    rc = trpmGCExitTrap(pVM, pVCpu, rc, pRegFrame);
  • trunk/src/VBox/VMM/VMMRZ/DBGFRZ.cpp

    r44528 r47660  
    4141 * @param   pVCpu       Pointer to the VMCPU.
    4242 * @param   pRegFrame   Pointer to the register frame for the trap.
    43  * @param   uDr6        The DR6 register value.
     43 * @param   uDr6        The DR6 hypervisor register value.
    4444 */
    4545VMMRZ_INT_DECL(int) DBGFRZTrap01Handler(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCUINTREG uDr6)
     
    8585    }
    8686
    87 #ifdef IN_RC
    8887    /*
    89      * Currently we only implement single stepping in the guest,
    90      * so we'll bitch if this is not a BS event.
     88     * Either an ICEBP in hypervisor code or a guest related debug exception
     89     * of sorts.
    9190     */
    92     AssertMsg(uDr6 & X86_DR6_BS, ("hey! we're not doing guest BPs yet! dr6=%RTreg %04x:%RGv\n",
    93                                   uDr6, pRegFrame->cs.Sel, pRegFrame->rip));
    94 #endif
     91    if (RT_UNLIKELY(fInHyper))
     92    {
     93        LogFlow(("DBGFRZTrap01Handler: unabled bp at %04x:%RGv\n", pRegFrame->cs.Sel, pRegFrame->rip));
     94        return VERR_DBGF_HYPER_DB_XCPT;
     95    }
    9596
    9697    LogFlow(("DBGFRZTrap01Handler: guest debug event %RTreg at %04x:%RGv!\n", uDr6, pRegFrame->cs.Sel, pRegFrame->rip));
    97     return fInHyper ? VERR_DBGF_HYPER_DB_XCPT : VINF_EM_RAW_GUEST_TRAP;
     98    return VINF_EM_RAW_GUEST_TRAP;
    9899}
    99100
  • trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac

    r41985 r47660  
    55
    66;
    7 ; Copyright (C) 2006-2012 Oracle Corporation
     7; Copyright (C) 2006-2013 Oracle Corporation
    88;
    99; This file is part of VirtualBox Open Source Edition (OSE), as
     
    355355
    356356    ; debug registers.
    357     test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    358     jz      htg_debug_regs_no
    359     jmp     htg_debug_regs_save
     357    test    esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
     358    jnz     htg_debug_regs_save
    360359htg_debug_regs_no:
    361360    DEBUG_CHAR('a')                     ; trashes esi
     
    439438    mov     rax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
    440439    mov     [rdx + r8 + CPUMCPU.Host.dr7], rax
    441     xor     eax, eax                    ; clear everything. (bit 12? is read as 1...)
    442     mov     dr7, rax
     440    mov     ecx, X86_DR7_INIT_VAL
     441    cmp     eax, ecx
     442    je      .htg_debug_regs_dr7_disabled
     443    mov     dr7, rcx
     444.htg_debug_regs_dr7_disabled:
    443445    mov     rax, dr6                    ; just in case we save the state register too.
    444446    mov     [rdx + r8 + CPUMCPU.Host.dr6], rax
    445447    ; save host DR0-3?
    446     test    esi, CPUM_USE_DEBUG_REGS
    447     jz near htg_debug_regs_no
     448    test    esi, CPUM_USE_DEBUG_REGS_HYPER
     449    jz     htg_debug_regs_no
    448450DEBUG_S_CHAR('S');
    449451    mov     rax, dr0
     
    455457    mov     rax, dr3
    456458    mov     [rdx + r8 + CPUMCPU.Host.dr3], rax
     459    or      dword [rdx + r8 + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST
    457460    jmp     htg_debug_regs_no
    458461
     
    513516GLOBALNAME JmpGCTarget
    514517    DEBUG_CHAR('-')
    515 ;mov eax, 0ffff0000h
    516 ;.delay_loop:
    517 ;nop
    518 ;dec eax
    519 ;nop
    520 ;jnz .delay_loop
    521518    ; load final cr3 and do far jump to load cs.
    522519    mov     cr3, ebp                ; ebp set above
     
    565562
    566563    ; debug registers
    567     test    esi, CPUM_USE_DEBUG_REGS
     564    test    esi, CPUM_USE_DEBUG_REGS_HYPER
    568565    jnz     htg_debug_regs_guest
    569566htg_debug_regs_guest_done:
     
    622619    mov     ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
    623620    mov     dr3, ebx
    624     ;mov     eax, [edx + CPUMCPU.Hyper.dr + 8*6]
    625     mov     ecx, 0ffff0ff0h
     621    mov     ecx, X86_DR6_INIT_VAL
    626622    mov     dr6, ecx
    627623    mov     eax, [edx + CPUMCPU.Hyper.dr + 8*7]
    628624    mov     dr7, eax
     625    or      dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    629626    jmp     htg_debug_regs_guest_done
    630627
     
    793790    ; FPU context is saved before restore of host saving (another) branch.
    794791
     792    ; Disable debug registers if active so they cannot trigger while switching.
     793    test    dword [edi + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
     794    jz      .gth_disabled_dr7
     795    mov     eax, X86_DR7_INIT_VAL
     796    mov     dr7, eax
     797.gth_disabled_dr7:
    795798
    796799    ;;
     
    991994    ;mov     cr2, rcx
    992995
    993     ; restore debug registers (if modified) (esi must still be fUseFlags!)
    994     ; (must be done after cr4 reload because of the debug extension.)
    995     test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    996     jz short gth_debug_regs_no
    997     jmp     gth_debug_regs_restore
    998 gth_debug_regs_no:
    999 
    1000996    ; Restore MSRs
    1001997    mov     rbx, rdx
     
    10141010    mov     rdx, rbx
    10151011
    1016     ; restore general registers.
     1012    ; Restore debug registers (if modified). (ESI must still be fUseFlags! Must be done late, at least after CR4!)
     1013    test    esi, CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER
     1014    jnz     gth_debug_regs_restore
     1015gth_debug_regs_done:
     1016    and     dword [edx + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
     1017
     1018    ; Restore general registers.
    10171019    mov     eax, edi                    ; restore return code. eax = return code !!
    10181020    ; mov     rax, [rdx + r8 + CPUMCPU.Host.rax] - scratch + return code
     
    10491051gth_debug_regs_restore:
    10501052    DEBUG_S_CHAR('d')
    1051     xor     eax, eax
    1052     mov     dr7, rax                    ; paranoia or not?
    1053     test    esi, CPUM_USE_DEBUG_REGS
    1054     jz short gth_debug_regs_dr7
     1053    mov     rax, dr7                    ; Some DR7 paranoia first...
     1054    mov     ecx, X86_DR7_INIT_VAL
     1055    cmp     rax, rcx
     1056    je      .gth_debug_skip_dr7_disabling
     1057    mov     dr7, rcx
     1058.gth_debug_skip_dr7_disabling:
     1059    test    esi, CPUM_USED_DEBUG_REGS_HOST
     1060    jz      .gth_debug_regs_dr7
     1061
    10551062    DEBUG_S_CHAR('r')
    10561063    mov     rax, [rdx + r8 + CPUMCPU.Host.dr0]
     
    10621069    mov     rax, [rdx + r8 + CPUMCPU.Host.dr3]
    10631070    mov     dr3, rax
    1064 gth_debug_regs_dr7:
     1071.gth_debug_regs_dr7:
    10651072    mov     rbx, [rdx + r8 + CPUMCPU.Host.dr6]
    10661073    mov     dr6, rbx
    10671074    mov     rcx, [rdx + r8 + CPUMCPU.Host.dr7]
    10681075    mov     dr7, rcx
    1069     jmp     gth_debug_regs_no
     1076
     1077    ; We clear the USED flags in the main code path.
     1078    jmp     gth_debug_regs_done
    10701079
    10711080ENDPROC vmmRCToHostAsm
  • trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac

    r47652 r47660  
    445445    mov     esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
    446446    test    esi, CPUM_SYNC_FPU_STATE
    447     jz      near gth_fpu_no
     447    jz      near htg_fpu_no
    448448
    449449%ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    460460    and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
    461461
    462 gth_fpu_no:
     462htg_fpu_no:
    463463    ; Check if we need to restore the guest debug state
    464     test    esi, CPUM_SYNC_DEBUG_STATE
    465     jz      near gth_debug_no
     464    test    esi, CPUM_SYNC_DEBUG_REGS_GUEST | CPUM_SYNC_DEBUG_REGS_HYPER
     465    jz      htg_debug_done
    466466
    467467%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    468468    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
    469469%endif
    470 
     470    test    esi, CPUM_SYNC_DEBUG_REGS_HYPER
     471    jnz     htg_debug_hyper
     472
     473    ; Guest values in DRx, letting the guest access them directly.
    471474    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
    472475    mov     dr0, rax
     
    480483    mov     dr6, rax    ; not required for AMD-V
    481484
    482     and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE
    483 
    484 gth_debug_no:
     485    and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_GUEST
     486    or      dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_GUEST
     487    jmp     htg_debug_done
     488
     489htg_debug_hyper:
     490    ; Combined values in DRx, intercepting all accesses.
     491    mov     rax, qword [rdx + CPUMCPU.Hyper.dr + 0*8]
     492    mov     dr0, rax
     493    mov     rax, qword [rdx + CPUMCPU.Hyper.dr + 1*8]
     494    mov     dr1, rax
     495    mov     rax, qword [rdx + CPUMCPU.Hyper.dr + 2*8]
     496    mov     dr2, rax
     497    mov     rax, qword [rdx + CPUMCPU.Hyper.dr + 3*8]
     498    mov     dr3, rax
     499    mov     rax, qword [rdx + CPUMCPU.Hyper.dr + 6*8]
     500    mov     dr6, rax    ; not required for AMD-V
     501
     502    and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_REGS_HYPER
     503    or      dword [rdx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
     504
     505htg_debug_done:
    485506
    486507%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    487508    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
    488509%endif
     510
     511    ;
     512    ; "Call" the specified helper function.
     513    ;
    489514
    490515    ; parameter for all helper functions (pCtx)
    491516    DEBUG64_CHAR('9')
    492517    lea     rsi, [rdx + CPUMCPU.Guest.fpu]
    493     lea     rax, [gth_return wrt rip]
     518    lea     rax, [htg_return wrt rip]
    494519    push    rax                         ; return address
    495520
     
    505530    jz      NAME(HMRCTestSwitcher64)
    506531    mov     eax, VERR_HM_INVALID_HM64ON32OP
    507 gth_return:
     532htg_return:
    508533    DEBUG64_CHAR('r')
    509534
  • trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac

    r41985 r47660  
    55
    66;
    7 ; Copyright (C) 2006-2012 Oracle Corporation
     7; Copyright (C) 2006-2013 Oracle Corporation
    88;
    99; This file is part of VirtualBox Open Source Edition (OSE), as
     
    256256
    257257    ; debug registers.
    258     test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
     258    test    esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
    259259    jnz     htg_debug_regs_save_dr7and6
    260260htg_debug_regs_no:
     
    407407
    408408    ; debug registers
    409     test    esi, CPUM_USE_DEBUG_REGS
     409    test    esi, CPUM_USE_DEBUG_REGS_HYPER
    410410    jnz     htg_debug_regs_guest
    411411htg_debug_regs_guest_done:
     
    515515    mov     eax, dr3
    516516    mov     [edx + CPUMCPU.Host.dr3], eax
     517    or      dword [edi + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST
    517518
    518519    ; load hyper DR0-7
     
    525526    mov     ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
    526527    mov     dr3, ebx
    527     ;mov     eax, [edx + CPUMCPU.Hyper.dr + 8*6]
    528     mov     ecx, 0ffff0ff0h
     528    mov     ecx, X86_DR6_INIT_VAL
    529529    mov     dr6, ecx
    530530    mov     eax, [edx + CPUMCPU.Hyper.dr + 8*7]
    531531    mov     dr7, eax
     532    or      dword [edi + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
    532533    jmp     htg_debug_regs_guest_done
    533534
     
    686687    ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    687688    ; FPU context is saved before restore of host saving (another) branch.
     689
      690    ; Disable debug registers if active so they cannot trigger while switching.
     691    test    dword [edi + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
     692    jz      .gth_disabled_dr7
     693    mov     eax, X86_DR7_INIT_VAL
     694    mov     dr7, eax
     695.gth_disabled_dr7:
    688696
    689697%ifdef VBOX_WITH_NMI
     
    887895    ; restore debug registers (if modified) (esi must still be fUseFlags!)
    888896    ; (must be done after cr4 reload because of the debug extension.)
    889     test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
    890     jz short gth_debug_regs_no
    891     jmp     gth_debug_regs_restore
    892 gth_debug_regs_no:
     897    test    esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST
     898    jnz     gth_debug_regs_restore
     899gth_debug_regs_done:
    893900
    894901    ; restore general registers.
     
    911918gth_debug_regs_restore:
    912919    DEBUG_S_CHAR('d')
    913     xor     eax, eax
    914     mov     dr7, eax                    ; paranoia or not?
    915     test    esi, CPUM_USE_DEBUG_REGS
    916     jz short gth_debug_regs_dr7
     920    mov     eax, dr7                    ; Some DR7 paranoia first...
     921    mov     ecx, X86_DR7_INIT_VAL
     922    cmp     eax, ecx
     923    je      .gth_debug_skip_dr7_disabling
     924    mov     dr7, ecx
     925.gth_debug_skip_dr7_disabling:
     926    test    esi, CPUM_USED_DEBUG_REGS_HOST
     927    jz      .gth_debug_regs_dr7
     928
    917929    DEBUG_S_CHAR('r')
    918930    mov     eax, [edx + CPUMCPU.Host.dr0]
     
    924936    mov     eax, [edx + CPUMCPU.Host.dr3]
    925937    mov     dr3, eax
    926 gth_debug_regs_dr7:
     938.gth_debug_regs_dr7:
    927939    mov     ebx, [edx + CPUMCPU.Host.dr6]
    928940    mov     dr6, ebx
    929941    mov     ecx, [edx + CPUMCPU.Host.dr7]
    930942    mov     dr7, ecx
    931     jmp     gth_debug_regs_no
     943
     944    and     dword [edx + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
     945    jmp     gth_debug_regs_done
    932946
    933947ENDPROC vmmRCToHostAsm
  • trunk/src/VBox/VMM/include/CPUMInternal.h

    r41932 r47660  
    5757
    5858/** Use flags (CPUM::fUseFlags).
    59  * (Don't forget to sync this with CPUMInternal.mac!)
     59 * (Don't forget to sync this with CPUMInternal.mac !)
    6060 * @{ */
    6161/** Used the FPU, SSE or such stuff. */
     
    6464 * REM syncing is clearing this, lazy FPU is setting it. */
    6565#define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
     66/** The XMM state was manually restored. (AMD only) */
     67#define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
     68
    6669/** Host OS is using SYSENTER and we must NULL the CS. */
    67 #define CPUM_USE_SYSENTER               RT_BIT(2)
     70#define CPUM_USE_SYSENTER               RT_BIT(3)
    6871/** Host OS is using SYSCALL and we must restore the MSR_K6_STAR. */
    69 #define CPUM_USE_SYSCALL                RT_BIT(3)
    70 /** Debug registers are used by host and must be disabled. */
    71 #define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(4)
    72 /** Enabled use of debug registers in guest context. */
    73 #define CPUM_USE_DEBUG_REGS             RT_BIT(5)
    74 /** The XMM state was manually restored. (AMD only) */
    75 #define CPUM_MANUAL_XMM_RESTORE         RT_BIT(6)
    76 /** Sync the FPU state on entry (32->64 switcher only). */
    77 #define CPUM_SYNC_FPU_STATE             RT_BIT(7)
    78 /** Sync the debug state on entry (32->64 switcher only). */
    79 #define CPUM_SYNC_DEBUG_STATE           RT_BIT(8)
    80 /** Enabled use of hypervisor debug registers in guest context. */
    81 #define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(9)
     72#define CPUM_USE_SYSCALL                RT_BIT(4)
     73
     74/** Debug registers are used by host and that DR7 and DR6 must be saved and
     75 *  disabled when switching to raw-mode. */
     76#define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
     77/** Records that we've saved the host DRx registers.
     78 * In ring-0 this means all (DR0-7), while in raw-mode context this means DR0-3
     79 * since DR6 and DR7 are covered by CPUM_USE_DEBUG_REGS_HOST. */
     80#define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
     81/** Set to indicate that we should save host DR0-7 and load the hypervisor debug
     82 * registers in the raw-mode world switchers. (See CPUMRecalcHyperDRx.) */
     83#define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
     84/** Used in ring-0 to indicate that we have loaded the hypervisor debug
     85 * registers. */
     86#define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
     87/** Used in ring-0 to indicate that we have loaded the guest debug
     88 * registers (DR0-3 and maybe DR6) for direct use by the guest.
     89 * DR7 (and AMD-V DR6) are handled via the VMCB. */
     90#define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
     91
     92
     93/** Sync the FPU state on next entry (32->64 switcher only). */
     94#define CPUM_SYNC_FPU_STATE             RT_BIT(16)
     95/** Sync the debug state on next entry (32->64 switcher only). */
     96#define CPUM_SYNC_DEBUG_REGS_GUEST      RT_BIT(17)
     97/** Sync the debug state on next entry (32->64 switcher only).
     98 * Almost the same as CPUM_USE_DEBUG_REGS_HYPER in the raw-mode switchers.  */
     99#define CPUM_SYNC_DEBUG_REGS_HYPER      RT_BIT(18)
    82100/** @} */
    83101
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r44528 r47660  
    1818%include "VBox/asmdefs.mac"
    1919
     20
    2021%define CPUM_USED_FPU                   RT_BIT(0)
    2122%define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
    22 %define CPUM_USE_SYSENTER               RT_BIT(2)
    23 %define CPUM_USE_SYSCALL                RT_BIT(3)
    24 %define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(4)
    25 %define CPUM_USE_DEBUG_REGS             RT_BIT(5)
    26 %define CPUM_SYNC_FPU_STATE             RT_BIT(7)
    27 %define CPUM_SYNC_DEBUG_STATE           RT_BIT(8)
     23%define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
     24%define CPUM_USE_SYSENTER               RT_BIT(3)
     25%define CPUM_USE_SYSCALL                RT_BIT(4)
     26%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
     27%define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
     28%define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
     29%define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
     30%define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
     31%define CPUM_SYNC_FPU_STATE             RT_BIT(16)
     32%define CPUM_SYNC_DEBUG_REGS_GUEST      RT_BIT(17)
     33%define CPUM_SYNC_DEBUG_REGS_HYPER      RT_BIT(18)
     34
    2835
    2936%define CPUM_HANDLER_DS                 1
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette