VirtualBox

Changeset 48552 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Sep 19, 2013 4:52:49 PM (11 years ago)
Author:
vboxsync
Message:

VMM/HMVMXR0, HMSVMR0: Fixes for shared FPU/debug state handling with thread-context hooks.

Location:
trunk/src/VBox/VMM/VMMR0
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r48475 r48552  
    211211    /** Whether the TSC offset mode needs to be updated. */
    212212    bool            fUpdateTscOffsetting;
     213    /** Whether the guest FPU state was active at the time of #VMEXIT. */
     214    bool            fWasGuestFPUStateActive;
     215    /** Whether the guest debug state was active at the time of #VMEXIT. */
     216    bool            fWasGuestDebugStateActive;
     217    /** Whether the hyper debug state was active at the time of #VMEXIT. */
     218    bool            fWasHyperDebugStateActive;
    213219} SVMTRANSIENT, *PSVMTRANSIENT;
    214220AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode,       sizeof(uint64_t));
     
    13981404         * Note! DBGF expects a clean DR6 state before executing guest code.
    13991405         */
     1406#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     1407        if (   CPUMIsGuestInLongModeEx(pMixedCtx)
     1408            && !CPUMIsHyperDebugStateActivePending(pVCpu))
     1409        {
     1410            CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
     1411            Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
     1412            Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
     1413        }
     1414        else
     1415#endif
    14001416        if (!CPUMIsHyperDebugStateActive(pVCpu))
     1417        {
    14011418            CPUMR0LoadHyperDebugState(pVCpu, false /* include DR6 */);
    1402         Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    1403         Assert(CPUMIsHyperDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
     1419            Assert(!CPUMIsGuestDebugStateActive(pVCpu));
     1420            Assert(CPUMIsHyperDebugStateActive(pVCpu));
     1421        }
    14041422
    14051423        /* Update DR6 & DR7. (The other DRx values are handled by CPUM one way or the other.) */
    14061424        if (   pVmcb->guest.u64DR6 != X86_DR6_INIT_VAL
    1407             || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu) )
     1425            || pVmcb->guest.u64DR7 != CPUMGetHyperDR7(pVCpu))
    14081426        {
    14091427            pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);
     
    14371455        if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
    14381456        {
     1457#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     1458            if (   CPUMIsGuestInLongModeEx(pMixedCtx)
     1459                && !CPUMIsGuestDebugStateActivePending(pVCpu))
     1460            {
     1461                CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
     1462                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
     1463                Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
     1464                Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
     1465            }
     1466            else
     1467#endif
    14391468            if (!CPUMIsGuestDebugStateActive(pVCpu))
    14401469            {
    14411470                CPUMR0LoadGuestDebugState(pVCpu, false /* include DR6 */);
    14421471                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
     1472                Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     1473                Assert(CPUMIsGuestDebugStateActive(pVCpu));
    14431474            }
    1444             Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    1445             Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
    14461475            Log5(("hmR0SvmLoadSharedDebugState: Loaded guest DRx\n"));
    14471476        }
    14481477        /*
    1449          * If no debugging enabled, we'll lazy load DR0-3.
     1478         * If no debugging enabled, we'll lazy load DR0-3. We don't need to
     1479         * intercept #DB as DR6 is updated in the VMCB.
    14501480         */
     1481#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     1482        else if (   (   CPUMIsGuestInLongModeEx(pMixedCtx)
     1483                     && !CPUMIsGuestDebugStateActivePending(pVCpu))
     1484                 || !CPUMIsGuestDebugStateActive(pVCpu))
     1485#else
    14511486        else if (!CPUMIsGuestDebugStateActive(pVCpu))
     1487#endif
     1488        {
    14521489            fInterceptMovDRx = true;
     1490        }
    14531491    }
    14541492
     
    17641802 * @remarks No-long-jump zone!!!
    17651803 */
    1766 static void hmR0VmxLoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
     1804static void hmR0SvmLoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
    17671805{
    17681806    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     
    18931931     * Guest Debug registers.
    18941932     */
     1933    /** @todo We need to save DR6, DR7 according to what we did in
     1934     *        hmR0SvmLoadSharedDebugState(). */
    18951935    if (!CPUMIsHyperDebugStateActive(pVCpu))
    18961936    {
     
    19832023
    19842024    /* When thread-context hooks are used, we can avoid doing the leave again if we had been preempted before
    1985        and done this from the VMXR0ThreadCtxCallback(). */
     2025       and done this from the SVMR0ThreadCtxCallback(). */
    19862026    if (!pVCpu->hm.s.fLeaveDone)
    19872027    {
     
    28642904    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    28652905    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
    2866         hmR0VmxLoadSharedState(pVCpu, pVmcb, pCtx);
     2906        hmR0SvmLoadSharedState(pVCpu, pVmcb, pCtx);
    28672907    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;       /* Preemption might set this, nothing to do on AMD-V. */
    28682908    AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
     
    28792919        pSvmTransient->fUpdateTscOffsetting = false;
    28802920    }
     2921
     2922    /* Store status of the shared guest-host state at the time of VMRUN. */
     2923#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     2924    if (CPUMIsGuestInLongModeEx(pCtx))
     2925    {
     2926        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
     2927        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
     2928    }
     2929    else
     2930#endif
     2931    {
     2932        pSvmTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
     2933        pSvmTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
     2934    }
     2935    pSvmTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
    28812936
    28822937    /* Flush the appropriate tagged-TLB entries. */
     
    42184273    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    42194274
    4220     /* We should -not- get this VM-exit if the guest is debugging. */
    4221     AssertMsgReturn(!CPUMIsGuestDebugStateActive(pVCpu),
     4275    /* We should -not- get this VM-exit if we're not stepping or the guest is debugging. */
     4276    AssertMsgReturn(   pVCpu->hm.s.fSingleInstruction
     4277                    || DBGFIsStepping(pVCpu)
     4278                    || !pSvmTransient->fWasGuestDebugStateActive,
    42224279                    ("hmR0SvmExitReadDRx: Unexpected exit. pVCpu=%p pCtx=%p\n", pVCpu, pCtx),
    42234280                    VERR_SVM_UNEXPECTED_EXIT);
     
    42264283     * Lazy DR0-3 loading?
    42274284     */
    4228     if (!CPUMIsHyperDebugStateActive(pVCpu))
     4285    if (!pSvmTransient->fWasHyperDebugStateActive)
    42294286    {
    42304287        Assert(!DBGFIsStepping(pVCpu)); Assert(!pVCpu->hm.s.fSingleInstruction);
     
    42374294        pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    42384295
    4239         /* We're playing with the host CPU state here, make sure we don't preempt. */
     4296        /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
     4297        VMMRZCallRing3Disable(pVCpu);
    42404298        HM_DISABLE_PREEMPT_IF_NEEDED();
    42414299
     
    42454303
    42464304        HM_RESTORE_PREEMPT_IF_NEEDED();
     4305        VMMRZCallRing3Enable(pVCpu);
    42474306
    42484307        STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
     
    43884447                        || DBGFBpIsHwIoArmed(pVM)))
    43894448        {
    4390             /* We're playing with the host CPU state here, make sure we don't preempt. */
     4449            /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
     4450            VMMRZCallRing3Disable(pVCpu);
    43914451            HM_DISABLE_PREEMPT_IF_NEEDED();
    43924452
     
    44094469
    44104470            HM_RESTORE_PREEMPT_IF_NEEDED();
     4471            VMMRZCallRing3Enable(pVCpu);
    44114472        }
    44124473
     
    47384799    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
    47394800
     4801    /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
     4802    VMMRZCallRing3Disable(pVCpu);
     4803    HM_DISABLE_PREEMPT_IF_NEEDED();
     4804
     4805    int rc;
     4806    /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
     4807    if (pSvmTransient->fWasGuestFPUStateActive)
     4808    {
     4809        rc = VINF_EM_RAW_GUEST_TRAP;
     4810        Assert(CPUMIsGuestFPUStateActive(pVCpu) || (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
     4811    }
     4812    else
     4813    {
    47404814#ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
    4741     Assert(!CPUMIsGuestFPUStateActive(pVCpu));
     4815        Assert(!pSvmTransient->fWasGuestFPUStateActive);
    47424816#endif
    4743 
    4744     /* We're playing with the host CPU state here, make sure we don't preempt. */
    4745     HM_DISABLE_PREEMPT_IF_NEEDED();
    4746 
    4747     /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
    4748     int rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
    4749     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     4817        /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
     4818        rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
     4819        Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
     4820    }
     4821
     4822    HM_RESTORE_PREEMPT_IF_NEEDED();
     4823    VMMRZCallRing3Enable(pVCpu);
    47504824
    47514825    if (rc == VINF_SUCCESS)
    47524826    {
    4753         Assert(CPUMIsGuestFPUStateActive(pVCpu));
    4754         HM_RESTORE_PREEMPT_IF_NEEDED();
    4755 
     4827        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    47564828        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
    4757         return VINF_SUCCESS;
    4758     }
    4759 
    4760     HM_RESTORE_PREEMPT_IF_NEEDED();
    4761 
    4762     /* Forward #NM to the guest. */
    4763     Assert(rc == VINF_EM_RAW_GUEST_TRAP);
    4764     hmR0SvmSetPendingXcptNM(pVCpu);
    4765     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
     4829    }
     4830    else
     4831    {
     4832        /* Forward #NM to the guest. */
     4833        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
     4834        hmR0SvmSetPendingXcptNM(pVCpu);
     4835        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
     4836    }
    47664837    return VINF_SUCCESS;
    47674838}
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r48478 r48552  
    262262     *  contributory exception or a page-fault. */
    263263    bool            fVectoringPF;
     264    /** Whether the guest FPU was active at the time of VM-exit. */
     265    bool            fWasGuestFPUStateActive;
     266    /** Whether the guest debug state was active at the time of VM-exit. */
     267    bool            fWasGuestDebugStateActive;
     268    /** Whether the hyper debug state was active at the time of VM-exit. */
     269    bool            fWasHyperDebugStateActive;
    264270} VMXTRANSIENT;
    265271AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason,    sizeof(uint64_t));
     
    31293135        /* If the guest FPU state is active, don't need to VM-exit on writes to FPU related bits in CR0. */
    31303136        if (fInterceptNM)
    3131             u32CR0Mask |=  (X86_CR0_TS | X86_CR0_MP);
    3132         else
    3133             u32CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
     3137        {
     3138            u32CR0Mask |=  X86_CR0_TS
     3139                         | X86_CR0_MP;
     3140        }
    31343141
    31353142        /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
     
    31373144        rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
    31383145        AssertRCReturn(rc, rc);
     3146        Log4(("Load: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", u32CR0Mask));
    31393147
    31403148        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     
    34073415         * Note! DBGF expects a clean DR6 state before executing guest code.
    34083416         */
     3417#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     3418        if (   CPUMIsGuestInLongModeEx(pMixedCtx)
     3419            && !CPUMIsHyperDebugStateActivePending(pVCpu))
     3420        {
     3421            CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
     3422            Assert(CPUMIsHyperDebugStateActivePending(pVCpu));
     3423            Assert(!CPUMIsGuestDebugStateActivePending(pVCpu));
     3424        }
     3425        else
     3426#endif
    34093427        if (!CPUMIsHyperDebugStateActive(pVCpu))
     3428        {
    34103429            CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
    3411         Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    3412         Assert(CPUMIsHyperDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
     3430            Assert(CPUMIsHyperDebugStateActive(pVCpu));
     3431            Assert(!CPUMIsGuestDebugStateActive(pVCpu));
     3432        }
    34133433
    34143434        /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
     
    34273447        if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
    34283448        {
    3429             if (!CPUMIsGuestDebugStateActive(pVCpu))
     3449#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     3450            if (   CPUMIsGuestInLongModeEx(pMixedCtx)
     3451                && !CPUMIsGuestDebugStateActivePending(pVCpu))
    34303452            {
    34313453                CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
     3454                Assert(CPUMIsGuestDebugStateActivePending(pVCpu));
     3455                Assert(!CPUMIsHyperDebugStateActivePending(pVCpu));
    34323456                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
    34333457            }
    3434             Assert(!CPUMIsHyperDebugStateActive(pVCpu));
    3435             Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
     3458            else
     3459#endif
     3460            if (!CPUMIsGuestDebugStateActive(pVCpu))
     3461            {
     3462                CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
     3463                Assert(CPUMIsGuestDebugStateActive(pVCpu));
     3464                Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     3465                STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
     3466            }
    34363467        }
    34373468        /*
     
    34393470         * must intercept #DB in order to maintain a correct DR6 guest value.
    34403471         */
     3472#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     3473        else if (   (   CPUMIsGuestInLongModeEx(pMixedCtx)
     3474                     && !CPUMIsGuestDebugStateActivePending(pVCpu))
     3475                 || !CPUMIsGuestDebugStateActive(pVCpu))
     3476#else
    34413477        else if (!CPUMIsGuestDebugStateActive(pVCpu))
     3478#endif
    34423479        {
    34433480            fInterceptMovDRx = true;
     
    51065143        int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0,            &uVal);
    51075144        AssertRCReturn(rc, rc);
     5145
    51085146        uint32_t uShadow = 0;
    51095147        rc     = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
     
    57515789    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
    57525790    {
     5791        /** @todo We need to update DR7 according to what was done in hmR0VmxLoadSharedDebugState(). */
    57535792        if (!CPUMIsHyperDebugStateActive(pVCpu))
    57545793        {
     
    61236162    if (CPUMIsGuestFPUStateActive(pVCpu))
    61246163    {
     6164        if (!fSaveGuestState)
     6165        {
     6166            int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
     6167            AssertRCReturn(rc, rc);
     6168        }
    61256169        CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
    61266170        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
     
    61356179    if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
    61366180        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    6137     Assert(!CPUMIsGuestDebugStateActive(pVCpu));
    6138     Assert(!CPUMIsHyperDebugStateActive(pVCpu));
     6181    Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu));
     6182    Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu));
    61396183
    61406184#if HC_ARCH_BITS == 64
     
    63876431    Assert(VMMR0IsLogFlushDisabled(pVCpu));
    63886432
    6389     Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
     6433    Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32\n enmOperation=%d", pVCpu, pVCpu->idCpu,
     6434          enmOperation));
     6435
    63906436    int rc = hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
    63916437    AssertRCReturn(rc, rc);
     
    75227568    }
    75237569
     7570#ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
     7571    if (!CPUMIsGuestFPUStateActive(pVCpu))
     7572        CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
     7573    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     7574#endif
     7575
    75247576    /*
    75257577     * Load the host state bits as we may've been preempted (only happens when
     
    75347586    }
    75357587    Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
    7536 
    7537 #ifdef HMVMX_ALWAYS_SWAP_FPU_STATE
    7538     if (!CPUMIsGuestFPUStateActive(pVCpu))
    7539         CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
    7540     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    7541 #endif
    75427588
    75437589    /*
     
    75477593        hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
    75487594    AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
     7595
     7596    /* Store status of the shared guest-host state at the time of VM-entry. */
     7597#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     7598    if (CPUMIsGuestInLongModeEx(pMixedCtx))
     7599    {
     7600        pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActivePending(pVCpu);
     7601        pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActivePending(pVCpu);
     7602    }
     7603    else
     7604#endif
     7605    {
     7606        pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
     7607        pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
     7608    }
     7609    pVmxTransient->fWasGuestFPUStateActive = CPUMIsGuestFPUStateActive(pVCpu);
    75497610
    75507611    /*
     
    97849845            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
    97859846
    9786             /* We're playing with the host CPU state here, make sure we don't preempt. */
     9847            /* We're playing with the host CPU state here, make sure we don't preempt or longjmp. */
     9848            VMMRZCallRing3Disable(pVCpu);
    97879849            HM_DISABLE_PREEMPT_IF_NEEDED();
     9850
    97889851            bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /*fDr6*/);
    97899852
     
    98059868
    98069869            HM_RESTORE_PREEMPT_IF_NEEDED();
     9870            VMMRZCallRing3Enable(pVCpu);
    98079871        }
    98089872    }
     
    998410048    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    998510049
    9986     /* We should -not- get this VM-exit if the guest's debug registers are active. See CPUMR0LoadGuestDebugState(). */
    9987 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    9988     if (   !CPUMIsGuestInLongModeEx(pMixedCtx)      /* EFER is always up-to-date. */
    9989         && CPUMIsGuestDebugStateActive(pVCpu))
    9990 #else
    9991     if (CPUMIsGuestDebugStateActive(pVCpu))
    9992 #endif
     10050    /* We should -not- get this VM-exit if the guest's debug registers were active. */
     10051    if (pVmxTransient->fWasGuestDebugStateActive)
    999310052    {
    999410053        AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
     
    999910058    if (   !DBGFIsStepping(pVCpu)
    1000010059        && !pVCpu->hm.s.fSingleInstruction
    10001         && !CPUMIsHyperDebugStateActive(pVCpu))
     10060        && !pVmxTransient->fWasHyperDebugStateActive)
    1000210061    {
    1000310062        /* Don't intercept MOV DRx and #DB any more. */
     
    1001510074        }
    1001610075
    10017         /* We're playing with the host CPU state here, make sure we can't preempt. */
     10076        /* We're playing with the host CPU state here, make sure we can't preempt or longjmp. */
     10077        VMMRZCallRing3Disable(pVCpu);
    1001810078        HM_DISABLE_PREEMPT_IF_NEEDED();
    1001910079
     
    1002410084
    1002510085        HM_RESTORE_PREEMPT_IF_NEEDED();
     10086        VMMRZCallRing3Enable(pVCpu);
    1002610087
    1002710088#ifdef VBOX_WITH_STATISTICS
     
    1028610347         * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
    1028710348         */
     10349        VMMRZCallRing3Disable(pVCpu);
    1028810350        HM_DISABLE_PREEMPT_IF_NEEDED();
    1028910351
     
    1029410356
    1029510357        HM_RESTORE_PREEMPT_IF_NEEDED();
     10358        VMMRZCallRing3Enable(pVCpu);
    1029610359
    1029710360        rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
     
    1032510388     */
    1032610389    AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
    10327     AssertReturn(CPUMIsHyperDebugStateActive(pVCpu), VERR_HM_IPE_5);
     10390    AssertReturn(pVmxTransient->fWasHyperDebugStateActive, VERR_HM_IPE_5);
    1032810391    CPUMSetHyperDR6(pVCpu, uDR6);
    1032910392
     
    1034410407    AssertRCReturn(rc, rc);
    1034510408
    10346     /* We're playing with the host CPU state here, have to disable preemption. */
     10409    /* We're playing with the host CPU state here, have to disable preemption or longjmp. */
     10410    VMMRZCallRing3Disable(pVCpu);
    1034710411    HM_DISABLE_PREEMPT_IF_NEEDED();
    1034810412
     10413    /* If the guest FPU was active at the time of the #NM exit, then it's a guest fault. */
     10414    if (pVmxTransient->fWasGuestFPUStateActive)
     10415    {
     10416        rc = VINF_EM_RAW_GUEST_TRAP;
     10417        Assert(CPUMIsGuestFPUStateActive(pVCpu) || (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0));
     10418    }
     10419    else
     10420    {
    1034910421#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    10350     if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    10351         Assert(!CPUMIsGuestFPUStateActive(pVCpu));
    10352 #endif
    10353 
    10354     /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
    10355     PVM pVM = pVCpu->CTX_SUFF(pVM);
    10356     rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
    10357     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     10422        Assert(!pVmxTransient->fWasGuestFPUStateActive);
     10423#endif
     10424        /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
     10425        rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
     10426        Assert(rc == VINF_EM_RAW_GUEST_TRAP || (rc == VINF_SUCCESS && CPUMIsGuestFPUStateActive(pVCpu)));
     10427    }
     10428
     10429    HM_RESTORE_PREEMPT_IF_NEEDED();
     10430    VMMRZCallRing3Enable(pVCpu);
    1035810431
    1035910432    if (rc == VINF_SUCCESS)
    1036010433    {
    10361         Assert(CPUMIsGuestFPUStateActive(pVCpu));
    10362         HM_RESTORE_PREEMPT_IF_NEEDED();
    10363 
     10434        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    1036410435        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
    10365         return VINF_SUCCESS;
    10366     }
    10367     HM_RESTORE_PREEMPT_IF_NEEDED();
    10368 
    10369     /* Forward #NM to the guest. */
    10370     Assert(rc == VINF_EM_RAW_GUEST_TRAP);
    10371     rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
    10372     AssertRCReturn(rc, rc);
    10373     hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
    10374                            pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
    10375     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
    10376     return rc;
     10436    }
     10437    else
     10438    {
     10439        /* Forward #NM to the guest. */
     10440        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
     10441        rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
     10442        AssertRCReturn(rc, rc);
     10443        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
     10444                               pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
     10445        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
     10446    }
     10447
     10448    return VINF_SUCCESS;
    1037710449}
    1037810450
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette