VirtualBox

Changeset 75856 in vbox


Timestamp: Dec 1, 2018 8:42:02 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 127060
Message:

VMM/HMVMXR0: Fix single-stepping for cases where we fall back to IEM for executing an instruction and don't update the RIP in the VMX R0 code.
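
For orientation, a minimal sketch of the approach the change takes (illustrative only, not the VirtualBox source): the guest's EFLAGS.TF single-step case is now handled centrally when RFLAGS is exported to the VMCS, by queueing a single-step (BS) pending debug exception there, rather than each exit handler setting it after advancing RIP. The helper name sketchExportGuestRflags is invented for this sketch; the types and constants are the ones that appear in the diff below (around new lines 3622-3634).

    /* Illustrative sketch only -- condensed from the RFLAGS-export hunk below. */
    static int sketchExportGuestRflags(PVMCPU pVCpu, X86EFLAGS fEFlags)
    {
        /* Write the guest RFLAGS into the VMCS. */
        int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
        AssertRCReturn(rc, rc);

        /*
         * If the guest itself is single-stepping (EFLAGS.TF) and we are not single-stepping it
         * from the hypervisor debugger, queue a single-step (BS) pending debug exception so the
         * #DB is delivered on the next VM-entry -- even when the instruction was executed by IEM
         * and RIP was advanced outside the VMX R0 code.
         */
        if (   !pVCpu->hm.s.fSingleInstruction
            &&  fEFlags.Bits.u1TF)
        {
            rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS);
            AssertRCReturn(rc, rc);
        }
        return rc;
    }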

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r75826)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r75856)
@@ -3497 +3497 @@
 
 /**
- * Exports the guest's interruptibility-state into the guest-state area in the
- * VMCS.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   fIntrState  The interruptibility-state to set.
- */
-static int hmR0VmxExportGuestIntrState(PVMCPU pVCpu, uint32_t fIntrState)
-{
-    NOREF(pVCpu);
-    AssertMsg(!(fIntrState & 0xfffffff0), ("%#x\n", fIntrState));   /* Bits 31:4 MBZ. */
-    Assert((fIntrState & 0x3) != 0x3);                              /* Block-by-STI and MOV SS cannot be simultaneously set. */
-    return VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
-}
-
-
-/**
  * Exports the exception intercepts required for guest execution in the VMCS.
  *
     
@@ -3637 +3620 @@
         int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32);
         AssertRCReturn(rc, rc);
+
+        /*
+         * Setup pending debug exceptions if the guest is single-stepping using EFLAGS.TF.
+         *
+         * We must avoid setting any automatic debug exceptions delivery when single-stepping
+         * through the hypervisor debugger using EFLAGS.TF.
+         */
+        if (   !pVCpu->hm.s.fSingleInstruction
+            &&  fEFlags.Bits.u1TF)
+        {
+            rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS);
+            AssertRCReturn(rc, rc);
+        }
 
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS);
     
@@ -4196 +4192 @@
     int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, u32GuestDr7);
     AssertRCReturn(rc, rc);
+
+    /*
+     * If we have forced EFLAGS.TF to be set because we're single-stepping in the hypervisor debugger,
+     * we need to clear interrupt inhibition if any as otherwise it causes a VM-entry failure.
+     *
+     * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
+     */
+    if (fSteppingDB)
+    {
+        Assert(pVCpu->hm.s.fSingleInstruction);
+        Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
+
+        uint32_t fIntrState = 0;
+        rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
+        AssertRCReturn(rc, rc);
+
+        if (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
+        {
+            fIntrState &= ~(VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
+            rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
+            AssertRCReturn(rc, rc);
+        }
+    }
 
     return VINF_SUCCESS;
     
@@ -6490 +6509 @@
     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
     int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &u32Val);
-    if (RT_SUCCESS(rc))
-    {
-        /*
-         * We additionally have a requirement to import RIP, RFLAGS depending on whether we
-         * might need them in hmR0VmxEvaluatePendingEvent().
-         */
-        if (!u32Val)
-        {
-            if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
-            {
-                rc =  hmR0VmxImportGuestRip(pVCpu);
-                rc |= hmR0VmxImportGuestRFlags(pVCpu);
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
-            }
-
-            if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
-        }
-        else
+    AssertRCReturn(rc, rc);
+
+    /*
+     * We additionally have a requirement to import RIP, RFLAGS depending on whether we
+     * might need them in hmR0VmxEvaluatePendingEvent().
+     */
+    if (!u32Val)
+    {
+        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
         {
             rc =  hmR0VmxImportGuestRip(pVCpu);
             rc |= hmR0VmxImportGuestRFlags(pVCpu);
-
-            if (u32Val & (  VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
-                          | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
-            {
-                EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
-            }
-            else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
-
-            if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
-            {
-                if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
-                    VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
-            }
-            else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
-        }
-    }
-    return rc;
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+        }
+
+        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
+    }
+    else
+    {
+        rc =  hmR0VmxImportGuestRip(pVCpu);
+        rc |= hmR0VmxImportGuestRFlags(pVCpu);
+
+        if (u32Val & (  VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS
+                      | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
+        {
+            EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
+        }
+        else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+
+        if (u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)
+        {
+            if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
+                VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
+        }
+        else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
+    }
+
+    return VINF_SUCCESS;
 }
 
     
@@ -7088 +7107 @@
     }
 
-    /* Clear any pending events from the VMCS. */
+    /* Clear the events from the VMCS. */
     VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
-    VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0);
 
     /* We're now done converting the pending event. */
     
@@ -7590 +7608 @@
 
 /**
- * Sets a pending-debug exception to be delivered to the guest if the guest is
- * single-stepping in the VMCS.
- *
- * @returns VBox status code.
- * @param   pVCpu           The cross context virtual CPU structure.
- */
-DECLINLINE(int) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu)
-{
-    Assert(!(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
-    RT_NOREF(pVCpu);
-    return VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, VMX_VMCS_GUEST_PENDING_DEBUG_XCPT_BS);
-}
-
-
-/**
  * Injects any pending events into the guest if the guest is in a state to
  * receive them.
     
@@ -7620 +7623 @@
     Assert(VMMRZCallRing3IsEnabled(pVCpu));
 
-    bool fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
-    bool fBlockSti   = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
+    bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
+    bool const fBlockSti   = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
 
     Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pVCpu->cpum.GstCtx.fExtrn) & CPUMCTX_EXTRN_RFLAGS));
     
@@ -7658 +7661 @@
         Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,
               uIntType));
+
+        /*
+         * Inject the event and get any changes to the guest-interruptibility state.
+         *
+         * The guest-interruptibility state may need to be updated if we inject the event
+         * into the guest IDT ourselves (for real-on-v86 guest injecting software interrupts).
+         */
         rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,
                                           pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping,
     
@@ -7663 +7673 @@
         AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict);
 
-        /* Update the interruptibility-state as it could have been changed by
-           hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
-        fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
-        fBlockSti   = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
-
         if (uIntType == VMX_ENTRY_INT_INFO_TYPE_EXT_INT)
             STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
     
@@ -7674 +7679 @@
     }
 
-    /* Deliver pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
-    if (   fBlockSti
-        || fBlockMovSS)
-    {
-        if (!pVCpu->hm.s.fSingleInstruction)
-        {
-            /*
-             * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
-             * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
-             * See Intel spec. 27.3.4 "Saving Non-Register State".
-             */
-            Assert(!DBGFIsStepping(pVCpu));
-            int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS);
-            AssertRCReturn(rc, rc);
-            if (pCtx->eflags.Bits.u1TF)
-            {
-                int rc2 = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                AssertRCReturn(rc2, rc2);
-            }
-        }
-        else if (pCtx->eflags.Bits.u1TF)
-        {
-            /*
-             * We are single-stepping in the hypervisor debugger using EFLAGS.TF. Clear interrupt inhibition as setting the
-             * BS bit would mean delivering a #DB to the guest upon VM-entry when it shouldn't be.
-             */
-            Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MONITOR_TRAP_FLAG));
-            fIntrState = 0;
-        }
-    }
-
     /*
-     * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
-     * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
+     * Update the guest-interruptibility state.
+     *
+     * This is required for the real-on-v86 software interrupt injection case above, as well as
+     * updates to the guest state from ring-3 or IEM/REM.
      */
-    int rc3 = hmR0VmxExportGuestIntrState(pVCpu, fIntrState);
-    AssertRCReturn(rc3, rc3);
+    int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
+    AssertRCReturn(rc, rc);
+
+    /*
+     * There's no need to clear the VM-entry interruption-information field here if we're not
+     * injecting anything. VT-x clears the valid bit on every VM-exit.
+     *
+     * See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
+     */
 
     Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping));
     
@@ -8294 +8277 @@
     AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
 
-    /* Exporting RFLAGS here is fine, even though RFLAGS.TF might depend on guest debug state which is
-       not exported here. It is re-evaluated and updated if necessary in hmR0VmxExportSharedState(). */
     rc  = hmR0VmxExportGuestRip(pVCpu);
     rc |= hmR0VmxExportGuestRsp(pVCpu);
     
@@ -10492 +10473 @@
 
     hmR0VmxAdvanceGuestRipBy(pVCpu, pVmxTransient->cbInstr);
-
-    /*
-     * Deliver a debug exception to the guest if it is single-stepping. Don't directly inject a #DB but use the
-     * pending debug exception field as it takes care of priority of events.
-     *
-     * See Intel spec. 32.2.1 "Debug Exceptions".
-     */
-    if (  !pVCpu->hm.s.fSingleInstruction
-        && pVCpu->cpum.GstCtx.eflags.Bits.u1TF)
-    {
-        rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-        AssertRCReturn(rc, rc);
-    }
-
     return VINF_SUCCESS;
 }
     
@@ -11304 +11271 @@
     int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
     AssertRCReturn(rc, rc);
-
-    bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI);
-    if (   fBlockSti
-        && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
-    {
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+    Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS));
+    if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
+    {
+        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+
+        fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
+        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INT_STATE, fIntrState);
+        AssertRCReturn(rc, rc);
     }
 
     
@@ -12494 +12464 @@
              */
             if (fIOString)
-            {
-                /** @todo Single-step for INS/OUTS with REP prefix? */
                 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS);
-            }
-            else if (  !fDbgStepping
-                     && fGstStepping)
-            {
-                rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu);
-                AssertRCReturn(rc, rc);
-            }
 
             /*