VirtualBox

Changeset 72065 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp: Apr 30, 2018 6:27:34 AM (7 years ago)
Author: vboxsync
Message: VMM/SVM: Interrupt injection fixes.

File: 1 edited

Legend: lines prefixed with ' ' are unmodified, '-' removed, '+' added.
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

Diff from r71970 to r72065:
@@ -215,5 +215,5 @@
 /** TPR: V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING,
     V_INTR_VECTOR. */
-#define HMSVM_VMCB_CLEAN_TPR                    RT_BIT(3)
+#define HMSVM_VMCB_CLEAN_INT_CTRL               RT_BIT(3)
 /** Nested Paging: Nested CR3 (nCR3), PAT. */
 #define HMSVM_VMCB_CLEAN_NP                     RT_BIT(4)
     
@@ -237,5 +237,5 @@
                                                  | HMSVM_VMCB_CLEAN_IOPM_MSRPM  \
                                                  | HMSVM_VMCB_CLEAN_ASID        \
-                                                 | HMSVM_VMCB_CLEAN_TPR         \
+                                                 | HMSVM_VMCB_CLEAN_INT_CTRL    \
                                                  | HMSVM_VMCB_CLEAN_NP          \
                                                  | HMSVM_VMCB_CLEAN_CRX_EFER    \
     
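For orientation: bit 3 of the VMCB clean field covers the whole interrupt-control area (V_TPR, V_IRQ, V_IGN_TPR, V_INTR_MASKING, V_INTR_VECTOR), so renaming CLEAN_TPR to CLEAN_INT_CTRL matches what the hardware actually caches. A minimal standalone sketch of the clean-bits protocol, simplified to plain uint32_t masks instead of the real VMCB structures and RT_BIT():

    #include <stdint.h>
    #include <stdio.h>

    #define VMCB_CLEAN_INTERCEPTS  (1u << 0)   /* intercept vectors (bit 0 per the AMD spec) */
    #define VMCB_CLEAN_INT_CTRL    (1u << 3)   /* V_TPR, V_IRQ, V_IGN_TPR, ... */

    int main(void)
    {
        uint32_t fCleanBits = VMCB_CLEAN_INTERCEPTS | VMCB_CLEAN_INT_CTRL;

        /* Software modified the interrupt-control area (e.g. set V_IRQ):
           clear its clean bit so the CPU reloads that VMCB area on the
           next VMRUN instead of using its cached copy. */
        fCleanBits &= ~VMCB_CLEAN_INT_CTRL;

        printf("clean bits after touching int-ctrl: %#x\n", fCleanBits);
        return 0;
    }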
@@ -982,5 +982,5 @@
 
     /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts
-       and we currently deliver both PIC and APIC interrupts alike. See hmR0SvmInjectPendingEvent() */
+       and we currently deliver both PIC and APIC interrupts alike, see hmR0SvmEvaluatePendingEvent() */
     pVmcbCtrl->IntCtrl.n.u1IgnoreTPR = 1;
 
     
@@ -1070,5 +1070,5 @@
         Assert(pVmcbCtrlCur->u32VmcbCleanBits == 0);
 
-        /* Verify our assumption that GIM providers trap #UD uniformly across VCPUs. */
+        /* Verify our assumption that GIM providers trap #UD uniformly across VCPUs initially. */
         Assert(pVCpuCur->hm.s.fGIMTrapXcptUD == pVCpu->hm.s.fGIMTrapXcptUD);
     }
     
@@ -2022,5 +2022,5 @@
          * since SVM doesn't have a preemption timer.
          *
-         * We do this here rather than in hmR0SvmVmRunSetupVmcb() as we may have been executing the
+         * We do this here rather than in hmR0SvmSetupVmcbNested() as we may have been executing the
          * nested-guest in IEM incl. PAUSE instructions which would update the pause-filter counters
          * and may continue execution in SVM R0 without a nested-guest #VMEXIT in between.
     
@@ -2105,5 +2105,5 @@
             }
 
-            pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+            pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL);
         }
     }
     
@@ -2407,5 +2407,5 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT
-    if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable == 1)
+    if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
     {
         Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
     
@@ -2482,5 +2482,5 @@
  * @sa      HMSvmNstGstVmExitNotify.
  */
-static bool hmR0SvmVmRunCacheVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
+static bool hmR0SvmCacheVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     /*
     
@@ -2511,5 +2511,5 @@
         pVmcbNstGstCache->fLbrVirt                = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt;
         pCtx->hwvirt.svm.fHMCachedVmcb            = true;
-        Log4(("hmR0SvmVmRunCacheVmcb: Cached VMCB fields\n"));
+        Log4(("hmR0SvmCacheVmcbNested: Cached VMCB fields\n"));
     }
 
     
@@ -2523,10 +2523,10 @@
  * This is done the first time we enter nested-guest execution using SVM R0
  * until the nested-guest \#VMEXIT (not to be confused with physical CPU
- * \#VMEXITs which may or may not cause the nested-guest \#VMEXIT).
+ * \#VMEXITs which may or may not cause a corresponding nested-guest \#VMEXIT).
  *
  * @param   pVCpu           The cross context virtual CPU structure.
  * @param   pCtx            Pointer to the nested-guest-CPU context.
  */
-static void hmR0SvmVmRunSetupVmcb(PVMCPU pVCpu, PCPUMCTX pCtx)
+static void hmR0SvmSetupVmcbNested(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
     
@@ -2536,5 +2536,5 @@
      * First cache the nested-guest VMCB fields we may potentially modify.
      */
-    bool const fVmcbCached = hmR0SvmVmRunCacheVmcb(pVCpu, pCtx);
+    bool const fVmcbCached = hmR0SvmCacheVmcbNested(pVCpu, pCtx);
     if (!fVmcbCached)
     {
     
@@ -2604,5 +2604,8 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
 
-    PSVMVMCB     pVmcbNstGst     = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); Assert(pVmcbNstGst);
+    PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
+    Assert(pVmcbNstGst);
+
+    hmR0SvmSetupVmcbNested(pVCpu, pCtx);
 
     int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcbNstGst, pCtx);
     
@@ -2626,5 +2629,5 @@
 
 #ifdef VBOX_WITH_NESTED_HWVIRT
-    Assert(pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable == 0);        /* Nested VGIF not supported yet. */
+    Assert(!pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable);            /* Nested VGIF not supported yet. */
 #endif
 
     
@@ -2745,13 +2748,9 @@
         /*
          * Nested-guest interrupt pending.
-         * Sync/verify nested-guest's V_IRQ and its force-flag.
+         * Sync nested-guest's V_IRQ and its force-flag.
          */
-        if (!pVmcbCtrl->IntCtrl.n.u1VIrqPending)
-        {
-            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
-        }
-        else
-            Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
+        if (  !pVmcbCtrl->IntCtrl.n.u1VIrqPending
+            && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
     }
 #endif
     
@@ -3478,23 +3477,47 @@
  * Sets the virtual interrupt intercept control in the VMCB.
  *
- * @param   pVmcb       Pointer to the VM control block.
- */
-DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pVmcb   Pointer to the VM control block.
+ * @param   pCtx    Pointer to the guest-CPU context.
+ */
+DECLINLINE(void) hmR0SvmSetIntWindowExiting(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     /*
-     * When AVIC isn't supported, indicate that a virtual interrupt is pending and to
-     * cause a #VMEXIT when the guest is ready to accept interrupts. At #VMEXIT, we
-     * then get the interrupt from the APIC (updating ISR at the right time) and
-     * inject the interrupt.
+     * When AVIC isn't supported, set up an interrupt window to cause a #VMEXIT when
+     * the guest is ready to accept interrupts. At #VMEXIT, we then get the interrupt
+     * from the APIC (updating ISR at the right time) and inject the interrupt.
      *
      * When AVIC is supported, we could make use of asynchronous delivery without
     * #VMEXIT and we would be passing the AVIC page to SVM.
+     *
+     * In AMD-V, an interrupt window is achieved using a combination of
+     * V_IRQ (an interrupt is pending), V_IGN_TPR (ignore TPR priorities) and the
+     * VINTR intercept all being set.
      */
-    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
-    {
-        Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqPending == 0);
+#ifdef VBOX_WITH_NESTED_HWVIRT
+    /*
+     * Currently we don't overlay interrupt windows and if there's any V_IRQ pending
+     * in the nested-guest VMCB, we avoid setting up any interrupt window on behalf
+     * of the outer guest.
+     */
+    /** @todo Does this mean we end up prioritizing virtual interrupt
+     *        delivery/window over a physical interrupt (from the outer guest)
+     *        that might be pending? */
+    bool const fEnableIntWindow = !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
+    if (!fEnableIntWindow)
+    {
+        Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
+        Log4(("Nested-guest V_IRQ already pending\n"));
+    }
+#else
+    RT_NOREF2(pVCpu, pCtx);
+    bool const fEnableIntWindow = true;
+#endif
+    if (fEnableIntWindow)
+    {
+        Assert(pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR);
         pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
-        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR;
-        pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
+        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_VINTR);
         Log4(("Set VINTR intercept\n"));
     }
     
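The comment above names the three ingredients of an AMD-V interrupt window. A toy model of how they interact (not VirtualBox code; the types and field names are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy VMCB: the VINTR #VMEXIT fires once V_IRQ is pending, TPR priorities
       are ignored and the guest becomes ready to take interrupts. */
    typedef struct {
        bool fVIrqPending;    /* V_IRQ */
        bool fIgnoreTpr;      /* V_IGN_TPR */
        bool fInterceptVintr; /* VINTR intercept */
    } TOYVMCB;

    static void setIntWindowExiting(TOYVMCB *pVmcb)
    {
        pVmcb->fVIrqPending    = true;
        pVmcb->fIgnoreTpr      = true;
        pVmcb->fInterceptVintr = true;
    }

    static bool guestTakesInterrupts(bool fEFlagsIF, bool fIntShadow)
    {
        return fEFlagsIF && !fIntShadow;
    }

    int main(void)
    {
        TOYVMCB Vmcb = { false, false, false };
        setIntWindowExiting(&Vmcb);

        /* On VMRUN the CPU exits with a VINTR #VMEXIT as soon as the guest is
           interruptible; the VMM then fetches and injects the real interrupt. */
        bool fExitVintr = Vmcb.fInterceptVintr && Vmcb.fVIrqPending
                       && guestTakesInterrupts(true /*IF*/, false /*shadow*/);
        printf("VINTR #VMEXIT taken: %s\n", fExitVintr ? "yes" : "no");
        return 0;
    }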
@@ -3507,57 +3530,22 @@
  * at this point of time.
  *
- * @param   pVmcb       Pointer to the VM control block.
- */
-DECLINLINE(void) hmR0SvmClearVirtIntrIntercept(PSVMVMCB pVmcb)
-{
-    if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
-    {
-        Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqPending == 1);
-        pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0;
-        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
-        pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pVmcb   Pointer to the VM control block.
+ * @param   pCtx    Pointer to the guest-CPU context.
+ */
+DECLINLINE(void) hmR0SvmClearIntWindowExiting(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+{
+    PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
+    if (    pVmcbCtrl->IntCtrl.n.u1VIrqPending
+        || (pVmcbCtrl->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
+    {
+        pVmcbCtrl->IntCtrl.n.u1VIrqPending = 0;
+        pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
+        hmR0SvmClearCtrlIntercept(pVCpu, pCtx, pVmcb, SVM_CTRL_INTERCEPT_VINTR);
         Log4(("Cleared VINTR intercept\n"));
     }
 }
 
-
-/**
- * Sets the IRET intercept control in the VMCB which instructs AMD-V to cause a
- * \#VMEXIT as soon as a guest starts executing an IRET. This is used to unblock
- * virtual NMIs.
- *
- * @param   pVmcb       Pointer to the VM control block.
- */
-DECLINLINE(void) hmR0SvmSetIretIntercept(PSVMVMCB pVmcb)
-{
-    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET))
-    {
-        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_IRET;
-        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
-
-        Log4(("Setting IRET intercept\n"));
-    }
-}
-
-
-/**
- * Clears the IRET intercept control in the VMCB.
- *
- * @param   pVmcb       Pointer to the VM control block.
- */
-DECLINLINE(void) hmR0SvmClearIretIntercept(PSVMVMCB pVmcb)
-{
-    if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET)
-    {
-        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_IRET;
-        pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
-
-        Log4(("Clearing IRET intercept\n"));
-    }
-}
-
 #ifdef VBOX_WITH_NESTED_HWVIRT
-
-
 /**
  * Evaluates the event to be delivered to the nested-guest and sets it as the
     
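The dedicated IRET set/clear helpers are gone; callers now use the generic hmR0SvmSetCtrlIntercept()/hmR0SvmClearCtrlIntercept() helpers, which this changeset references but does not show. A plausible, simplified shape of such helpers over a toy VMCB type (an assumption for illustration; the real clear helper also takes pVCpu/pCtx so it can refuse to clear intercepts the nested-guest VMCB still requires):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the VMCB, reduced to the two fields touched. */
    typedef struct {
        struct {
            uint64_t u64InterceptCtrl;   /* control intercept bits */
            uint32_t u32VmcbCleanBits;   /* VMCB clean bits */
        } ctrl;
    } TOYVMCB;

    #define TOY_VMCB_CLEAN_INTERCEPTS  (1u << 0)
    #define TOY_CTRL_INTERCEPT_IRET    (1ull << 20)  /* illustrative bit position */

    /* Plausible shape of hmR0SvmSetCtrlIntercept(): set the intercept and mark
       the intercept area dirty so the CPU re-reads it on the next VMRUN. */
    static void toySetCtrlIntercept(TOYVMCB *pVmcb, uint64_t fCtrlIntercept)
    {
        if (!(pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept))
        {
            pVmcb->ctrl.u64InterceptCtrl |= fCtrlIntercept;
            pVmcb->ctrl.u32VmcbCleanBits &= ~TOY_VMCB_CLEAN_INTERCEPTS;
        }
    }

    /* Plausible shape of hmR0SvmClearCtrlIntercept(). */
    static void toyClearCtrlIntercept(TOYVMCB *pVmcb, uint64_t fCtrlIntercept)
    {
        if (pVmcb->ctrl.u64InterceptCtrl & fCtrlIntercept)
        {
            pVmcb->ctrl.u64InterceptCtrl &= ~fCtrlIntercept;
            pVmcb->ctrl.u32VmcbCleanBits &= ~TOY_VMCB_CLEAN_INTERCEPTS;
        }
    }

    int main(void)
    {
        TOYVMCB Vmcb = { { 0, ~0u } };
        toySetCtrlIntercept(&Vmcb, TOY_CTRL_INTERCEPT_IRET);
        assert(Vmcb.ctrl.u64InterceptCtrl & TOY_CTRL_INTERCEPT_IRET);
        toyClearCtrlIntercept(&Vmcb, TOY_CTRL_INTERCEPT_IRET);
        assert(!(Vmcb.ctrl.u64InterceptCtrl & TOY_CTRL_INTERCEPT_IRET));
        return 0;
    }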
@@ -3570,124 +3558,110 @@
 static VBOXSTRICTRC hmR0SvmEvaluatePendingEventNested(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
-    Log4Func(("\n"));
+    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
 
     Assert(!pVCpu->hm.s.Event.fPending);
-
-    bool const fGif = pCtx->hwvirt.fGif;
-    if (fGif)
-    {
-        PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-
-        bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
-
-        /*
-         * Check if the nested-guest can receive NMIs.
-         * NMIs are higher priority than regular interrupts.
-         */
-        /** @todo SMI. SMIs take priority over NMIs. */
-        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))
-        {
-            bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
-            if (fBlockNmi)
-                hmR0SvmSetIretIntercept(pVmcbNstGst);
-            else if (fIntShadow)
+    Assert(pCtx->hwvirt.fGif);
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    Assert(pVmcb);
+
+    bool const fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
+    bool const fIntShadow  = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
+    bool const fBlockNmi   = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
+
+    Log4Func(("fVirtualGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool Intr. pending=%RTbool NMI pending=%RTbool\n",
+              fVirtualGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
+              VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
+
+    /** @todo SMI. SMIs take priority over NMIs. */
+
+    /*
+     * Check if the guest can receive NMIs.
+     * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
+     * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
+     */
+    if (    VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)
+        && !fBlockNmi)
+    {
+        if (    fVirtualGif
+            && !fIntShadow)
+        {
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_NMI))
             {
-                /** @todo Figure this out, how we shall manage virt. intercept if the
-                 *        nested-guest already has one set and/or if we really need it? */
-                //hmR0SvmSetVirtIntrIntercept(pVmcbNstGst);
+                Log4(("Intercepting NMI -> #VMEXIT\n"));
+                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0);
             }
-            else
+
+            Log4(("Setting NMI pending for injection\n"));
+            SVMEVENT Event;
+            Event.u = 0;
+            Event.n.u1Valid  = 1;
+            Event.n.u8Vector = X86_XCPT_NMI;
+            Event.n.u3Type   = SVM_EVENT_NMI;
+            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+        }
+        else if (!fVirtualGif)
+            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
+        else
+            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb, pCtx);
+    }
+    /*
+     * Check if the nested-guest can receive external interrupts (generated by
+     * the guest's PIC/APIC).
+     *
+     * External intercepts, NMI, SMI etc. from the physical CPU are -always- intercepted
+     * when executing using hardware-assisted SVM, see HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS.
+     *
+     * External interrupts that are generated for the outer guest may be intercepted
+     * depending on how the nested-guest VMCB was programmed by guest software.
+     *
+     * Physical interrupts always take priority over virtual interrupts,
+     * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
+     */
+    else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+             && !pVCpu->hm.s.fSingleInstruction)
+    {
+        if (    fVirtualGif
+            && !fIntShadow
+            &&  CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
+        {
+            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
             {
-                if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_NMI))
-                {
-                    Log4(("Intercepting NMI -> #VMEXIT\n"));
-                    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0);
-                }
-
-                Log4(("Pending NMI\n"));
+                Log4(("Intercepting INTR -> #VMEXIT\n"));
+                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
+            }
+
+            uint8_t u8Interrupt;
+            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+            if (RT_SUCCESS(rc))
+            {
+                Log4(("Setting external interrupt %#x pending for injection\n", u8Interrupt));
                 SVMEVENT Event;
                 Event.u = 0;
                 Event.n.u1Valid  = 1;
-                Event.n.u8Vector = X86_XCPT_NMI;
-                Event.n.u3Type   = SVM_EVENT_NMI;
+                Event.n.u8Vector = u8Interrupt;
+                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
                 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-                hmR0SvmSetIretIntercept(pVmcbNstGst);
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
-                return VINF_SUCCESS;
             }
-        }
-
-        /*
-         * Check if the nested-guest can receive external interrupts (generated by
-         * the guest's PIC/APIC).
-         *
-         * External intercepts, NMI, SMI etc. from the physical CPU are -always- intercepted
-         * when executing using hardware-assisted SVM, see HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS.
-         *
-         * External interrupts that are generated for the outer guest may be intercepted
-         * depending on how the nested-guest VMCB was programmed by guest software.
-         *
-         * Physical interrupts always take priority over virtual interrupts,
-         * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
-         */
-        if (!fIntShadow)
-        {
-            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
-                && !pVCpu->hm.s.fSingleInstruction
-                && CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
+            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
             {
-                if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
-                {
-                    Log4(("Intercepting external interrupt -> #VMEXIT\n"));
-                    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
-                }
-
-                uint8_t u8Interrupt;
-                int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
-                if (RT_SUCCESS(rc))
-                {
-                    Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
-                    SVMEVENT Event;
-                    Event.u = 0;
-                    Event.n.u1Valid  = 1;
-                    Event.n.u8Vector = u8Interrupt;
-                    Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
-                    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-                }
-                else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
-                {
-                    /*
-                     * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
-                     * updated eventually when the TPR is written by the guest.
-                     */
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
-                }
-                else
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
+                /*
+                 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
+                 * updated eventually when the TPR is written by the guest.
+                 */
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
             }
-
-            /*
-             * Check if the nested-guest is intercepting virtual (using V_IRQ and related fields)
-             * interrupt injection. The virtual interrupt injection itself, if any, will be done
-             * by the physical CPU.
-             */
-            /** @todo later explore this for performance reasons. Right now the hardware
-             *        takes care of virtual interrupt injection for nested-guest. */
-#if 0
-            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
-                && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR)
-                && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
-            {
-                Log4(("Intercepting virtual interrupt -> #VMEXIT\n"));
-                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
-            }
+            else
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
+        }
+        else if (!fVirtualGif)
+            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
+        else
+            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb, pCtx);
+    }
+
+    return VINF_SUCCESS;
+}
 #endif
-        }
-    }
-
-    return VINF_SUCCESS;
-}
-#endif
-
 
 /**
     
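The rewritten nested evaluation turns the NMI branch into a straight priority ladder: hand the NMI to the nested hypervisor if it intercepts NMIs, otherwise inject it; if virtual GIF is clear, wait for STGI; if only an interrupt shadow blocks delivery, open an interrupt window. A condensed standalone model of that ladder (illustrative only, names invented):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { ACT_NONE, ACT_VMEXIT_TO_GUEST, ACT_INJECT,
                   ACT_STGI_INTERCEPT, ACT_INT_WINDOW } ACTION;

    /* Mirrors the NMI branch of hmR0SvmEvaluatePendingEventNested() above. */
    static ACTION evaluateNmi(bool fNmiPending, bool fBlockNmi, bool fVirtualGif,
                              bool fIntShadow, bool fNstGstInterceptsNmi)
    {
        if (!fNmiPending || fBlockNmi)
            return ACT_NONE;
        if (fVirtualGif && !fIntShadow)
            return fNstGstInterceptsNmi ? ACT_VMEXIT_TO_GUEST : ACT_INJECT;
        if (!fVirtualGif)
            return ACT_STGI_INTERCEPT;   /* wait for the nested-guest STGI */
        return ACT_INT_WINDOW;           /* interrupt shadow: open a window */
    }

    int main(void)
    {
        /* NMI pending, not blocked, VGIF set, no shadow, not intercepted -> inject. */
        printf("action=%d\n", (int)evaluateNmi(true, false, true, false, false));
        return 0;
    }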
@@ -3697,95 +3671,91 @@
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pCtx        Pointer to the guest-CPU context.
- *
- * @remarks Don't use this function when we are actively executing a
- *          nested-guest, use hmR0SvmEvaluatePendingEventNested instead.
  */
 static void hmR0SvmEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
     Assert(!pVCpu->hm.s.Event.fPending);
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    Assert(pVmcb);
 
 #ifdef VBOX_WITH_NESTED_HWVIRT
-    bool const fGif = pCtx->hwvirt.fGif;
+    bool const fGif       = pCtx->hwvirt.fGif;
 #else
-    bool const fGif = true;
+    bool const fGif       = true;
 #endif
-    Log4Func(("fGif=%RTbool\n", fGif));
+    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
+    bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
+    bool const fBlockNmi  = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
+
+    Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool Intr. pending=%RTbool NMI pending=%RTbool\n",
+              fGif, fBlockNmi, fBlockInt, fIntShadow,
+              VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
+              VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
+
+    /** @todo SMI. SMIs take priority over NMIs. */
 
     /*
-     * If the global interrupt flag (GIF) isn't set, even NMIs and other events are blocked.
-     * See AMD spec. Table 15-10. "Effect of the GIF on Interrupt Handling".
+     * Check if the guest can receive NMIs.
+     * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
+     * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
      */
-    if (fGif)
-    {
-        bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
-        bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
-        bool const fBlockNmi  = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
-        PSVMVMCB pVmcb        = pVCpu->hm.s.svm.pVmcb;
-
-        Log4Func(("fBlockInt=%RTbool fIntShadow=%RTbool APIC/PIC_Pending=%RTbool\n", fBlockInt, fIntShadow,
-                  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
-
-        /** @todo SMI. SMIs take priority over NMIs. */
-        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts. */
-        {
-            if (fBlockNmi)
-                hmR0SvmSetIretIntercept(pVmcb);
-            else if (fIntShadow)
-                hmR0SvmSetVirtIntrIntercept(pVmcb);
-            else
+    if (    VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI)
+        && !fBlockNmi)
+    {
+        if (    fGif
+            && !fIntShadow)
+        {
+            Log4(("Setting NMI pending for injection\n"));
+            SVMEVENT Event;
+            Event.u = 0;
+            Event.n.u1Valid  = 1;
+            Event.n.u8Vector = X86_XCPT_NMI;
+            Event.n.u3Type   = SVM_EVENT_NMI;
+            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+        }
+        else if (!fGif)
+            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
+        else
+            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb, pCtx);
+    }
+    /*
+     * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
+     * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
+     */
+    else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+             && !pVCpu->hm.s.fSingleInstruction)
+    {
+        if (    fGif
+            && !fBlockInt
+            && !fIntShadow)
+        {
+            uint8_t u8Interrupt;
+            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+            if (RT_SUCCESS(rc))
             {
-                Log4(("Pending NMI\n"));
-
+                Log4(("Setting external interrupt %#x pending for injection\n", u8Interrupt));
                 SVMEVENT Event;
                 Event.u = 0;
                 Event.n.u1Valid  = 1;
-                Event.n.u8Vector = X86_XCPT_NMI;
-                Event.n.u3Type   = SVM_EVENT_NMI;
-
+                Event.n.u8Vector = u8Interrupt;
+                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
                 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-                hmR0SvmSetIretIntercept(pVmcb);
-                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
-                return;
             }
-        }
-        else if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
-                 && !pVCpu->hm.s.fSingleInstruction)
-        {
-            /*
-             * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns
-             * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC.
-             */
-            if (   !fBlockInt
-                && !fIntShadow)
+            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
             {
-                uint8_t u8Interrupt;
-                int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
-                if (RT_SUCCESS(rc))
-                {
-                    Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
-
-                    SVMEVENT Event;
-                    Event.u = 0;
-                    Event.n.u1Valid  = 1;
-                    Event.n.u8Vector = u8Interrupt;
-                    Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
-
-                    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-                }
-                else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
-                {
-                    /*
-                     * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
-                     * updated eventually when the TPR is written by the guest.
-                     */
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
-                }
-                else
-                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
+                /*
+                 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
+                 * updated eventually when the TPR is written by the guest.
+                 */
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
             }
             else
-                hmR0SvmSetVirtIntrIntercept(pVmcb);
-        }
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
+        }
+        else if (!fGif)
+            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
+        else
+            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb, pCtx);
     }
 }
     
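Both evaluation paths build the pending event in the SVMEVENT format, which mirrors the hardware EVENTINJ layout (AMD64 Architecture Programmer's Manual vol. 2: vector in bits 7:0, type in bits 10:8, valid in bit 31). A standalone sketch of that encoding using a hypothetical stand-in union:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified EVENTINJ-style layout; stand-in for the real SVMEVENT. */
    typedef union {
        struct {
            uint64_t u8Vector       : 8;   /* bits  7:0  - vector */
            uint64_t u3Type         : 3;   /* bits 10:8  - event type */
            uint64_t u1ErrCodeValid : 1;   /* bit  11    - error code valid */
            uint64_t uReserved      : 19;
            uint64_t u1Valid        : 1;   /* bit  31    - valid */
            uint64_t u32ErrCode     : 32;  /* bits 63:32 - error code */
        } n;
        uint64_t u;
    } MYSVMEVENT;

    enum { MY_EVENT_EXTERNAL_IRQ = 0, MY_EVENT_NMI = 2 };

    int main(void)
    {
        MYSVMEVENT Event;
        Event.u = 0;
        Event.n.u1Valid  = 1;
        Event.n.u8Vector = 2;             /* NMI vector */
        Event.n.u3Type   = MY_EVENT_NMI;
        printf("EVENTINJ = %#llx\n", (unsigned long long)Event.u); /* 0x80000202 */
        return 0;
    }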
@@ -3798,4 +3768,8 @@
  * @param   pCtx        Pointer to the guest-CPU context.
  * @param   pVmcb       Pointer to the VM control block.
+ *
+ * @remarks Must only be called when we are guaranteed to enter
+ *          hardware-assisted SVM execution and not return to ring-3
+ *          prematurely.
  */
 static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMVMCB pVmcb)
     
@@ -3842,4 +3816,19 @@
 
         /*
+         * Before injecting an NMI we must set VMCPU_FF_BLOCK_NMIS to prevent nested NMIs. We do this only
+         * when we are surely going to inject the NMI as otherwise if we return to ring-3 prematurely we
+         * could leave NMIs blocked indefinitely upon re-entry into SVM R0.
+         *
+         * With VT-x, this is handled by the Guest interruptibility information VMCS field which will set
+         * the VMCS field after actually delivering the NMI which we read on VM-exit to determine the state.
+         */
+        if (    Event.n.u3Type   == SVM_EVENT_NMI
+            &&  Event.n.u8Vector == X86_XCPT_NMI
+            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+        {
+            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
+        }
+
+        /*
          * Inject it (update VMCB for injection by the hardware).
          */
     
@@ -3855,4 +3844,12 @@
     else
         Assert(pVmcb->ctrl.EventInject.n.u1Valid == 0);
+
+    /*
+     * We could have injected an NMI through IEM and continue guest execution using
+     * hardware-assisted SVM, in which case we would not have any events pending (above)
+     * but we still need to intercept IRET in order to eventually clear NMI inhibition.
+     */
+    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_IRET);
 
     /*
     
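Taken together, the two additions above give NMI inhibition a clear lifecycle: set VMCPU_FF_BLOCK_NMIS only once injection is certain, keep the IRET intercept armed while the flag is set, and clear both when the guest executes IRET (see the IRET exit handler changes further down). A toy walk-through with plain booleans standing in for the force-flag and the intercept bit:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool fBlockNmis = false, fInterceptIret = false;

        /* hmR0SvmInjectPendingEvent(): about to really inject the NMI,
           so it is now safe to mark NMIs as blocked. */
        fBlockNmis = true;

        /* Same function, later: while NMIs are blocked, make sure an
           IRET causes a #VMEXIT so inhibition can be lifted. */
        if (fBlockNmis)
            fInterceptIret = true;

        /* hmR0SvmExitIret(): guest executed IRET -> NMI blocking ends
           and the intercept is no longer needed. */
        if (fBlockNmis)
            fBlockNmis = false;
        fInterceptIret = false;

        printf("blocked=%d interceptIRET=%d\n", fBlockNmis, fInterceptIret);
        return 0;
    }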
@@ -4165,9 +4162,4 @@
 
     /*
-     * Set up the nested-guest VMCB for execution using hardware-assisted SVM.
-     */
-    hmR0SvmVmRunSetupVmcb(pVCpu, pCtx);
-
-    /*
      * Load the nested-guest state.
      */
     
@@ -4212,26 +4204,4 @@
         return VINF_EM_RAW_INTERRUPT;
     }
-
-    /*
-     * If we are injecting an NMI, we must set VMCPU_FF_BLOCK_NMIS only when we are going to execute
-     * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
-     * AMD-V and conclude that NMI inhibition is active when we have not even delivered the NMI.
-     *
-     * With VT-x, this is handled by the Guest interruptibility information VMCS field which will set the
-     * VMCS field after actually delivering the NMI which we read on VM-exit to determine the state.
-     */
-    if (pVCpu->hm.s.Event.fPending)
-    {
-        SVMEVENT Event;
-        Event.u = pVCpu->hm.s.Event.u64IntInfo;
-        if (    Event.n.u1Valid
-            &&  Event.n.u3Type == SVM_EVENT_NMI
-            &&  Event.n.u8Vector == X86_XCPT_NMI
-            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
-        {
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
-        }
-    }
-
     return VINF_SUCCESS;
 }
     
@@ -4342,25 +4312,4 @@
     }
 
-    /*
-     * If we are injecting an NMI, we must set VMCPU_FF_BLOCK_NMIS only when we are going to execute
-     * guest code for certain (no exits to ring-3). Otherwise, we could re-read the flag on re-entry into
-     * AMD-V and conclude that NMI inhibition is active when we have not even delivered the NMI.
-     *
-     * With VT-x, this is handled by the Guest interruptibility information VMCS field which will set the
-     * VMCS field after actually delivering the NMI which we read on VM-exit to determine the state.
-     */
-    if (pVCpu->hm.s.Event.fPending)
-    {
-        SVMEVENT Event;
-        Event.u = pVCpu->hm.s.Event.u64IntInfo;
-        if (    Event.n.u1Valid
-            &&  Event.n.u3Type == SVM_EVENT_NMI
-            &&  Event.n.u8Vector == X86_XCPT_NMI
-            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
-        {
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
-        }
-    }
-
     return VINF_SUCCESS;
 }
     
@@ -4389,6 +4338,7 @@
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);            /* Indicate the start of guest execution. */
 
-    PVM      pVM = pVCpu->CTX_SUFF(pVM);
+    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
     PSVMVMCB pVmcb = pSvmTransient->pVmcb;
+
     hmR0SvmInjectPendingEvent(pVCpu, pCtx, pVmcb);
 
     
@@ -6153,4 +6103,5 @@
     {
         PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+        Assert(pVmcb);
         Assert(pVmcb->ctrl.u64NextRIP);
         AssertRelease(pVmcb->ctrl.u64NextRIP - pCtx->rip == cb);    /* temporary, remove later */
     
@@ -7165,5 +7116,5 @@
     /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
     PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
-    hmR0SvmClearVirtIntrIntercept(pVmcb);
+    hmR0SvmClearIntWindowExiting(pVCpu, pVmcb, pCtx);
 
     /* Deliver the pending interrupt via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
     
@@ -7272,9 +7223,10 @@
 
     /* Clear NMI blocking. */
-    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
+    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
 
     /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive NMIs, it is now ready. */
     PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
-    hmR0SvmClearIretIntercept(pVmcb);
+    hmR0SvmClearCtrlIntercept(pVCpu, pCtx, pVmcb, SVM_CTRL_INTERCEPT_IRET);
 
     /* Deliver the pending NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
     
@@ -7547,6 +7499,4 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
-    /** @todo if triple-fault is returned in nested-guest scenario convert to a
-     *        shutdown VMEXIT. */
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
 
     
@@ -7733,13 +7683,12 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
-#ifdef VBOX_STRICT
-    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
-    Assert(pVmcb);
-    Assert(!pVmcb->ctrl.IntCtrl.n.u1VGifEnable);
-    RT_NOREF(pVmcb);
-#endif
-
-    /** @todo Stat. */
-    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitStgi); */
+    /*
+     * When VGIF is not used we always intercept STGI instructions. When VGIF is used,
+     * we only intercept STGI when events are pending for GIF to become 1.
+     */
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
+        hmR0SvmClearCtrlIntercept(pVCpu, pCtx, pVmcb, SVM_CTRL_INTERCEPT_STGI);
+
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
     VBOXSTRICTRC rcStrict = IEMExecDecodedStgi(pVCpu, cbInstr);
     
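With VGIF the CPU tracks the guest's GIF itself, so the STGI intercept is only needed while an event is waiting for GIF to become 1: the hmR0SvmEvaluatePendingEvent*() paths above arm it and this exit handler disarms it. A condensed standalone model of that pairing (illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool fVGif = true, fGif = false, fEventPending = true;
        bool fInterceptStgi = false;

        /* Evaluation: event pending but GIF clear -> arm the STGI intercept
           so we get control the moment the guest re-enables GIF. */
        if (fEventPending && !fGif)
            fInterceptStgi = true;

        /* Guest executes STGI -> #VMEXIT(STGI): with VGIF in use, disarm
           the intercept again; the pending event can now be delivered. */
        fGif = true;
        if (fVGif)
            fInterceptStgi = false;

        printf("GIF=%d interceptSTGI=%d\n", fGif, fInterceptStgi);
        return 0;
    }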
@@ -7762,6 +7711,4 @@
 #endif
 
-    /** @todo Stat. */
-    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmload); */
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
     VBOXSTRICTRC rcStrict = IEMExecDecodedVmload(pVCpu, cbInstr);
     
@@ -7791,6 +7738,4 @@
 #endif
 
-    /** @todo Stat. */
-    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmsave); */
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
     VBOXSTRICTRC rcStrict = IEMExecDecodedVmsave(pVCpu, cbInstr);
     
@@ -7842,8 +7787,6 @@
 
     /* If this #DB is the result of delivering an event, go back to the interpreter. */
-    /** @todo if triple-fault is returned in nested-guest scenario convert to a
-     *        shutdown VMEXIT. */
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
-    if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending))
+    if (pVCpu->hm.s.Event.fPending)
     {
         STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingInterpret);
     
@@ -7864,6 +7807,4 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
-    /** @todo if triple-fault is returned in nested-guest scenario convert to a
-     *        shutdown VMEXIT. */
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
 