VirtualBox

Changeset 81168 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Oct 9, 2019 8:36:00 AM (5 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
133804
Message:

VMM/HMSVMR0: Nested SVM: bugref:7243 Coalesce hmR0SvmEvaluatePendingEventNested into hmR0SvmEvaluatePendingEvent.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r81166 r81168  
    320320     *  external interrupt or NMI. */
    321321    bool            fVectoringPF;
    322 } SVMTRANSIENT, *PSVMTRANSIENT;
     322} SVMTRANSIENT;
     323/** Pointer to SVM transient state. */
     324typedef SVMTRANSIENT *PSVMTRANSIENT;
     325/** Pointer to a const SVM transient state. */
     326typedef const SVMTRANSIENT *PCSVMTRANSIENT;
     327
    323328AssertCompileMemberAlignment(SVMTRANSIENT, u64ExitCode, sizeof(uint64_t));
    324329AssertCompileMemberAlignment(SVMTRANSIENT, pVmcb,       sizeof(uint64_t));
     
    35213526static void hmR0SvmSetIntWindowExiting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
    35223527{
     3528    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
     3529
    35233530    /*
    35243531     * When AVIC isn't supported, set up an interrupt window to cause a #VMEXIT when the guest
     
    35323539     * is pending), V_IGN_TPR (ignore TPR priorities) and the VINTR intercept all being set.
    35333540     */
    3534 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    3535     /*
    3536      * Currently we don't overlay interrupt windows and if there's any V_IRQ pending in the
    3537      * nested-guest VMCB, we avoid setting up any interrupt window on behalf of the outer
    3538      * guest.
    3539      */
    3540     /** @todo Does this mean we end up prioritizing virtual interrupt
    3541      *        delivery/window over a physical interrupt (from the outer guest)
    3542      *        that might be pending? */
    3543     bool const fEnableIntWindow = !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
    3544     if (!fEnableIntWindow)
    3545     {
    3546         Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx));
    3547         Log4(("Nested-guest V_IRQ already pending\n"));
    3548     }
    3549 #else
    3550     bool const fEnableIntWindow = true;
    3551     RT_NOREF(pVCpu);
    3552 #endif
    3553     if (fEnableIntWindow)
    3554     {
    3555         Assert(pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR);
    3556         pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
    3557         pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
    3558         hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_VINTR);
    3559         Log4(("Set VINTR intercept\n"));
    3560     }
     3541    Assert(pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR);
     3542    pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
     3543    pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
     3544    hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_VINTR);
     3545    Log4(("Set VINTR intercept\n"));
    35613546}
    35623547
     
    35723557static void hmR0SvmClearIntWindowExiting(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
    35733558{
     3559    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
     3560
    35743561    PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
    35753562    if (    pVmcbCtrl->IntCtrl.n.u1VIrqPending
     
    35833570}
    35843571
    3585 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    3586 /**
    3587  * Evaluates the event to be delivered to the nested-guest and sets it as the
    3588  * pending event.
    3589  *
    3590  * @returns VBox strict status code.
    3591  * @param   pVCpu       The cross context virtual CPU structure.
    3592  */
    3593 static VBOXSTRICTRC hmR0SvmEvaluatePendingEventNested(PVMCPUCC pVCpu)
     3572
     3573/**
     3574 * Evaluates the event to be delivered to the guest and sets it as the pending
     3575 * event.
     3576 *
     3577 * @returns Strict VBox status code.
     3578 * @param   pVCpu           The cross context virtual CPU structure.
     3579 * @param   pSvmTransient   Pointer to the SVM transient structure.
     3580 */
     3581static VBOXSTRICTRC hmR0SvmEvaluatePendingEvent(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient)
    35943582{
    35953583    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    3596     HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
    35973584    HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT
    35983585                              | CPUMCTX_EXTRN_RFLAGS
     
    36153602
    36163603    /*
    3617      * Check if the guest can receive NMIs.
     3604     * Check if the guest or nested-guest can receive NMIs.
    36183605     * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
    36193606     * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
     
    36253612            && !fIntShadow)
    36263613        {
     3614#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    36273615            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_NMI))
    36283616            {
     
    36313619                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0);
    36323620            }
    3633 
     3621#endif
    36343622            Log4(("Setting NMI pending for injection\n"));
    36353623            SVMEVENT Event;
     
    36433631        else if (!fGif)
    36443632            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
    3645         else
     3633        else if (!pSvmTransient->fIsNestedGuest)
    36463634            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
     3635        /* else: for nested-guests, interrupt-window exiting will be picked up when merging VMCB controls. */
    36473636    }
    36483637    /*
    3649      * Check if the nested-guest can receive external interrupts (generated by the guest's
    3650      * PIC/APIC).
     3638     * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt()
     3639     * returns a valid interrupt we -must- deliver the interrupt. We can no longer re-request
     3640     * it from the APIC device.
    36513641     *
    3652      * External intercepts, NMI, SMI etc. from the physical CPU are -always- intercepted
    3653      * when executing using hardware-assisted SVM, see HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS.
    3654      *
    3655      * External interrupts that are generated for the outer guest may be intercepted
    3656      * depending on how the nested-guest VMCB was programmed by guest software.
    3657      *
    3658      * Physical interrupts always take priority over virtual interrupts,
    3659      * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
    3660      *
     3642     * For nested-guests, physical interrupts always take priority over virtual interrupts.
    36613643     * We don't need to inject nested-guest virtual interrupts here, we can let the hardware
    3662      * do that work when we execute nested guest code esp. since all the required information
     3644     * do that work when we execute nested-guest code esp. since all the required information
    36633645     * is in the VMCB, unlike physical interrupts where we need to fetch the interrupt from
    36643646     * the virtual interrupt controller.
     3647     *
     3648     * See AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
    36653649     */
    36663650    else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
    36673651             && !pVCpu->hm.s.fSingleInstruction)
    36683652    {
     3653        bool const fBlockInt = !pSvmTransient->fIsNestedGuest ? !(pCtx->eflags.u32 & X86_EFL_IF)
     3654                                                              : CPUMIsGuestSvmPhysIntrEnabled(pVCpu, pCtx);
    36693655        if (    fGif
    3670             && !fIntShadow
    3671             &&  CPUMIsGuestSvmPhysIntrEnabled(pVCpu, pCtx))
    3672         {
     3656            && !fBlockInt
     3657            && !fIntShadow)
     3658        {
     3659#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    36733660            if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
    36743661            {
     
    36773664                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
    36783665            }
    3679 
     3666#endif
    36803667            uint8_t u8Interrupt;
    36813668            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
     
    37033690        else if (!fGif)
    37043691            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
    3705         else
     3692        else if (!pSvmTransient->fIsNestedGuest)
    37063693            hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
     3694        /* else: for nested-guests, interrupt-window exiting will be picked up when merging VMCB controls. */
    37073695    }
    37083696
    37093697    return VINF_SUCCESS;
    3710 }
    3711 #endif
    3712 
    3713 /**
    3714  * Evaluates the event to be delivered to the guest and sets it as the pending
    3715  * event.
    3716  *
    3717  * @param   pVCpu       The cross context virtual CPU structure.
    3718  */
    3719 static void hmR0SvmEvaluatePendingEvent(PVMCPUCC pVCpu)
    3720 {
    3721     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    3722     HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
    3723     HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT
    3724                               | CPUMCTX_EXTRN_RFLAGS
    3725                               | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW);
    3726 
    3727     Assert(!pVCpu->hm.s.Event.fPending);
    3728     PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
    3729     Assert(pVmcb);
    3730 
    3731     bool const fGif       = CPUMGetGuestGif(pCtx);
    3732     bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
    3733     bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
    3734     bool const fBlockNmi  = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    3735 
    3736     Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool fIntPending=%RTbool NMI pending=%RTbool\n",
    3737               fGif, fBlockNmi, fBlockInt, fIntShadow,
    3738               VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC),
    3739               VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)));
    3740 
    3741     /** @todo SMI. SMIs take priority over NMIs. */
    3742 
    3743     /*
    3744      * Check if the guest can receive NMIs.
    3745      * Nested NMIs are not allowed, see AMD spec. 8.1.4 "Masking External Interrupts".
    3746      * NMIs take priority over maskable interrupts, see AMD spec. 8.5 "Priorities".
    3747      */
    3748     if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
    3749         && !fBlockNmi)
    3750     {
    3751         if (    fGif
    3752             && !fIntShadow)
    3753         {
    3754             Log4(("Setting NMI pending for injection\n"));
    3755             SVMEVENT Event;
    3756             Event.u = 0;
    3757             Event.n.u1Valid  = 1;
    3758             Event.n.u8Vector = X86_XCPT_NMI;
    3759             Event.n.u3Type   = SVM_EVENT_NMI;
    3760             hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    3761             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
    3762         }
    3763         else if (!fGif)
    3764             hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
    3765         else
    3766             hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
    3767     }
    3768     /*
    3769      * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt()
    3770      * returns a valid interrupt we -must- deliver the interrupt. We can no longer re-request
    3771      * it from the APIC device.
    3772      */
    3773     else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
    3774              && !pVCpu->hm.s.fSingleInstruction)
    3775     {
    3776         if (    fGif
    3777             && !fBlockInt
    3778             && !fIntShadow)
    3779         {
    3780             uint8_t u8Interrupt;
    3781             int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
    3782             if (RT_SUCCESS(rc))
    3783             {
    3784                 Log4(("Setting external interrupt %#x pending for injection\n", u8Interrupt));
    3785                 SVMEVENT Event;
    3786                 Event.u = 0;
    3787                 Event.n.u1Valid  = 1;
    3788                 Event.n.u8Vector = u8Interrupt;
    3789                 Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
    3790                 hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
    3791             }
    3792             else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
    3793             {
    3794                 /*
    3795                  * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
    3796                  * updated eventually when the TPR is written by the guest.
    3797                  */
    3798                 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
    3799             }
    3800             else
    3801                 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
    3802         }
    3803         else if (!fGif)
    3804             hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_STGI);
    3805         else
    3806             hmR0SvmSetIntWindowExiting(pVCpu, pVmcb);
    3807     }
    38083698}
    38093699
     
    41904080    else if (!pVCpu->hm.s.Event.fPending)
    41914081    {
    4192         if (!pSvmTransient->fIsNestedGuest)
    4193             hmR0SvmEvaluatePendingEvent(pVCpu);
    4194 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    4195         else
    4196         {
    4197             VBOXSTRICTRC rcStrict = hmR0SvmEvaluatePendingEventNested(pVCpu);
    4198             if (    rcStrict != VINF_SUCCESS
    4199                 || !CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
    4200             {
    4201                 if (!CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
    4202                     STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
    4203                 return VBOXSTRICTRC_VAL(rcStrict);
    4204             }
    4205         }
    4206 #endif
     4082        VBOXSTRICTRC rcStrict = hmR0SvmEvaluatePendingEvent(pVCpu, pSvmTransient);
     4083        if (   rcStrict != VINF_SUCCESS
     4084            || pSvmTransient->fIsNestedGuest != CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
     4085        {
     4086            /* If a nested-guest VM-exit occurred, bail. */
     4087            if (pSvmTransient->fIsNestedGuest)
     4088                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchNstGstVmexit);
     4089            return VBOXSTRICTRC_VAL(rcStrict);
     4090        }
    42074091    }
    42084092
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette