VirtualBox

Changeset 70177 in vbox for trunk/src/VBox


Timestamp: Dec 16, 2017 4:07:02 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 119742
Message:

VMM/HMSVMR0: Ensure clean bits are updated while loading/merging nested-guest state intercepts.
Clear VINTR intercepts once clgi is emulated (esp. when VGIF isn't supported).
Comments, cleanups.
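
For context on the first part of the fix: AMD-V may cache parts of the VMCB between guest runs, and the VMCB "clean bits" tell the CPU which cached areas are still valid, so any software change to the intercept controls must also clear the corresponding clean bit. Below is a minimal sketch of that contract, using a simplified stand-in struct rather than VirtualBox's real PSVMVMCB; only the field and macro names mirror HMSVMR0.cpp, and the bit values are illustrative.

/* toy_cleanbits.c - minimal sketch of the VMCB clean-bits contract; not
 * VirtualBox's real VMCB handling. */
#include <stdint.h>
#include <stdio.h>

#define SVM_CTRL_INTERCEPT_VINTR     UINT64_C(0x10)  /* illustrative bit value */
#define HMSVM_VMCB_CLEAN_INTERCEPTS  UINT64_C(0x01)  /* illustrative bit value */

typedef struct TOYVMCB
{
    uint64_t u64InterceptCtrl;  /* Which instructions/events cause a #VMEXIT. */
    uint64_t u64VmcbCleanBits;  /* Set bit => CPU may reuse its cached copy of that VMCB area. */
} TOYVMCB;

/* Any change to the intercept controls must invalidate the cached copy,
 * otherwise the CPU may keep running the guest with stale intercepts. */
static void toyModifyIntercepts(TOYVMCB *pVmcb, uint64_t fSet, uint64_t fClear)
{
    pVmcb->u64InterceptCtrl |= fSet;
    pVmcb->u64InterceptCtrl &= ~fClear;
    pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;  /* The step this changeset adds after merging. */
}

int main(void)
{
    TOYVMCB Vmcb = { 0, ~UINT64_C(0) };  /* Start with everything marked clean. */
    toyModifyIntercepts(&Vmcb, SVM_CTRL_INTERCEPT_VINTR, 0);
    printf("intercepts=%#llx clean=%#llx\n",
           (unsigned long long)Vmcb.u64InterceptCtrl,
           (unsigned long long)Vmcb.u64VmcbCleanBits);
    return 0;
}

Before this changeset, the nested-guest merge path updated u64InterceptCtrl without that final clean-bits step; that is what the hunk around line 1894 below adds.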

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(r70116)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp	(r70177)

@@ -1854,3 +1854,4 @@
     /* First, load the guest intercepts into the guest VMCB. */
     PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;
+    Assert(!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR));
     hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb, pCtx);
     
@@ -1891,4 +1892,7 @@
            == HMSVM_MANDATORY_GUEST_CTRL_INTERCEPTS);
     pVmcbNstGst->ctrl.u64InterceptCtrl  &= ~SVM_CTRL_INTERCEPT_VMMCALL;
+
+    /* Finally, update the VMCB clean bits. */
+    pVmcbNstGst->ctrl.u64VmcbCleanBits  &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;

     Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_XCPT_INTERCEPTS));
     
@@ -2261,7 +2265,9 @@
            ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));

-    Log4(("hmR0SvmLoadGuestStateNested: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 (HyperCR3=%#RX64) CR4=%#RX32 rc=%d\n",
-          pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->cr0, pCtx->cr3, pVmcbNstGst->guest.u64CR3, pCtx->cr4, rc));
+    Log4(("hmR0SvmLoadGuestStateNested: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 (HyperCR3=%#RX64) CR4=%#RX32 "
+          "ESP=%#RX32 EBP=%#RX32 rc=%d\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->cr0, pCtx->cr3,
+          pVmcbNstGst->guest.u64CR3, pCtx->cr4, pCtx->esp, pCtx->ebp, rc));
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
+
     return rc;
 }
     
@@ -3117,7 +3123,5 @@

 /**
- * Sets the virtual interrupt intercept control in the VMCB which
- * instructs AMD-V to cause a \#VMEXIT as soon as the guest is in a state to
- * receive interrupts.
+ * Sets the virtual interrupt intercept control in the VMCB.
  *
  * @param   pVmcb       Pointer to the VM control block.
     
@@ -3125,17 +3129,24 @@
 DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
 {
+    /*
+     * When AVIC isn't supported, indicate that a virtual interrupt is pending to
+     * cause a #VMEXIT once the guest is ready to accept interrupts. At #VMEXIT,
+     * we then get the interrupt from the APIC (updating the ISR at the right
+     * time) and inject it.
+     *
+     * When AVIC is supported, we could instead use asynchronous delivery without
+     * a #VMEXIT, passing the AVIC page to SVM.
+     */
     if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
     {
-        pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;     /* A virtual interrupt is pending. */
-        pVmcb->ctrl.IntCtrl.n.u8VIntrVector = 0;     /* Vector not necessary as we #VMEXIT for delivering the interrupt. */
+        Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqPending == 0);
+        pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
         pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR;
         pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
-
-        Log4(("Setting VINTR intercept\n"));
-    }
-}
-
-
-#if 0
+        Log4(("Set VINTR intercept\n"));
+    }
+}
+
+
 /**
  * Clears the virtual interrupt intercept control in the VMCB as
     
@@ -3149,10 +3160,11 @@
     if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
     {
+        Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqPending == 1);
+        pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0;
         pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
-        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
-        Log4(("Clearing VINTR intercept\n"));
-    }
-}
-#endif
+        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+        Log4(("Cleared VINTR intercept\n"));
+    }
+}


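The pair of helpers above (the clear helper was previously dead code under #if 0) now always flip V_IRQ and the VINTR intercept together, with Asserts to catch them drifting apart, and invalidate both the intercepts and TPR clean-bit areas. A minimal sketch of that handshake, again over a toy stand-in for the real PSVMVMCB with illustrative bit values:

/* toy_vintr.c - sketch of the V_IRQ / VINTR-intercept handshake. */
#include <assert.h>
#include <stdint.h>

#define SVM_CTRL_INTERCEPT_VINTR     UINT64_C(0x10)  /* illustrative */
#define HMSVM_VMCB_CLEAN_INTERCEPTS  UINT64_C(0x01)  /* illustrative */
#define HMSVM_VMCB_CLEAN_TPR         UINT64_C(0x08)  /* illustrative */

typedef struct TOYVMCB
{
    uint64_t u64InterceptCtrl;
    uint64_t u64VmcbCleanBits;
    unsigned u1VIrqPending;  /* V_IRQ: a virtual interrupt is pending for the guest. */
} TOYVMCB;

/* Arm a vector-less virtual interrupt and intercept VINTR so the CPU
 * #VMEXITs as soon as the guest is able to accept an interrupt. */
static void toySetVirtIntrIntercept(TOYVMCB *pVmcb)
{
    if (!(pVmcb->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
    {
        assert(pVmcb->u1VIrqPending == 0);
        pVmcb->u1VIrqPending     = 1;
        pVmcb->u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR;
        pVmcb->u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
    }
}

/* Disarm both halves together; keeping V_IRQ paired with the intercept is
 * exactly what the changeset's Asserts enforce. */
static void toyClearVirtIntrIntercept(TOYVMCB *pVmcb)
{
    if (pVmcb->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
    {
        assert(pVmcb->u1VIrqPending == 1);
        pVmcb->u1VIrqPending     = 0;
        pVmcb->u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
        pVmcb->u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
    }
}

int main(void)
{
    TOYVMCB Vmcb = { 0, ~UINT64_C(0), 0 };
    toySetVirtIntrIntercept(&Vmcb);    /* Interrupt pending for the guest: request an interrupt-window #VMEXIT. */
    toyClearVirtIntrIntercept(&Vmcb);  /* Window reached (or no longer wanted): disarm both together. */
    return 0;
}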
     
@@ -6911,10 +6923,5 @@

     PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
-    pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 0;  /* No virtual interrupts pending, we'll inject the current one/NMI before reentry. */
-    pVmcb->ctrl.IntCtrl.n.u8VIntrVector = 0;
-
-    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts/NMIs, it is now ready. */
-    pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
-    pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
+    hmR0SvmClearVirtIntrIntercept(pVmcb);

     /* Deliver the pending interrupt/NMI via hmR0SvmEvaluatePendingEvent() and resume guest execution. */
     
@@ -7459,4 +7466,12 @@
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
     VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr);
+
+    /*
+     * The guest should no longer receive interrupts. Until VGIF is supported,
+     * clear virtual interrupt intercepts here.
+     */
+    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    hmR0SvmClearVirtIntrIntercept(pVmcb);
+
     return VBOXSTRICTRC_VAL(rcStrict);
 }
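
CLGI clears the guest's global interrupt flag (GIF), after which the guest cannot take interrupts; without VGIF hardware support, a still-armed VINTR intercept would be requesting an interrupt window that can never open. A sketch of the ordering this hunk establishes, with hypothetical toy* helpers standing in for the real IEM/HM plumbing:

/* toy_clgi.c - sketch of the CLGI emulation ordering; helper names are
 * hypothetical stand-ins, not the real VirtualBox APIs. */
#include <stdbool.h>
#include <stdint.h>

#define SVM_CTRL_INTERCEPT_VINTR UINT64_C(0x10)  /* illustrative */

typedef struct TOYVMCB
{
    uint64_t u64InterceptCtrl;
    unsigned u1VIrqPending;
} TOYVMCB;

static bool g_fGif = true;  /* The guest's global interrupt flag (what VGIF would track in hardware). */

/* Stand-in for IEMExecDecodedClgi: the instruction emulator clears GIF. */
static void toyEmulateClgiInstruction(void)
{
    g_fGif = false;
}

static void toyClearVirtIntrIntercept(TOYVMCB *pVmcb)
{
    pVmcb->u1VIrqPending     = 0;
    pVmcb->u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
}

int main(void)
{
    TOYVMCB Vmcb = { SVM_CTRL_INTERCEPT_VINTR, 1 };  /* An interrupt-window request is armed. */

    /* 1. Emulate the instruction itself. */
    toyEmulateClgiInstruction();

    /* 2. With GIF clear the guest cannot take interrupts, so the armed
     *    interrupt-window request is now meaningless; without VGIF the
     *    hypervisor must disarm it by hand, which is what this hunk adds. */
    toyClearVirtIntrIntercept(&Vmcb);
    return 0;
}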
