- Timestamp: Apr 30, 2018 6:27:34 AM (7 years ago)
- Location: trunk
- Files: 7 edited
trunk/include/VBox/vmm/cpum.h
(r71859 → r72065)

Adds the declaration of the new virtual-GIF accessor next to the existing nested-guest interrupt helpers:

     VMM_INT_DECL(bool)     CPUMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
     VMM_INT_DECL(uint8_t)  CPUMGetSvmNstGstInterrupt(PCCPUMCTX pCtx);
    +VMM_INT_DECL(bool)     CPUMGetSvmNstGstVGif(PCCPUMCTX pCtx);
     VMM_INT_DECL(void)     CPUMSvmVmExitRestoreHostState(PVMCPU pVCpu, PCPUMCTX pCtx);
     VMM_INT_DECL(void)     CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
trunk/include/VBox/vmm/hm_svm.h
(r71969 → r72065)

Adds pointer typedefs for the SVMINTCTRL union and drops the declarations of the HM interrupt-readiness helpers that are removed elsewhere in this changeset:

         uint64_t    u;
     } SVMINTCTRL;
    +/** Pointer to an SVMINTCTRL structure. */
    +typedef SVMINTCTRL *PSVMINTCTRL;
    +/** Pointer to a const SVMINTCTRL structure. */
    +typedef const SVMINTCTRL *PCSVMINTCTRL;

     VMM_INT_DECL(bool)     HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx);
     VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx);
    -VMM_INT_DECL(bool)     HMCanSvmNstGstTakePhysIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);
    -VMM_INT_DECL(bool)     HMCanSvmNstGstTakeVirtIntr(PVMCPU pVCpu, PCCPUMCTX pCtx);

     /** @} */
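The new const-pointer typedef is what the CPUM changes below use to read VMCB interrupt-control fields in place. A minimal sketch of that access pattern, not part of the changeset; the helper name is hypothetical and the field names are the ones used elsewhere in this diff:

    #include <VBox/vmm/hm_svm.h>

    /* Hypothetical helper: return the vector programmed for virtual-interrupt
       injection, or 0 when no virtual interrupt (V_IRQ) is pending. */
    static uint8_t sketchGetPendingVIntrVector(PCSVMVMCBCTRL pVmcbCtrl)
    {
        PCSVMINTCTRL pIntCtrl = &pVmcbCtrl->IntCtrl;
        if (pIntCtrl->n.u1VIrqPending)          /* V_IRQ set by the nested hypervisor or the VMM */
            return pIntCtrl->n.u8VIntrVector;   /* V_INTR_VECTOR */
        return 0;
    }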
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(r70948 → r72065)

CPUMCanSvmNstGstTakePhysIntr no longer special-cases whether HM has cached the VMCB (and no longer falls back to HMCanSvmNstGstTakePhysIntr); it now always asks CPUMIsGuestSvmVirtIntrMasking for V_INTR_MASKING and tests IF in the appropriate RFLAGS image:

        Assert(pCtx->hwvirt.fGif);

        X86EFLAGS fEFlags;
        if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
            fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
        else
            fEFlags.u = pCtx->eflags.u;

        return fEFlags.Bits.u1IF;

CPUMCanSvmNstGstTakeVirtIntr likewise drops the fHMCachedVmcb check, the "play it safe and ask HM" comment, and the fall-back to HMCanSvmNstGstTakeVirtIntr, reading the VMCB interrupt-control fields directly:

        PCSVMVMCBCTRL pVmcbCtrl    = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
        PCSVMINTCTRL  pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
        if (   !pVmcbIntCtrl->n.u1IgnoreTPR
            &&  pVmcbIntCtrl->n.u4VIntrPrio <= pVmcbIntCtrl->n.u8VTPR)
            return false;

        X86EFLAGS fEFlags;
        if (CPUMIsGuestSvmVirtIntrMasking(pVCpu, pCtx))
            fEFlags.u = pCtx->eflags.u;
        else
            fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;

        return fEFlags.Bits.u1IF;

A new accessor for the nested-guest virtual GIF is added after CPUMGetSvmNstGstInterrupt:

    /**
     * Gets the SVM nested-guest virtual GIF.
     *
     * @returns The nested-guest virtual GIF.
     * @param   pCtx    The guest-CPU context.
     */
    VMM_INT_DECL(bool) CPUMGetSvmNstGstVGif(PCCPUMCTX pCtx)
    {
    #ifdef IN_RC
        RT_NOREF(pCtx);
        AssertReleaseFailedReturn(false);
    #else
        PCSVMVMCBCTRL pVmcbCtrl    = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
        PCSVMINTCTRL  pVmcbIntCtrl = &pVmcbCtrl->IntCtrl;
        if (pVmcbIntCtrl->n.u1VGifEnable)
            return pVmcbIntCtrl->n.u1VGif;
        return true;
    #endif
    }
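A minimal usage sketch, not part of the changeset, showing how a caller such as the EM loop (see the EM.cpp hunk below) might combine the new accessor with the existing readiness check; the wrapper function itself is hypothetical:

    #include <VBox/vmm/cpum.h>

    /* Hypothetical wrapper: can a physical (APIC/PIC) interrupt be delivered
       to the nested-guest right now? */
    static bool sketchCanDeliverPhysIntrToNstGst(PVMCPU pVCpu, PCCPUMCTX pCtx)
    {
        /* The virtual GIF gates everything; when VGIF is not enabled in the
           VMCB the accessor returns true and only the check below matters. */
        if (!CPUMGetSvmNstGstVGif(pCtx))
            return false;

        /* Otherwise IF in the guest or host RFLAGS image (depending on
           V_INTR_MASKING) decides whether the interrupt can be taken. */
        return CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
    }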
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
(r71966 → r72065)

Removes HMCanSvmNstGstTakePhysIntr and HMCanSvmNstGstTakeVirtIntr, the helpers that evaluated nested-guest interrupt readiness against the HM VMCB cache (pVCpu->hm.s.svm.NstGstVmcbCache, e.g. its fVIntrMasking field) rather than the nested-guest VMCB itself. With the CPUMAllRegs.cpp changes above, their remaining callers are gone.
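For background, the removed helpers consulted the cache because once HM starts executing the nested-guest with hardware-assisted SVM it may reprogram the live VMCB, so the values the nested hypervisor originally wrote have to come from the cache. A minimal sketch of that lookup pattern, not part of the changeset; the helper name is hypothetical and the structures are the VMM-internal ones visible to HM ring-0 code:

    /* Hypothetical helper: fetch the V_INTR_MASKING setting the nested hypervisor
       programmed, preferring the HM cache once the live VMCB may have been
       modified for hardware-assisted SVM execution. */
    static bool sketchIsNstGstVIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx)
    {
        if (pCtx->hwvirt.svm.fHMCachedVmcb)
            return pVCpu->hm.s.svm.NstGstVmcbCache.fVIntrMasking;
        return RT_BOOL(pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl.IntCtrl.n.u1VIntrMasking);
    }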
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
(r71970 → r72065)

In the nested-guest #VMEXIT path, the V_IRQ write-back to the guest-memory VMCB is restructured around an inverted check of the force-flag:

    -    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))   /* V_IRQ. */
    +    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))  /* V_IRQ. */
    +        pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 0;
    +    else
         {
             Assert(pVmcbCtrl->IntCtrl.n.u1VIrqPending);
    -        pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 1;
             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
         }
    -    else
    -        pVmcbMemCtrl->IntCtrl.n.u1VIrqPending = 0;

The adjacent V_TPR comment is trimmed ("Already updated by iemCImpl_load_CrX ..." becomes "Updated by iemCImpl_load_CrX or by the physical CPU for hardware-assisted SVM execution").

In iemSvmVmrun, the virtual-GIF validation is fixed so the "Virtual GIF not supported -> Disabling" path is only taken when the nested-guest VMCB enables VGIF but the feature is not exposed to the guest:

    -    if (   pVmcbCtrl->IntCtrl.n.u1VGifEnable
    -        && pVM->cpum.ro.GuestFeatures.fSvmVGif)
    +    if (    pVmcbCtrl->IntCtrl.n.u1VGifEnable
    +        && !pVM->cpum.ro.GuestFeatures.fSvmVGif)
         {
             Log(("iemSvmVmrun: Virtual GIF not supported -> Disabling\n"));
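A minimal sketch, not part of the changeset, of the feature-gating pattern the corrected check implements; what happens after the log statement (clearing the enable bit) is an assumption made purely for illustration:

    /* Hypothetical: honour a VMCB control bit only if the matching CPU feature
       is exposed to the guest; otherwise ignore it. */
    static void sketchGateVGif(PVM pVM, PSVMVMCBCTRL pVmcbCtrl)
    {
        if (    pVmcbCtrl->IntCtrl.n.u1VGifEnable
            && !pVM->cpum.ro.GuestFeatures.fSvmVGif)
        {
            Log(("Virtual GIF not supported -> Disabling\n"));
            pVmcbCtrl->IntCtrl.n.u1VGifEnable = 0;  /* assumption: the real code may differ */
        }
    }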
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(r71970 → r72065)

The bulk of the changeset; summarized here by hunk:

- The VMCB clean-bit define HMSVM_VMCB_CLEAN_TPR (bit 3, covering V_TPR, V_IRQ, V_INTR_PRIO, V_IGN_TPR, V_INTR_MASKING and V_INTR_VECTOR) is renamed to HMSVM_VMCB_CLEAN_INT_CTRL, and every place that cleared the old bit now clears the new one.
- The nested-guest VMCB helpers are renamed: hmR0SvmVmRunCacheVmcb becomes hmR0SvmCacheVmcbNested and hmR0SvmVmRunSetupVmcb becomes hmR0SvmSetupVmcbNested. The setup call is moved out of the pre-run path into the nested-guest state-loading routine, right before hmR0SvmLoadGuestControlRegs, and the nested-guest V_IRQ/force-flag sync is simplified to clear VMCPU_FF_INTERRUPT_NESTED_GUEST in a single combined condition.
- hmR0SvmSetVirtIntrIntercept / hmR0SvmClearVirtIntrIntercept are replaced by hmR0SvmSetIntWindowExiting / hmR0SvmClearIntWindowExiting, which now take pVCpu and pCtx. The comments spell out that on AMD-V an interrupt window is the combination of V_IRQ pending, V_IGN_TPR and the VINTR intercept; when a nested-guest V_IRQ is already pending, no window is set up on behalf of the outer guest (with a todo about the resulting prioritization). The core of the new setup path, as shown further below in this section:

        if (fEnableIntWindow)
        {
            Assert(pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR);
            pVmcb->ctrl.IntCtrl.n.u1VIrqPending = 1;
            pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INT_CTRL;
            hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_VINTR);
            Log4(("Set VINTR intercept\n"));
        }

- hmR0SvmSetIretIntercept / hmR0SvmClearIretIntercept are removed; the IRET intercept is now toggled through the generic hmR0SvmSetCtrlIntercept / hmR0SvmClearCtrlIntercept helpers.
- hmR0SvmEvaluatePendingEventNested is rewritten: it asserts GIF is set, fetches the current VMCB via hmR0SvmGetCurrentVmcb, and evaluates NMIs and external (PIC/APIC) interrupts against the nested-guest virtual GIF (CPUMGetSvmNstGstVGif), NMI blocking and the interrupt shadow. NMI/INTR intercepts programmed by the nested hypervisor cause a nested-guest #VMEXIT via IEMExecSvmVmexit; deliverable events are queued with hmR0SvmSetPendingEvent; when the virtual GIF is clear an STGI intercept (SVM_CTRL_INTERCEPT_STGI) is set, otherwise an interrupt window is requested. The previously #if 0'd VINTR-intercept experiment is dropped.
- hmR0SvmEvaluatePendingEvent (non-nested) is restructured the same way, logging fGif/fBlockNmi/fBlockInt/fIntShadow up front, setting the STGI intercept when GIF is clear and hmR0SvmSetIntWindowExiting otherwise.
- hmR0SvmInjectPendingEvent now sets VMCPU_FF_BLOCK_NMIS right before an NMI is actually injected (instead of in hmR0SvmPreRunGuest / hmR0SvmPreRunGuestNested, whose copies of that logic are deleted), and sets the IRET intercept whenever NMIs are blocked so that the inhibition is eventually cleared even for NMIs injected through IEM.
- hmR0SvmExitVIntr uses hmR0SvmClearIntWindowExiting; hmR0SvmExitIret clears VMCPU_FF_BLOCK_NMIS only when it is pending and clears the IRET intercept via hmR0SvmClearCtrlIntercept. The STGI #VMEXIT handler clears the STGI intercept again when VGIF is in use, since with VGIF the instruction is only intercepted while waiting for GIF to become 1.
- Minor tidying: added Assert(pVmcb) checks, comment fixes (hmR0SvmEvaluatePendingEvent vs. hmR0SvmInjectPendingEvent, "uniformly across VCPUs initially"), dropping "== 0"/"== 1" comparisons on single-bit fields, and removal of stale @todo notes about nested-guest triple faults and unimplemented STGI/VMLOAD/VMSAVE statistics counters.
trunk/src/VBox/VMM/VMMR3/EM.cpp
(r71293 → r72065)

When the guest is in SVM nested hardware-virtualization mode, EM now asserts that the real GIF is set and gates interrupt delivery on the nested-guest's virtual GIF instead:

         if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
         {
             PVM pVM = pVCpu->CTX_SUFF(pVM);
    -        bool fGif = pCtx->hwvirt.fGif;
    +        Assert(pCtx->hwvirt.fGif);
    +        bool fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
     #ifdef VBOX_WITH_RAW_MODE
    -        fGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
    +        fVirtualGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
     #endif
    -        if (fGif)
    +        if (fVirtualGif)
             {
                 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))