Changeset 82016 in vbox for trunk/src/VBox
- Timestamp:
- Nov 20, 2019 10:09:45 AM (5 years ago)
- Location:
- trunk/src/VBox/VMM
- Files:
- 2 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r81861 r82016 4953 4953 4954 4954 /* 4955 * NMIs to the guest are blocked after an NMI is injected until the guest executes an IRET. We only 4956 * bother with virtual-NMI blocking when we have support for virtual NMIs in the CPU, otherwise 4957 * setting this would block host-NMIs and IRET will not clear the blocking. 4958 * 4959 * We always set NMI-exiting so when the host receives an NMI we get a VM-exit. 4960 * 4961 * See Intel spec. 26.6.1 "Interruptibility state". See @bugref{7445}. 4962 */ 4963 PCVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 4964 if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI) 4965 && CPUMIsGuestNmiBlocking(pVCpu)) 4955 * Check if we should inhibit NMI delivery. 4956 */ 4957 if (CPUMIsGuestNmiBlocking(pVCpu)) 4966 4958 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI; 4967 4959 … … 8563 8555 * event. 8564 8556 * 8557 * Toggling of interrupt force-flags here is safe since we update TRPM on premature 8558 * exits to ring-3 before executing guest code, see hmR0VmxExitToRing3(). We must 8559 * NOT restore these force-flags. 8560 * 8565 8561 * @returns Strict VBox status code (i.e. informational status codes too). 8566 8562 * @param pVCpu The cross context virtual CPU structure. … … 8578 8574 8579 8575 /* 8580 * Get the current interruptibility-state of the guest or nested-guest and 8581 * then figure out what needs to be injected. 8582 */ 8583 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pVmxTransient); 8584 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS); 8585 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI); 8586 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI); 8587 8588 /* We don't support block-by-SMI yet.*/ 8589 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); 8590 8591 /* Block-by-STI must not be set when interrupts are disabled. 
*/ 8592 if (fBlockSti) 8593 { 8594 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); 8595 Assert(pCtx->eflags.Bits.u1IF); 8596 } 8597 8598 /* Update interruptibility state to the caller. */ 8599 *pfIntrState = fIntrState; 8600 8601 /* 8602 * Toggling of interrupt force-flags here is safe since we update TRPM on 8603 * premature exits to ring-3 before executing guest code, see hmR0VmxExitToRing3(). 8604 * We must NOT restore these force-flags. 8605 */ 8606 8607 /** @todo SMI. SMIs take priority over NMIs. */ 8608 8609 /* 8610 * Check if an NMI is pending and if the guest or nested-guest can receive them. 8611 * NMIs take priority over external interrupts. 8612 */ 8613 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) 8614 { 8615 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */ 8616 if ( !pVCpu->hm.s.Event.fPending 8617 && !fBlockNmi 8618 && !fBlockSti 8619 && !fBlockMovSS) 8620 { 8621 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 8622 if ( fIsNestedGuest 8623 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT)) 8624 return IEMExecVmxVmexitXcptNmi(pVCpu); 8576 * Compute/update guest-interruptibility state related FFs. 8577 */ 8578 /** @todo r=ramshankar: Move this outside this function to the caller. */ 8579 { 8580 /* Get the current interruptibility-state of the guest or nested-guest (this updates FFs). */ 8581 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pVmxTransient); 8582 8583 #ifdef VBOX_STRICT 8584 /* Validate. */ 8585 Assert(!(fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/ 8586 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI) 8587 { 8588 /* Block-by-STI must not be set when interrupts are disabled. 
*/ 8589 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RFLAGS); 8590 Assert(pCtx->eflags.Bits.u1IF); 8591 } 8625 8592 #endif 8626 hmR0VmxSetPendingXcptNmi(pVCpu); 8627 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI); 8628 Log4Func(("Pending NMI\n")); 8629 } 8630 else if (!fIsNestedGuest) 8631 hmR0VmxSetNmiWindowExitVmcs(pVCpu, pVmcsInfo); 8632 /* else: for nested-guests, NMI-window exiting will be picked up when merging VMCS controls. */ 8633 } 8634 /* 8635 * Check if an external interrupt (PIC/APIC) is pending and if the guest or nested-guest 8636 * can receive them. Once PDMGetInterrupt() returns a valid interrupt we -must- deliver 8637 * the interrupt. We can no longer re-request it from the APIC. 8638 */ 8639 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) 8640 && !pVCpu->hm.s.fSingleInstruction) 8641 { 8642 Assert(!DBGFIsStepping(pVCpu)); 8643 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS); 8644 AssertRCReturn(rc, rc); 8645 8646 bool const fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF); 8647 if ( !pVCpu->hm.s.Event.fPending 8648 && !fBlockInt 8649 && !fBlockSti 8650 && !fBlockMovSS) 8651 { 8652 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 8653 if ( fIsNestedGuest 8654 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT) 8655 && !CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT)) 8656 { 8657 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */); 8658 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE); 8659 return rcStrict; 8660 } 8661 #endif 8662 uint8_t u8Interrupt; 8663 rc = PDMGetInterrupt(pVCpu, &u8Interrupt); 8664 if (RT_SUCCESS(rc)) 8593 8594 /* Update interruptibility state to the caller. */ 8595 *pfIntrState = fIntrState; 8596 } 8597 8598 /* 8599 * Evaluate if a new event needs to be injected. 8600 * An event that's already pending has already performed all necessary checks. 
8601 */ 8602 if ( !pVCpu->hm.s.Event.fPending 8603 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 8604 { 8605 /** @todo SMI. SMIs take priority over NMIs. */ 8606 8607 /* 8608 * NMIs. 8609 * NMIs take priority over external interrupts. 8610 */ 8611 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) 8612 { 8613 /* 8614 * For a guest, the FF always indicates the guest's ability to receive an NMI. 8615 * 8616 * For a nested-guest, the FF always indicates the outer guest's ability to 8617 * receive an NMI while the guest-interruptibility state bit depends on whether 8618 * the nested-hypervisor is using virtual-NMIs. 8619 */ 8620 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 8665 8621 { 8666 8622 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 8667 8623 if ( fIsNestedGuest 8668 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT) 8669 && CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT)) 8624 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_NMI_EXIT)) 8625 return IEMExecVmxVmexitXcptNmi(pVCpu); 8626 #endif 8627 hmR0VmxSetPendingXcptNmi(pVCpu); 8628 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI); 8629 Log4Func(("NMI pending injection\n")); 8630 8631 /* We've injected the NMI, bail. */ 8632 return VINF_SUCCESS; 8633 } 8634 else if (!fIsNestedGuest) 8635 hmR0VmxSetNmiWindowExitVmcs(pVCpu, pVmcsInfo); 8636 } 8637 8638 /* 8639 * External interrupts (PIC/APIC). 8640 * Once PDMGetInterrupt() returns a valid interrupt we -must- deliver it. 8641 * We cannot re-request the interrupt from the controller again. 
8642 */ 8643 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) 8644 && !pVCpu->hm.s.fSingleInstruction) 8645 { 8646 Assert(!DBGFIsStepping(pVCpu)); 8647 int rc = hmR0VmxImportGuestState(pVCpu, pVmcsInfo, CPUMCTX_EXTRN_RFLAGS); 8648 AssertRC(rc); 8649 8650 if (pCtx->eflags.u32 & X86_EFL_IF) 8651 { 8652 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 8653 if ( fIsNestedGuest 8654 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT) 8655 && !CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT)) 8670 8656 { 8671 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */);8657 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */); 8672 8658 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE); 8673 8659 return rcStrict; 8674 8660 } 8675 8661 #endif 8676 hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt); 8677 Log4Func(("Pending external interrupt vector %#x\n", u8Interrupt)); 8662 uint8_t u8Interrupt; 8663 rc = PDMGetInterrupt(pVCpu, &u8Interrupt); 8664 if (RT_SUCCESS(rc)) 8665 { 8666 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 8667 if ( fIsNestedGuest 8668 && CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT) 8669 && CPUMIsGuestVmxExitCtlsSet(pCtx, VMX_EXIT_CTLS_ACK_EXT_INT)) 8670 { 8671 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, u8Interrupt, false /* fIntPending */); 8672 Assert(rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE); 8673 return rcStrict; 8674 } 8675 #endif 8676 hmR0VmxSetPendingExtInt(pVCpu, u8Interrupt); 8677 Log4Func(("External interrupt (%#x) pending injection\n", u8Interrupt)); 8678 } 8679 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR) 8680 { 8681 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq); 8682 8683 if ( !fIsNestedGuest 8684 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)) 8685 hmR0VmxApicSetTprThreshold(pVmcsInfo, u8Interrupt >> 4); 8686 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. 
*/ 8687 8688 /* 8689 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and 8690 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no 8691 * need to re-set this force-flag here. 8692 */ 8693 } 8694 else 8695 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq); 8696 8697 /* We've injected the interrupt or taken necessary action, bail. */ 8698 return VINF_SUCCESS; 8678 8699 } 8679 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR) 8680 { 8681 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq); 8682 8683 if ( !fIsNestedGuest 8684 && (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)) 8685 hmR0VmxApicSetTprThreshold(pVmcsInfo, u8Interrupt >> 4); 8686 /* else: for nested-guests, TPR threshold is picked up while merging VMCS controls. */ 8687 8688 /* 8689 * If the CPU doesn't have TPR shadowing, we will always get a VM-exit on TPR changes and 8690 * APICSetTpr() will end up setting the VMCPU_FF_INTERRUPT_APIC if required, so there is no 8691 * need to re-set this force-flag here. 8692 */ 8693 } 8694 else 8695 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq); 8696 } 8697 else if (!fIsNestedGuest) 8700 else if (!fIsNestedGuest) 8701 hmR0VmxSetIntWindowExitVmcs(pVCpu, pVmcsInfo); 8702 } 8703 } 8704 else if (!fIsNestedGuest) 8705 { 8706 /* 8707 * An event is being injected or we are in an interrupt shadow. Check if another event is 8708 * pending. If so, instruct VT-x to cause a VM-exit as soon as the guest is ready to accept 8709 * the pending event. 8710 */ 8711 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) 8712 hmR0VmxSetNmiWindowExitVmcs(pVCpu, pVmcsInfo); 8713 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) 8714 && !pVCpu->hm.s.fSingleInstruction) 8698 8715 hmR0VmxSetIntWindowExitVmcs(pVCpu, pVmcsInfo); 8699 /* else: for nested-guests, interrupt-window exiting will be picked up when merging VMCS controls. 
*/8700 }8716 } 8717 /* else: for nested-guests, NMI/interrupt-window exiting will be picked up when merging VMCS controls. */ 8701 8718 8702 8719 return VINF_SUCCESS; … … 13091 13108 * Execution of IRET caused a fault when NMI blocking was in effect (i.e we're in 13092 13109 * the guest or nested-guest NMI handler). We need to set the block-by-NMI field so 13093 * that NMIs remain blocked until the IRET execution is completed.13110 * that virtual NMIs remain blocked until the IRET execution is completed. 13094 13111 * 13095 13112 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception". … … 13106 13123 * SPP-related event VM-exit when NMI blocking was in effect (i.e. we're in the 13107 13124 * guest or nested-guest NMI handler). We need to set the block-by-NMI field so 13108 * that NMIs remain blocked until the IRET execution is completed.13125 * that virtual NMIs remain blocked until the IRET execution is completed. 13109 13126 * 13110 13127 * See Intel spec. 27.2.3 "Information about NMI unblocking due to IRET" -
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r81792 r82016 1869 1869 EmuFeat.fVmxExtIntExit = 1; 1870 1870 EmuFeat.fVmxNmiExit = 1; 1871 EmuFeat.fVmxVirtNmi = 0;1871 EmuFeat.fVmxVirtNmi = 1; 1872 1872 EmuFeat.fVmxPreemptTimer = 0; /* Currently disabled on purpose, see @bugref{9180#c108}. */ 1873 1873 EmuFeat.fVmxPostedInt = 0;
Note: See TracChangeset for help on using the changeset viewer.