Changeset 67204 in vbox
- Timestamp: Jun 1, 2017 11:55:18 AM
- svn:sync-xref-src-repo-rev: 115886
- Location: trunk
- Files: 3 edited
trunk/include/VBox/vmm/hm.h
r67156 → r67204

 VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr, RTGCPHYS GCPhysVmcb);
 VMM_INT_DECL(uint8_t)      HMSvmNstGstGetInterrupt(PCCPUMCTX pCtx);
-VMM_INT_DECL(bool)         HMSvmNstGstCanTakeInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx);
+VMM_INT_DECL(bool)         HMSvmNstGstCanTakePhysInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx);
+VMM_INT_DECL(bool)         HMSvmNstGstCanTakeVirtInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx);
 VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstHandleCtrlIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode,
                                                           uint64_t uExitInfo1, uint64_t uExitInfo2);
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r67200 → r67204

 /**
- * Checks whether an interrupt is pending for the nested-guest.
+ * Checks whether the nested-guest is in a state to receive physical (APIC)
+ * interrupts.
  *
  * @returns VBox status code.
- * @retval  true if there's a pending interrupt, false otherwise.
+ * @retval  true if it's ready, false otherwise.
  *
  * @param   pVCpu   The cross context virtual CPU structure.
  * @param   pCtx    The guest-CPU context.
  */
-VMM_INT_DECL(bool) HMSvmNstGstCanTakeInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx)
+VMM_INT_DECL(bool) HMSvmNstGstCanTakePhysInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx)
 {
+    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
+    RT_NOREF(pVCpu);
+
     PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;
+    X86EFLAGS fEFlags;
+    if (!pVmcbCtrl->IntCtrl.n.u1VIntrMasking)
+        fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
+    else
+        fEFlags.u = pCtx->eflags.u;
+
+    return pCtx->hwvirt.svm.fGif && fEFlags.Bits.u1IF;
+}
+
+
+/**
+ * Checks whether the nested-guest is in a state to receive virtual (injected by
+ * VMRUN) interrupts.
+ *
+ * @returns VBox status code.
+ * @retval  true if it's ready, false otherwise.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pCtx    The guest-CPU context.
+ */
+VMM_INT_DECL(bool) HMSvmNstGstCanTakeVirtInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx)
+{
     Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
-
-    X86RFLAGS RFlags;
-    if (pVmcbCtrl->IntCtrl.n.u1VIntrMasking)
-        RFlags.u = pCtx->rflags.u;
-    else
-        RFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;
-
-    if (!RFlags.Bits.u1IF)
-        return false;
-
+    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
+    Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
+    RT_NOREF(pVCpu);
+
+    PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;
     if (   !pVmcbCtrl->IntCtrl.n.u1IgnoreTPR
         &&  pVmcbCtrl->IntCtrl.n.u4VIntrPrio <= pVmcbCtrl->IntCtrl.n.u8VTPR)
         return false;
 
-    /* Paranoia. */
-    Assert(RT_BOOL(pCtx->hwvirt.svm.fGif));
-    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
-    Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));
-    RT_NOREF(pVCpu);
+    if (!pCtx->rflags.Bits.u1IF)
+        return false;
+
+    if (!pCtx->hwvirt.svm.fGif)
+        return false;
+
     return true;
 }
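Read side by side, the two helpers differ mainly in which RFLAGS.IF they consult and whether the virtual TPR is taken into account. The sketch below is a minimal, self-contained restatement of that logic for illustration only: the NSTGSTSTATE type, its field names and the model* functions are hypothetical stand-ins, not the VBox API, and they simply mirror the checks added in this changeset.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the bits the real code reads from CPUMCTX / SVMVMCBCTRL. */
typedef struct NSTGSTSTATE
{
    bool    fGif;           /* Global interrupt flag.                      */
    bool    fVIntrMasking;  /* VMCB IntCtrl.u1VIntrMasking.                */
    bool    fIgnoreTPR;     /* VMCB IntCtrl.u1IgnoreTPR.                   */
    uint8_t uVIntrPrio;     /* VMCB IntCtrl.u4VIntrPrio.                   */
    uint8_t uVTPR;          /* VMCB IntCtrl.u8VTPR.                        */
    bool    fGuestIF;       /* Nested-guest RFLAGS.IF.                     */
    bool    fHostIF;        /* RFLAGS.IF from the saved host state.        */
} NSTGSTSTATE;

/* Physical (APIC) interrupts: GIF must be set; the gating IF comes from the
   saved host state when V_INTR_MASKING is clear, otherwise from the
   nested-guest's own RFLAGS -- mirroring HMSvmNstGstCanTakePhysInterrupt. */
static bool modelCanTakePhysInterrupt(const NSTGSTSTATE *pState)
{
    bool const fIF = pState->fVIntrMasking ? pState->fGuestIF : pState->fHostIF;
    return pState->fGif && fIF;
}

/* Virtual (VMRUN-injected) interrupts: blocked when the virtual interrupt
   priority does not exceed V_TPR (unless V_IGN_TPR is set), and additionally
   require the nested-guest's IF and GIF -- mirroring HMSvmNstGstCanTakeVirtInterrupt. */
static bool modelCanTakeVirtInterrupt(const NSTGSTSTATE *pState)
{
    if (!pState->fIgnoreTPR && pState->uVIntrPrio <= pState->uVTPR)
        return false;
    return pState->fGuestIF && pState->fGif;
}

int main(void)
{
    /* Sample state: GIF set, V_INTR_MASKING set, prio 4 above V_TPR 2, guest IF set. */
    NSTGSTSTATE State = { true, true, false, 4, 2, true, false };
    printf("phys=%d virt=%d\n", modelCanTakePhysInterrupt(&State), modelCanTakeVirtInterrupt(&State));
    return 0;
}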
trunk/src/VBox/VMM/VMMR3/EM.cpp
r66994 → r67204

              * Interrupts.
              */
+            /** @todo this can be optimized a bit. later. */
             bool fWakeupPending = false;
             if (   !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
…
             {
                 if (   !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
-                    && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
+                    && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
+                {
+                    bool fIntrEnabled;
+                    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
 #ifdef VBOX_WITH_RAW_MODE
-                    && PATMAreInterruptsEnabled(pVM)
+                    fIntrEnabled = PATMAreInterruptsEnabled(pVM); RT_NOREF(pCtx);
+#elif defined(VBOX_WITH_NESTED_HWVIRT)
+                    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+                        fIntrEnabled = HMSvmNstGstCanTakePhysInterrupt(pVCpu, pCtx);
+                    else
+                        fIntrEnabled = pCtx->eflags.Bits.u1IF;
 #else
-                    && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
-#endif
+                    fIntrEnabled = pCtx->eflags.Bits.u1IF;
+#endif
+                    if (fIntrEnabled)
+                    {
+                        Assert(!HMR3IsEventPending(pVCpu));
+                        Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
+                        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
+                        {
 #ifdef VBOX_WITH_NESTED_HWVIRT
-                    && pVCpu->em.s.pCtx->hwvirt.svm.fGif
-#endif
-                    )
-                {
-                    Assert(!HMR3IsEventPending(pVCpu));
-                    Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
-                    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
-                    {
-#ifdef VBOX_WITH_NESTED_HWVIRT
-                        PCPUMCTX pCtx = pVCpu->em.s.pCtx;
-                        if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_INTR))
-                        {
-                            VBOXSTRICTRC rcStrict = HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INTR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
-                            if (rcStrict == VINF_SVM_VMEXIT)
-                                rc2 = VINF_EM_RESCHEDULE;
-                            else
+                            if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_INTR))
                             {
-                                Log(("EM: SVM Nested-guest INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
-                                /** @todo should we call iemInitiateCpuShutdown? Should this
-                                 *        result in trapping triple-fault intercepts? */
-                                rc2 = VINF_EM_TRIPLE_FAULT;
-                            }
-                        }
-                        else
-#endif
-                        {
-                            /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
-                            /** @todo this really isn't nice, should properly handle this */
-                            rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
-                            if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
-                                rc2 = VINF_EM_RESCHEDULE;
-#ifdef VBOX_STRICT
-                            rcIrq = rc2;
-#endif
-                        }
-                        UPDATE_RC();
-                        /* Reschedule required: We must not miss the wakeup below! */
-                        fWakeupPending = true;
-                    }
-#ifdef VBOX_WITH_NESTED_HWVIRT
-                    else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
-                    {
-                        PCPUMCTX pCtx = pVCpu->em.s.pCtx;
-                        if (HMSvmNstGstCanTakeInterrupt(pVCpu, pCtx))
-                        {
-                            /*
-                             * Check nested-guest virtual interrupts.
-                             */
-                            if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VINTR))
-                            {
-                                VBOXSTRICTRC rcStrict = HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VINTR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+                                VBOXSTRICTRC rcStrict = HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INTR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
                                 if (rcStrict == VINF_SVM_VMEXIT)
                                     rc2 = VINF_EM_RESCHEDULE;
                                 else
                                 {
-                                    Log(("EM: SVM Nested-guest VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+                                    Log(("EM: SVM Nested-guest INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                                     /** @todo should we call iemInitiateCpuShutdown? Should this
                                      *        result in trapping triple-fault intercepts? */
…
                                 }
                             }
                             else
+#endif
                             {
-                                /*
-                                 * Prepare the nested-guest interrupt for injection.
-                                 */
-                                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
-                                uint8_t uNstGstVector = HMSvmNstGstGetInterrupt(pCtx);
-                                TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
-                                /** @todo reschedule to HM/REM later, when the HMR0 nested-guest execution is
-                                 *        done. For now just reschedule to IEM. */
-                                rc2 = VINF_EM_RESCHEDULE;
+                                /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
+                                /** @todo this really isn't nice, should properly handle this */
+                                rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
+                                if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
+                                    rc2 = VINF_EM_RESCHEDULE;
+#ifdef VBOX_STRICT
+                                rcIrq = rc2;
+#endif
                             }
                             UPDATE_RC();
…
                             fWakeupPending = true;
                         }
+#ifdef VBOX_WITH_NESTED_HWVIRT
+                        else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
+                        {
+                            /*
+                             * Check nested-guest virtual interrupts.
+                             */
+                            if (HMSvmNstGstCanTakeVirtInterrupt(pVCpu, pCtx))
+                            {
+                                if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VINTR))
+                                {
+                                    VBOXSTRICTRC rcStrict = HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VINTR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+                                    if (rcStrict == VINF_SVM_VMEXIT)
+                                        rc2 = VINF_EM_RESCHEDULE;
+                                    else
+                                    {
+                                        Log(("EM: SVM Nested-guest VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+                                        /** @todo should we call iemInitiateCpuShutdown? Should this
+                                         *        result in trapping triple-fault intercepts? */
+                                        rc2 = VINF_EM_TRIPLE_FAULT;
+                                    }
+                                }
+                                else
+                                {
+                                    /*
+                                     * Prepare the nested-guest interrupt for injection.
+                                     */
+                                    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
+                                    uint8_t uNstGstVector = HMSvmNstGstGetInterrupt(pCtx);
+                                    TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
+                                    /** @todo reschedule to HM/REM later, when the HMR0 nested-guest execution is
+                                     *        done. For now just reschedule to IEM. */
+                                    rc2 = VINF_EM_RESCHEDULE;
+                                }
+                                UPDATE_RC();
+                                /* Reschedule required: We must not miss the wakeup below! */
+                                fWakeupPending = true;
+                            }
+                        }
+#endif /* VBOX_WITH_NESTED_HWVIRT */
                     }
-#endif /* VBOX_WITH_NESTED_HWVIRT */
                 }
             }
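To make the reshuffled EM.cpp control flow easier to follow, here is a simplified model of the dispatch order it implements: once interrupts are deemed enabled, physical (APIC/PIC) interrupts are considered first and nested-guest virtual interrupts second, and each is either reflected as a #VMEXIT or injected depending on the corresponding intercept. This is a hedged sketch with hypothetical types and names (INTRSTATE, modelPickAction), not the actual emR3ForcedActions() code.

#include <stdbool.h>
#include <stdio.h>

/* Possible outcomes of one pass over the pending-interrupt forced actions. */
typedef enum PENDINGACTION
{
    ACTION_NONE,
    ACTION_VMEXIT_INTR,   /* Forward the physical interrupt as an SVM_EXIT_INTR #VMEXIT.  */
    ACTION_INJECT_PHYS,   /* Inject the physical interrupt into the guest via TRPM.       */
    ACTION_VMEXIT_VINTR,  /* Forward the virtual interrupt as an SVM_EXIT_VINTR #VMEXIT.  */
    ACTION_INJECT_VIRT    /* Inject the VMRUN-provided virtual interrupt vector.          */
} PENDINGACTION;

/* Hypothetical condensed view of the state the EM.cpp hunk consults. */
typedef struct INTRSTATE
{
    bool fIntrEnabled;        /* Result of the fIntrEnabled computation above.   */
    bool fPhysIntrPending;    /* VMCPU_FF_INTERRUPT_APIC / _PIC pending.         */
    bool fNstGstIntrPending;  /* VMCPU_FF_INTERRUPT_NESTED_GUEST pending.        */
    bool fNstGstCanTakeVirt;  /* HMSvmNstGstCanTakeVirtInterrupt() result.       */
    bool fInterceptIntr;      /* SVM_CTRL_INTERCEPT_INTR set in the VMCB.        */
    bool fInterceptVIntr;     /* SVM_CTRL_INTERCEPT_VINTR set in the VMCB.       */
} INTRSTATE;

/* Physical interrupts win over nested-guest virtual ones, matching the
   if / else-if ordering of the EM.cpp hunk above. */
static PENDINGACTION modelPickAction(const INTRSTATE *pState)
{
    if (!pState->fIntrEnabled)
        return ACTION_NONE;
    if (pState->fPhysIntrPending)
        return pState->fInterceptIntr ? ACTION_VMEXIT_INTR : ACTION_INJECT_PHYS;
    if (pState->fNstGstIntrPending && pState->fNstGstCanTakeVirt)
        return pState->fInterceptVIntr ? ACTION_VMEXIT_VINTR : ACTION_INJECT_VIRT;
    return ACTION_NONE;
}

int main(void)
{
    /* Sample: no physical interrupt, deliverable nested-guest interrupt, VINTR intercepted. */
    INTRSTATE State = { true, false, true, true, false, true };
    printf("action=%d\n", (int)modelPickAction(&State)); /* Prints 3 == ACTION_VMEXIT_VINTR. */
    return 0;
}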