Changeset 97178 in vbox
Timestamp: Oct 17, 2022 9:06:03 PM (2 years ago)
svn:sync-xref-src-repo-rev: 154153
Location: trunk
Files: 22 edited
trunk/include/VBox/vmm/cpum.h
r97096 r97178 1686 1686 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn))) 1687 1687 1688 /** @def CPUMCTX_ASSERT_NOT_EXTRN 1689 * Macro for asserting that @a a_fNotExtrn are present in @a a_pCtx. 1690 * 1691 * @param a_pCtx The CPU context of the calling EMT. 1692 * @param a_fNotExtrn Mask of CPUMCTX_EXTRN_XXX bits to check. 1693 */ 1694 #define CPUMCTX_ASSERT_NOT_EXTRN(a_pCtx, a_fNotExtrn) \ 1695 AssertMsg(!((a_pCtx)->fExtrn & (a_fNotExtrn)), \ 1696 ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pCtx)->fExtrn, (a_fNotExtrn))) 1697 1688 1698 /** @def CPUM_IMPORT_EXTRN_RET 1689 1699 * Macro for making sure the state specified by @a fExtrnImport is present, … … 1907 1917 pCtx->hwvirt.fGif = fGif; 1908 1918 } 1919 1920 /** 1921 * Checks if we're in an "interrupt shadow", i.e. after a STI, POP SS or MOV SS. 1922 * 1923 * This also inhibit NMIs, except perhaps for nested guests. 1924 * 1925 * @returns true if interrupts are inhibited by interrupt shadow, false if not. 1926 * @param pCtx Current guest CPU context. 1927 * @note Requires pCtx->rip to be up to date. 1928 * @note Does not clear fInhibit when CPUMCTX::uRipInhibitInt differs 1929 * from CPUMCTX::rip. 1930 */ 1931 DECLINLINE(bool) CPUMIsInInterruptShadow(PCCPUMCTX pCtx) 1932 { 1933 if (!(pCtx->fInhibit & CPUMCTX_INHIBIT_SHADOW)) 1934 return false; 1935 1936 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP); 1937 return pCtx->uRipInhibitInt == pCtx->rip; 1938 } 1939 1940 /** 1941 * Checks if we're in an "interrupt shadow", i.e. after a STI, POPF or MOV SS, 1942 * updating the state if stale. 1943 * 1944 * This also inhibit NMIs, except perhaps for nested guests. 1945 * 1946 * @returns true if interrupts are inhibited by interrupt shadow, false if not. 1947 * @param pCtx Current guest CPU context. 1948 * @note Requires pCtx->rip to be up to date. 1949 */ 1950 DECLINLINE(bool) CPUMIsInInterruptShadowWithUpdate(PCPUMCTX pCtx) 1951 { 1952 if (!(pCtx->fInhibit & CPUMCTX_INHIBIT_SHADOW)) 1953 return false; 1954 1955 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP); 1956 if (pCtx->uRipInhibitInt == pCtx->rip) 1957 return true; 1958 1959 pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW; 1960 return false; 1961 } 1962 1963 /** 1964 * Sets the "interrupt shadow" flag, after a STI, POPF or MOV SS instruction. 1965 * 1966 * @param pCtx Current guest CPU context. 1967 * @note Requires pCtx->rip to be up to date. 1968 */ 1969 DECLINLINE(void) CPUMSetInInterruptShadow(PCPUMCTX pCtx) 1970 { 1971 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP); 1972 pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW; 1973 pCtx->uRipInhibitInt = pCtx->rip; 1974 } 1975 1976 /** 1977 * Sets the "interrupt shadow" flag, after a STI, POPF or MOV SS instruction, 1978 * extended version. 1979 * 1980 * @param pCtx Current guest CPU context. 1981 * @param rip The RIP for which it is inhibited. 1982 */ 1983 DECLINLINE(void) CPUMSetInInterruptShadowEx(PCPUMCTX pCtx, uint64_t rip) 1984 { 1985 pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW; 1986 pCtx->uRipInhibitInt = rip; 1987 } 1988 1989 /** 1990 * Clears the "interrupt shadow" flag. 1991 * 1992 * @param pCtx Current guest CPU context. 1993 */ 1994 DECLINLINE(void) CPUMClearInterruptShadow(PCPUMCTX pCtx) 1995 { 1996 pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW; 1997 } 1998 1999 /** 2000 * Update the "interrupt shadow" flag. 2001 * 2002 * @param pCtx Current guest CPU context. 2003 * @param fInhibited The new state. 2004 * @note Requires pCtx->rip to be up to date. 
2005 */ 2006 DECLINLINE(void) CPUMUpdateInterruptShadow(PCPUMCTX pCtx, bool fInhibited) 2007 { 2008 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP); 2009 if (!fInhibited) 2010 pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW; 2011 else 2012 { 2013 pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW; 2014 pCtx->uRipInhibitInt = pCtx->rip; 2015 } 2016 } 2017 2018 /** 2019 * Update the "interrupt shadow" flag, extended version. 2020 * 2021 * @returns fInhibited. 2022 * @param pCtx Current guest CPU context. 2023 * @param fInhibited The new state. 2024 * @param rip The RIP for which it is inhibited. 2025 */ 2026 DECLINLINE(bool) CPUMUpdateInterruptShadowEx(PCPUMCTX pCtx, bool fInhibited, uint64_t rip) 2027 { 2028 if (!fInhibited) 2029 pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW; 2030 else 2031 { 2032 pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW; 2033 pCtx->uRipInhibitInt = rip; 2034 } 2035 return fInhibited; 2036 } 2037 2038 /* VMX forward declarations used by extended function versions: */ 2039 DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx); 2040 DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCCPUMCTX pCtx, uint32_t uPinCtls); 2041 DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCCPUMCTX pCtx); 2042 DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCPUMCTX pCtx, bool fBlocking); 2043 2044 /** 2045 * Checks whether interrupts, include NMIs, are inhibited by pending NMI 2046 * delivery. 2047 * 2048 * This only checks the inhibit mask. 2049 * 2050 * @retval true if interrupts are inhibited by NMI handling. 2051 * @retval false if interrupts are not inhibited by NMI handling. 2052 * @param pCtx Current guest CPU context. 2053 */ 2054 DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmi(PCCPUMCTX pCtx) 2055 { 2056 return (pCtx->fInhibit & CPUMCTX_INHIBIT_NMI) != 0; 2057 } 2058 2059 /** 2060 * Extended version of CPUMAreInterruptsInhibitedByNmi() that takes VMX non-root 2061 * mode into account when check whether interrupts are inhibited by NMI. 2062 * 2063 * @retval true if interrupts are inhibited by NMI handling. 2064 * @retval false if interrupts are not inhibited by NMI handling. 2065 * @param pCtx Current guest CPU context. 2066 */ 2067 DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmiEx(PCCPUMCTX pCtx) 2068 { 2069 /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */ 2070 if ( !CPUMIsGuestInVmxNonRootMode(pCtx) 2071 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI)) 2072 return CPUMAreInterruptsInhibitedByNmi(pCtx); 2073 return CPUMIsGuestVmxVirtNmiBlocking(pCtx); 2074 } 2075 2076 /** 2077 * Marks interrupts, include NMIs, as inhibited by pending NMI delivery. 2078 * 2079 * @param pCtx Current guest CPU context. 2080 */ 2081 DECLINLINE(void) CPUMSetInterruptInhibitingByNmi(PCPUMCTX pCtx) 2082 { 2083 pCtx->fInhibit |= CPUMCTX_INHIBIT_NMI; 2084 } 2085 2086 /** 2087 * Extended version of CPUMSetInterruptInhibitingByNmi() that takes VMX non-root 2088 * mode into account when marking interrupts as inhibited by NMI. 2089 * 2090 * @param pVCpu The cross context virtual CPU structure. 2091 */ 2092 DECLINLINE(void) CPUMSetInterruptInhibitingByNmiEx(PCPUMCTX pCtx) 2093 { 2094 /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */ 2095 if ( !CPUMIsGuestInVmxNonRootMode(pCtx) 2096 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI)) 2097 CPUMSetInterruptInhibitingByNmi(pCtx); 2098 else 2099 CPUMSetGuestVmxVirtNmiBlocking(pCtx, true); 2100 } 2101 2102 /** 2103 * Marks interrupts, include NMIs, as no longer inhibited by pending NMI 2104 * delivery. 
2105 * 2106 * @param pCtx Current guest CPU context. 2107 */ 2108 DECLINLINE(void) CPUMClearInterruptInhibitingByNmi(PCPUMCTX pCtx) 2109 { 2110 pCtx->fInhibit &= ~CPUMCTX_INHIBIT_NMI; 2111 } 2112 2113 /** 2114 * Extended version of CPUMClearInterruptInhibitingByNmi() that takes VMX 2115 * non-root mode into account when doing the updating. 2116 * 2117 * @param pVCpu The cross context virtual CPU structure. 2118 * @param fInhibited The new state. 2119 */ 2120 DECLINLINE(void) CPUMClearInterruptInhibitingByNmiEx(PCPUMCTX pCtx) 2121 { 2122 /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */ 2123 if ( !CPUMIsGuestInVmxNonRootMode(pCtx) 2124 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI)) 2125 CPUMClearInterruptInhibitingByNmi(pCtx); 2126 else 2127 CPUMSetGuestVmxVirtNmiBlocking(pCtx, false); 2128 } 2129 2130 /** 2131 * Update whether interrupts, include NMIs, are inhibited by pending NMI 2132 * delivery. 2133 * 2134 * @param pCtx Current guest CPU context. 2135 * @param fInhibited The new state. 2136 */ 2137 DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmi(PCPUMCTX pCtx, bool fInhibited) 2138 { 2139 if (!fInhibited) 2140 pCtx->fInhibit &= ~CPUMCTX_INHIBIT_NMI; 2141 else 2142 pCtx->fInhibit |= CPUMCTX_INHIBIT_NMI; 2143 } 2144 2145 /** 2146 * Extended version of CPUMUpdateInterruptInhibitingByNmi() that takes VMX 2147 * non-root mode into account when doing the updating. 2148 * 2149 * @param pVCpu The cross context virtual CPU structure. 2150 * @param fInhibited The new state. 2151 */ 2152 DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmiEx(PCPUMCTX pCtx, bool fInhibited) 2153 { 2154 /* 2155 * Set the state of guest-NMI blocking in any of the following cases: 2156 * - We're not executing a nested-guest. 2157 * - We're executing an SVM nested-guest[1]. 2158 * - We're executing a VMX nested-guest without virtual-NMIs enabled. 2159 * 2160 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking. 2161 * SVM hypervisors must track NMI blocking themselves by intercepting 2162 * the IRET instruction after injection of an NMI. 2163 */ 2164 if ( !CPUMIsGuestInVmxNonRootMode(pCtx) 2165 || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI)) 2166 CPUMUpdateInterruptInhibitingByNmi(pCtx, fInhibited); 2167 /* 2168 * Set the state of virtual-NMI blocking, if we are executing a 2169 * VMX nested-guest with virtual-NMIs enabled. 2170 */ 2171 else 2172 CPUMSetGuestVmxVirtNmiBlocking(pCtx, fInhibited); 2173 } 2174 1909 2175 1910 2176 /** … … 2767 3033 CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED, 2768 3034 CPUMINTERRUPTIBILITY_INT_DISABLED, 2769 CPUMINTERRUPTIBILITY_INT_INHIBITED, 3035 CPUMINTERRUPTIBILITY_INT_INHIBITED, /**< @todo rename as it inhibits NMIs too. */ 2770 3036 CPUMINTERRUPTIBILITY_NMI_INHIBIT, 2771 3037 CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT, … … 2775 3041 2776 3042 VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu); 2777 VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu);2778 VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock);2779 3043 2780 3044 /** @name Typical scalable bus frequency values. -
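The inline functions above are the replacement for the VMCPU_FF_INHIBIT_INTERRUPTS and VMCPU_FF_BLOCK_NMIS force-flags retired elsewhere in this changeset. A minimal usage sketch follows; the two wrapper functions (emulatedSti, canInjectExternalInterrupt) are hypothetical, only the CPUM calls and CPUMCTX fields are from this changeset:

    /* Hypothetical caller sketch around the new CPUM inline API. */
    static void emulatedSti(PVMCPUCC pVCpu)
    {
        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
        pCtx->rflags.Bits.u1IF = 1;
        pCtx->rip += 1;                   /* advance past the STI opcode first */
        CPUMSetInInterruptShadow(pCtx);   /* records pCtx->rip in uRipInhibitInt */
    }

    static bool canInjectExternalInterrupt(PVMCPUCC pVCpu)
    {
        PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
        return pCtx->rflags.Bits.u1IF
            && !CPUMIsInInterruptShadowWithUpdate(pCtx) /* clears a stale shadow */
            && !CPUMAreInterruptsInhibitedByNmi(pCtx);
    }

Because the setter records the RIP of the instruction following STI/MOV SS/POP SS, the shadow expires automatically once RIP moves on; CPUMIsInInterruptShadowWithUpdate merely writes that expiry back into fInhibit.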
trunk/include/VBox/vmm/cpum.mac
r97150 r97178 150 150 .gs.u32Limit resd 1 151 151 .gs.Attr resd 1 152 153 .ldtr.Sel resw 1 154 .ldtr.PaddingSel resw 1 155 .ldtr.ValidSel resw 1 156 .ldtr.fFlags resw 1 157 .ldtr.u64Base resq 1 158 .ldtr.u32Limit resd 1 159 .ldtr.Attr resd 1 160 .tr.Sel resw 1 161 .tr.PaddingSel resw 1 162 .tr.ValidSel resw 1 163 .tr.fFlags resw 1 164 .tr.u64Base resq 1 165 .tr.u32Limit resd 1 166 .tr.Attr resd 1 167 152 168 .eip resq 1 153 169 .eflags resq 1 … … 217 233 .gs.u32Limit resd 1 218 234 .gs.Attr resd 1 219 .eip resq 1220 .eflags resq 1221 .cr0 resq 1222 .cr2 resq 1223 .cr3 resq 1224 .cr4 resq 1225 .dr resq 8226 .gdtrPadding resw 3227 .gdtr resw 0228 .gdtr.cbGdt resw 1229 .gdtr.pGdt resq 1230 .idtrPadding resw 3231 .idtr resw 0232 .idtr.cbIdt resw 1233 .idtr.pIdt resq 1234 235 .ldtr.Sel resw 1 235 236 .ldtr.PaddingSel resw 1 … … 246 247 .tr.u32Limit resd 1 247 248 .tr.Attr resd 1 249 .eip resq 1 250 .eflags resq 1 251 .fInhibit resb 1 252 alignb 8 253 .uRipInhibitInt resq 1 254 .cr0 resq 1 255 .cr2 resq 1 256 .cr3 resq 1 257 .cr4 resq 1 258 .dr resq 8 259 .gdtrPadding resw 3 260 .gdtr resw 0 261 .gdtr.cbGdt resw 1 262 .gdtr.pGdt resq 1 263 .idtrPadding resw 3 264 .idtr resw 0 265 .idtr.cbIdt resw 1 266 .idtr.pIdt resq 1 248 267 .SysEnter.cs resb 8 249 268 .SysEnter.eip resb 8 … … 256 275 .msrSFMASK resb 8 257 276 .msrKERNELGSBASE resb 8 258 .uMsrPadding0 resb 8259 277 260 278 alignb 8 … … 323 341 .hwvirt.enmHwvirt resd 1 324 342 .hwvirt.fGif resb 1 325 alignb 8326 .hwvirt.f LocalForcedActionsresd 1343 alignb 4 344 .hwvirt.fSavedInhibit resd 1 327 345 alignb 64 328 346 endstruc -
trunk/include/VBox/vmm/cpumctx.h
r97150 r97178 266 266 CPUMSELREG gs; 267 267 /** @} */ 268 269 CPUMSELREG ldtr; 270 CPUMSELREG tr; 268 271 269 272 /** The program counter. */ … … 399 402 } CPUM_UNION_NM(s); 400 403 404 /** The task register. 405 * Only the guest context uses all the members. */ 406 CPUMSELREG ldtr; 407 /** The task register. 408 * Only the guest context uses all the members. */ 409 CPUMSELREG tr; 410 401 411 /** The program counter. */ 402 412 union … … 416 426 /** @} */ /*(CPUMCTXCORE)*/ 417 427 428 /** Interrupt & exception inhibiting (CPUMCTX_INHIBIT_XXX). */ 429 uint8_t fInhibit; 430 uint8_t abPadding[7]; 431 /** The RIP value fInhibit is/was valid for. */ 432 uint64_t uRipInhibitInt; 418 433 419 434 /** @name Control registers. … … 422 437 uint64_t cr2; 423 438 uint64_t cr3; 424 /** @todo the 4 PAE PDPE registers. See PGMCPU::aGstPaePdpeRegs. */425 439 uint64_t cr4; 440 /** @todo Add the 4 PAE PDPE registers. See PGMCPU::aGstPaePdpeRegs. */ 426 441 /** @} */ 427 442 … … 445 460 /** Interrupt Descriptor Table register. */ 446 461 VBOXIDTR idtr; 447 448 /** The task register.449 * Only the guest context uses all the members. */450 CPUMSELREG ldtr;451 /** The task register.452 * Only the guest context uses all the members. */453 CPUMSELREG tr;454 462 455 463 /** The sysenter msr registers. … … 466 474 uint64_t msrSFMASK; /**< syscall flag mask. */ 467 475 uint64_t msrKERNELGSBASE; /**< swapgs exchange value. */ 468 uint64_t uMsrPadding0; /**< no longer used (used to hold a copy of APIC base MSR). */469 476 /** @} */ 470 477 471 /** 0x228 - Externalized state tracker, CPUMCTX_EXTRN_XXX. */ 478 /** 0x230 - Externalized state tracker, CPUMCTX_EXTRN_XXX. 479 * @todo Move up after uRipInhibitInt after fInhibit moves into RFLAGS. 480 * That will put this in the same cacheline as RIP, RFLAGS and CR0 481 * which are typically always imported and exported again during an 482 * VM exit. */ 472 483 uint64_t fExtrn; 473 484 474 uint64_t au64Unused[2];485 uint64_t u64Unused; 475 486 476 487 /** 0x240 - PAE PDPTEs. */ … … 634 645 /** 0x11134 - Global interrupt flag - AMD only (always true on Intel). */ 635 646 bool fGif; 636 bool afPadding1[3]; 637 /** 0x11138 - A subset of guest force flags that are saved while running the 638 * nested-guest. */ 639 #ifdef VMCPU_WITH_64_BIT_FFS 640 uint64_t fLocalForcedActions; 641 #else 642 uint32_t fLocalForcedActions; 643 uint32_t fPadding; 644 #endif 645 #if 0 646 /** 0x11140 - Pad to 64 byte boundary. */ 647 uint8_t abPadding0[8+16+32]; 648 #endif 647 /** 0x11135 - Padding. */ 648 bool afPadding0[3]; 649 /** 0x11138 - A subset of guest inhibit flags (CPUMCTX_INHIBIT_XXX) that are 650 * saved while running the nested-guest. */ 651 uint32_t fSavedInhibit; 652 /** 0x1113c - Pad to 64 byte boundary. */ 653 uint8_t abPadding1[4]; 649 654 } hwvirt; 650 655 } CPUMCTX; … … 656 661 AssertCompileSizeAlignment(CPUMCTX, 16); 657 662 AssertCompileSizeAlignment(CPUMCTX, 8); 658 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax, 0); 659 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rcx, 8); 660 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdx, 16); 661 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbx, 24); 662 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsp, 32); 663 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbp, 40); 664 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) 
rsi, 48); 665 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdi, 56); 666 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r8, 64); 667 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r9, 72); 668 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r10, 80); 669 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r11, 88); 670 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r12, 96); 671 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r13, 104); 672 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r14, 112); 673 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r15, 120); 674 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, 128); 675 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) cs, 152); 676 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ss, 176); 677 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ds, 200); 678 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) fs, 224); 679 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) gs, 248); 680 AssertCompileMemberOffset(CPUMCTX, rip, 272); 681 AssertCompileMemberOffset(CPUMCTX, rflags, 280); 682 AssertCompileMemberOffset(CPUMCTX, cr0, 288); 683 AssertCompileMemberOffset(CPUMCTX, cr2, 296); 684 AssertCompileMemberOffset(CPUMCTX, cr3, 304); 685 AssertCompileMemberOffset(CPUMCTX, cr4, 312); 686 AssertCompileMemberOffset(CPUMCTX, dr, 320); 687 AssertCompileMemberOffset(CPUMCTX, gdtr, 384+6); 688 AssertCompileMemberOffset(CPUMCTX, idtr, 400+6); 689 AssertCompileMemberOffset(CPUMCTX, ldtr, 416); 690 AssertCompileMemberOffset(CPUMCTX, tr, 440); 691 AssertCompileMemberOffset(CPUMCTX, SysEnter, 464); 692 AssertCompileMemberOffset(CPUMCTX, msrEFER, 488); 693 AssertCompileMemberOffset(CPUMCTX, msrSTAR, 496); 694 AssertCompileMemberOffset(CPUMCTX, msrPAT, 504); 695 AssertCompileMemberOffset(CPUMCTX, msrLSTAR, 512); 696 AssertCompileMemberOffset(CPUMCTX, msrCSTAR, 520); 697 AssertCompileMemberOffset(CPUMCTX, msrSFMASK, 528); 698 AssertCompileMemberOffset(CPUMCTX, msrKERNELGSBASE, 536); 699 AssertCompileMemberOffset(CPUMCTX, aPaePdpes, 0x240); 700 AssertCompileMemberOffset(CPUMCTX, aXcr, 0x260); 701 AssertCompileMemberOffset(CPUMCTX, fXStateMask, 0x270); 702 AssertCompileMemberOffset(CPUMCTX, fUsedFpuGuest, 0x278); 703 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(u.) XState, 0x300); 704 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(u.) abXState, 0x300); 705 AssertCompileMemberAlignment(CPUMCTX, CPUM_UNION_NM(u.) XState, 0x100); 663 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax, 0x0000); 664 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rcx, 0x0008); 665 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdx, 0x0010); 666 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbx, 0x0018); 667 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsp, 0x0020); 668 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbp, 0x0028); 669 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsi, 0x0030); 670 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdi, 0x0038); 671 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) 
CPUM_STRUCT_NM(qw.) r8, 0x0040); 672 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r9, 0x0048); 673 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r10, 0x0050); 674 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r11, 0x0058); 675 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r12, 0x0060); 676 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r13, 0x0068); 677 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r14, 0x0070); 678 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r15, 0x0078); 679 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, 0x0080); 680 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) cs, 0x0098); 681 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ss, 0x00b0); 682 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ds, 0x00c8); 683 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) fs, 0x00e0); 684 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) gs, 0x00f8); 685 AssertCompileMemberOffset(CPUMCTX, ldtr, 0x0110); 686 AssertCompileMemberOffset(CPUMCTX, tr, 0x0128); 687 AssertCompileMemberOffset(CPUMCTX, rip, 0x0140); 688 AssertCompileMemberOffset(CPUMCTX, rflags, 0x0148); 689 AssertCompileMemberOffset(CPUMCTX, fInhibit, 0x0150); 690 AssertCompileMemberOffset(CPUMCTX, uRipInhibitInt, 0x0158); 691 AssertCompileMemberOffset(CPUMCTX, cr0, 0x0160); 692 AssertCompileMemberOffset(CPUMCTX, cr2, 0x0168); 693 AssertCompileMemberOffset(CPUMCTX, cr3, 0x0170); 694 AssertCompileMemberOffset(CPUMCTX, cr4, 0x0178); 695 AssertCompileMemberOffset(CPUMCTX, dr, 0x0180); 696 AssertCompileMemberOffset(CPUMCTX, gdtr, 0x01c0+6); 697 AssertCompileMemberOffset(CPUMCTX, idtr, 0x01d0+6); 698 AssertCompileMemberOffset(CPUMCTX, SysEnter, 0x01e0); 699 AssertCompileMemberOffset(CPUMCTX, msrEFER, 0x01f8); 700 AssertCompileMemberOffset(CPUMCTX, msrSTAR, 0x0200); 701 AssertCompileMemberOffset(CPUMCTX, msrPAT, 0x0208); 702 AssertCompileMemberOffset(CPUMCTX, msrLSTAR, 0x0210); 703 AssertCompileMemberOffset(CPUMCTX, msrCSTAR, 0x0218); 704 AssertCompileMemberOffset(CPUMCTX, msrSFMASK, 0x0220); 705 AssertCompileMemberOffset(CPUMCTX, msrKERNELGSBASE, 0x0228); 706 AssertCompileMemberOffset(CPUMCTX, aPaePdpes, 0x0240); 707 AssertCompileMemberOffset(CPUMCTX, aXcr, 0x0260); 708 AssertCompileMemberOffset(CPUMCTX, fXStateMask, 0x0270); 709 AssertCompileMemberOffset(CPUMCTX, fUsedFpuGuest, 0x0278); 710 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(u.) XState, 0x0300); 711 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(u.) abXState, 0x0300); 712 AssertCompileMemberAlignment(CPUMCTX, CPUM_UNION_NM(u.) XState, 0x0100); 706 713 /* Only do spot checks for hwvirt */ 707 AssertCompileMemberAlignment(CPUMCTX, hwvirt, 0x1000);714 AssertCompileMemberAlignment(CPUMCTX, hwvirt, 0x1000); 708 715 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.Vmcb, X86_PAGE_SIZE); 709 716 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) 
svm.abMsrBitmap, X86_PAGE_SIZE); … … 725 732 AssertCompileMemberOffset(CPUMCTX, hwvirt.enmHwvirt, 0x11130); 726 733 AssertCompileMemberOffset(CPUMCTX, hwvirt.fGif, 0x11134); 727 AssertCompileMemberOffset(CPUMCTX, hwvirt.f LocalForcedActions,0x11138);734 AssertCompileMemberOffset(CPUMCTX, hwvirt.fSavedInhibit, 0x11138); 728 735 AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs); 729 736 AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r0); … … 1021 1028 1022 1029 1030 /** @name CPUMCTX_INHIBIT_XXX - Interrupt inhibiting flags. 1031 * @{ */ 1032 /** Interrupt shadow following MOV SS or POP SS. 1033 * 1034 * When this in effect, both maskable and non-maskable interrupts are blocked 1035 * from delivery for one instruction. Same for certain debug exceptions too, 1036 * unlike the STI variant. 1037 * 1038 * It is implementation specific whether a sequence of two or more of these 1039 * instructions will have any effect on the instruction following the last one 1040 * of them. */ 1041 #define CPUMCTX_INHIBIT_SHADOW_SS UINT8_C(0x01) 1042 /** Interrupt shadow following STI. 1043 * Same as CPUMCTX_INHIBIT_SHADOW_SS but without blocking any debug exceptions. */ 1044 #define CPUMCTX_INHIBIT_SHADOW_STI UINT8_C(0x02) 1045 /** Mask combining STI and SS shadowing. */ 1046 #define CPUMCTX_INHIBIT_SHADOW (CPUMCTX_INHIBIT_SHADOW_SS | CPUMCTX_INHIBIT_SHADOW_STI) 1047 1048 /** Interrupts blocked by NMI delivery. This condition is cleared by IRET. 1049 * 1050 * Section "6.7 NONMASKABLE INTERRUPT (NMI)" in Intel SDM Vol 3A states that 1051 * "The processor also invokes certain hardware conditions to ensure that no 1052 * other interrupts, including NMI interrupts, are received until the NMI 1053 * handler has completed executing." This flag indicates that these 1054 * conditions are currently active. */ 1055 #define CPUMCTX_INHIBIT_NMI UINT8_C(0x04) 1056 /** @} */ 1057 1058 1023 1059 /** 1024 1060 * Additional guest MSRs (i.e. not part of the CPU context structure). -
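All three CPUMCTX_INHIBIT_XXX bits live in the new one-byte CPUMCTX::fInhibit field, but only the two shadow bits are qualified by CPUMCTX::uRipInhibitInt. A hypothetical helper restating the invariant (an illustration, not part of the change):

    /* Sanity: the combined mask really is the two shadow variants. */
    AssertCompile(CPUMCTX_INHIBIT_SHADOW == (CPUMCTX_INHIBIT_SHADOW_SS | CPUMCTX_INHIBIT_SHADOW_STI));

    /* Illustrative restatement of CPUMIsInInterruptShadow, minus the
     * CPUMCTX_EXTRN_RIP assertion. */
    static bool isShadowArmedAt(PCCPUMCTX pCtx, uint64_t rip)
    {
        return (pCtx->fInhibit & CPUMCTX_INHIBIT_SHADOW) /* SS or STI variant */
            && pCtx->uRipInhibitInt == rip;              /* expires once RIP moves */
        /* CPUMCTX_INHIBIT_NMI carries no RIP qualifier: it lasts until IRET. */
    }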
trunk/include/VBox/vmm/em.h
r96999 r97178 138 138 /** @} */ 139 139 140 VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC);141 VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu);142 VMMDECL(bool) EMIsInhibitInterruptsActive(PVMCPU pVCpu);143 140 VMMDECL(void) EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled); 144 141 VMMDECL(bool) EMAreHypercallInstructionsEnabled(PVMCPU pVCpu); -
trunk/include/VBox/vmm/vm.h
r96811 r97178 490 490 /** The bit number for VMCPU_FF_DBGF. */ 491 491 #define VMCPU_FF_DBGF_BIT 10 492 /** This action forces the VM to service any pending updates to CR3 (used only493 * by HM). */494 492 /** Hardware virtualized nested-guest interrupt pending. */ 495 493 #define VMCPU_FF_INTERRUPT_NESTED_GUEST RT_BIT_64(VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT) … … 523 521 /* 22 used to be VMCPU_FF_SELM_SYNC_GDT (raw-mode only). */ 524 522 /* 23 used to be VMCPU_FF_SELM_SYNC_LDT (raw-mode only). */ 525 /** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */ 526 #define VMCPU_FF_INHIBIT_INTERRUPTS RT_BIT_64(VMCPU_FF_INHIBIT_INTERRUPTS_BIT) 527 #define VMCPU_FF_INHIBIT_INTERRUPTS_BIT 24 528 /** Block injection of non-maskable interrupts to the guest. */ 529 #define VMCPU_FF_BLOCK_NMIS RT_BIT_64(VMCPU_FF_BLOCK_NMIS_BIT) 530 #define VMCPU_FF_BLOCK_NMIS_BIT 25 523 /* 24 used to be VMCPU_FF_INHIBIT_INTERRUPTS, which moved to CPUMCTX::fInhibit in v7.0.4. */ 524 /* 25 used to be VMCPU_FF_BLOCK_NMIS, which moved to CPUMCTX::fInhibit in v7.0.4. */ 531 525 /** Force return to Ring-3. */ 532 526 #define VMCPU_FF_TO_R3 RT_BIT_64(VMCPU_FF_TO_R3_BIT) … … 576 570 /** High priority VMCPU pre-execution actions. */ 577 571 #define VMCPU_FF_HIGH_PRIORITY_PRE_MASK ( VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \ 578 | VMCPU_FF_UPDATE_APIC | VMCPU_FF_ INHIBIT_INTERRUPTS | VMCPU_FF_DBGF \572 | VMCPU_FF_UPDATE_APIC | VMCPU_FF_DBGF \ 579 573 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \ 580 574 | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \ … … 584 578 #define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY ) 585 579 /** High priority VMCPU pre raw-mode execution mask. */ 586 #define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \ 587 | VMCPU_FF_INHIBIT_INTERRUPTS ) 580 #define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK ( VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL ) 588 581 589 582 /** High priority post-execution actions. */ -
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r97096 r97178 1943 1943 * or raw-mode). Hence we use the function below which handles the details. 1944 1944 */ 1945 if ( CPUMIsGuestPhysIntrEnabled(pVCpu) 1946 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS)) 1945 if ( pVCpu->cpum.s.Guest.fInhibit == 0 1946 || ( !(pVCpu->cpum.s.Guest.fInhibit & CPUMCTX_INHIBIT_NMI) 1947 && pVCpu->cpum.s.Guest.uRipInhibitInt != pVCpu->cpum.s.Guest.rip)) 1947 1948 { 1948 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest) 1949 || CPUMIsGuestVirtIntrEnabled(pVCpu)) 1950 return CPUMINTERRUPTIBILITY_UNRESTRAINED; 1951 1952 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */ 1953 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED; 1949 /** @todo OPT: this next call should be inlined! */ 1950 if (CPUMIsGuestPhysIntrEnabled(pVCpu)) 1951 { 1952 /** @todo OPT: type this out as it repeats tests. */ 1953 if ( !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest) 1954 || CPUMIsGuestVirtIntrEnabled(pVCpu)) 1955 return CPUMINTERRUPTIBILITY_UNRESTRAINED; 1956 1957 /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */ 1958 return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED; 1959 } 1960 return CPUMINTERRUPTIBILITY_INT_DISABLED; 1954 1961 } 1955 1962 … … 1962 1969 * See Intel spec. 25.4.1 "Event Blocking". 1963 1970 */ 1964 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 1965 return CPUMINTERRUPTIBILITY_NMI_INHIBIT; 1966 1967 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 1971 /** @todo r=bird: The above comment mixes up VMX root-mode and non-root. Section 1972 * 25.4.1 is only applicable to VMX non-root mode. In root mode / 1973 * non-VMX mode, I have not see any evidence in the intel manuals that 1974 * NMIs are not blocked when in an interrupt shadow. Section "6.7 1975 * NONMASKABLE INTERRUPT (NMI)" in SDM 3A seems pretty clear to me. 
1976 */ 1977 if (!(pVCpu->cpum.s.Guest.fInhibit & CPUMCTX_INHIBIT_NMI)) 1968 1978 return CPUMINTERRUPTIBILITY_INT_INHIBITED; 1969 1970 return CPUMINTERRUPTIBILITY_INT_DISABLED; 1979 return CPUMINTERRUPTIBILITY_NMI_INHIBIT; 1971 1980 } 1972 1981 return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT; … … 2002 2011 } 2003 2012 #endif 2004 }2005 2006 2007 /**2008 * Gets whether the guest (or nested-guest) is currently blocking delivery of NMIs.2009 *2010 * @returns @c true if NMIs are blocked, @c false otherwise.2011 * @param pVCpu The cross context virtual CPU structure.2012 */2013 VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu)2014 {2015 /*2016 * Return the state of guest-NMI blocking in any of the following cases:2017 * - We're not executing a nested-guest.2018 * - We're executing an SVM nested-guest[1].2019 * - We're executing a VMX nested-guest without virtual-NMIs enabled.2020 *2021 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.2022 * SVM hypervisors must track NMI blocking themselves by intercepting2023 * the IRET instruction after injection of an NMI.2024 */2025 if ( !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest)2026 || !CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.s.Guest, VMX_PIN_CTLS_VIRT_NMI))2027 return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);2028 2029 /*2030 * Return the state of virtual-NMI blocking, if we are executing a2031 * VMX nested-guest with virtual-NMIs enabled.2032 */2033 return CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.s.Guest);2034 }2035 2036 2037 /**2038 * Sets blocking delivery of NMIs to the guest.2039 *2040 * @param pVCpu The cross context virtual CPU structure.2041 * @param fBlock Whether NMIs are blocked or not.2042 */2043 VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock)2044 {2045 /*2046 * Set the state of guest-NMI blocking in any of the following cases:2047 * - We're not executing a nested-guest.2048 * - We're executing an SVM nested-guest[1].2049 * - We're executing a VMX nested-guest without virtual-NMIs enabled.2050 *2051 * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.2052 * SVM hypervisors must track NMI blocking themselves by intercepting2053 * the IRET instruction after injection of an NMI.2054 */2055 if ( !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest)2056 || !CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.s.Guest, VMX_PIN_CTLS_VIRT_NMI))2057 {2058 if (fBlock == VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))2059 { /* probably likely */ }2060 else if (fBlock)2061 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);2062 else2063 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);2064 return;2065 }2066 2067 /*2068 * Set the state of virtual-NMI blocking, if we are executing a2069 * VMX nested-guest with virtual-NMIs enabled.2070 */2071 return CPUMSetGuestVmxVirtNmiBlocking(&pVCpu->cpum.s.Guest, fBlock);2072 2013 } 2073 2014 -
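Boiling the rewritten CPUMGetGuestInterruptibility() down to its priority order gives roughly the ladder below. This is an illustrative simplification: the real function above also handles GIF (CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT) and nested-guest virtual interrupts (CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED):

    /* Simplified interruptibility ladder (GIF and nested cases omitted). */
    if (pCtx->fInhibit & CPUMCTX_INHIBIT_NMI)          /* blocks INTs and NMIs */
        return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
    if (   (pCtx->fInhibit & CPUMCTX_INHIBIT_SHADOW)   /* MOV SS / POP SS / STI */
        && pCtx->uRipInhibitInt == pCtx->rip)
        return CPUMINTERRUPTIBILITY_INT_INHIBITED;
    if (!pCtx->rflags.Bits.u1IF)
        return CPUMINTERRUPTIBILITY_INT_DISABLED;
    return CPUMINTERRUPTIBILITY_UNRESTRAINED;

Note how the real code tests fInhibit == 0 first, so the common no-inhibit case decides on the interrupt flag alone.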
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
r96407 r97178 77 77 Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED); 78 78 pVCpu->em.s.enmState = enmNewState; 79 }80 81 82 /**83 * Sets the PC for which interrupts should be inhibited.84 *85 * @param pVCpu The cross context virtual CPU structure.86 * @param PC The PC.87 */88 VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)89 {90 pVCpu->em.s.GCPtrInhibitInterrupts = PC;91 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);92 }93 94 95 /**96 * Gets the PC for which interrupts should be inhibited.97 *98 * There are a few instructions which inhibits or delays interrupts99 * for the instruction following them. These instructions are:100 * - STI101 * - MOV SS, r/m16102 * - POP SS103 *104 * @returns The PC for which interrupts should be inhibited.105 * @param pVCpu The cross context virtual CPU structure.106 *107 */108 VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)109 {110 return pVCpu->em.s.GCPtrInhibitInterrupts;111 }112 113 114 /**115 * Checks if interrupt inhibiting is enabled for the current instruction.116 *117 * @returns true if interrupts are inhibited, false if not.118 * @param pVCpu The cross context virtual CPU structure.119 */120 VMMDECL(bool) EMIsInhibitInterruptsActive(PVMCPU pVCpu)121 {122 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))123 return false;124 if (pVCpu->em.s.GCPtrInhibitInterrupts == CPUMGetGuestRIP(pVCpu))125 return true;126 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);127 return false;128 79 } 129 80 -
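For out-of-tree code still calling the three removed EM accessors, the replacements now live in cpum.h. A rough migration map (worth double-checking against the new inline functions; the direct field read for the getter is an assumption, as this changeset adds no dedicated CPUM getter):

    /* Old EM interface                    -> new CPUM interface
     * EMSetInhibitInterruptsPC(pVCpu, PC) -> CPUMSetInInterruptShadowEx(&pVCpu->cpum.GstCtx, PC)
     * EMIsInhibitInterruptsActive(pVCpu)  -> CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx)
     * EMGetInhibitInterruptsPC(pVCpu)     -> pVCpu->cpum.GstCtx.uRipInhibitInt (direct field read)
     */

The semantics carry over: EMIsInhibitInterruptsActive() compared the recorded PC against RIP and cleared the force-flag on mismatch, which is exactly what the WithUpdate variant now does with CPUMCTX_INHIBIT_SHADOW.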
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r97156 r97178 3660 3660 * Normally, NMI blocking is in effect whenever we inject an NMI. 3661 3661 */ 3662 bool fBlockNmi; 3663 if ( u8Vector == X86_XCPT_NMI 3664 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)) 3665 fBlockNmi = true; 3666 else 3667 fBlockNmi = false; 3662 bool fBlockNmi = u8Vector == X86_XCPT_NMI 3663 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT); 3668 3664 3669 3665 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX … … 3710 3706 * Set NMI blocking if necessary. 3711 3707 */ 3712 if ( fBlockNmi 3713 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 3714 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 3708 if (fBlockNmi) 3709 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx); 3715 3710 3716 3711 /* … … 9604 9599 */ 9605 9600 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW) 9606 && ! VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)9601 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) 9607 9602 && !TRPMHasTrap(pVCpu)) 9608 9603 { … … 9626 9621 { 9627 9622 rcStrict = iemVmxApicWriteEmulation(pVCpu); 9628 Assert(! VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));9623 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)); 9629 9624 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE)); 9630 9625 } … … 9633 9628 { 9634 9629 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */); 9635 Assert(! VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));9630 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)); 9636 9631 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF)); 9637 9632 } … … 9714 9709 if ( fExecuteInhibit 9715 9710 && rcStrict == VINF_SUCCESS 9716 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 9717 && EMIsInhibitInterruptsActive(pVCpu)) 9711 && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) 9718 9712 { 9719 9713 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock); … … 9759 9753 else if (pVCpu->iem.s.cActiveMappings > 0) 9760 9754 iemMemRollback(pVCpu); 9761 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */ 9755 /** @todo drop this after we bake this change into RIP advancing. */ 9756 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */ 9762 9757 } 9763 9758 … … 10017 10012 if ( fIntrEnabled 10018 10013 && TRPMHasTrap(pVCpu) 10019 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)10014 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) 10020 10015 { 10021 10016 uint8_t u8TrapNo; … … 10102 10097 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL 10103 10098 | VMCPU_FF_TLB_FLUSH 10104 | VMCPU_FF_INHIBIT_INTERRUPTS10105 | VMCPU_FF_BLOCK_NMIS10106 10099 | VMCPU_FF_UNHALT ); 10107 10100 … … 10284 10277 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL 10285 10278 | VMCPU_FF_TLB_FLUSH 10286 | VMCPU_FF_INHIBIT_INTERRUPTS10287 | VMCPU_FF_BLOCK_NMIS10288 10279 | VMCPU_FF_UNHALT ); 10289 10280 if (RT_LIKELY( ( ( !fCpu -
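The IEMAll.cpp hunks retire the force-flag juggling around NMI delivery. The old code only set VMCPU_FF_BLOCK_NMIS when it was not already set, presumably because force-flag updates are atomic operations; the new fInhibit bit is a plain OR, so the guard disappears. Condensed, illustrative form of the delivery-side bookkeeping:

    /* In iemRaiseXcptOrInt: delivering an NMI as a CPU exception inhibits
     * further interrupts and NMIs until the handler's IRET. */
    if (   u8Vector == X86_XCPT_NMI
        && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
        CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);

The interrupt-window checks in the execution loop likewise swap VMCPU_FF_INHIBIT_INTERRUPTS tests for CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx).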
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
r97126 r97178 3864 3864 IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize) 3865 3865 { 3866 bool fBlockingNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);3866 bool fBlockingNmi = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx); 3867 3867 3868 3868 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX … … 3913 3913 */ 3914 3914 if (fBlockingNmi) 3915 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);3915 CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx); 3916 3916 3917 3917 /* … … 4703 4703 { 4704 4704 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel); 4705 if (rcStrict == VINF_SUCCESS) 4706 { 4707 if (iSegReg == X86_SREG_SS) 4708 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip); 4709 } 4705 if (iSegReg == X86_SREG_SS && rcStrict == VINF_SUCCESS) 4706 CPUMSetInInterruptShadow(&pVCpu->cpum.GstCtx); 4710 4707 return rcStrict; 4711 4708 } … … 4765 4762 pVCpu->cpum.GstCtx.rsp = TmpRsp.u; 4766 4763 if (iSegReg == X86_SREG_SS) 4767 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);4764 CPUMSetInInterruptShadow(&pVCpu->cpum.GstCtx); 4768 4765 } 4769 4766 return rcStrict; … … 7515 7512 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 7516 7513 if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) 7517 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);7514 CPUMSetInInterruptShadow(&pVCpu->cpum.GstCtx); 7518 7515 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl)); 7519 7516 return VINF_SUCCESS; -
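The instruction emulation hunks show where the shadow is armed and where NMI blocking ends. Condensed, illustrative pairings (error handling omitted):

    /* MOV SS / POP SS: arm the shadow only if the segment load succeeded. */
    if (iSegReg == X86_SREG_SS && rcStrict == VINF_SUCCESS)
        CPUMSetInInterruptShadow(&pVCpu->cpum.GstCtx);

    /* STI: arm the shadow only on an actual IF 0 -> 1 transition. */
    if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF))
        CPUMSetInInterruptShadow(&pVCpu->cpum.GstCtx);

    /* IRET: ends the NMI blocking that was set up at NMI delivery. */
    if (CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
        CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);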
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp
r96821 r97178 245 245 pVmcbMemCtrl->IntCtrl.n.u8VTPR = pVmcbCtrl->IntCtrl.n.u8VTPR; /* V_TPR. */ 246 246 247 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadow. */ 248 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip) 247 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx)) /* Interrupt shadow. */ 248 pVmcbMemCtrl->IntShadow.n.u1IntShadow = 0; 249 else 249 250 { 250 251 pVmcbMemCtrl->IntShadow.n.u1IntShadow = 1; 251 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);252 252 LogFlow(("iemSvmVmexit: Interrupt shadow till %#RX64\n", pVCpu->cpum.GstCtx.rip)); 253 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); 253 254 } 254 else255 pVmcbMemCtrl->IntShadow.n.u1IntShadow = 0;256 255 257 256 /* … … 323 322 324 323 /* 325 * Restore the subset of force-flags that were preserved. 326 */ 327 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions) 328 { 329 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions); 330 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0; 331 } 324 * Restore the subset of the inhibit flags that were preserved. 325 */ 326 pVCpu->cpum.GstCtx.fInhibit |= pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit; 332 327 333 328 if (rcStrict == VINF_SUCCESS) … … 734 729 * preserve VMCPU_FF_INHIBIT_INTERRUPTS. 735 730 */ 736 pVCpu->cpum.GstCtx.hwvirt.f LocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;737 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);731 pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit = pVCpu->cpum.GstCtx.fInhibit & CPUMCTX_INHIBIT_NMI; 732 pVCpu->cpum.GstCtx.fInhibit &= ~CPUMCTX_INHIBIT_NMI; 738 733 739 734 /* … … 754 749 LogFlow(("iemSvmVmrun: setting interrupt shadow. inhibit PC=%#RX64\n", pVmcbNstGst->u64RIP)); 755 750 /** @todo will this cause trouble if the nested-guest is 64-bit but the guest is 32-bit? */ 756 EMSetInhibitInterruptsPC(pVCpu, pVmcbNstGst->u64RIP);751 CPUMSetInInterruptShadowEx(&pVCpu->cpum.GstCtx, pVmcbNstGst->u64RIP); 757 752 } 758 753 -
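On the SVM nested-guest side, the stashed force-flag subset (hwvirt.fLocalForcedActions) becomes a stashed inhibit subset (hwvirt.fSavedInhibit). The world-switch pairing, condensed from the hunks above (illustrative):

    /* VMRUN: remember the outer guest's NMI blocking and clear it for the
     * nested guest. */
    pCtx->hwvirt.fSavedInhibit = pCtx->fInhibit & CPUMCTX_INHIBIT_NMI;
    pCtx->fInhibit            &= ~CPUMCTX_INHIBIT_NMI;

    /* #VMEXIT: fold the saved subset back in. */
    pCtx->fInhibit |= pCtx->hwvirt.fSavedInhibit;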
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp
r97157 r97178 1188 1188 { 1189 1189 /* We shouldn't be called multiple times during VM-entry. */ 1190 Assert(pVCpu->cpum.GstCtx.hwvirt.f LocalForcedActions== 0);1190 Assert(pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit == 0); 1191 1191 1192 1192 /* MTF should not be set outside VMX non-root mode. */ … … 1221 1221 * the nested-guest. 1222 1222 */ 1223 pVCpu->cpum.GstCtx.hwvirt.f LocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;1223 pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit = pVCpu->cpum.GstCtx.fInhibit & CPUMCTX_INHIBIT_NMI; 1224 1224 } 1225 1225 … … 1232 1232 static void iemVmxVmexitRestoreNmiBlockingFF(PVMCPUCC pVCpu) RT_NOEXCEPT 1233 1233 { 1234 if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)1235 {1236 VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);1237 pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;1238 }1234 /** @todo r=bird: why aren't we clearing the nested guest flags first here? 1235 * If there is some other code doing that already, it would be great 1236 * to point to it here... */ 1237 pVCpu->cpum.GstCtx.fInhibit |= pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit; 1238 pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit = 0; 1239 1239 } 1240 1240 … … 1466 1466 else 1467 1467 { 1468 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))1468 if (CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx)) 1469 1469 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI; 1470 1470 } 1471 1471 1472 1472 /* Blocking-by-STI. */ 1473 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 1474 && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu)) 1473 if (!(pVCpu->cpum.GstCtx.fInhibit & CPUMCTX_INHIBIT_SHADOW)) 1474 { /* probable */} 1475 else 1475 1476 { 1476 1477 /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI 1477 1478 * currently. */ 1478 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI; 1479 if (pVCpu->cpum.GstCtx.rip == pVCpu->cpum.GstCtx.uRipInhibitInt) 1480 pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI; /** @todo r=bird: Why the STI one? MOVSS seems to block more and the one to use. */ 1479 1481 1480 1482 /* Clear inhibition unconditionally since we've ensured it isn't set prior to executing VMLAUNCH/VMRESUME. */ 1481 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);1483 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); 1482 1484 } 1483 1485 /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */ … … 2564 2566 2565 2567 /* Clear any saved NMI-blocking state so we don't assert on next VM-entry (if it was in effect on the previous one). */ 2566 pVCpu->cpum.GstCtx.hwvirt.f LocalForcedActions &= ~VMCPU_FF_BLOCK_NMIS;2568 pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit &= ~CPUMCTX_INHIBIT_NMI; 2567 2569 } 2568 2570 else … … 6996 6998 if ( !fEntryVectoring 6997 6999 && (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))) 6998 EMSetInhibitInterruptsPC(pVCpu, pVmcs->u64GuestRip.u);7000 CPUMSetInInterruptShadowEx(&pVCpu->cpum.GstCtx, pVmcs->u64GuestRip.u); 6999 7001 else 7000 Assert(! VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));7002 Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)); 7001 7003 7002 7004 /* NMI blocking. 
*/ … … 7008 7010 { 7009 7011 pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false; 7010 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 7011 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 7012 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx); 7012 7013 } 7013 7014 } … … 7729 7730 /** @todo Distinguish block-by-MovSS from block-by-STI. Currently we 7730 7731 * use block-by-STI here which is not quite correct. */ 7731 if ( !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 7732 || pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu)) 7732 if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx)) 7733 7733 { /* likely */ } 7734 7734 else -
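For VMX nested guests the same state maps to and from the VMCS guest interruptibility-state field. A condensed, illustrative sketch of the export (VM-exit) direction; the real code above additionally handles virtual-NMI pin controls and notes that MOVSS vs. STI blocking is not yet distinguished:

    /* VM-exit: translate CPUMCTX inhibits into interruptibility bits. */
    if (CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
        pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
    if (   (pVCpu->cpum.GstCtx.fInhibit & CPUMCTX_INHIBIT_SHADOW)
        && pVCpu->cpum.GstCtx.rip == pVCpu->cpum.GstCtx.uRipInhibitInt)
        pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;

On VM-entry the reverse mapping uses CPUMSetInInterruptShadowEx(&pVCpu->cpum.GstCtx, pVmcs->u64GuestRip.u), since the guest RIP taken from the VMCS is what the shadow must be pinned to.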
trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h
r96407 r97178 368 368 { 369 369 ADD_REG64(WHvRegisterInterruptState, 0); 370 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 371 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip) 370 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) 372 371 aValues[iReg - 1].InterruptState.InterruptShadow = 1; 373 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS)) 374 aValues[iReg - 1].InterruptState.NmiMasked = 1; 372 aValues[iReg - 1].InterruptState.NmiMasked = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx); 375 373 } 376 374 else if (fWhat & CPUMCTX_EXTRN_INHIBIT_INT) 377 375 { 378 376 if ( pVCpu->nem.s.fLastInterruptShadow 379 || ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 380 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)) 377 || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) 381 378 { 382 379 ADD_REG64(WHvRegisterInterruptState, 0); 383 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 384 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip) 380 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) 385 381 aValues[iReg - 1].InterruptState.InterruptShadow = 1; 386 382 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */ … … 991 987 992 988 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_INT)) 993 { 994 pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow; 995 if (aValues[iReg].InterruptState.InterruptShadow) 996 EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64); 997 else 998 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 999 } 989 pVCpu->nem.s.fLastInterruptShadow = CPUMUpdateInterruptShadowEx(&pVCpu->cpum.GstCtx, 990 aValues[iReg].InterruptState.InterruptShadow, 991 aValues[iReg + 1].Reg64); 1000 992 1001 993 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_NMI)) 1002 { 1003 if (aValues[iReg].InterruptState.NmiMasked) 1004 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 1005 else 1006 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 1007 } 994 CPUMUpdateInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx, aValues[iReg].InterruptState.NmiMasked); 1008 995 1009 996 fWhat |= CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI; … … 1279 1266 pVCpu->cpum.GstCtx.rip += pExitCtx->InstructionLength; 1280 1267 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0; 1281 1282 /* Update interrupt inhibition. 
*/ 1283 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 1284 { /* likely */ } 1285 else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu)) 1286 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 1268 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); 1287 1269 } 1288 1270 … … 1471 1453 Assert( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT)) 1472 1454 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT)); 1455 1473 1456 NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pExitCtx->Cs); 1474 1457 pVCpu->cpum.GstCtx.rip = pExitCtx->Rip; 1475 1458 pVCpu->cpum.GstCtx.rflags.u = pExitCtx->Rflags; 1476 1477 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow; 1478 if (!pExitCtx->ExecutionState.InterruptShadow) 1479 { 1480 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 1481 { /* likely */ } 1482 else 1483 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 1484 } 1485 else 1486 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip); 1487 1459 pVCpu->nem.s.fLastInterruptShadow = CPUMUpdateInterruptShadowEx(&pVCpu->cpum.GstCtx, 1460 pExitCtx->ExecutionState.InterruptShadow, 1461 pExitCtx->Rip); 1488 1462 APICSetTpr(pVCpu, pExitCtx->Cr8 << 4); 1489 1463 … … 2081 2055 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 2082 2056 pCtx->rip += cb; 2083 2084 /* Update interrupt shadow. */ 2085 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 2086 && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) 2087 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 2057 /** @todo Why not clear RF too? */ 2058 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); 2088 2059 } 2089 2060 … … 2450 2421 return rcStrict; 2451 2422 } 2452 bool const fInhibitInterrupts = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)2453 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip;2454 2423 2455 2424 /* … … 2458 2427 if (fPendingNmi) 2459 2428 { 2460 if ( ! fInhibitInterrupts2461 && ! VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))2429 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) 2430 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx)) 2462 2431 { 2463 2432 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI"); … … 2479 2448 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)) 2480 2449 { 2481 if ( !fInhibitInterrupts 2450 /** @todo check NMI inhibiting here too! */ 2451 if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) 2482 2452 && pVCpu->cpum.GstCtx.rflags.Bits.u1IF) 2483 2453 { -
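The NEM/Windows hunks illustrate why CPUMUpdateInterruptShadowEx() returns its fInhibited argument: the Hyper-V interrupt state can be imported and mirrored into nem.s.fLastInterruptShadow in one expression. From the import path above:

    /* Import WHvRegisterInterruptState; the following register value holds
     * the RIP the shadow is valid for. */
    pVCpu->nem.s.fLastInterruptShadow =
        CPUMUpdateInterruptShadowEx(&pVCpu->cpum.GstCtx,
                                    aValues[iReg].InterruptState.InterruptShadow,
                                    aValues[iReg + 1].Reg64);
    CPUMUpdateInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx,
                                       aValues[iReg].InterruptState.NmiMasked);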
trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h
r97159 r97178 1698 1698 static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu) 1699 1699 { 1700 uint32_t fIntrState = 0; 1701 1700 1702 /* 1701 1703 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS. 1702 1704 */ 1703 uint32_t fIntrState = 0; 1704 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 1705 if (CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx)) 1705 1706 { 1706 1707 /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */ 1707 1708 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS); 1708 1709 1709 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 1710 if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu)) 1711 { 1712 if (pCtx->eflags.Bits.u1IF) 1713 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI; 1714 else 1715 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS; 1716 } 1717 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 1718 { 1719 /* 1720 * We can clear the inhibit force flag as even if we go back to the recompiler 1721 * without executing guest code in VT-x, the flag's condition to be cleared is 1722 * met and thus the cleared state is correct. 1723 */ 1724 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 1725 } 1710 /** @todo r=bird: This heuristic isn't all that correct, it would be safer 1711 * to always use MOVSS here. Best deal would be to track both bits in CPUM. */ 1712 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF) 1713 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI; 1714 else 1715 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS; 1726 1716 } 1727 1717 … … 1729 1719 * Check if we should inhibit NMI delivery. 1730 1720 */ 1731 if (CPUM IsGuestNmiBlocking(pVCpu))1721 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx)) 1732 1722 fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI; 1733 1723 … … 3380 3370 3381 3371 /** 3372 * Worker for vmxHCImportGuestIntrState that handles the case where any of the 3373 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set. 3374 */ 3375 DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState) 3376 { 3377 /* 3378 * We must import RIP here to set our EM interrupt-inhibited state. 3379 * We also import RFLAGS as our code that evaluates pending interrupts 3380 * before VM-entry requires it. 3381 */ 3382 vmxHCImportGuestRip(pVCpu); 3383 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo); 3384 3385 CPUMUpdateInterruptShadowEx(&pVCpu->cpum.GstCtx, 3386 RT_BOOL(fGstIntState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)), 3387 pVCpu->cpum.GstCtx.rip); 3388 CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI)); 3389 } 3390 3391 3392 /** 3382 3393 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU 3383 3394 * context. … … 3399 3410 if (!u32Val) 3400 3411 { 3401 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 3402 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 3403 /** @todo r=bird: This is a call which isn't necessary most of the time, this 3404 * path is taken on basically all exits. Try find a way to eliminating it. */ 3405 CPUMSetGuestNmiBlocking(pVCpu, false); 3412 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); 3413 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx); 3406 3414 } 3407 3415 else 3408 { 3409 /** @todo consider this branch for non-inlining. */ 3410 /* 3411 * We must import RIP here to set our EM interrupt-inhibited state. 
3412 * We also import RFLAGS as our code that evaluates pending interrupts 3413 * before VM-entry requires it. 3414 */ 3415 vmxHCImportGuestRip(pVCpu); 3416 vmxHCImportGuestRFlags(pVCpu, pVmcsInfo); 3417 3418 if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)) 3419 EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip); 3420 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 3421 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 3422 3423 bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI); 3424 CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking); 3425 } 3416 vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val); 3426 3417 } 3427 3418 … … 4927 4918 */ 4928 4919 if ( !VCPU_2_VMXSTATE(pVCpu).Event.fPending 4929 && ! VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))4920 && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx)) 4930 4921 { 4931 4922 /** @todo SMI. SMIs take priority over NMIs. */ … … 4947 4938 * the nested-hypervisor is using virtual-NMIs. 4948 4939 */ 4949 if (! VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))4940 if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx)) 4950 4941 { 4951 4942 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX … … 4961 4952 return VINF_SUCCESS; 4962 4953 } 4963 elseif (!fIsNestedGuest)4954 if (!fIsNestedGuest) 4964 4955 vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo); 4965 4956 } … … 6163 6154 pVCpu->cpum.GstCtx.rip += cbInstr; 6164 6155 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP); 6165 6166 /* Update interrupt inhibition. */ 6167 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 6168 && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu)) 6169 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6156 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); 6157 /** @todo clear RF? */ 6170 6158 } 6171 6159 … … 6291 6279 && enmRaise == IEMXCPTRAISE_PREV_EVENT 6292 6280 && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI) 6293 && CPUMIsGuestNmiBlocking(pVCpu)) 6294 { 6295 CPUMSetGuestNmiBlocking(pVCpu, false); 6296 } 6281 && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx)) 6282 CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx); 6297 6283 6298 6284 switch (enmRaise) … … 6376 6362 } 6377 6363 else if ( (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI) 6378 && !CPUM IsGuestNmiBlocking(pVCpu))6364 && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx)) 6379 6365 { 6380 6366 if ( VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo) … … 6389 6375 * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception". 6390 6376 */ 6391 CPUMSet GuestNmiBlocking(pVCpu, true);6377 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx); 6392 6378 Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason)); 6393 6379 } … … 6406 6392 if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual)) 6407 6393 { 6408 CPUMSet GuestNmiBlocking(pVCpu, true);6394 CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx); 6409 6395 Log4Func(("Set NMI blocking. 
uExitReason=%u\n", pVmxTransient->uExitReason)); 6410 6396 } … … 7654 7640 } 7655 7641 7656 Assert(!CPUM IsGuestNmiBlocking(pVCpu));7642 Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx)); 7657 7643 7658 7644 /* … … 7666 7652 if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI) 7667 7653 { 7668 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 7669 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 7654 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); 7670 7655 7671 7656 fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI; -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r96811 r97178
2692 2692 
2693 2693     if (fWhat & CPUMCTX_EXTRN_INHIBIT_INT)
2694     {
2695         if (pVmcbCtrl->IntShadow.n.u1IntShadow)
2696             EMSetInhibitInterruptsPC(pVCpu, pVmcbGuest->u64RIP);
2697         else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2698             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2699     }
2694         CPUMUpdateInterruptShadowEx(pCtx, pVmcbCtrl->IntShadow.n.u1IntShadow, pVmcbGuest->u64RIP);
2700 2695 
2701 2696     if (fWhat & CPUMCTX_EXTRN_RIP)
… …
3504 3499 
3505 3500 /**
3506  * Checks if the guest (or nested-guest) has an interrupt shadow active right
3507  * now.
3508  *
3509  * @returns @c true if the interrupt shadow is active, @c false otherwise.
3510  * @param   pVCpu   The cross context virtual CPU structure.
3511  *
3512  * @remarks No-long-jump zone!!!
3513  * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
3514  */
3515 static bool hmR0SvmIsIntrShadowActive(PVMCPUCC pVCpu)
3516 {
3517     /*
3518      * Instructions like STI and MOV SS inhibit interrupts till the next instruction
3519      * completes. Check if we should inhibit interrupts or clear any existing
3520      * interrupt inhibition.
3521      */
3522     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
3523     {
3524         if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
3525         {
3526             /*
3527              * We can clear the inhibit force flag as even if we go back to the recompiler
3528              * without executing guest code in AMD-V, the flag's condition to be cleared is
3529              * met and thus the cleared state is correct.
3530              */
3531             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
3532             return false;
3533         }
3534         return true;
3535     }
3536     return false;
3537 }
3538 
3539 
3540 /**
3541 3501  * Sets the virtual interrupt intercept control in the VMCB.
3542 3502  *
… …
3612 3572 
3613 3573     bool const fGif = CPUMGetGuestGif(pCtx);
3614     bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
3615     bool const fBlockNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3574     bool const fIntShadow = CPUMIsInInterruptShadowWithUpdate(pCtx);
3575     bool const fBlockNmi = CPUMAreInterruptsInhibitedByNmi(pCtx);
3616 3576 
3617 3577     Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntPending=%RTbool fNmiPending=%RTbool\n",
… …
3734 3694     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
3735 3695 
3736     bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
3696     bool const fIntShadow = CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx);
3737 3697 #ifdef VBOX_STRICT
3738 3698     PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
… …
3782 3742      * VM-exit to determine the state.
3783 3743      */
3784     if (   Event.n.u3Type == SVM_EVENT_NMI
3785         && Event.n.u8Vector == X86_XCPT_NMI
3786         && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3787     {
3788         VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3789     }
3744     if (   Event.n.u3Type == SVM_EVENT_NMI
3745         && Event.n.u8Vector == X86_XCPT_NMI)
3746         CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
3790 3747 
3791 3748     /*
… …
3809 3766      * but we still need to intercept IRET in order to eventually clear NMI inhibition.
3810 3767      */
3811     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3768     if (CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
3812 3769         hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_IRET);
3813 3770 
… …
6711 6668     /* If we are re-injecting an NMI, clear NMI blocking. */
6712 6669     if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)
6713         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
6670         CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
6714 6671 
6715 6672     /* Determine a vectoring #PF condition, see comment in hmR0SvmExitXcptPF(). */
… …
6802 6759 DECLINLINE(void) hmR0SvmAdvanceRip(PVMCPUCC pVCpu, uint32_t cb)
6803 6760 {
6804     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
6805     pCtx->rip += cb;
6806 
6807     /* Update interrupt shadow. */
6808     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
6809         && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
6810         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
6761     pVCpu->cpum.GstCtx.rip += cb;
6762     CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
6763     /** @todo clear RF. */
6811 6764 }
6812 6765 
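The recurring pattern in these hunks: the old helper hmR0SvmIsIntrShadowActive() paired a per-VCPU force flag with a separately stored inhibit PC, while the replacement keeps the shadow in the guest context as a flag plus the RIP it was armed at, dropped lazily once RIP moves on. A minimal self-contained C model of that idea (toy types and names, not the VirtualBox API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the relevant CPUMCTX fields (names mimic the real ones). */
typedef struct TOYCTX
{
    uint64_t rip;            /* current guest RIP */
    uint64_t uRipInhibitInt; /* RIP the shadow was armed at */
    uint8_t  fInhibit;       /* inhibit flags */
} TOYCTX;

#define TOY_INHIBIT_SHADOW 0x01

/* Arm the shadow for the instruction following STI / MOV SS. */
static void toySetShadow(TOYCTX *pCtx)
{
    pCtx->fInhibit |= TOY_INHIBIT_SHADOW;
    pCtx->uRipInhibitInt = pCtx->rip;
}

/* Query the shadow, clearing it if RIP has already moved past it. */
static bool toyIsInShadowWithUpdate(TOYCTX *pCtx)
{
    if (!(pCtx->fInhibit & TOY_INHIBIT_SHADOW))
        return false;
    if (pCtx->uRipInhibitInt == pCtx->rip)
        return true;
    pCtx->fInhibit &= ~TOY_INHIBIT_SHADOW; /* stale shadow: drop it lazily */
    return false;
}

int main(void)
{
    TOYCTX Ctx = { .rip = 0x1000 };
    toySetShadow(&Ctx);                                        /* e.g. after emulating STI */
    printf("at 0x1000: %d\n", toyIsInShadowWithUpdate(&Ctx));  /* 1: shadow active */
    Ctx.rip += 1;                                              /* one instruction executed */
    printf("at 0x1001: %d\n", toyIsInShadowWithUpdate(&Ctx));  /* 0: shadow dropped */
    return 0;
}

The lazy clear in the query is what lets hmR0SvmAdvanceRip() shrink to a plain clear call: nothing has to chase the flag down once the instruction after STI / MOV SS has executed.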
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r97150 r97178
158 158  */
159 159 #define CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID    RT_BIT(12)
160 
161 
162 /** For saved state only: Block injection of non-maskable interrupts to the guest.
163  * @note This flag was moved to CPUMCTX::fInhibit in v7.0.2. */
164 #define CPUM_OLD_VMCPU_FF_BLOCK_NMIS            RT_BIT_64(25)
160 165 
161 166 
… …
2541 2546         SSMR3PutMem(pSSM, &pGstCtx->hwvirt.svm.abMsrBitmap[0], sizeof(pGstCtx->hwvirt.svm.abMsrBitmap));
2542 2547         SSMR3PutMem(pSSM, &pGstCtx->hwvirt.svm.abIoBitmap[0], sizeof(pGstCtx->hwvirt.svm.abIoBitmap));
2543             SSMR3PutU32(pSSM, pGstCtx->hwvirt.fLocalForcedActions);
2548             /* This is saved in the old VMCPUM_FF format.  Change if more flags are added. */
2549             SSMR3PutU32(pSSM, pGstCtx->hwvirt.fSavedInhibit & CPUMCTX_INHIBIT_NMI ? CPUM_OLD_VMCPU_FF_BLOCK_NMIS : 0);
2544 2550         SSMR3PutBool(pSSM, pGstCtx->hwvirt.fGif);
2545 2551     }
… …
2827 2833         SSMR3GetMem(pSSM, &pGstCtx->hwvirt.svm.abMsrBitmap[0], sizeof(pGstCtx->hwvirt.svm.abMsrBitmap));
2828 2834         SSMR3GetMem(pSSM, &pGstCtx->hwvirt.svm.abIoBitmap[0], sizeof(pGstCtx->hwvirt.svm.abIoBitmap));
2829             SSMR3GetU32(pSSM, &pGstCtx->hwvirt.fLocalForcedActions);
2835 
2836             uint32_t fSavedLocalFFs = 0;
2837             rc = SSMR3GetU32(pSSM, &fSavedLocalFFs);
2838             AssertRCReturn(rc, rc);
2839             Assert(fSavedLocalFFs == 0 || fSavedLocalFFs == CPUM_OLD_VMCPU_FF_BLOCK_NMIS);
2840             pGstCtx->hwvirt.fSavedInhibit = fSavedLocalFFs & CPUM_OLD_VMCPU_FF_BLOCK_NMIS ? CPUMCTX_INHIBIT_NMI : 0;
2841 
2830 2842         SSMR3GetBool(pSSM, &pGstCtx->hwvirt.fGif);
2831 2843     }
… …
4000 4012 
4001 4013     pHlp->pfnPrintf(pHlp, "VCPU[%u] hardware virtualization state:\n", pVCpu->idCpu);
4002         pHlp->pfnPrintf(pHlp, "fLocalForcedActions = %#RX32\n", pCtx->hwvirt.fLocalForcedActions);
4014         pHlp->pfnPrintf(pHlp, "fSavedInhibit       = %#RX32\n", pCtx->hwvirt.fSavedInhibit);
4003 4015     pHlp->pfnPrintf(pHlp, "In nested-guest hwvirt mode = %RTbool\n", CPUMIsGuestInNestedHwvirtMode(pCtx));
4004 4016 
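The CPUM.cpp hunks keep old saved states loadable: the word that used to be a raw copy of the per-VCPU force-action flags is reduced to the one bit that ever mattered there and translated to and from the new fSavedInhibit field via CPUM_OLD_VMCPU_FF_BLOCK_NMIS (bit 25). A sketch of that translation; the 0x02 value below is an invented stand-in for CPUMCTX_INHIBIT_NMI, whose real value lives in the headers:

#include <assert.h>
#include <stdint.h>

#define OLD_VMCPU_FF_BLOCK_NMIS  (UINT32_C(1) << 25) /* mirrors CPUM_OLD_VMCPU_FF_BLOCK_NMIS */
#define NEW_CTX_INHIBIT_NMI      UINT32_C(0x02)      /* stand-in for CPUMCTX_INHIBIT_NMI */

/* Save: encode the new state in the old 32-bit force-flag format. */
static uint32_t saveInhibitAsOldFFs(uint32_t fSavedInhibit)
{
    return fSavedInhibit & NEW_CTX_INHIBIT_NMI ? OLD_VMCPU_FF_BLOCK_NMIS : 0;
}

/* Load: accept only the one legacy bit and map it back. */
static uint32_t loadOldFFsAsInhibit(uint32_t fSavedLocalFFs)
{
    assert(fSavedLocalFFs == 0 || fSavedLocalFFs == OLD_VMCPU_FF_BLOCK_NMIS);
    return fSavedLocalFFs & OLD_VMCPU_FF_BLOCK_NMIS ? NEW_CTX_INHIBIT_NMI : 0;
}

int main(void)
{
    /* Round trip: the NMI-blocking bit survives save/load, everything else is dropped. */
    assert(loadOldFFsAsInhibit(saveInhibitAsOldFFs(NEW_CTX_INHIBIT_NMI)) == NEW_CTX_INHIBIT_NMI);
    assert(loadOldFFsAsInhibit(saveInhibitAsOldFFs(0)) == 0);
    return 0;
}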
trunk/src/VBox/VMM/VMMR3/EM.cpp
r96894 r97178
47 47 *********************************************************************************************************************************/
48 48 #define LOG_GROUP LOG_GROUP_EM
49     #define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
49     #define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
50 50 #include <VBox/vmm/em.h>
51 51 #include <VBox/vmm/vmm.h>
… …
1721 1721      * The instruction following an emulated STI should *always* be executed!
1722 1722      *
1723      * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1723      * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
1724 1724      *       the eip is the same as the inhibited instr address.  Before we
1725 1725      *       are able to execute this instruction in raw mode (iret to
… …
1729 1729      *       unlikely, but such timing sensitive problem are not as rare as
1730 1730      *       you might think.
1731      */
1732     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1731      *
1732      * Note! This used to be a force action flag. Can probably ditch this code.
1733      */
1734     if (   CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1733 1735         && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1734 1736     {
1735         CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1736         if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1737         CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
1738         if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
1737 1739         {
1738             Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1739             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1740             CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
1741             Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
1742                  (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
1740 1743         }
1741 1744         else
1742             Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1745             Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1743 1746     }
1744 1747 
… …
1792 1795     Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1793 1796     bool fWakeupPending = false;
1794     if (   !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1797     if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
1798                                     | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
1799                                     | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1800         && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1795 1801         && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1796         && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)  /* Interrupt shadows block both NMIs and interrupts. */
1797         && !TRPMHasTrap(pVCpu))                                  /* An event could already be scheduled for dispatching. */
1798     {
1799         bool fInVmxNonRootMode;
1800         bool fInSvmHwvirtMode;
1801         bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
1802         if (fInNestedGuest)
1802         && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)         /* Interrupt shadows block both NMIs and interrupts. */
1803         /** @todo r=bird: But interrupt shadows probably do not block vmexits due to host interrupts... */
1804         && !TRPMHasTrap(pVCpu))                                  /* An event could already be scheduled for dispatching. */
1805     {
1806         if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
1803 1807         {
1804             fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1805             fInSvmHwvirtMode  = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1806         }
1807         else
1808         {
1809             fInVmxNonRootMode = false;
1810             fInSvmHwvirtMode  = false;
1811         }
1812 
1813         bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
1814         if (fGif)
1815         {
1808             bool fInVmxNonRootMode;
1809             bool fInSvmHwvirtMode;
1810             if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
1811             {
1812                 fInVmxNonRootMode = false;
1813                 fInSvmHwvirtMode  = false;
1814             }
1815             else
1816             {
1817                 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1818                 fInSvmHwvirtMode  = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1819             }
1820 
1821             if (0)
1822             { }
1816 1823 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1817 1824             /*
… …
1824 1831              * See Intel spec. 26.7.6 "NMI-Window Exiting".
1825 1832              */
1826             if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1827                 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1833             else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1834                      && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1828 1835             {
1829 1836                 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
… …
1835 1842                 UPDATE_RC();
1836 1843             }
1837             else
1838 1844 #endif
1839 1845             /*
1840 1846              * NMIs (take priority over external interrupts).
1841 1847              */
1842             if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1843                 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1848             else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1849                      && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1844 1850             {
1845 1851 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
… …
1974 1980             }
1975 1981         }
1976 1982     }
1977         }
1983         } /* CPUMGetGuestGif */
1978 1984     }
1979 1985 
… …
2080 2086     /* check that we got them all */
2081 2087     AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2082     AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2088     AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2083 2089 }
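Besides the API swap, the EM.cpp rework changes control flow: rather than entering the injection block and probing each source in turn, the rewritten condition first ANDs the pending-event force flags as a single mask, so the common no-event case exits before any GIF or nested-guest checks run. A toy sketch of that early-out pattern (the FF_* bits here are hypothetical stand-ins for the VMCPU_FF_* masks in the diff):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FF_INTERRUPT_APIC  0x01u
#define FF_INTERRUPT_PIC   0x02u
#define FF_INTERRUPT_NMI   0x04u
#define FF_NESTED_GUEST    0x08u
#define FF_VMX_INT_WINDOW  0x10u
#define FF_VMX_NMI_WINDOW  0x20u

#define FF_ANY_EVENT_MASK  (  FF_INTERRUPT_APIC | FF_INTERRUPT_PIC | FF_INTERRUPT_NMI \
                            | FF_NESTED_GUEST | FF_VMX_INT_WINDOW | FF_VMX_NMI_WINDOW)

static bool deliverEvents(uint32_t fForcedActions, bool fInShadow)
{
    /* Cheap early-out first: one AND instead of the full GIF / nested-mode walk. */
    if (!(fForcedActions & FF_ANY_EVENT_MASK))
        return false;
    if (fInShadow) /* interrupt shadows block both NMIs and interrupts */
        return false;
    /* ... the GIF check and the VMX/SVM priority cascade would run here ... */
    return true;
}

int main(void)
{
    printf("%d\n", deliverEvents(0, false));                 /* 0: nothing pending */
    printf("%d\n", deliverEvents(FF_INTERRUPT_APIC, true));  /* 0: shadowed */
    printf("%d\n", deliverEvents(FF_INTERRUPT_APIC, false)); /* 1: deliver */
    return 0;
}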
trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp
r96407 r97178
1476 1476     pVCpu->cpum.GstCtx.rip = pRun->s.regs.regs.rip;
1477 1477 
1478     if (KvmEvents.interrupt.shadow)
1479         EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
1480     else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1481         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1482 
1483     if (KvmEvents.nmi.masked)
1484         VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
1485     else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1486         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
1478     CPUMUpdateInterruptShadowEx(&pVCpu->cpum.GstCtx, KvmEvents.interrupt.shadow != 0, pVCpu->cpum.GstCtx.rip);
1479     CPUMUpdateInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx, KvmEvents.nmi.masked != 0);
1487 1480 
1488 1481     if (KvmEvents.interrupt.injected)
… …
1869 1862 
1870 1863     KvmEvents.flags = KVM_VCPUEVENT_VALID_SHADOW;
1871     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1872     {
1873         if (pRun->s.regs.regs.rip == EMGetInhibitInterruptsPC(pVCpu))
1874             KvmEvents.interrupt.shadow = KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI;
1875         else
1876             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1877     }
1864     if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
1865     { /* probably likely */ }
1866     else
1867         KvmEvents.interrupt.shadow = KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI;
1878 1868 
1879 1869     /* No flag - this is updated unconditionally. */
1880     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1881         KvmEvents.nmi.masked = 1;
1870     KvmEvents.nmi.masked = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
1882 1871 
1883 1872     if (TRPMHasTrap(pVCpu))
… …
2059 2048         KvmEvents.flags |= KVM_VCPUEVENT_VALID_SHADOW;
2060 2049         if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_INT))
2061         {
2062             if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2063                 KvmEvents.interrupt.shadow = 0;
2064             else if (EMGetInhibitInterruptsPC(pVCpu) == pRun->s.regs.regs.rip)
2065                 KvmEvents.interrupt.shadow = KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI;
2066             else
2067             {
2068                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2069                 KvmEvents.interrupt.shadow = 0;
2070             }
2071         }
2072         else if (KvmEvents.interrupt.shadow)
2073             EMSetInhibitInterruptsPC(pVCpu, pRun->s.regs.regs.rip);
2074         else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2075             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2050             KvmEvents.interrupt.shadow = !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx) ? 0
2051                                        : KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI;
2052         else
2053             CPUMUpdateInterruptShadowEx(&pVCpu->cpum.GstCtx, KvmEvents.interrupt.shadow != 0, pRun->s.regs.regs.rip);
2076 2054 
2077 2055         if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_NMI))
2078             KvmEvents.nmi.masked = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS) ? 1 : 0;
2079         else if (KvmEvents.nmi.masked)
2080             VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2081         else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2082             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2056             KvmEvents.nmi.masked = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
2057         else
2058             CPUMUpdateInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx, KvmEvents.nmi.masked != 0);
2083 2059 
2084 2060         /* KVM will own the INT + NMI inhibit state soon: */
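Both NEM hunks are two-way synchronization with KVM's per-VCPU event state: interrupt.shadow and nmi.masked in struct kvm_vcpu_events (declared in <linux/kvm.h>) carry the same information as the context's inhibit flags. A self-contained toy model of that round trip; the TOY* types are invented stand-ins, and only the flag names mirror the real KVM ones:

#include <stdbool.h>
#include <stdint.h>

#define SHADOW_INT_MOV_SS 0x01 /* mirrors KVM_X86_SHADOW_INT_MOV_SS */
#define SHADOW_INT_STI    0x02 /* mirrors KVM_X86_SHADOW_INT_STI */

typedef struct TOYEVENTS /* toy mirror of the kvm_vcpu_events fields used in the diff */
{
    struct { uint8_t shadow; } interrupt;
    struct { uint8_t masked; } nmi;
} TOYEVENTS;

typedef struct TOYCTX
{
    bool     fIntShadow; /* what CPUMIsInInterruptShadowWithUpdate() answers, conceptually */
    bool     fNmiMasked; /* what CPUMAreInterruptsInhibitedByNmi() answers, conceptually */
    uint64_t rip;
} TOYCTX;

/* Export: VBox context -> KVM (the "set events" direction of the diff). */
static void exportToKvm(const TOYCTX *pCtx, TOYEVENTS *pEvents)
{
    pEvents->interrupt.shadow = pCtx->fIntShadow ? SHADOW_INT_MOV_SS | SHADOW_INT_STI : 0;
    pEvents->nmi.masked       = pCtx->fNmiMasked; /* no flag - updated unconditionally */
}

/* Import: KVM -> VBox context (the "get events" direction of the diff). */
static void importFromKvm(TOYCTX *pCtx, const TOYEVENTS *pEvents)
{
    pCtx->fIntShadow = pEvents->interrupt.shadow != 0; /* armed at the current rip */
    pCtx->fNmiMasked = pEvents->nmi.masked != 0;
}

int main(void)
{
    TOYCTX    Ctx = { .fIntShadow = true, .fNmiMasked = false, .rip = 0x1000 };
    TOYEVENTS Events;
    TOYCTX    Ctx2 = { .rip = 0x1000 };
    exportToKvm(&Ctx, &Events);
    importFromKvm(&Ctx2, &Events);
    return Ctx2.fIntShadow == Ctx.fIntShadow && Ctx2.fNmiMasked == Ctx.fNmiMasked ? 0 : 1;
}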
trunk/src/VBox/VMM/VMMR3/TRPM.cpp
r96407 r97178
368 368     {
369 369         PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
370         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
370         Assert(!CPUMIsInInterruptShadow(pCtx));
371 371         Assert(pfInjected);
372 372         *pfInjected = false;
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r96925 r97178
2553 2553         PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
2554 2554         PRINT_FLAG(VMCPU_FF_,TLB_FLUSH);
2555             PRINT_FLAG(VMCPU_FF_,INHIBIT_INTERRUPTS);
2556             PRINT_FLAG(VMCPU_FF_,BLOCK_NMIS);
2557 2555         PRINT_FLAG(VMCPU_FF_,TO_R3);
2558 2556         PRINT_FLAG(VMCPU_FF_,IOM);
… …
2561 2559     else
2562 2560         pHlp->pfnPrintf(pHlp, "\n");
2563 
2564     if (fLocalForcedActions & VMCPU_FF_INHIBIT_INTERRUPTS)
2565         pHlp->pfnPrintf(pHlp, "    intr inhibit RIP: %RGp\n", EMGetInhibitInterruptsPC(pVCpu));
2566 2561 
2567 2562     /* the groups */
trunk/src/VBox/VMM/include/CPUMInternal.mac
r97150 r97178
122 122 ;
123 123 ; Guest context state
124     ; (Identical to the .Hyper chunk below and to CPUMCTX in cpum.mac.)
125 124 ;
126 125 .Guest                    resq 0
… …
183 182 .Guest.gs.u32Limit        resd 1
184 183 .Guest.gs.Attr            resd 1
185     .Guest.eip                resq 1
186     .Guest.eflags             resq 1
187     .Guest.cr0                resq 1
188     .Guest.cr2                resq 1
189     .Guest.cr3                resq 1
190     .Guest.cr4                resq 1
191     .Guest.dr                 resq 8
192     .Guest.gdtrPadding        resw 3
193     .Guest.gdtr               resw 0
194     .Guest.gdtr.cbGdt         resw 1
195     .Guest.gdtr.pGdt          resq 1
196     .Guest.idtrPadding        resw 3
197     .Guest.idtr               resw 0
198     .Guest.idtr.cbIdt         resw 1
199     .Guest.idtr.pIdt          resq 1
200 184 .Guest.ldtr.Sel           resw 1
201 185 .Guest.ldtr.PaddingSel    resw 1
… …
212 196 .Guest.tr.u32Limit        resd 1
213 197 .Guest.tr.Attr            resd 1
198     .Guest.eip                resq 1
199     .Guest.eflags             resq 1
200     .Guest.fInhibit           resb 1
201     alignb 8
202     .Guest.uRipInhibitInt     resq 1
203     .Guest.cr0                resq 1
204     .Guest.cr2                resq 1
205     .Guest.cr3                resq 1
206     .Guest.cr4                resq 1
207     .Guest.dr                 resq 8
208     .Guest.gdtrPadding        resw 3
209     .Guest.gdtr               resw 0
210     .Guest.gdtr.cbGdt         resw 1
211     .Guest.gdtr.pGdt          resq 1
212     .Guest.idtrPadding        resw 3
213     .Guest.idtr               resw 0
214     .Guest.idtr.cbIdt         resw 1
215     .Guest.idtr.pIdt          resq 1
214 216 .Guest.SysEnter.cs        resb 8
215 217 .Guest.SysEnter.eip       resb 8
… …
222 224 .Guest.msrSFMASK          resb 8
223 225 .Guest.msrKERNELGSBASE    resb 8
224     .Guest.uMsrPadding0       resb 8
225 226 
226 227     alignb 8
… …
290 291 .Guest.hwvirt.enmHwvirt   resd 1
291 292 .Guest.hwvirt.fGif        resb 1
292     alignb 8
293     .Guest.hwvirt.fLocalForcedActions resd 1
293     alignb 4
294     .Guest.hwvirt.fSavedInhibit resd 1
294 295     alignb 64
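CPUMInternal.mac duplicates the C CPUMCTX layout for assembly code, so a field move (here eip/eflags landing next to the new fInhibit byte and the 8-byte-aligned uRipInhibitInt) must be mirrored byte for byte; the GEN_CHECK_OFF() entries in tstVMStruct.h exist to catch drift. A sketch of the same compile-time guard on a toy struct (the offsets asserted below hold only for this toy layout):

#include <stddef.h>
#include <stdint.h>

/* Toy fragment with the new ordering from the diff (names mimic CPUMCTX). */
typedef struct TOYGUESTCTX
{
    uint64_t eip;
    uint64_t eflags;
    uint8_t  fInhibit;
    uint8_t  abPadding[7];   /* the C analogue of the 'alignb 8' in the .mac file */
    uint64_t uRipInhibitInt;
    uint64_t cr0;
} TOYGUESTCTX;

/* Compile-time layout guards, the same idea tstVMStruct.h implements for the
 * real CPUMCTX vs. CPUMInternal.mac. */
_Static_assert(offsetof(TOYGUESTCTX, fInhibit)           == 16, "fInhibit moved");
_Static_assert(offsetof(TOYGUESTCTX, uRipInhibitInt)     == 24, "uRipInhibitInt moved");
_Static_assert(offsetof(TOYGUESTCTX, uRipInhibitInt) % 8 == 0,  "alignb 8 equivalent violated");

int main(void) { return 0; }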
trunk/src/VBox/VMM/include/EMInternal.h
r96407 r97178
165 165     uint32_t                cIemThenRemInstructions;
166 166 
167     /** Inhibit interrupts for this instruction. Valid only when VM_FF_INHIBIT_INTERRUPTS is set. */
168     RTGCUINTPTR             GCPtrInhibitInterrupts;
169 
170 167     /** Start of the current time slice in ms. */
171 168     uint64_t                u64TimeSliceStart;
… …
196 193     } MWait;
197 194 
195     #if 0
198 196     /** Make sure the jmp_buf is at a 32-byte boundrary. */
199     uint64_t                au64Padding1[3];
197     uint64_t                au64Padding1[4];
198     #endif
200 199     union
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r97043 r97178
161 161     GEN_CHECK_OFF(CPUMCTX, hwvirt.enmHwvirt);
162 162     GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif);
163         GEN_CHECK_OFF(CPUMCTX, hwvirt.fLocalForcedActions);
163         GEN_CHECK_OFF(CPUMCTX, hwvirt.fSavedInhibit);
164 164     /** @todo NSTVMX: add rest of hwvirt fields when code is more
165 165      *        finalized. */