Changeset 97178 in vbox for trunk/include/VBox
- Timestamp: Oct 17, 2022 9:06:03 PM (2 years ago)
- Location: trunk/include/VBox/vmm
- Files: 5 edited
trunk/include/VBox/vmm/cpum.h
(r97096 → r97178)

@@ -1686 +1686 @@
               ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))

+/** @def CPUMCTX_ASSERT_NOT_EXTRN
+ * Macro for asserting that @a a_fNotExtrn are present in @a a_pCtx.
+ *
+ * @param   a_pCtx          The CPU context of the calling EMT.
+ * @param   a_fNotExtrn     Mask of CPUMCTX_EXTRN_XXX bits to check.
+ */
+#define CPUMCTX_ASSERT_NOT_EXTRN(a_pCtx, a_fNotExtrn) \
+    AssertMsg(!((a_pCtx)->fExtrn & (a_fNotExtrn)), \
+              ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pCtx)->fExtrn, (a_fNotExtrn)))
+
 /** @def CPUM_IMPORT_EXTRN_RET
  * Macro for making sure the state specified by @a fExtrnImport is present,
@@ -1907 +1917 @@
     pCtx->hwvirt.fGif = fGif;
 }
+
+/**
+ * Checks if we're in an "interrupt shadow", i.e. after a STI, POP SS or MOV SS.
+ *
+ * This also inhibits NMIs, except perhaps for nested guests.
+ *
+ * @returns true if interrupts are inhibited by interrupt shadow, false if not.
+ * @param   pCtx    Current guest CPU context.
+ * @note    Requires pCtx->rip to be up to date.
+ * @note    Does not clear fInhibit when CPUMCTX::uRipInhibitInt differs
+ *          from CPUMCTX::rip.
+ */
+DECLINLINE(bool) CPUMIsInInterruptShadow(PCCPUMCTX pCtx)
+{
+    if (!(pCtx->fInhibit & CPUMCTX_INHIBIT_SHADOW))
+        return false;
+
+    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
+    return pCtx->uRipInhibitInt == pCtx->rip;
+}
+
+/**
+ * Checks if we're in an "interrupt shadow", i.e. after a STI, POP SS or MOV SS,
+ * updating the state if stale.
+ *
+ * This also inhibits NMIs, except perhaps for nested guests.
+ *
+ * @returns true if interrupts are inhibited by interrupt shadow, false if not.
+ * @param   pCtx    Current guest CPU context.
+ * @note    Requires pCtx->rip to be up to date.
+ */
+DECLINLINE(bool) CPUMIsInInterruptShadowWithUpdate(PCPUMCTX pCtx)
+{
+    if (!(pCtx->fInhibit & CPUMCTX_INHIBIT_SHADOW))
+        return false;
+
+    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
+    if (pCtx->uRipInhibitInt == pCtx->rip)
+        return true;
+
+    pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
+    return false;
+}
+
+/**
+ * Sets the "interrupt shadow" flag, after a STI, POP SS or MOV SS instruction.
+ *
+ * @param   pCtx    Current guest CPU context.
+ * @note    Requires pCtx->rip to be up to date.
+ */
+DECLINLINE(void) CPUMSetInInterruptShadow(PCPUMCTX pCtx)
+{
+    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
+    pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
+    pCtx->uRipInhibitInt = pCtx->rip;
+}
+
+/**
+ * Sets the "interrupt shadow" flag, after a STI, POP SS or MOV SS instruction,
+ * extended version.
+ *
+ * @param   pCtx    Current guest CPU context.
+ * @param   rip     The RIP for which it is inhibited.
+ */
+DECLINLINE(void) CPUMSetInInterruptShadowEx(PCPUMCTX pCtx, uint64_t rip)
+{
+    pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
+    pCtx->uRipInhibitInt = rip;
+}
+
+/**
+ * Clears the "interrupt shadow" flag.
+ *
+ * @param   pCtx    Current guest CPU context.
+ */
+DECLINLINE(void) CPUMClearInterruptShadow(PCPUMCTX pCtx)
+{
+    pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
+}
+
+/**
+ * Updates the "interrupt shadow" flag.
+ *
+ * @param   pCtx        Current guest CPU context.
+ * @param   fInhibited  The new state.
+ * @note    Requires pCtx->rip to be up to date.
+ */
+DECLINLINE(void) CPUMUpdateInterruptShadow(PCPUMCTX pCtx, bool fInhibited)
+{
+    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
+    if (!fInhibited)
+        pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
+    else
+    {
+        pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
+        pCtx->uRipInhibitInt = pCtx->rip;
+    }
+}
+
+/**
+ * Updates the "interrupt shadow" flag, extended version.
+ *
+ * @returns fInhibited.
+ * @param   pCtx        Current guest CPU context.
+ * @param   fInhibited  The new state.
+ * @param   rip         The RIP for which it is inhibited.
+ */
+DECLINLINE(bool) CPUMUpdateInterruptShadowEx(PCPUMCTX pCtx, bool fInhibited, uint64_t rip)
+{
+    if (!fInhibited)
+        pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
+    else
+    {
+        pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
+        pCtx->uRipInhibitInt = rip;
+    }
+    return fInhibited;
+}
+
+/* VMX forward declarations used by extended function versions: */
+DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx);
+DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCCPUMCTX pCtx, uint32_t uPinCtls);
+DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCCPUMCTX pCtx);
+DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCPUMCTX pCtx, bool fBlocking);
+
+/**
+ * Checks whether interrupts, including NMIs, are inhibited by pending NMI
+ * delivery.
+ *
+ * This only checks the inhibit mask.
+ *
+ * @retval  true if interrupts are inhibited by NMI handling.
+ * @retval  false if interrupts are not inhibited by NMI handling.
+ * @param   pCtx    Current guest CPU context.
+ */
+DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmi(PCCPUMCTX pCtx)
+{
+    return (pCtx->fInhibit & CPUMCTX_INHIBIT_NMI) != 0;
+}
+
+/**
+ * Extended version of CPUMAreInterruptsInhibitedByNmi() that takes VMX non-root
+ * mode into account when checking whether interrupts are inhibited by NMI.
+ *
+ * @retval  true if interrupts are inhibited by NMI handling.
+ * @retval  false if interrupts are not inhibited by NMI handling.
+ * @param   pCtx    Current guest CPU context.
+ */
+DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmiEx(PCCPUMCTX pCtx)
+{
+    /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
+    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
+        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
+        return CPUMAreInterruptsInhibitedByNmi(pCtx);
+    return CPUMIsGuestVmxVirtNmiBlocking(pCtx);
+}
+
+/**
+ * Marks interrupts, including NMIs, as inhibited by pending NMI delivery.
+ *
+ * @param   pCtx    Current guest CPU context.
+ */
+DECLINLINE(void) CPUMSetInterruptInhibitingByNmi(PCPUMCTX pCtx)
+{
+    pCtx->fInhibit |= CPUMCTX_INHIBIT_NMI;
+}
+
+/**
+ * Extended version of CPUMSetInterruptInhibitingByNmi() that takes VMX non-root
+ * mode into account when marking interrupts as inhibited by NMI.
+ *
+ * @param   pCtx    Current guest CPU context.
+ */
+DECLINLINE(void) CPUMSetInterruptInhibitingByNmiEx(PCPUMCTX pCtx)
+{
+    /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
+    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
+        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
+        CPUMSetInterruptInhibitingByNmi(pCtx);
+    else
+        CPUMSetGuestVmxVirtNmiBlocking(pCtx, true);
+}
+
+/**
+ * Marks interrupts, including NMIs, as no longer inhibited by pending NMI
+ * delivery.
+ *
+ * @param   pCtx    Current guest CPU context.
+ */
+DECLINLINE(void) CPUMClearInterruptInhibitingByNmi(PCPUMCTX pCtx)
+{
+    pCtx->fInhibit &= ~CPUMCTX_INHIBIT_NMI;
+}
+
+/**
+ * Extended version of CPUMClearInterruptInhibitingByNmi() that takes VMX
+ * non-root mode into account when doing the updating.
+ *
+ * @param   pCtx    Current guest CPU context.
+ */
+DECLINLINE(void) CPUMClearInterruptInhibitingByNmiEx(PCPUMCTX pCtx)
+{
+    /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
+    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
+        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
+        CPUMClearInterruptInhibitingByNmi(pCtx);
+    else
+        CPUMSetGuestVmxVirtNmiBlocking(pCtx, false);
+}
+
+/**
+ * Updates whether interrupts, including NMIs, are inhibited by pending NMI
+ * delivery.
+ *
+ * @param   pCtx        Current guest CPU context.
+ * @param   fInhibited  The new state.
+ */
+DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmi(PCPUMCTX pCtx, bool fInhibited)
+{
+    if (!fInhibited)
+        pCtx->fInhibit &= ~CPUMCTX_INHIBIT_NMI;
+    else
+        pCtx->fInhibit |= CPUMCTX_INHIBIT_NMI;
+}
+
+/**
+ * Extended version of CPUMUpdateInterruptInhibitingByNmi() that takes VMX
+ * non-root mode into account when doing the updating.
+ *
+ * @param   pCtx        Current guest CPU context.
+ * @param   fInhibited  The new state.
+ */
+DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmiEx(PCPUMCTX pCtx, bool fInhibited)
+{
+    /*
+     * Set the state of guest-NMI blocking in any of the following cases:
+     *   - We're not executing a nested-guest.
+     *   - We're executing an SVM nested-guest[1].
+     *   - We're executing a VMX nested-guest without virtual-NMIs enabled.
+     *
+     * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
+     *        SVM hypervisors must track NMI blocking themselves by intercepting
+     *        the IRET instruction after injection of an NMI.
+     */
+    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
+        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
+        CPUMUpdateInterruptInhibitingByNmi(pCtx, fInhibited);
+    /*
+     * Set the state of virtual-NMI blocking, if we are executing a
+     * VMX nested-guest with virtual-NMIs enabled.
+     */
+    else
+        CPUMSetGuestVmxVirtNmiBlocking(pCtx, fInhibited);
+}
+

 /**
@@ -2767 +3033 @@
     CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED,
     CPUMINTERRUPTIBILITY_INT_DISABLED,
-    CPUMINTERRUPTIBILITY_INT_INHIBITED,
+    CPUMINTERRUPTIBILITY_INT_INHIBITED,     /**< @todo rename as it inhibits NMIs too. */
     CPUMINTERRUPTIBILITY_NMI_INHIBIT,
     CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT,
@@ -2775 +3041 @@

 VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu);
-VMM_INT_DECL(bool)                 CPUMIsGuestNmiBlocking(PCVMCPU pVCpu);
-VMM_INT_DECL(void)                 CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock);

 /** @name Typical scalable bus frequency values.
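The new inline API replaces per-VCPU force flags with an interrupt-shadow flag tracked against an explicit RIP in CPUMCTX. A minimal caller sketch follows, assuming the usual VMM include set and the pVCpu->cpum.GstCtx access pattern visible in the macros above; the toy* helpers and their surrounding logic are hypothetical and not part of this changeset:

#include <VBox/vmm/cpum.h>
#include <VBox/vmm/vmcc.h>

/* Hypothetical emulation step: after completing MOV SS / POP SS / STI,
 * advance RIP first, then arm the shadow so it covers the next
 * instruction; CPUMSetInInterruptShadow() records the current RIP in
 * uRipInhibitInt. */
static void toyAdvanceAfterMovSs(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    pCtx->rip += cbInstr;
    CPUMSetInInterruptShadow(pCtx);
}

/* Hypothetical injection gate: the WithUpdate variant clears a stale
 * shadow (RIP has moved past uRipInhibitInt) as a side effect, so no
 * separate invalidation pass is needed before checking. */
static bool toyCanInjectExtInt(PVMCPUCC pVCpu)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    return !CPUMIsInInterruptShadowWithUpdate(pCtx)
        && !CPUMAreInterruptsInhibitedByNmi(pCtx);
}

Note that the advance-then-set ordering matters: CPUMSetInInterruptShadow() latches whatever RIP is current, so the caller must move RIP to the next instruction first.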
trunk/include/VBox/vmm/cpum.mac
(r97150 → r97178)

@@ -150 +150 @@
     .gs.u32Limit          resd 1
     .gs.Attr              resd 1
+
+    .ldtr.Sel             resw 1
+    .ldtr.PaddingSel      resw 1
+    .ldtr.ValidSel        resw 1
+    .ldtr.fFlags          resw 1
+    .ldtr.u64Base         resq 1
+    .ldtr.u32Limit        resd 1
+    .ldtr.Attr            resd 1
+    .tr.Sel               resw 1
+    .tr.PaddingSel        resw 1
+    .tr.ValidSel          resw 1
+    .tr.fFlags            resw 1
+    .tr.u64Base           resq 1
+    .tr.u32Limit          resd 1
+    .tr.Attr              resd 1
+
     .eip                  resq 1
     .eflags               resq 1
@@ -217 +233 @@
     .gs.u32Limit          resd 1
     .gs.Attr              resd 1
-    .eip                  resq 1
-    .eflags               resq 1
-    .cr0                  resq 1
-    .cr2                  resq 1
-    .cr3                  resq 1
-    .cr4                  resq 1
-    .dr                   resq 8
-    .gdtrPadding          resw 3
-    .gdtr                 resw 0
-    .gdtr.cbGdt           resw 1
-    .gdtr.pGdt            resq 1
-    .idtrPadding          resw 3
-    .idtr                 resw 0
-    .idtr.cbIdt           resw 1
-    .idtr.pIdt            resq 1
     .ldtr.Sel             resw 1
     .ldtr.PaddingSel      resw 1
…
     .tr.u32Limit          resd 1
     .tr.Attr              resd 1
+    .eip                  resq 1
+    .eflags               resq 1
+    .fInhibit             resb 1
+    alignb 8
+    .uRipInhibitInt       resq 1
+    .cr0                  resq 1
+    .cr2                  resq 1
+    .cr3                  resq 1
+    .cr4                  resq 1
+    .dr                   resq 8
+    .gdtrPadding          resw 3
+    .gdtr                 resw 0
+    .gdtr.cbGdt           resw 1
+    .gdtr.pGdt            resq 1
+    .idtrPadding          resw 3
+    .idtr                 resw 0
+    .idtr.cbIdt           resw 1
+    .idtr.pIdt            resq 1
     .SysEnter.cs          resb 8
     .SysEnter.eip         resb 8
@@ -256 +275 @@
     .msrSFMASK            resb 8
     .msrKERNELGSBASE      resb 8
-    .uMsrPadding0         resb 8

     alignb 8
@@ -323 +341 @@
     .hwvirt.enmHwvirt     resd 1
     .hwvirt.fGif          resb 1
-    alignb 8
-    .hwvirt.fLocalForcedActions resd 1
+    alignb 4
+    .hwvirt.fSavedInhibit resd 1
     alignb 64
 endstruc
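The .mac layout must track the C structure byte for byte; on the C side this is enforced by the AssertCompileMemberOffset checks updated in cpumctx.h below (fInhibit at 0x150, uRipInhibitInt at 0x158). A toy illustration of that pattern with standard C11 facilities; TOYCTX is invented for the example and only mimics the new field cluster:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Invented miniature of the rip/rflags/fInhibit/uRipInhibitInt cluster;
 * the real layout lives in CPUMCTX. */
typedef struct TOYCTX
{
    uint64_t rip;               /* offset 0x00 */
    uint64_t rflags;            /* offset 0x08 */
    uint8_t  fInhibit;          /* offset 0x10 */
    uint8_t  abPadding[7];      /* explicit padding, mirrors 'alignb 8' */
    uint64_t uRipInhibitInt;    /* offset 0x18 */
} TOYCTX;

/* If a member moves, these fail at compile time; VBox's
 * AssertCompileMemberOffset serves the same purpose for CPUMCTX. */
static_assert(offsetof(TOYCTX, fInhibit)       == 0x10, "fInhibit moved");
static_assert(offsetof(TOYCTX, uRipInhibitInt) == 0x18, "uRipInhibitInt moved");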
trunk/include/VBox/vmm/cpumctx.h
(r97150 → r97178)

@@ -266 +266 @@
     CPUMSELREG          gs;
     /** @} */
+
+    CPUMSELREG          ldtr;
+    CPUMSELREG          tr;

     /** The program counter. */
@@ -399 +402 @@
     } CPUM_UNION_NM(s);

+    /** The LDT register.
+     * Only the guest context uses all the members. */
+    CPUMSELREG          ldtr;
+    /** The task register.
+     * Only the guest context uses all the members. */
+    CPUMSELREG          tr;
+
     /** The program counter. */
     union
@@ -416 +426 @@
     /** @} */ /*(CPUMCTXCORE)*/

+    /** Interrupt & exception inhibiting (CPUMCTX_INHIBIT_XXX). */
+    uint8_t             fInhibit;
+    uint8_t             abPadding[7];
+    /** The RIP value fInhibit is/was valid for. */
+    uint64_t            uRipInhibitInt;

     /** @name Control registers.
@@ -422 +437 @@
     uint64_t            cr2;
     uint64_t            cr3;
-    /** @todo the 4 PAE PDPE registers. See PGMCPU::aGstPaePdpeRegs. */
     uint64_t            cr4;
+    /** @todo Add the 4 PAE PDPE registers. See PGMCPU::aGstPaePdpeRegs. */
     /** @} */
@@ -445 +460 @@
     /** Interrupt Descriptor Table register. */
     VBOXIDTR            idtr;
-
-    /** The LDT register.
-     * Only the guest context uses all the members. */
-    CPUMSELREG          ldtr;
-    /** The task register.
-     * Only the guest context uses all the members. */
-    CPUMSELREG          tr;

     /** The sysenter msr registers.
@@ -466 +474 @@
     uint64_t            msrSFMASK;          /**< syscall flag mask. */
     uint64_t            msrKERNELGSBASE;    /**< swapgs exchange value. */
-    uint64_t            uMsrPadding0;       /**< no longer used (used to hold a copy of APIC base MSR). */
     /** @} */

-    /** 0x228 - Externalized state tracker, CPUMCTX_EXTRN_XXX. */
+    /** 0x230 - Externalized state tracker, CPUMCTX_EXTRN_XXX.
+     * @todo Move up after uRipInhibitInt after fInhibit moves into RFLAGS.
+     *       That will put this in the same cacheline as RIP, RFLAGS and CR0,
+     *       which are typically always imported and exported again during a
+     *       VM exit. */
     uint64_t            fExtrn;

-    uint64_t            au64Unused[2];
+    uint64_t            u64Unused;

     /** 0x240 - PAE PDPTEs. */
@@ -634 +645 @@
         /** 0x11134 - Global interrupt flag - AMD only (always true on Intel). */
         bool                fGif;
-        bool                afPadding1[3];
-        /** 0x11138 - A subset of guest force flags that are saved while running the
-         * nested-guest. */
-#ifdef VMCPU_WITH_64_BIT_FFS
-        uint64_t            fLocalForcedActions;
-#else
-        uint32_t            fLocalForcedActions;
-        uint32_t            fPadding;
-#endif
-#if 0
-        /** 0x11140 - Pad to 64 byte boundary. */
-        uint8_t             abPadding0[8+16+32];
-#endif
+        /** 0x11135 - Padding. */
+        bool                afPadding0[3];
+        /** 0x11138 - A subset of guest inhibit flags (CPUMCTX_INHIBIT_XXX) that are
+         * saved while running the nested-guest. */
+        uint32_t            fSavedInhibit;
+        /** 0x1113c - Pad to 64 byte boundary. */
+        uint8_t             abPadding1[4];
     } hwvirt;
 } CPUMCTX;
@@ -656 +661 @@
 AssertCompileSizeAlignment(CPUMCTX, 16);
 AssertCompileSizeAlignment(CPUMCTX, 8);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax,   0);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rcx,   8);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdx,  16);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbx,  24);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsp,  32);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbp,  40);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsi,  48);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdi,  56);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r8,   64);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r9,   72);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r10,  80);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r11,  88);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r12,  96);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r13, 104);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r14, 112);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r15, 120);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, 128);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) cs, 152);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ss, 176);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ds, 200);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) fs, 224);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) gs, 248);
-AssertCompileMemberOffset(CPUMCTX, rip,    272);
-AssertCompileMemberOffset(CPUMCTX, rflags, 280);
-AssertCompileMemberOffset(CPUMCTX, cr0,    288);
-AssertCompileMemberOffset(CPUMCTX, cr2,    296);
-AssertCompileMemberOffset(CPUMCTX, cr3,    304);
-AssertCompileMemberOffset(CPUMCTX, cr4,    312);
-AssertCompileMemberOffset(CPUMCTX, dr,     320);
-AssertCompileMemberOffset(CPUMCTX, gdtr,   384+6);
-AssertCompileMemberOffset(CPUMCTX, idtr,   400+6);
-AssertCompileMemberOffset(CPUMCTX, ldtr,   416);
-AssertCompileMemberOffset(CPUMCTX, tr,     440);
-AssertCompileMemberOffset(CPUMCTX, SysEnter, 464);
-AssertCompileMemberOffset(CPUMCTX, msrEFER,  488);
-AssertCompileMemberOffset(CPUMCTX, msrSTAR,  496);
-AssertCompileMemberOffset(CPUMCTX, msrPAT,   504);
-AssertCompileMemberOffset(CPUMCTX, msrLSTAR, 512);
-AssertCompileMemberOffset(CPUMCTX, msrCSTAR, 520);
-AssertCompileMemberOffset(CPUMCTX, msrSFMASK, 528);
-AssertCompileMemberOffset(CPUMCTX, msrKERNELGSBASE, 536);
-AssertCompileMemberOffset(CPUMCTX, aPaePdpes, 0x240);
-AssertCompileMemberOffset(CPUMCTX, aXcr,      0x260);
-AssertCompileMemberOffset(CPUMCTX, fXStateMask, 0x270);
-AssertCompileMemberOffset(CPUMCTX, fUsedFpuGuest, 0x278);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(u.) XState,   0x300);
-AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(u.) abXState, 0x300);
-AssertCompileMemberAlignment(CPUMCTX, CPUM_UNION_NM(u.) XState, 0x100);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax, 0x0000);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rcx, 0x0008);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdx, 0x0010);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbx, 0x0018);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsp, 0x0020);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbp, 0x0028);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsi, 0x0030);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdi, 0x0038);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r8,  0x0040);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r9,  0x0048);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r10, 0x0050);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r11, 0x0058);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r12, 0x0060);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r13, 0x0068);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r14, 0x0070);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r15, 0x0078);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, 0x0080);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) cs, 0x0098);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ss, 0x00b0);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ds, 0x00c8);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) fs, 0x00e0);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) gs, 0x00f8);
+AssertCompileMemberOffset(CPUMCTX, ldtr,            0x0110);
+AssertCompileMemberOffset(CPUMCTX, tr,              0x0128);
+AssertCompileMemberOffset(CPUMCTX, rip,             0x0140);
+AssertCompileMemberOffset(CPUMCTX, rflags,          0x0148);
+AssertCompileMemberOffset(CPUMCTX, fInhibit,        0x0150);
+AssertCompileMemberOffset(CPUMCTX, uRipInhibitInt,  0x0158);
+AssertCompileMemberOffset(CPUMCTX, cr0,             0x0160);
+AssertCompileMemberOffset(CPUMCTX, cr2,             0x0168);
+AssertCompileMemberOffset(CPUMCTX, cr3,             0x0170);
+AssertCompileMemberOffset(CPUMCTX, cr4,             0x0178);
+AssertCompileMemberOffset(CPUMCTX, dr,              0x0180);
+AssertCompileMemberOffset(CPUMCTX, gdtr,            0x01c0+6);
+AssertCompileMemberOffset(CPUMCTX, idtr,            0x01d0+6);
+AssertCompileMemberOffset(CPUMCTX, SysEnter,        0x01e0);
+AssertCompileMemberOffset(CPUMCTX, msrEFER,         0x01f8);
+AssertCompileMemberOffset(CPUMCTX, msrSTAR,         0x0200);
+AssertCompileMemberOffset(CPUMCTX, msrPAT,          0x0208);
+AssertCompileMemberOffset(CPUMCTX, msrLSTAR,        0x0210);
+AssertCompileMemberOffset(CPUMCTX, msrCSTAR,        0x0218);
+AssertCompileMemberOffset(CPUMCTX, msrSFMASK,       0x0220);
+AssertCompileMemberOffset(CPUMCTX, msrKERNELGSBASE, 0x0228);
+AssertCompileMemberOffset(CPUMCTX, aPaePdpes,       0x0240);
+AssertCompileMemberOffset(CPUMCTX, aXcr,            0x0260);
+AssertCompileMemberOffset(CPUMCTX, fXStateMask,     0x0270);
+AssertCompileMemberOffset(CPUMCTX, fUsedFpuGuest,   0x0278);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(u.) XState,   0x0300);
+AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(u.) abXState, 0x0300);
+AssertCompileMemberAlignment(CPUMCTX, CPUM_UNION_NM(u.) XState, 0x0100);
 /* Only do spot checks for hwvirt */
-AssertCompileMemberAlignment(CPUMCTX, hwvirt, 0x1000);
+AssertCompileMemberAlignment(CPUMCTX, hwvirt,                   0x1000);
 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.Vmcb,        X86_PAGE_SIZE);
 AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.abMsrBitmap, X86_PAGE_SIZE);
…
 AssertCompileMemberOffset(CPUMCTX, hwvirt.enmHwvirt, 0x11130);
 AssertCompileMemberOffset(CPUMCTX, hwvirt.fGif,      0x11134);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.fLocalForcedActions, 0x11138);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.fSavedInhibit,       0x11138);
 AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
 AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r0);
@@ -1021 +1028 @@


+/** @name CPUMCTX_INHIBIT_XXX - Interrupt inhibiting flags.
+ * @{ */
+/** Interrupt shadow following MOV SS or POP SS.
+ *
+ * When this is in effect, both maskable and non-maskable interrupts are blocked
+ * from delivery for one instruction.  Same for certain debug exceptions too,
+ * unlike the STI variant.
+ *
+ * It is implementation specific whether a sequence of two or more of these
+ * instructions will have any effect on the instruction following the last one
+ * of them. */
+#define CPUMCTX_INHIBIT_SHADOW_SS   UINT8_C(0x01)
+/** Interrupt shadow following STI.
+ * Same as CPUMCTX_INHIBIT_SHADOW_SS but without blocking any debug exceptions. */
+#define CPUMCTX_INHIBIT_SHADOW_STI  UINT8_C(0x02)
+/** Mask combining STI and SS shadowing. */
+#define CPUMCTX_INHIBIT_SHADOW      (CPUMCTX_INHIBIT_SHADOW_SS | CPUMCTX_INHIBIT_SHADOW_STI)
+
+/** Interrupts blocked by NMI delivery.  This condition is cleared by IRET.
+ *
+ * Section "6.7 NONMASKABLE INTERRUPT (NMI)" in Intel SDM Vol 3A states that
+ * "The processor also invokes certain hardware conditions to ensure that no
+ * other interrupts, including NMI interrupts, are received until the NMI
+ * handler has completed executing."  This flag indicates that these
+ * conditions are currently active. */
+#define CPUMCTX_INHIBIT_NMI         UINT8_C(0x04)
+/** @} */
+

 /**
  * Additional guest MSRs (i.e. not part of the CPU context structure).
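The SS/STI split matters because only the MOV SS / POP SS shadow also suppresses certain debug exceptions; the STI shadow does not (per the flag docs above). A hedged sketch of a consumer making that distinction; the function is illustrative and not part of the changeset:

#include <VBox/vmm/cpumctx.h>

/* Illustrative only: should a single-step #DB be suppressed for the
 * instruction at the current RIP?  Only the MOV SS / POP SS flavour of
 * the shadow blocks it; an STI-only shadow does not. */
static bool toySuppressesSingleStepDb(PCCPUMCTX pCtx)
{
    return (pCtx->fInhibit & CPUMCTX_INHIBIT_SHADOW_SS)
        && pCtx->uRipInhibitInt == pCtx->rip;   /* shadow still current */
}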
trunk/include/VBox/vmm/em.h
(r96999 → r97178)

@@ -138 +138 @@
 /** @} */

-VMMDECL(void)           EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC);
-VMMDECL(RTGCUINTPTR)    EMGetInhibitInterruptsPC(PVMCPU pVCpu);
-VMMDECL(bool)           EMIsInhibitInterruptsActive(PVMCPU pVCpu);
 VMMDECL(void)           EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled);
 VMMDECL(bool)           EMAreHypercallInstructionsEnabled(PVMCPU pVCpu);
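These EM accessors get no replacement here; their role moves to the CPUMCTX-based inlines added in cpum.h above. A hedged before/after sketch (the helper is hypothetical; the pre-change pattern shows how the removed API was typically paired with the old VMCPU_FF_INHIBIT_INTERRUPTS force flag):

#include <VBox/vmm/cpum.h>
#include <VBox/vmm/vmcc.h>

/* Hypothetical helper showing the migration.  Pre-r97178 callers wrote:
 *
 *     return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
 *         && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip;
 *
 * Post-r97178 the flag and the RIP both live in CPUMCTX and the check
 * collapses to a single call: */
static bool toyIsInInterruptShadow(PVMCPUCC pVCpu)
{
    return CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx);
}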
trunk/include/VBox/vmm/vm.h
(r96811 → r97178)

@@ -490 +490 @@
 /** The bit number for VMCPU_FF_DBGF. */
 #define VMCPU_FF_DBGF_BIT                   10
-/** This action forces the VM to service any pending updates to CR3 (used only
- *  by HM). */
 /** Hardware virtualized nested-guest interrupt pending. */
 #define VMCPU_FF_INTERRUPT_NESTED_GUEST     RT_BIT_64(VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT)
@@ -523 +521 @@
 /* 22 used to be VMCPU_FF_SELM_SYNC_GDT (raw-mode only). */
 /* 23 used to be VMCPU_FF_SELM_SYNC_LDT (raw-mode only). */
-/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
-#define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_64(VMCPU_FF_INHIBIT_INTERRUPTS_BIT)
-#define VMCPU_FF_INHIBIT_INTERRUPTS_BIT     24
-/** Block injection of non-maskable interrupts to the guest. */
-#define VMCPU_FF_BLOCK_NMIS                 RT_BIT_64(VMCPU_FF_BLOCK_NMIS_BIT)
-#define VMCPU_FF_BLOCK_NMIS_BIT             25
+/* 24 used to be VMCPU_FF_INHIBIT_INTERRUPTS, which moved to CPUMCTX::fInhibit in v7.0.4. */
+/* 25 used to be VMCPU_FF_BLOCK_NMIS, which moved to CPUMCTX::fInhibit in v7.0.4. */
 /** Force return to Ring-3. */
 #define VMCPU_FF_TO_R3                      RT_BIT_64(VMCPU_FF_TO_R3_BIT)
@@ -576 +570 @@
 /** High priority VMCPU pre-execution actions. */
 #define VMCPU_FF_HIGH_PRIORITY_PRE_MASK     (  VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC \
-                                             | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF \
+                                             | VMCPU_FF_UPDATE_APIC | VMCPU_FF_DBGF \
                                              | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
                                              | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE \
@@ -584 +578 @@
 #define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK    (  VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY )
 /** High priority VMCPU pre raw-mode execution mask. */
-#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
-                                             | VMCPU_FF_INHIBIT_INTERRUPTS )
+#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL )

 /** High priority post-execution actions. */