VirtualBox

Changeset 97178 in vbox


Ignore:
Timestamp:
Oct 17, 2022 9:06:03 PM (2 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
154153
Message:

VMM/CPUM,EM,HM,IEM,++: Moved VMCPU_FF_INHIBIT_INTERRUPTS and VMCPU_FF_BLOCK_NMIS to CPUMCTX::fInhibit. Moved ldtr and tr up to the CPUMCTXCORE area in hope for better cache alignment of rip, rflags and crX register fields. bugref:9941

Location:
trunk
Files:
22 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/cpum.h

    r97096 r97178  
    16861686              ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))
    16871687
     1688/** @def CPUMCTX_ASSERT_NOT_EXTRN
     1689 * Macro for asserting that @a a_fNotExtrn are present in @a a_pCtx.
     1690 *
     1691 * @param   a_pCtx          The CPU context of the calling EMT.
     1692 * @param   a_fNotExtrn     Mask of CPUMCTX_EXTRN_XXX bits to check.
     1693 */
     1694#define CPUMCTX_ASSERT_NOT_EXTRN(a_pCtx, a_fNotExtrn) \
     1695    AssertMsg(!((a_pCtx)->fExtrn & (a_fNotExtrn)), \
     1696              ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pCtx)->fExtrn, (a_fNotExtrn)))
     1697
    16881698/** @def CPUM_IMPORT_EXTRN_RET
    16891699 * Macro for making sure the state specified by @a fExtrnImport is present,
     
    19071917    pCtx->hwvirt.fGif = fGif;
    19081918}
     1919
     1920/**
     1921 * Checks if we're in an "interrupt shadow", i.e. after a STI, POP SS or MOV SS.
     1922 * 
      1923 * This also inhibits NMIs, except perhaps for nested guests.
     1924 * 
     1925 * @returns true if interrupts are inhibited by interrupt shadow, false if not.
     1926 * @param   pCtx    Current guest CPU context.
     1927 * @note    Requires pCtx->rip to be up to date.
     1928 * @note    Does not clear fInhibit when CPUMCTX::uRipInhibitInt differs
     1929 *          from CPUMCTX::rip.
     1930 */
     1931DECLINLINE(bool) CPUMIsInInterruptShadow(PCCPUMCTX pCtx)
     1932{
     1933    if (!(pCtx->fInhibit & CPUMCTX_INHIBIT_SHADOW))
     1934        return false;
     1935
     1936    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
     1937    return pCtx->uRipInhibitInt == pCtx->rip;
     1938}
     1939
     1940/**
      1941 * Checks if we're in an "interrupt shadow", i.e. after a STI, POP SS or MOV SS,
     1942 * updating the state if stale.
     1943 * 
      1945 * This also inhibits NMIs, except perhaps for nested guests.
     1945 * 
     1946 * @returns true if interrupts are inhibited by interrupt shadow, false if not.
     1947 * @param   pCtx    Current guest CPU context.
     1948 * @note    Requires pCtx->rip to be up to date.
     1949 */
     1950DECLINLINE(bool) CPUMIsInInterruptShadowWithUpdate(PCPUMCTX pCtx)
     1951{
     1952    if (!(pCtx->fInhibit & CPUMCTX_INHIBIT_SHADOW))
     1953        return false;
     1954
     1955    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
     1956    if (pCtx->uRipInhibitInt == pCtx->rip)
     1957        return true;
     1958
     1959    pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
     1960    return false;
     1961}
     1962
     1963/**
      1964 * Sets the "interrupt shadow" flag, after a STI, POP SS or MOV SS instruction.
     1965 *
     1966 * @param   pCtx    Current guest CPU context.
     1967 * @note    Requires pCtx->rip to be up to date.
     1968 */
     1969DECLINLINE(void) CPUMSetInInterruptShadow(PCPUMCTX pCtx)
     1970{
     1971    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
     1972    pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
     1973    pCtx->uRipInhibitInt = pCtx->rip;
     1974}
     1975
     1976/**
      1977 * Sets the "interrupt shadow" flag, after a STI, POP SS or MOV SS instruction,
     1978 * extended version.
     1979 *
     1980 * @param   pCtx    Current guest CPU context.
     1981 * @param   rip     The RIP for which it is inhibited.
     1982 */
     1983DECLINLINE(void) CPUMSetInInterruptShadowEx(PCPUMCTX pCtx, uint64_t rip)
     1984{
     1985    pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
     1986    pCtx->uRipInhibitInt = rip;
     1987}
     1988
     1989/**
     1990 * Clears the "interrupt shadow" flag.
     1991 *
     1992 * @param   pCtx    Current guest CPU context.
     1993 */
     1994DECLINLINE(void) CPUMClearInterruptShadow(PCPUMCTX pCtx)
     1995{
     1996    pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
     1997}
     1998
     1999/**
     2000 * Update the "interrupt shadow" flag.
     2001 *
     2002 * @param   pCtx        Current guest CPU context.
     2003 * @param   fInhibited  The new state.
     2004 * @note    Requires pCtx->rip to be up to date.
     2005 */
     2006DECLINLINE(void) CPUMUpdateInterruptShadow(PCPUMCTX pCtx, bool fInhibited)
     2007{
     2008    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
     2009    if (!fInhibited)
     2010        pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
     2011    else
     2012    {
     2013        pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
     2014        pCtx->uRipInhibitInt = pCtx->rip;
     2015    }
     2016}
     2017
     2018/**
     2019 * Update the "interrupt shadow" flag, extended version.
     2020 *
     2021 * @returns fInhibited.
     2022 * @param   pCtx        Current guest CPU context.
     2023 * @param   fInhibited  The new state.
     2024 * @param   rip         The RIP for which it is inhibited.
     2025 */
     2026DECLINLINE(bool) CPUMUpdateInterruptShadowEx(PCPUMCTX pCtx, bool fInhibited, uint64_t rip)
     2027{
     2028    if (!fInhibited)
     2029        pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
     2030    else
     2031    {
     2032        pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
     2033        pCtx->uRipInhibitInt = rip;
     2034    }
     2035    return fInhibited;
     2036}
     2037
     2038/* VMX forward declarations used by extended function versions: */
     2039DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx);
     2040DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCCPUMCTX pCtx, uint32_t uPinCtls);
     2041DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCCPUMCTX pCtx);
     2042DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCPUMCTX pCtx, bool fBlocking);
     2043
     2044/**
      2045 * Checks whether interrupts, including NMIs, are inhibited by pending NMI
     2046 * delivery.
     2047 *
     2048 * This only checks the inhibit mask.
     2049 *
     2050 * @retval  true if interrupts are inhibited by NMI handling.
     2051 * @retval  false if interrupts are not inhibited by NMI handling.
     2052 * @param   pCtx        Current guest CPU context.
     2053 */
     2054DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmi(PCCPUMCTX pCtx)
     2055{
     2056    return (pCtx->fInhibit & CPUMCTX_INHIBIT_NMI) != 0;
     2057}
     2058
     2059/**
     2060 * Extended version of CPUMAreInterruptsInhibitedByNmi() that takes VMX non-root
      2061 * mode into account when checking whether interrupts are inhibited by NMI.
     2062 *
     2063 * @retval  true if interrupts are inhibited by NMI handling.
     2064 * @retval  false if interrupts are not inhibited by NMI handling.
     2065 * @param   pCtx        Current guest CPU context.
     2066 */
     2067DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmiEx(PCCPUMCTX pCtx)
     2068{
     2069    /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
     2070    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
     2071        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
     2072        return CPUMAreInterruptsInhibitedByNmi(pCtx);
     2073    return CPUMIsGuestVmxVirtNmiBlocking(pCtx);
     2074}
     2075
     2076/**
      2077 * Marks interrupts, including NMIs, as inhibited by pending NMI delivery.
     2078 *
     2079 * @param   pCtx        Current guest CPU context.
     2080 */
     2081DECLINLINE(void) CPUMSetInterruptInhibitingByNmi(PCPUMCTX pCtx)
     2082{
     2083    pCtx->fInhibit |= CPUMCTX_INHIBIT_NMI;
     2084}
     2085
     2086/**
     2087 * Extended version of CPUMSetInterruptInhibitingByNmi() that takes VMX non-root
     2088 * mode into account when marking interrupts as inhibited by NMI.
     2089 *
      2090 * @param   pCtx        Current guest CPU context.
     2091 */
     2092DECLINLINE(void) CPUMSetInterruptInhibitingByNmiEx(PCPUMCTX pCtx)
     2093{
     2094    /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
     2095    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
     2096        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
     2097        CPUMSetInterruptInhibitingByNmi(pCtx);
     2098    else
     2099        CPUMSetGuestVmxVirtNmiBlocking(pCtx, true);
     2100}
     2101
     2102/**
      2103 * Marks interrupts, including NMIs, as no longer inhibited by pending NMI
     2104 * delivery.
     2105 *
     2106 * @param   pCtx        Current guest CPU context.
     2107 */
     2108DECLINLINE(void) CPUMClearInterruptInhibitingByNmi(PCPUMCTX pCtx)
     2109{
     2110    pCtx->fInhibit &= ~CPUMCTX_INHIBIT_NMI;
     2111}
     2112
     2113/**
     2114 * Extended version of CPUMClearInterruptInhibitingByNmi() that takes VMX
     2115 * non-root mode into account when doing the updating.
     2116 *
      2117 * @param   pCtx        Current guest CPU context.
     2119 */
     2120DECLINLINE(void) CPUMClearInterruptInhibitingByNmiEx(PCPUMCTX pCtx)
     2121{
     2122    /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
     2123    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
     2124        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
     2125        CPUMClearInterruptInhibitingByNmi(pCtx);
     2126    else
     2127        CPUMSetGuestVmxVirtNmiBlocking(pCtx, false);
     2128}
     2129
     2130/**
      2131 * Update whether interrupts, including NMIs, are inhibited by pending NMI
     2132 * delivery.
     2133 *
     2134 * @param   pCtx        Current guest CPU context.
     2135 * @param   fInhibited  The new state.
     2136 */
     2137DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmi(PCPUMCTX pCtx, bool fInhibited)
     2138{
     2139    if (!fInhibited)
     2140        pCtx->fInhibit &= ~CPUMCTX_INHIBIT_NMI;
     2141    else
     2142        pCtx->fInhibit |= CPUMCTX_INHIBIT_NMI;
     2143}
     2144
     2145/**
     2146 * Extended version of CPUMUpdateInterruptInhibitingByNmi() that takes VMX
     2147 * non-root mode into account when doing the updating.
     2148 *
      2149 * @param   pCtx        Current guest CPU context.
     2150 * @param   fInhibited  The new state.
     2151 */
     2152DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmiEx(PCPUMCTX pCtx, bool fInhibited)
     2153{
     2154    /*
     2155     * Set the state of guest-NMI blocking in any of the following cases:
     2156     *   - We're not executing a nested-guest.
     2157     *   - We're executing an SVM nested-guest[1].
     2158     *   - We're executing a VMX nested-guest without virtual-NMIs enabled.
     2159     *
     2160     * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
     2161     *        SVM hypervisors must track NMI blocking themselves by intercepting
     2162     *        the IRET instruction after injection of an NMI.
     2163     */
     2164    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
     2165        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
     2166        CPUMUpdateInterruptInhibitingByNmi(pCtx, fInhibited);
     2167    /*
     2168     * Set the state of virtual-NMI blocking, if we are executing a
     2169     * VMX nested-guest with virtual-NMIs enabled.
     2170     */
     2171    else
     2172        CPUMSetGuestVmxVirtNmiBlocking(pCtx, fInhibited);
     2173}
     2174
    19092175
    19102176/**
     
    27673033    CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED,
    27683034    CPUMINTERRUPTIBILITY_INT_DISABLED,
    2769     CPUMINTERRUPTIBILITY_INT_INHIBITED,
     3035    CPUMINTERRUPTIBILITY_INT_INHIBITED, /**< @todo rename as it inhibits NMIs too. */
    27703036    CPUMINTERRUPTIBILITY_NMI_INHIBIT,
    27713037    CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT,
     
    27753041
    27763042VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu);
    2777 VMM_INT_DECL(bool)                 CPUMIsGuestNmiBlocking(PCVMCPU pVCpu);
    2778 VMM_INT_DECL(void)                 CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock);
    27793043
    27803044/** @name Typical scalable bus frequency values.
  • trunk/include/VBox/vmm/cpum.mac

    r97150 r97178  
    150150    .gs.u32Limit        resd    1
    151151    .gs.Attr            resd    1
     152
     153    .ldtr.Sel           resw    1
     154    .ldtr.PaddingSel    resw    1
     155    .ldtr.ValidSel      resw    1
     156    .ldtr.fFlags        resw    1
     157    .ldtr.u64Base       resq    1
     158    .ldtr.u32Limit      resd    1
     159    .ldtr.Attr          resd    1
     160    .tr.Sel             resw    1
     161    .tr.PaddingSel      resw    1
     162    .tr.ValidSel        resw    1
     163    .tr.fFlags          resw    1
     164    .tr.u64Base         resq    1
     165    .tr.u32Limit        resd    1
     166    .tr.Attr            resd    1
     167
    152168    .eip                resq    1
    153169    .eflags             resq    1
     
    217233    .gs.u32Limit        resd    1
    218234    .gs.Attr            resd    1
    219     .eip                resq    1
    220     .eflags             resq    1
    221     .cr0                resq    1
    222     .cr2                resq    1
    223     .cr3                resq    1
    224     .cr4                resq    1
    225     .dr                 resq    8
    226     .gdtrPadding        resw    3
    227     .gdtr               resw    0
    228     .gdtr.cbGdt         resw    1
    229     .gdtr.pGdt          resq    1
    230     .idtrPadding        resw    3
    231     .idtr               resw    0
    232     .idtr.cbIdt         resw    1
    233     .idtr.pIdt          resq    1
    234235    .ldtr.Sel           resw    1
    235236    .ldtr.PaddingSel    resw    1
     
    246247    .tr.u32Limit        resd    1
    247248    .tr.Attr            resd    1
     249    .eip                resq    1
     250    .eflags             resq    1
     251    .fInhibit           resb    1
     252    alignb 8
     253    .uRipInhibitInt     resq    1
     254    .cr0                resq    1
     255    .cr2                resq    1
     256    .cr3                resq    1
     257    .cr4                resq    1
     258    .dr                 resq    8
     259    .gdtrPadding        resw    3
     260    .gdtr               resw    0
     261    .gdtr.cbGdt         resw    1
     262    .gdtr.pGdt          resq    1
     263    .idtrPadding        resw    3
     264    .idtr               resw    0
     265    .idtr.cbIdt         resw    1
     266    .idtr.pIdt          resq    1
    248267    .SysEnter.cs        resb    8
    249268    .SysEnter.eip       resb    8
     
    256275    .msrSFMASK          resb    8
    257276    .msrKERNELGSBASE    resb    8
    258     .uMsrPadding0       resb    8
    259277
    260278    alignb 8
     
    323341    .hwvirt.enmHwvirt                   resd        1
    324342    .hwvirt.fGif                        resb        1
    325     alignb 8
    326     .hwvirt.fLocalForcedActions         resd        1
     343    alignb 4
     344    .hwvirt.fSavedInhibit               resd        1
    327345    alignb 64
    328346endstruc
  • trunk/include/VBox/vmm/cpumctx.h

    r97150 r97178  
    266266    CPUMSELREG          gs;
    267267    /** @} */
     268
     269    CPUMSELREG          ldtr;
     270    CPUMSELREG          tr;
    268271
    269272    /** The program counter. */
     
    399402    } CPUM_UNION_NM(s);
    400403
      404    /** The local descriptor table register (LDTR).
     405     * Only the guest context uses all the members. */
     406    CPUMSELREG          ldtr;
     407    /** The task register.
     408     * Only the guest context uses all the members. */
     409    CPUMSELREG          tr;
     410
    401411    /** The program counter. */
    402412    union
     
    416426    /** @} */ /*(CPUMCTXCORE)*/
    417427
     428    /** Interrupt & exception inhibiting (CPUMCTX_INHIBIT_XXX). */
     429    uint8_t             fInhibit;
     430    uint8_t             abPadding[7];
     431    /** The RIP value fInhibit is/was valid for. */
     432    uint64_t            uRipInhibitInt;
    418433
    419434    /** @name Control registers.
     
    422437    uint64_t            cr2;
    423438    uint64_t            cr3;
    424     /** @todo the 4 PAE PDPE registers. See PGMCPU::aGstPaePdpeRegs. */
    425439    uint64_t            cr4;
     440    /** @todo Add the 4 PAE PDPE registers. See PGMCPU::aGstPaePdpeRegs. */
    426441    /** @} */
    427442
     
    445460    /** Interrupt Descriptor Table register. */
    446461    VBOXIDTR            idtr;
    447 
    448     /** The task register.
    449      * Only the guest context uses all the members. */
    450     CPUMSELREG          ldtr;
    451     /** The task register.
    452      * Only the guest context uses all the members. */
    453     CPUMSELREG          tr;
    454462
    455463    /** The sysenter msr registers.
     
    466474    uint64_t            msrSFMASK;          /**< syscall flag mask. */
    467475    uint64_t            msrKERNELGSBASE;    /**< swapgs exchange value. */
    468     uint64_t            uMsrPadding0;       /**< no longer used (used to hold a copy of APIC base MSR). */
    469476    /** @} */
    470477
    471     /** 0x228 - Externalized state tracker, CPUMCTX_EXTRN_XXX. */
     478    /** 0x230 - Externalized state tracker, CPUMCTX_EXTRN_XXX.
     479     * @todo Move up after uRipInhibitInt after fInhibit moves into RFLAGS.
     480     *       That will put this in the same cacheline as RIP, RFLAGS and CR0
     481     *       which are typically always imported and exported again during an
     482     *       VM exit. */
    472483    uint64_t            fExtrn;
    473484
    474     uint64_t            au64Unused[2];
     485    uint64_t            u64Unused;
    475486
    476487    /** 0x240 - PAE PDPTEs. */
     
    634645        /** 0x11134 - Global interrupt flag - AMD only (always true on Intel). */
    635646        bool                    fGif;
    636         bool                    afPadding1[3];
    637         /** 0x11138 - A subset of guest force flags that are saved while running the
    638          *  nested-guest. */
    639 #ifdef VMCPU_WITH_64_BIT_FFS
    640         uint64_t                fLocalForcedActions;
    641 #else
    642         uint32_t                fLocalForcedActions;
    643         uint32_t                fPadding;
    644 #endif
    645 #if 0
    646         /** 0x11140 - Pad to 64 byte boundary. */
    647         uint8_t                 abPadding0[8+16+32];
    648 #endif
     647        /** 0x11135 - Padding. */
     648        bool                    afPadding0[3];
     649        /** 0x11138 - A subset of guest inhibit flags (CPUMCTX_INHIBIT_XXX) that are
     650         *  saved while running the nested-guest. */
     651        uint32_t                fSavedInhibit;
     652        /** 0x1113c - Pad to 64 byte boundary. */
     653        uint8_t                 abPadding1[4];
    649654    } hwvirt;
    650655} CPUMCTX;
     
    656661AssertCompileSizeAlignment(CPUMCTX, 16);
    657662AssertCompileSizeAlignment(CPUMCTX, 8);
    658 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax,   0);
    659 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rcx,   8);
    660 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdx,  16);
    661 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbx,  24);
    662 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsp,  32);
    663 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbp,  40);
    664 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsi,  48);
    665 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdi,  56);
    666 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.)  r8,  64);
    667 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.)  r9,  72);
    668 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r10,  80);
    669 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r11,  88);
    670 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r12,  96);
    671 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r13, 104);
    672 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r14, 112);
    673 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r15, 120);
    674 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, 128);
    675 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) cs, 152);
    676 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ss, 176);
    677 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ds, 200);
    678 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) fs, 224);
    679 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) gs, 248);
    680 AssertCompileMemberOffset(CPUMCTX,                        rip, 272);
    681 AssertCompileMemberOffset(CPUMCTX,                     rflags, 280);
    682 AssertCompileMemberOffset(CPUMCTX,                        cr0, 288);
    683 AssertCompileMemberOffset(CPUMCTX,                        cr2, 296);
    684 AssertCompileMemberOffset(CPUMCTX,                        cr3, 304);
    685 AssertCompileMemberOffset(CPUMCTX,                        cr4, 312);
    686 AssertCompileMemberOffset(CPUMCTX,                         dr, 320);
    687 AssertCompileMemberOffset(CPUMCTX,                       gdtr, 384+6);
    688 AssertCompileMemberOffset(CPUMCTX,                       idtr, 400+6);
    689 AssertCompileMemberOffset(CPUMCTX,                       ldtr, 416);
    690 AssertCompileMemberOffset(CPUMCTX,                         tr, 440);
    691 AssertCompileMemberOffset(CPUMCTX,                   SysEnter, 464);
    692 AssertCompileMemberOffset(CPUMCTX,                    msrEFER, 488);
    693 AssertCompileMemberOffset(CPUMCTX,                    msrSTAR, 496);
    694 AssertCompileMemberOffset(CPUMCTX,                     msrPAT, 504);
    695 AssertCompileMemberOffset(CPUMCTX,                   msrLSTAR, 512);
    696 AssertCompileMemberOffset(CPUMCTX,                   msrCSTAR, 520);
    697 AssertCompileMemberOffset(CPUMCTX,                  msrSFMASK, 528);
    698 AssertCompileMemberOffset(CPUMCTX,            msrKERNELGSBASE, 536);
    699 AssertCompileMemberOffset(CPUMCTX,                  aPaePdpes, 0x240);
    700 AssertCompileMemberOffset(CPUMCTX,                       aXcr, 0x260);
    701 AssertCompileMemberOffset(CPUMCTX,                fXStateMask, 0x270);
    702 AssertCompileMemberOffset(CPUMCTX,                fUsedFpuGuest, 0x278);
    703 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(u.) XState, 0x300);
    704 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(u.) abXState, 0x300);
    705 AssertCompileMemberAlignment(CPUMCTX, CPUM_UNION_NM(u.) XState, 0x100);
     663AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax, 0x0000);
     664AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rcx, 0x0008);
     665AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdx, 0x0010);
     666AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbx, 0x0018);
     667AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsp, 0x0020);
     668AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbp, 0x0028);
     669AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsi, 0x0030);
     670AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdi, 0x0038);
     671AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.)  r8, 0x0040);
     672AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.)  r9, 0x0048);
     673AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r10, 0x0050);
     674AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r11, 0x0058);
     675AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r12, 0x0060);
     676AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r13, 0x0068);
     677AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r14, 0x0070);
     678AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r15, 0x0078);
     679AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, 0x0080);
     680AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) cs, 0x0098);
     681AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ss, 0x00b0);
     682AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ds, 0x00c8);
     683AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) fs, 0x00e0);
     684AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) gs, 0x00f8);
     685AssertCompileMemberOffset(CPUMCTX,                                      ldtr, 0x0110);
     686AssertCompileMemberOffset(CPUMCTX,                                        tr, 0x0128);
     687AssertCompileMemberOffset(CPUMCTX,                                       rip, 0x0140);
     688AssertCompileMemberOffset(CPUMCTX,                                    rflags, 0x0148);
     689AssertCompileMemberOffset(CPUMCTX,                                  fInhibit, 0x0150);
     690AssertCompileMemberOffset(CPUMCTX,                            uRipInhibitInt, 0x0158);
     691AssertCompileMemberOffset(CPUMCTX,                                       cr0, 0x0160);
     692AssertCompileMemberOffset(CPUMCTX,                                       cr2, 0x0168);
     693AssertCompileMemberOffset(CPUMCTX,                                       cr3, 0x0170);
     694AssertCompileMemberOffset(CPUMCTX,                                       cr4, 0x0178);
     695AssertCompileMemberOffset(CPUMCTX,                                        dr, 0x0180);
     696AssertCompileMemberOffset(CPUMCTX,                                      gdtr, 0x01c0+6);
     697AssertCompileMemberOffset(CPUMCTX,                                      idtr, 0x01d0+6);
     698AssertCompileMemberOffset(CPUMCTX,                                  SysEnter, 0x01e0);
     699AssertCompileMemberOffset(CPUMCTX,                                   msrEFER, 0x01f8);
     700AssertCompileMemberOffset(CPUMCTX,                                   msrSTAR, 0x0200);
     701AssertCompileMemberOffset(CPUMCTX,                                    msrPAT, 0x0208);
     702AssertCompileMemberOffset(CPUMCTX,                                  msrLSTAR, 0x0210);
     703AssertCompileMemberOffset(CPUMCTX,                                  msrCSTAR, 0x0218);
     704AssertCompileMemberOffset(CPUMCTX,                                 msrSFMASK, 0x0220);
     705AssertCompileMemberOffset(CPUMCTX,                           msrKERNELGSBASE, 0x0228);
     706AssertCompileMemberOffset(CPUMCTX,                                 aPaePdpes, 0x0240);
     707AssertCompileMemberOffset(CPUMCTX,                                      aXcr, 0x0260);
     708AssertCompileMemberOffset(CPUMCTX,                               fXStateMask, 0x0270);
     709AssertCompileMemberOffset(CPUMCTX,                             fUsedFpuGuest, 0x0278);
     710AssertCompileMemberOffset(CPUMCTX,                  CPUM_UNION_NM(u.) XState, 0x0300);
     711AssertCompileMemberOffset(CPUMCTX,                CPUM_UNION_NM(u.) abXState, 0x0300);
     712AssertCompileMemberAlignment(CPUMCTX,               CPUM_UNION_NM(u.) XState, 0x0100);
    706713/* Only do spot checks for hwvirt */
    707 AssertCompileMemberAlignment(CPUMCTX,                   hwvirt, 0x1000);
     714AssertCompileMemberAlignment(CPUMCTX,                                 hwvirt, 0x1000);
    708715AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.Vmcb,                  X86_PAGE_SIZE);
    709716AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.abMsrBitmap,           X86_PAGE_SIZE);
     
    725732AssertCompileMemberOffset(CPUMCTX,    hwvirt.enmHwvirt,                                   0x11130);
    726733AssertCompileMemberOffset(CPUMCTX,    hwvirt.fGif,                                        0x11134);
    727 AssertCompileMemberOffset(CPUMCTX,    hwvirt.fLocalForcedActions,                         0x11138);
     734AssertCompileMemberOffset(CPUMCTX,    hwvirt.fSavedInhibit,                               0x11138);
    728735AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
    729736AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.)  r0);
     
    10211028
    10221029
     1030/** @name CPUMCTX_INHIBIT_XXX - Interrupt inhibiting flags.
     1031 * @{ */
     1032/** Interrupt shadow following MOV SS or POP SS.
     1033 *
     1034 * When this in effect, both maskable and non-maskable interrupts are blocked
     1035 * from delivery for one instruction.  Same for certain debug exceptions too,
     1036 * unlike the STI variant.
     1037 *
     1038 * It is implementation specific whether a sequence of two or more of these
     1039 * instructions will have any effect on the instruction following the last one
     1040 * of them. */
     1041#define CPUMCTX_INHIBIT_SHADOW_SS   UINT8_C(0x01)
     1042/** Interrupt shadow following STI.
     1043 * Same as CPUMCTX_INHIBIT_SHADOW_SS but without blocking any debug exceptions. */
     1044#define CPUMCTX_INHIBIT_SHADOW_STI  UINT8_C(0x02)
     1045/** Mask combining STI and SS shadowing. */
     1046#define CPUMCTX_INHIBIT_SHADOW      (CPUMCTX_INHIBIT_SHADOW_SS | CPUMCTX_INHIBIT_SHADOW_STI)
     1047
     1048/** Interrupts blocked by NMI delivery.  This condition is cleared by IRET.
     1049 *
     1050 * Section "6.7 NONMASKABLE INTERRUPT (NMI)" in Intel SDM Vol 3A states that
     1051 * "The processor also invokes certain hardware conditions to ensure that no
     1052 * other interrupts, including NMI interrupts, are received until the NMI
     1053 * handler has completed executing."  This flag indicates that these
     1054 * conditions are currently active.  */
     1055#define CPUMCTX_INHIBIT_NMI         UINT8_C(0x04)
     1056/** @} */
     1057
     1058
    10231059/**
    10241060 * Additional guest MSRs (i.e. not part of the CPU context structure).
  • trunk/include/VBox/vmm/em.h

    r96999 r97178  
    138138/** @}  */
    139139
    140 VMMDECL(void)                   EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC);
    141 VMMDECL(RTGCUINTPTR)            EMGetInhibitInterruptsPC(PVMCPU pVCpu);
    142 VMMDECL(bool)                   EMIsInhibitInterruptsActive(PVMCPU pVCpu);
    143140VMMDECL(void)                   EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled);
    144141VMMDECL(bool)                   EMAreHypercallInstructionsEnabled(PVMCPU pVCpu);
  • trunk/include/VBox/vmm/vm.h

    r96811 r97178  
    490490/** The bit number for VMCPU_FF_DBGF. */
    491491#define VMCPU_FF_DBGF_BIT                   10
    492 /** This action forces the VM to service any pending updates to CR3 (used only
    493  *  by HM). */
    494492/** Hardware virtualized nested-guest interrupt pending. */
    495493#define VMCPU_FF_INTERRUPT_NESTED_GUEST     RT_BIT_64(VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT)
     
    523521/* 22 used to be VMCPU_FF_SELM_SYNC_GDT (raw-mode only). */
    524522/* 23 used to be VMCPU_FF_SELM_SYNC_LDT (raw-mode only). */
    525 /** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
    526 #define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_64(VMCPU_FF_INHIBIT_INTERRUPTS_BIT)
    527 #define VMCPU_FF_INHIBIT_INTERRUPTS_BIT     24
    528 /** Block injection of non-maskable interrupts to the guest. */
    529 #define VMCPU_FF_BLOCK_NMIS                 RT_BIT_64(VMCPU_FF_BLOCK_NMIS_BIT)
    530 #define VMCPU_FF_BLOCK_NMIS_BIT             25
     523/* 24 used to be VMCPU_FF_INHIBIT_INTERRUPTS, which moved to CPUMCTX::fInhibit in v7.0.4. */
     524/* 25 used to be VMCPU_FF_BLOCK_NMIS, which moved to CPUMCTX::fInhibit in v7.0.4. */
    531525/** Force return to Ring-3. */
    532526#define VMCPU_FF_TO_R3                      RT_BIT_64(VMCPU_FF_TO_R3_BIT)
     
    576570/** High priority VMCPU pre-execution actions. */
    577571#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK         (  VMCPU_FF_TIMER        | VMCPU_FF_INTERRUPT_APIC     | VMCPU_FF_INTERRUPT_PIC \
    578                                                  | VMCPU_FF_UPDATE_APIC  | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF \
     572                                                 | VMCPU_FF_UPDATE_APIC  | VMCPU_FF_DBGF \
    579573                                                 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
    580574                                                 | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF  | VMCPU_FF_VMX_APIC_WRITE \
     
    584578#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK        (  VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY )
    585579/** High priority VMCPU pre raw-mode execution mask. */
    586 #define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
    587                                                  | VMCPU_FF_INHIBIT_INTERRUPTS )
     580#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL )
    588581
    589582/** High priority post-execution actions. */
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r97096 r97178  
    19431943         * or raw-mode). Hence we use the function below which handles the details.
    19441944         */
    1945         if (    CPUMIsGuestPhysIntrEnabled(pVCpu)
    1946             && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS | VMCPU_FF_INHIBIT_INTERRUPTS))
     1945        if (   pVCpu->cpum.s.Guest.fInhibit == 0
     1946            || (   !(pVCpu->cpum.s.Guest.fInhibit & CPUMCTX_INHIBIT_NMI)
     1947                && pVCpu->cpum.s.Guest.uRipInhibitInt != pVCpu->cpum.s.Guest.rip))
    19471948        {
    1948             if (   !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
    1949                 ||  CPUMIsGuestVirtIntrEnabled(pVCpu))
    1950                 return CPUMINTERRUPTIBILITY_UNRESTRAINED;
    1951 
    1952             /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
    1953             return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
     1949            /** @todo OPT: this next call should be inlined! */
     1950            if (CPUMIsGuestPhysIntrEnabled(pVCpu))
     1951            {
     1952                /** @todo OPT: type this out as it repeats tests. */
     1953                if (   !CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.s.Guest)
     1954                    || CPUMIsGuestVirtIntrEnabled(pVCpu))
     1955                    return CPUMINTERRUPTIBILITY_UNRESTRAINED;
     1956
     1957                /* Physical interrupts are enabled, but nested-guest virtual interrupts are disabled. */
     1958                return CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED;
     1959            }
     1960            return CPUMINTERRUPTIBILITY_INT_DISABLED;
    19541961        }
    19551962
     
    19621969         * See Intel spec. 25.4.1 "Event Blocking".
    19631970         */
    1964         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    1965             return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
    1966 
    1967         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     1971        /** @todo r=bird: The above comment mixes up VMX root-mode and non-root. Section
     1972         *        25.4.1 is only applicable to VMX non-root mode.  In root mode /
     1973         *        non-VMX mode, I have not seen any evidence in the intel manuals that
     1974         *        NMIs are not blocked when in an interrupt shadow. Section "6.7
     1975         *        NONMASKABLE INTERRUPT (NMI)" in SDM 3A seems pretty clear to me.
     1976         */
     1977        if (!(pVCpu->cpum.s.Guest.fInhibit & CPUMCTX_INHIBIT_NMI))
    19681978            return CPUMINTERRUPTIBILITY_INT_INHIBITED;
    1969 
    1970         return CPUMINTERRUPTIBILITY_INT_DISABLED;
     1979        return CPUMINTERRUPTIBILITY_NMI_INHIBIT;
    19711980    }
    19721981    return CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT;
     
    20022011    }
    20032012#endif
    2004 }
    2005 
    2006 
    2007 /**
    2008  * Gets whether the guest (or nested-guest) is currently blocking delivery of NMIs.
    2009  *
    2010  * @returns @c true if NMIs are blocked, @c false otherwise.
    2011  * @param   pVCpu       The cross context virtual CPU structure.
    2012  */
    2013 VMM_INT_DECL(bool) CPUMIsGuestNmiBlocking(PCVMCPU pVCpu)
    2014 {
    2015     /*
    2016      * Return the state of guest-NMI blocking in any of the following cases:
    2017      *   - We're not executing a nested-guest.
    2018      *   - We're executing an SVM nested-guest[1].
    2019      *   - We're executing a VMX nested-guest without virtual-NMIs enabled.
    2020      *
    2021      * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
    2022      *        SVM hypervisors must track NMI blocking themselves by intercepting
    2023      *        the IRET instruction after injection of an NMI.
    2024      */
    2025     if (   !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest)
    2026         || !CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.s.Guest, VMX_PIN_CTLS_VIRT_NMI))
    2027         return VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    2028 
    2029     /*
    2030      * Return the state of virtual-NMI blocking, if we are executing a
    2031      * VMX nested-guest with virtual-NMIs enabled.
    2032      */
    2033     return CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.s.Guest);
    2034 }
    2035 
    2036 
    2037 /**
    2038  * Sets blocking delivery of NMIs to the guest.
    2039  *
    2040  * @param   pVCpu   The cross context virtual CPU structure.
    2041  * @param   fBlock  Whether NMIs are blocked or not.
    2042  */
    2043 VMM_INT_DECL(void) CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock)
    2044 {
    2045     /*
    2046      * Set the state of guest-NMI blocking in any of the following cases:
    2047      *   - We're not executing a nested-guest.
    2048      *   - We're executing an SVM nested-guest[1].
    2049      *   - We're executing a VMX nested-guest without virtual-NMIs enabled.
    2050      *
    2051      * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
    2052      *        SVM hypervisors must track NMI blocking themselves by intercepting
    2053      *        the IRET instruction after injection of an NMI.
    2054      */
    2055     if (   !CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.s.Guest)
    2056         || !CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.s.Guest, VMX_PIN_CTLS_VIRT_NMI))
    2057     {
    2058         if (fBlock == VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    2059         { /* probably likely */ }
    2060         else if (fBlock)
    2061             VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    2062         else
    2063             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
    2064         return;
    2065     }
    2066 
    2067     /*
    2068      * Set the state of virtual-NMI blocking, if we are executing a
    2069      * VMX nested-guest with virtual-NMIs enabled.
    2070      */
    2071     return CPUMSetGuestVmxVirtNmiBlocking(&pVCpu->cpum.s.Guest, fBlock);
    20722013}
    20732014
  • trunk/src/VBox/VMM/VMMAll/EMAll.cpp

    r96407 r97178  
    7777    Assert(pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI && enmNewState == EMSTATE_HALTED);
    7878    pVCpu->em.s.enmState = enmNewState;
    79 }
    80 
    81 
    82 /**
    83  * Sets the PC for which interrupts should be inhibited.
    84  *
    85  * @param   pVCpu       The cross context virtual CPU structure.
    86  * @param   PC          The PC.
    87  */
    88 VMMDECL(void) EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC)
    89 {
    90     pVCpu->em.s.GCPtrInhibitInterrupts = PC;
    91     VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    92 }
    93 
    94 
    95 /**
    96  * Gets the PC for which interrupts should be inhibited.
    97  *
    98  * There are a few instructions which inhibits or delays interrupts
    99  * for the instruction following them. These instructions are:
    100  *      - STI
    101  *      - MOV SS, r/m16
    102  *      - POP SS
    103  *
    104  * @returns The PC for which interrupts should be inhibited.
    105  * @param   pVCpu       The cross context virtual CPU structure.
    106  *
    107  */
    108 VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu)
    109 {
    110     return pVCpu->em.s.GCPtrInhibitInterrupts;
    111 }
    112 
    113 
    114 /**
    115  * Checks if interrupt inhibiting is enabled for the current instruction.
    116  *
    117  * @returns true if interrupts are inhibited, false if not.
    118  * @param   pVCpu       The cross context virtual CPU structure.
    119  */
    120 VMMDECL(bool) EMIsInhibitInterruptsActive(PVMCPU pVCpu)
    121 {
    122     if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    123         return false;
    124     if (pVCpu->em.s.GCPtrInhibitInterrupts == CPUMGetGuestRIP(pVCpu))
    125         return true;
    126     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    127     return false;
    12879}
    12980
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r97156 r97178  
    36603660     * Normally, NMI blocking is in effect whenever we inject an NMI.
    36613661     */
    3662     bool fBlockNmi;
    3663     if (   u8Vector == X86_XCPT_NMI
    3664         && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
    3665         fBlockNmi = true;
    3666     else
    3667         fBlockNmi = false;
     3662    bool fBlockNmi = u8Vector == X86_XCPT_NMI
     3663                  && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
    36683664
    36693665#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     
    37103706     * Set NMI blocking if necessary.
    37113707     */
    3712     if (   fBlockNmi
    3713         && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    3714         VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
     3708    if (fBlockNmi)
     3709        CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
    37153710
    37163711    /*
     
    96049599         */
    96059600        else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
    9606                  && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     9601                 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    96079602                 && !TRPMHasTrap(pVCpu))
    96089603        {
     
    96269621    {
    96279622        rcStrict = iemVmxApicWriteEmulation(pVCpu);
    9628         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
     9623        Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    96299624        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
    96309625    }
     
    96339628    {
    96349629        rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
    9635         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
     9630        Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    96369631        Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
    96379632    }
     
    97149709    if (   fExecuteInhibit
    97159710        && rcStrict == VINF_SUCCESS
    9716         && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    9717         && EMIsInhibitInterruptsActive(pVCpu))
     9711        && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    97189712    {
    97199713        rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
     
    97599753        else if (pVCpu->iem.s.cActiveMappings > 0)
    97609754            iemMemRollback(pVCpu);
    9761         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
     9755        /** @todo drop this after we bake this change into RIP advancing. */
     9756        CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
    97629757    }
    97639758
     
    1001710012    if (   fIntrEnabled
    1001810013        && TRPMHasTrap(pVCpu)
    10019         && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
     10014        && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    1002010015    {
    1002110016        uint8_t     u8TrapNo;
     
    1010210097                                                      | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    1010310098                                                      | VMCPU_FF_TLB_FLUSH
    10104                                                       | VMCPU_FF_INHIBIT_INTERRUPTS
    10105                                                       | VMCPU_FF_BLOCK_NMIS
    1010610099                                                      | VMCPU_FF_UNHALT );
    1010710100
     
    1028410277                                                      | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    1028510278                                                      | VMCPU_FF_TLB_FLUSH
    10286                                                       | VMCPU_FF_INHIBIT_INTERRUPTS
    10287                                                       | VMCPU_FF_BLOCK_NMIS
    1028810279                                                      | VMCPU_FF_UNHALT );
    1028910280                        if (RT_LIKELY(   (   (   !fCpu
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp

    r97126 r97178  
    38643864IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
    38653865{
    3866     bool fBlockingNmi = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
     3866    bool fBlockingNmi = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
    38673867
    38683868#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     
    39133913     */
    39143914    if (fBlockingNmi)
    3915         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     3915        CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
    39163916
    39173917    /*
     
    47034703{
    47044704    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
    4705     if (rcStrict == VINF_SUCCESS)
    4706     {
    4707         if (iSegReg == X86_SREG_SS)
    4708             EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
    4709     }
     4705    if (iSegReg == X86_SREG_SS && rcStrict == VINF_SUCCESS)
     4706        CPUMSetInInterruptShadow(&pVCpu->cpum.GstCtx);
    47104707    return rcStrict;
    47114708}
     
    47654762        pVCpu->cpum.GstCtx.rsp = TmpRsp.u;
    47664763        if (iSegReg == X86_SREG_SS)
    4767             EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
     4764            CPUMSetInInterruptShadow(&pVCpu->cpum.GstCtx);
    47684765    }
    47694766    return rcStrict;
     
    75157512    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    75167513    if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF))
    7517         EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
     7514        CPUMSetInInterruptShadow(&pVCpu->cpum.GstCtx);
    75187515    Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
    75197516    return VINF_SUCCESS;
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp

    r96821 r97178  
    245245            pVmcbMemCtrl->IntCtrl.n.u8VTPR = pVmcbCtrl->IntCtrl.n.u8VTPR;           /* V_TPR. */
    246246
    247             if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)              /* Interrupt shadow. */
    248                 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
     247            if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))            /* Interrupt shadow. */
     248                pVmcbMemCtrl->IntShadow.n.u1IntShadow = 0;
     249            else
    249250            {
    250251                pVmcbMemCtrl->IntShadow.n.u1IntShadow = 1;
    251                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    252252                LogFlow(("iemSvmVmexit: Interrupt shadow till %#RX64\n", pVCpu->cpum.GstCtx.rip));
     253                CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
    253254            }
    254             else
    255                 pVmcbMemCtrl->IntShadow.n.u1IntShadow = 0;
    256255
    257256            /*
     
    323322
    324323        /*
    325          * Restore the subset of force-flags that were preserved.
    326          */
    327         if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
    328         {
    329             VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
    330             pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
    331         }
     324         * Restore the subset of the inhibit flags that were preserved.
     325         */
     326        pVCpu->cpum.GstCtx.fInhibit |= pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit;
    332327
    333328        if (rcStrict == VINF_SUCCESS)
     
    734729         * preserve VMCPU_FF_INHIBIT_INTERRUPTS.
    735730         */
    736         pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
    737         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     731        pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit = pVCpu->cpum.GstCtx.fInhibit & CPUMCTX_INHIBIT_NMI;
     732        pVCpu->cpum.GstCtx.fInhibit            &=                              ~CPUMCTX_INHIBIT_NMI;
    738733
    739734        /*
     
    754749            LogFlow(("iemSvmVmrun: setting interrupt shadow. inhibit PC=%#RX64\n", pVmcbNstGst->u64RIP));
    755750            /** @todo will this cause trouble if the nested-guest is 64-bit but the guest is 32-bit? */
    756             EMSetInhibitInterruptsPC(pVCpu, pVmcbNstGst->u64RIP);
     751            CPUMSetInInterruptShadowEx(&pVCpu->cpum.GstCtx, pVmcbNstGst->u64RIP);
    757752        }
    758753
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp

    r97157 r97178  
    11881188{
    11891189    /* We shouldn't be called multiple times during VM-entry. */
    1190     Assert(pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions == 0);
     1190    Assert(pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit == 0);
    11911191
    11921192    /* MTF should not be set outside VMX non-root mode. */
     
    12211221     * the nested-guest.
    12221222     */
    1223     pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;
     1223    pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit = pVCpu->cpum.GstCtx.fInhibit & CPUMCTX_INHIBIT_NMI;
    12241224}
    12251225
     
    12321232static void iemVmxVmexitRestoreNmiBlockingFF(PVMCPUCC pVCpu) RT_NOEXCEPT
    12331233{
    1234     if (pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions)
    1235     {
    1236         VMCPU_FF_SET_MASK(pVCpu, pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions);
    1237         pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions = 0;
    1238     }
     1234    /** @todo r=bird: why aren't we clearing the nested guest flags first here?
     1235     *        If there is some other code doing that already, it would be great
     1236     *        to point to it here... */
     1237    pVCpu->cpum.GstCtx.fInhibit |= pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit;
     1238    pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit = 0;
    12391239}
    12401240
     
    14661466    else
    14671467    {
    1468         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     1468        if (CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    14691469            pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
    14701470    }
    14711471
    14721472    /* Blocking-by-STI. */
    1473     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    1474         && pVCpu->cpum.GstCtx.rip == EMGetInhibitInterruptsPC(pVCpu))
     1473    if (!(pVCpu->cpum.GstCtx.fInhibit & CPUMCTX_INHIBIT_SHADOW))
     1474    { /* probable */}
     1475    else
    14751476    {
    14761477        /** @todo NSTVMX: We can't distinguish between blocking-by-MovSS and blocking-by-STI
    14771478         *        currently. */
    1478         pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
     1479        if (pVCpu->cpum.GstCtx.rip == pVCpu->cpum.GstCtx.uRipInhibitInt)
     1480            pVmcs->u32GuestIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_STI; /** @todo r=bird: Why the STI one? MOVSS seems to block more and the one to use. */
    14791481
    14801482        /* Clear inhibition unconditionally since we've ensured it isn't set prior to executing VMLAUNCH/VMRESUME. */
    1481         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     1483        CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
    14821484    }
    14831485    /* Nothing to do for SMI/enclave. We don't support enclaves or SMM yet. */
     
    25642566
    25652567        /* Clear any saved NMI-blocking state so we don't assert on next VM-entry (if it was in effect on the previous one). */
    2566         pVCpu->cpum.GstCtx.hwvirt.fLocalForcedActions &= ~VMCPU_FF_BLOCK_NMIS;
     2568        pVCpu->cpum.GstCtx.hwvirt.fSavedInhibit &= ~CPUMCTX_INHIBIT_NMI;
    25672569    }
    25682570    else
     
    69966998    if (   !fEntryVectoring
    69976999        && (pVmcs->u32GuestIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS)))
    6998         EMSetInhibitInterruptsPC(pVCpu, pVmcs->u64GuestRip.u);
     7000        CPUMSetInInterruptShadowEx(&pVCpu->cpum.GstCtx, pVmcs->u64GuestRip.u);
    69997001    else
    7000         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
     7002        Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    70017003
    70027004    /* NMI blocking. */
     
    70087010        {
    70097011            pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking = false;
    7010             if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    7011                 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
     7012            CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
    70127013        }
    70137014    }
     
    77297730    /** @todo Distinguish block-by-MovSS from block-by-STI. Currently we
    77307731     *        use block-by-STI here which is not quite correct. */
    7731     if (   !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    7732         ||  pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
     7732    if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
    77337733    { /* likely */ }
    77347734    else
  • trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h

    r96407 r97178  
    368368    {
    369369        ADD_REG64(WHvRegisterInterruptState, 0);
    370         if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    371             && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
     370        if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    372371            aValues[iReg - 1].InterruptState.InterruptShadow = 1;
    373         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    374             aValues[iReg - 1].InterruptState.NmiMasked = 1;
     372        aValues[iReg - 1].InterruptState.NmiMasked = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
    375373    }
    376374    else if (fWhat & CPUMCTX_EXTRN_INHIBIT_INT)
    377375    {
    378376        if (   pVCpu->nem.s.fLastInterruptShadow
    379             || (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    380                 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip))
     377            || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    381378        {
    382379            ADD_REG64(WHvRegisterInterruptState, 0);
    383             if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    384                 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip)
     380            if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    385381                aValues[iReg - 1].InterruptState.InterruptShadow = 1;
    386382            /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
     
    991987
    992988        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_INT))
    993         {
    994             pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
    995             if (aValues[iReg].InterruptState.InterruptShadow)
    996                 EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
    997             else
    998                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    999         }
     989            pVCpu->nem.s.fLastInterruptShadow = CPUMUpdateInterruptShadowEx(&pVCpu->cpum.GstCtx,
     990                                                                            aValues[iReg].InterruptState.InterruptShadow,
     991                                                                            aValues[iReg + 1].Reg64);
    1000992
    1001993        if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_NMI))
    1002         {
    1003             if (aValues[iReg].InterruptState.NmiMasked)
    1004                 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    1005             else
    1006                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
    1007         }
     994            CPUMUpdateInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx, aValues[iReg].InterruptState.NmiMasked);
    1008995
    1009996        fWhat |= CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI;
     
    12791266    pVCpu->cpum.GstCtx.rip += pExitCtx->InstructionLength;
    12801267    pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
    1281 
    1282     /* Update interrupt inhibition. */
    1283     if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    1284     { /* likely */ }
    1285     else if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
    1286         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     1268    CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
    12871269}
    12881270
     
    14711453    Assert(   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT))
    14721454           ==                              (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_INHIBIT_INT));
     1455
    14731456    NEM_WIN_COPY_BACK_SEG(pVCpu->cpum.GstCtx.cs, pExitCtx->Cs);
    14741457    pVCpu->cpum.GstCtx.rip      = pExitCtx->Rip;
    14751458    pVCpu->cpum.GstCtx.rflags.u = pExitCtx->Rflags;
    1476 
    1477     pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
    1478     if (!pExitCtx->ExecutionState.InterruptShadow)
    1479     {
    1480         if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    1481         { /* likely */ }
    1482         else
    1483             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    1484     }
    1485     else
    1486         EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
    1487 
     1459    pVCpu->nem.s.fLastInterruptShadow = CPUMUpdateInterruptShadowEx(&pVCpu->cpum.GstCtx,
     1460                                                                    pExitCtx->ExecutionState.InterruptShadow,
     1461                                                                    pExitCtx->Rip);
    14881462    APICSetTpr(pVCpu, pExitCtx->Cr8 << 4);
    14891463
     
    20812055    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    20822056    pCtx->rip += cb;
    2083 
    2084     /* Update interrupt shadow. */
    2085     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    2086         && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
    2087         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     2057    /** @todo Why not clear RF too? */
     2058    CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
    20882059}
    20892060
     
    24502421            return rcStrict;
    24512422    }
    2452     bool const fInhibitInterrupts = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    2453                                  && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip;
    24542423
    24552424    /*
     
    24582427    if (fPendingNmi)
    24592428    {
    2460         if (   !fInhibitInterrupts
    2461             && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     2429        if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
     2430            && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    24622431        {
    24632432            VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
     
    24792448    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    24802449    {
    2481         if (   !fInhibitInterrupts
     2450        /** @todo check NMI inhibiting here too! */
     2451        if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    24822452            && pVCpu->cpum.GstCtx.rflags.Bits.u1IF)
    24832453        {
  • trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h

    r97159 r97178  
    16981698static uint32_t vmxHCGetGuestIntrStateAndUpdateFFs(PVMCPUCC pVCpu)
    16991699{
     1700    uint32_t fIntrState = 0;
     1701
    17001702    /*
    17011703     * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS.
    17021704     */
    1703     uint32_t fIntrState = 0;
    1704     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     1705    if (CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
    17051706    {
    17061707        /* If inhibition is active, RIP and RFLAGS should've been imported from the VMCS already. */
    17071708        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
    17081709
    1709         PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    1710         if (pCtx->rip == EMGetInhibitInterruptsPC(pVCpu))
    1711         {
    1712             if (pCtx->eflags.Bits.u1IF)
    1713                 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
    1714             else
    1715                 fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
    1716         }
    1717         else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    1718         {
    1719             /*
    1720              * We can clear the inhibit force flag as even if we go back to the recompiler
    1721              * without executing guest code in VT-x, the flag's condition to be cleared is
    1722              * met and thus the cleared state is correct.
    1723              */
    1724             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    1725         }
     1710        /** @todo r=bird: This heuristic isn't all that correct, it would be safer
     1711         * to always use MOVSS here.  Best deal would be to track both bits in CPUM. */
     1712        if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
     1713            fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
     1714        else
     1715            fIntrState = VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS;
    17261716    }
    17271717
     
    17291719     * Check if we should inhibit NMI delivery.
    17301720     */
    1731     if (CPUMIsGuestNmiBlocking(pVCpu))
     1721    if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
    17321722        fIntrState |= VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI;
    17331723
     
    33803370
    33813371/**
     3372 * Worker for vmxHCImportGuestIntrState that handles the case where any of the
     3373 * relevant VMX_VMCS32_GUEST_INT_STATE bits are set.
     3374 */
     3375DECL_NO_INLINE(static,void) vmxHCImportGuestIntrStateSlow(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo, uint32_t fGstIntState)
     3376{
     3377    /*
     3378     * We must import RIP here to set our EM interrupt-inhibited state.
     3379     * We also import RFLAGS as our code that evaluates pending interrupts
     3380     * before VM-entry requires it.
     3381     */
     3382    vmxHCImportGuestRip(pVCpu);
     3383    vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
     3384
     3385    CPUMUpdateInterruptShadowEx(&pVCpu->cpum.GstCtx,
     3386                                RT_BOOL(fGstIntState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)),
     3387                                pVCpu->cpum.GstCtx.rip);
     3388    CPUMUpdateInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx, RT_BOOL(fGstIntState & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI));
     3389}
     3390
     3391
     3392/**
    33823393 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU
    33833394 * context.
     
    33993410    if (!u32Val)
    34003411    {
    3401         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    3402             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    3403 /** @todo r=bird: This is a call which isn't necessary most of the time, this
    3404  * path is taken on basically all exits. Try find a way to eliminating it. */
    3405         CPUMSetGuestNmiBlocking(pVCpu, false);
     3412        CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
     3413        CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
    34063414    }
    34073415    else
    3408     {
    3409 /** @todo consider this branch for non-inlining. */
    3410         /*
    3411          * We must import RIP here to set our EM interrupt-inhibited state.
    3412          * We also import RFLAGS as our code that evaluates pending interrupts
    3413          * before VM-entry requires it.
    3414          */
    3415         vmxHCImportGuestRip(pVCpu);
    3416         vmxHCImportGuestRFlags(pVCpu, pVmcsInfo);
    3417 
    3418         if (u32Val & (VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS | VMX_VMCS_GUEST_INT_STATE_BLOCK_STI))
    3419             EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
    3420         else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    3421             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    3422 
    3423         bool const fNmiBlocking = RT_BOOL(u32Val & VMX_VMCS_GUEST_INT_STATE_BLOCK_NMI);
    3424         CPUMSetGuestNmiBlocking(pVCpu, fNmiBlocking);
    3425     }
     3416        vmxHCImportGuestIntrStateSlow(pVCpu, pVmcsInfo, u32Val);
    34263417}
    34273418
     
    49274918     */
    49284919    if (   !VCPU_2_VMXSTATE(pVCpu).Event.fPending
    4929         && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     4920        && !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
    49304921    {
    49314922        /** @todo SMI. SMIs take priority over NMIs. */
     
    49474938             * the nested-hypervisor is using virtual-NMIs.
    49484939             */
    4949             if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     4940            if (!CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    49504941            {
    49514942#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     
    49614952                return VINF_SUCCESS;
    49624953            }
    4963             else if (!fIsNestedGuest)
     4954            if (!fIsNestedGuest)
    49644955                vmxHCSetNmiWindowExitVmcs(pVCpu, pVmcsInfo);
    49654956        }
     
    61636154    pVCpu->cpum.GstCtx.rip += cbInstr;
    61646155    ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_RIP);
    6165 
    6166     /* Update interrupt inhibition. */
    6167     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    6168         && pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
    6169         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     6156    CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
     6157    /** @todo clear RF? */
    61706158}
    61716159
     
    62916279            && enmRaise == IEMXCPTRAISE_PREV_EVENT
    62926280            && (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
    6293             && CPUMIsGuestNmiBlocking(pVCpu))
    6294         {
    6295             CPUMSetGuestNmiBlocking(pVCpu, false);
    6296         }
     6281            && CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
     6282            CPUMClearInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
    62976283
    62986284        switch (enmRaise)
     
    63766362    }
    63776363    else if (   (pVmcsInfo->u32PinCtls & VMX_PIN_CTLS_VIRT_NMI)
    6378              && !CPUMIsGuestNmiBlocking(pVCpu))
     6364             && !CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
    63796365    {
    63806366        if (    VMX_EXIT_INT_INFO_IS_VALID(uExitIntInfo)
     
    63896375             * See Intel spec. 31.7.1.2 "Resuming Guest Software After Handling An Exception".
    63906376             */
    6391             CPUMSetGuestNmiBlocking(pVCpu, true);
     6377            CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
    63926378            Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
    63936379        }
     
    64066392            if (VMX_EXIT_QUAL_EPT_IS_NMI_UNBLOCK_IRET(pVmxTransient->uExitQual))
    64076393            {
    6408                 CPUMSetGuestNmiBlocking(pVCpu, true);
     6394                CPUMSetInterruptInhibitingByNmiEx(&pVCpu->cpum.GstCtx);
    64096395                Log4Func(("Set NMI blocking. uExitReason=%u\n", pVmxTransient->uExitReason));
    64106396            }
     
    76547640    }
    76557641
    7656     Assert(!CPUMIsGuestNmiBlocking(pVCpu));
     7642    Assert(!CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx));
    76577643
    76587644    /*
     
    76667652    if (fIntrState & VMX_VMCS_GUEST_INT_STATE_BLOCK_STI)
    76677653    {
    7668         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    7669             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     7654        CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
    76707655
    76717656        fIntrState &= ~VMX_VMCS_GUEST_INT_STATE_BLOCK_STI;
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r96811 r97178  
    26922692
    26932693        if (fWhat & CPUMCTX_EXTRN_INHIBIT_INT)
    2694         {
    2695             if (pVmcbCtrl->IntShadow.n.u1IntShadow)
    2696                 EMSetInhibitInterruptsPC(pVCpu, pVmcbGuest->u64RIP);
    2697             else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    2698                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    2699         }
     2694            CPUMUpdateInterruptShadowEx(pCtx, pVmcbCtrl->IntShadow.n.u1IntShadow, pVmcbGuest->u64RIP);
    27002695
    27012696        if (fWhat & CPUMCTX_EXTRN_RIP)
     
    35043499
    35053500/**
    3506  * Checks if the guest (or nested-guest) has an interrupt shadow active right
    3507  * now.
    3508  *
    3509  * @returns @c true if the interrupt shadow is active, @c false otherwise.
    3510  * @param   pVCpu   The cross context virtual CPU structure.
    3511  *
    3512  * @remarks No-long-jump zone!!!
    3513  * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
    3514  */
    3515 static bool hmR0SvmIsIntrShadowActive(PVMCPUCC pVCpu)
    3516 {
    3517     /*
    3518      * Instructions like STI and MOV SS inhibit interrupts till the next instruction
    3519      * completes. Check if we should inhibit interrupts or clear any existing
    3520      * interrupt inhibition.
    3521      */
    3522     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    3523     {
    3524         if (pVCpu->cpum.GstCtx.rip != EMGetInhibitInterruptsPC(pVCpu))
    3525         {
    3526             /*
    3527              * We can clear the inhibit force flag as even if we go back to the recompiler
    3528              * without executing guest code in AMD-V, the flag's condition to be cleared is
    3529              * met and thus the cleared state is correct.
    3530              */
    3531             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    3532             return false;
    3533         }
    3534         return true;
    3535     }
    3536     return false;
    3537 }
    3538 
    3539 
    3540 /**
    35413501 * Sets the virtual interrupt intercept control in the VMCB.
    35423502 *
     
    36123572
    36133573    bool const fGif        = CPUMGetGuestGif(pCtx);
    3614     bool const fIntShadow  = hmR0SvmIsIntrShadowActive(pVCpu);
    3615     bool const fBlockNmi   = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
     3574    bool const fIntShadow  = CPUMIsInInterruptShadowWithUpdate(pCtx);
     3575    bool const fBlockNmi   = CPUMAreInterruptsInhibitedByNmi(pCtx);
    36163576
    36173577    Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntPending=%RTbool fNmiPending=%RTbool\n",
     
    37343694    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    37353695
    3736     bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu);
     3696    bool const fIntShadow = CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx);
    37373697#ifdef VBOX_STRICT
    37383698    PCCPUMCTX  pCtx       = &pVCpu->cpum.GstCtx;
     
    37823742         * VM-exit to determine the state.
    37833743         */
    3784         if (    Event.n.u3Type   == SVM_EVENT_NMI
    3785             &&  Event.n.u8Vector == X86_XCPT_NMI
    3786             && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    3787         {
    3788             VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    3789         }
     3744        if (   Event.n.u3Type   == SVM_EVENT_NMI
     3745            && Event.n.u8Vector == X86_XCPT_NMI)
     3746            CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
    37903747
    37913748        /*
     
    38093766     * but we still need to intercept IRET in order to eventually clear NMI inhibition.
    38103767     */
    3811     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     3768    if (CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    38123769        hmR0SvmSetCtrlIntercept(pVmcb, SVM_CTRL_INTERCEPT_IRET);
    38133770
     
    67116668                    /* If we are re-injecting an NMI, clear NMI blocking. */
    67126669                    if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)
    6713                         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     6670                        CPUMClearInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
    67146671
    67156672                    /* Determine a vectoring #PF condition, see comment in hmR0SvmExitXcptPF(). */
     
    68026759DECLINLINE(void) hmR0SvmAdvanceRip(PVMCPUCC pVCpu, uint32_t cb)
    68036760{
    6804     PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    6805     pCtx->rip += cb;
    6806 
    6807     /* Update interrupt shadow. */
    6808     if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    6809         && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
    6810         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     6761    pVCpu->cpum.GstCtx.rip += cb;
     6762    CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
     6763    /** @todo clear RF. */
    68116764}
    68126765
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r97150 r97178  
    158158 */
    159159#define CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID    RT_BIT(12)
     160
     161
     162/** For saved state only: Block injection of non-maskable interrupts to the guest.
     163 * @note This flag was moved to CPUMCTX::fInhibit in v7.0.2. */
     164#define CPUM_OLD_VMCPU_FF_BLOCK_NMIS            RT_BIT_64(25)
    160165
    161166
     
    25412546            SSMR3PutMem(pSSM,   &pGstCtx->hwvirt.svm.abMsrBitmap[0], sizeof(pGstCtx->hwvirt.svm.abMsrBitmap));
    25422547            SSMR3PutMem(pSSM,   &pGstCtx->hwvirt.svm.abIoBitmap[0],  sizeof(pGstCtx->hwvirt.svm.abIoBitmap));
    2543             SSMR3PutU32(pSSM,    pGstCtx->hwvirt.fLocalForcedActions);
     2548            /* This is saved in the old VMCPUM_FF format.  Change if more flags are added. */
     2549            SSMR3PutU32(pSSM,    pGstCtx->hwvirt.fSavedInhibit & CPUMCTX_INHIBIT_NMI ? CPUM_OLD_VMCPU_FF_BLOCK_NMIS : 0);
    25442550            SSMR3PutBool(pSSM,   pGstCtx->hwvirt.fGif);
    25452551        }
     
    28272833                        SSMR3GetMem(pSSM,      &pGstCtx->hwvirt.svm.abMsrBitmap[0], sizeof(pGstCtx->hwvirt.svm.abMsrBitmap));
    28282834                        SSMR3GetMem(pSSM,      &pGstCtx->hwvirt.svm.abIoBitmap[0],  sizeof(pGstCtx->hwvirt.svm.abIoBitmap));
    2829                         SSMR3GetU32(pSSM,      &pGstCtx->hwvirt.fLocalForcedActions);
     2835
     2836                        uint32_t fSavedLocalFFs = 0;
     2837                        rc = SSMR3GetU32(pSSM,      &fSavedLocalFFs);
     2838                        AssertRCReturn(rc, rc);
     2839                        Assert(fSavedLocalFFs == 0 || fSavedLocalFFs == CPUM_OLD_VMCPU_FF_BLOCK_NMIS);
     2840                        pGstCtx->hwvirt.fSavedInhibit = fSavedLocalFFs & CPUM_OLD_VMCPU_FF_BLOCK_NMIS ? CPUMCTX_INHIBIT_NMI : 0;
     2841
    28302842                        SSMR3GetBool(pSSM,     &pGstCtx->hwvirt.fGif);
    28312843                    }
     
    40004012
    40014013    pHlp->pfnPrintf(pHlp, "VCPU[%u] hardware virtualization state:\n", pVCpu->idCpu);
    4002     pHlp->pfnPrintf(pHlp, "fLocalForcedActions          = %#RX32\n",  pCtx->hwvirt.fLocalForcedActions);
     4014    pHlp->pfnPrintf(pHlp, "fSavedInhibit                = %#RX32\n",  pCtx->hwvirt.fSavedInhibit);
    40034015    pHlp->pfnPrintf(pHlp, "In nested-guest hwvirt mode  = %RTbool\n", CPUMIsGuestInNestedHwvirtMode(pCtx));
    40044016
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r96894 r97178  
    4747*********************************************************************************************************************************/
    4848#define LOG_GROUP LOG_GROUP_EM
    49 #define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
     49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
    5050#include <VBox/vmm/em.h>
    5151#include <VBox/vmm/vmm.h>
     
    17211721         * The instruction following an emulated STI should *always* be executed!
    17221722         *
    1723          * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
     1723         * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
    17241724         *       the eip is the same as the inhibited instr address.  Before we
    17251725         *       are able to execute this instruction in raw mode (iret to
     
    17291729         *       unlikely, but such timing sensitive problem are not as rare as
    17301730         *       you might think.
    1731          */
    1732         if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
     1731         *
     1732         * Note! This used to be a force action flag. Can probably ditch this code.
     1733         */
     1734        if (   CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    17331735            && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
    17341736        {
    1735             CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
    1736             if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
     1737            CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
     1738            if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
    17371739            {
    1738                 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
    1739                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     1740                CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
     1741                Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
     1742                     (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
    17401743            }
    17411744            else
    1742                 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
     1745                Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
    17431746        }
    17441747
     
    17921795        Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
    17931796        bool fWakeupPending = false;
    1794         if (   !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
     1797        if (    VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
     1798                                         | VMCPU_FF_INTERRUPT_NMI  | VMCPU_FF_INTERRUPT_NESTED_GUEST
     1799                                         | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
     1800            && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
    17951801            && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
    1796             && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)  /* Interrupt shadows block both NMIs and interrupts. */
    1797             && !TRPMHasTrap(pVCpu))                                  /* An event could already be scheduled for dispatching. */
    1798         {
    1799             bool fInVmxNonRootMode;
    1800             bool fInSvmHwvirtMode;
    1801             bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
    1802             if (fInNestedGuest)
     1802            && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)             /* Interrupt shadows block both NMIs and interrupts. */
     1803            /** @todo r=bird: But interrupt shadows probably do not block vmexits due to host interrupts... */
     1804            && !TRPMHasTrap(pVCpu))                                      /* An event could already be scheduled for dispatching. */
     1805        {
     1806            if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
    18031807            {
    1804                 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
    1805                 fInSvmHwvirtMode  = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
    1806             }
    1807             else
    1808             {
    1809                 fInVmxNonRootMode = false;
    1810                 fInSvmHwvirtMode  = false;
    1811             }
    1812 
    1813             bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
    1814             if (fGif)
    1815             {
     1808                bool fInVmxNonRootMode;
     1809                bool fInSvmHwvirtMode;
     1810                if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
     1811                {
     1812                    fInVmxNonRootMode = false;
     1813                    fInSvmHwvirtMode  = false;
     1814                }
     1815                else
     1816                {
     1817                    fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
     1818                    fInSvmHwvirtMode  = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
     1819                }
     1820
     1821                if (0)
     1822                { }
    18161823#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    18171824                /*
     
    18241831                 * See Intel spec. 26.7.6 "NMI-Window Exiting".
    18251832                 */
    1826                 if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
    1827                     && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
     1833                else if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
     1834                         && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
    18281835                {
    18291836                    Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
     
    18351842                    UPDATE_RC();
    18361843                }
    1837                 else
    18381844#endif
    18391845                /*
    18401846                 * NMIs (take priority over external interrupts).
    18411847                 */
    1842                 if (    VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
    1843                     && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
     1848                else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
     1849                         && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    18441850                {
    18451851#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     
    19741980                    }
    19751981                }
    1976             }
     1982            } /* CPUMGetGuestGif */
    19771983        }
    19781984
     
    20802086        /* check that we got them all  */
    20812087        AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
    2082         AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
     2088        AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
    20832089    }
    20842090
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux.cpp

    r96407 r97178  
    14761476            pVCpu->cpum.GstCtx.rip = pRun->s.regs.regs.rip;
    14771477
    1478         if (KvmEvents.interrupt.shadow)
    1479             EMSetInhibitInterruptsPC(pVCpu, pVCpu->cpum.GstCtx.rip);
    1480         else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    1481             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    1482 
    1483         if (KvmEvents.nmi.masked)
    1484             VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    1485         else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    1486             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     1478        CPUMUpdateInterruptShadowEx(&pVCpu->cpum.GstCtx, KvmEvents.interrupt.shadow != 0, pVCpu->cpum.GstCtx.rip);
     1479        CPUMUpdateInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx, KvmEvents.nmi.masked != 0);
    14871480
    14881481        if (KvmEvents.interrupt.injected)
     
    18691862
    18701863        KvmEvents.flags = KVM_VCPUEVENT_VALID_SHADOW;
    1871         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    1872         {
    1873             if (pRun->s.regs.regs.rip == EMGetInhibitInterruptsPC(pVCpu))
    1874                 KvmEvents.interrupt.shadow = KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI;
    1875             else
    1876                 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    1877         }
     1864        if (!CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx))
     1865        { /* probably likely */ }
     1866        else
     1867            KvmEvents.interrupt.shadow = KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI;
    18781868
    18791869        /* No flag - this is updated unconditionally. */
    1880         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    1881             KvmEvents.nmi.masked = 1;
     1870        KvmEvents.nmi.masked = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
    18821871
    18831872        if (TRPMHasTrap(pVCpu))
     
    20592048    KvmEvents.flags |= KVM_VCPUEVENT_VALID_SHADOW;
    20602049    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_INT))
    2061     {
    2062         if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    2063             KvmEvents.interrupt.shadow = 0;
    2064         else if (EMGetInhibitInterruptsPC(pVCpu) == pRun->s.regs.regs.rip)
    2065             KvmEvents.interrupt.shadow = KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI;
    2066         else
    2067         {
    2068             VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
    2069             KvmEvents.interrupt.shadow = 0;
    2070         }
    2071     }
    2072     else if (KvmEvents.interrupt.shadow)
    2073         EMSetInhibitInterruptsPC(pVCpu, pRun->s.regs.regs.rip);
    2074     else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    2075         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     2050        KvmEvents.interrupt.shadow = !CPUMIsInInterruptShadowWithUpdate(&pVCpu->cpum.GstCtx) ? 0
     2051                                   : KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI;
     2052    else
     2053        CPUMUpdateInterruptShadowEx(&pVCpu->cpum.GstCtx, KvmEvents.interrupt.shadow != 0, pRun->s.regs.regs.rip);
    20762054
    20772055    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_INHIBIT_NMI))
    2078         KvmEvents.nmi.masked = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS) ? 1 : 0;
    2079     else if (KvmEvents.nmi.masked)
    2080         VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    2081     else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    2082         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
     2056        KvmEvents.nmi.masked = CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx);
     2057    else
     2058        CPUMUpdateInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx, KvmEvents.nmi.masked != 0);
    20832059
    20842060    /* KVM will own the INT + NMI inhibit state soon: */
  • trunk/src/VBox/VMM/VMMR3/TRPM.cpp

    r96407 r97178  
    368368{
    369369    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    370     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
     370    Assert(!CPUMIsInInterruptShadow(pCtx));
    371371    Assert(pfInjected);
    372372    *pfInjected = false;
  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

    r96925 r97178  
    25532553        PRINT_FLAG(VMCPU_FF_,PGM_SYNC_CR3_NON_GLOBAL);
    25542554        PRINT_FLAG(VMCPU_FF_,TLB_FLUSH);
    2555         PRINT_FLAG(VMCPU_FF_,INHIBIT_INTERRUPTS);
    2556         PRINT_FLAG(VMCPU_FF_,BLOCK_NMIS);
    25572555        PRINT_FLAG(VMCPU_FF_,TO_R3);
    25582556        PRINT_FLAG(VMCPU_FF_,IOM);
     
    25612559        else
    25622560            pHlp->pfnPrintf(pHlp, "\n");
    2563 
    2564         if (fLocalForcedActions & VMCPU_FF_INHIBIT_INTERRUPTS)
    2565             pHlp->pfnPrintf(pHlp, "    intr inhibit RIP: %RGp\n", EMGetInhibitInterruptsPC(pVCpu));
    25662561
    25672562        /* the groups */
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r97150 r97178  
    122122    ;
    123123    ; Guest context state
    124     ; (Identical to the .Hyper chunk below and to CPUMCTX in cpum.mac.)
    125124    ;
    126125    .Guest                      resq    0
     
    183182    .Guest.gs.u32Limit          resd    1
    184183    .Guest.gs.Attr              resd    1
    185     .Guest.eip                  resq    1
    186     .Guest.eflags               resq    1
    187     .Guest.cr0                  resq    1
    188     .Guest.cr2                  resq    1
    189     .Guest.cr3                  resq    1
    190     .Guest.cr4                  resq    1
    191     .Guest.dr                   resq    8
    192     .Guest.gdtrPadding          resw    3
    193     .Guest.gdtr                 resw    0
    194     .Guest.gdtr.cbGdt           resw    1
    195     .Guest.gdtr.pGdt            resq    1
    196     .Guest.idtrPadding          resw    3
    197     .Guest.idtr                 resw    0
    198     .Guest.idtr.cbIdt           resw    1
    199     .Guest.idtr.pIdt            resq    1
    200184    .Guest.ldtr.Sel             resw    1
    201185    .Guest.ldtr.PaddingSel      resw    1
     
    212196    .Guest.tr.u32Limit          resd    1
    213197    .Guest.tr.Attr              resd    1
     198    .Guest.eip                  resq    1
     199    .Guest.eflags               resq    1
     200    .Guest.fInhibit             resb    1
     201    alignb 8
     202    .Guest.uRipInhibitInt       resq    1
     203    .Guest.cr0                  resq    1
     204    .Guest.cr2                  resq    1
     205    .Guest.cr3                  resq    1
     206    .Guest.cr4                  resq    1
     207    .Guest.dr                   resq    8
     208    .Guest.gdtrPadding          resw    3
     209    .Guest.gdtr                 resw    0
     210    .Guest.gdtr.cbGdt           resw    1
     211    .Guest.gdtr.pGdt            resq    1
     212    .Guest.idtrPadding          resw    3
     213    .Guest.idtr                 resw    0
     214    .Guest.idtr.cbIdt           resw    1
     215    .Guest.idtr.pIdt            resq    1
    214216    .Guest.SysEnter.cs          resb    8
    215217    .Guest.SysEnter.eip         resb    8
     
    222224    .Guest.msrSFMASK            resb    8
    223225    .Guest.msrKERNELGSBASE      resb    8
    224     .Guest.uMsrPadding0         resb    8
    225226
    226227    alignb 8
     
    290291    .Guest.hwvirt.enmHwvirt                 resd        1
    291292    .Guest.hwvirt.fGif                      resb        1
    292     alignb 8
    293     .Guest.hwvirt.fLocalForcedActions       resd        1
     293    alignb 4
     294    .Guest.hwvirt.fSavedInhibit             resd        1
    294295    alignb 64
    295296
  • trunk/src/VBox/VMM/include/EMInternal.h

    r96407 r97178  
    165165    uint32_t                cIemThenRemInstructions;
    166166
    167     /** Inhibit interrupts for this instruction. Valid only when VM_FF_INHIBIT_INTERRUPTS is set. */
    168     RTGCUINTPTR             GCPtrInhibitInterrupts;
    169 
    170167    /** Start of the current time slice in ms. */
    171168    uint64_t                u64TimeSliceStart;
     
    196193    } MWait;
    197194
     195#if 0
    198196    /** Make sure the jmp_buf is at a 32-byte boundrary. */
    199     uint64_t                au64Padding1[3];
     197    uint64_t                au64Padding1[4];
     198#endif
    200199    union
    201200    {
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r97043 r97178  
    161161    GEN_CHECK_OFF(CPUMCTX, hwvirt.enmHwvirt);
    162162    GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif);
    163     GEN_CHECK_OFF(CPUMCTX, hwvirt.fLocalForcedActions);
     163    GEN_CHECK_OFF(CPUMCTX, hwvirt.fSavedInhibit);
    164164    /** @todo NSTVMX: add rest of hwvirt fields when code is more
    165165     *        finalized. */
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette