VirtualBox

Changeset 97178 in vbox for trunk/include/VBox


Ignore:
Timestamp:
Oct 17, 2022 9:06:03 PM (2 years ago)
Author:
vboxsync
Message:

VMM/CPUM,EM,HM,IEM,++: Moved VMCPU_FF_INHIBIT_INTERRUPTS and VMCPU_FF_BLOCK_NMIS to CPUMCTX::fInhibit. Moved ldtr and tr up to the CPUMCTXCORE area in hope for better cache alignment of rip, rflags and crX register fields. bugref:9941

Location:
trunk/include/VBox/vmm
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/cpum.h

    r97096 r97178  
    16861686              ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))
    16871687
/** @def CPUMCTX_ASSERT_NOT_EXTRN
 * Macro for asserting that @a a_fNotExtrn are present in @a a_pCtx.
 *
 * Asserts that none of the CPUMCTX_EXTRN_XXX bits in @a a_fNotExtrn are set in
 * CPUMCTX::fExtrn, i.e. that the corresponding guest state has been imported
 * and may be accessed directly.  This is the plain-context variant that takes
 * a CPUMCTX pointer rather than a VMCPU pointer.
 *
 * @param   a_pCtx          The CPU context of the calling EMT.
 * @param   a_fNotExtrn     Mask of CPUMCTX_EXTRN_XXX bits to check.
 */
#define CPUMCTX_ASSERT_NOT_EXTRN(a_pCtx, a_fNotExtrn) \
    AssertMsg(!((a_pCtx)->fExtrn & (a_fNotExtrn)), \
              ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pCtx)->fExtrn, (a_fNotExtrn)))
     1697
    16881698/** @def CPUM_IMPORT_EXTRN_RET
    16891699 * Macro for making sure the state specified by @a fExtrnImport is present,
     
    19071917    pCtx->hwvirt.fGif = fGif;
    19081918}
     1919
     1920/**
     1921 * Checks if we're in an "interrupt shadow", i.e. after a STI, POP SS or MOV SS.
     1922 * 
     1923 * This also inhibit NMIs, except perhaps for nested guests.
     1924 * 
     1925 * @returns true if interrupts are inhibited by interrupt shadow, false if not.
     1926 * @param   pCtx    Current guest CPU context.
     1927 * @note    Requires pCtx->rip to be up to date.
     1928 * @note    Does not clear fInhibit when CPUMCTX::uRipInhibitInt differs
     1929 *          from CPUMCTX::rip.
     1930 */
     1931DECLINLINE(bool) CPUMIsInInterruptShadow(PCCPUMCTX pCtx)
     1932{
     1933    if (!(pCtx->fInhibit & CPUMCTX_INHIBIT_SHADOW))
     1934        return false;
     1935
     1936    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
     1937    return pCtx->uRipInhibitInt == pCtx->rip;
     1938}
     1939
     1940/**
     1941 * Checks if we're in an "interrupt shadow", i.e. after a STI, POPF or MOV SS,
     1942 * updating the state if stale.
     1943 * 
     1944 * This also inhibit NMIs, except perhaps for nested guests.
     1945 * 
     1946 * @returns true if interrupts are inhibited by interrupt shadow, false if not.
     1947 * @param   pCtx    Current guest CPU context.
     1948 * @note    Requires pCtx->rip to be up to date.
     1949 */
     1950DECLINLINE(bool) CPUMIsInInterruptShadowWithUpdate(PCPUMCTX pCtx)
     1951{
     1952    if (!(pCtx->fInhibit & CPUMCTX_INHIBIT_SHADOW))
     1953        return false;
     1954
     1955    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
     1956    if (pCtx->uRipInhibitInt == pCtx->rip)
     1957        return true;
     1958
     1959    pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
     1960    return false;
     1961}
     1962
     1963/**
     1964 * Sets the "interrupt shadow" flag, after a STI, POPF or MOV SS instruction.
     1965 *
     1966 * @param   pCtx    Current guest CPU context.
     1967 * @note    Requires pCtx->rip to be up to date.
     1968 */
     1969DECLINLINE(void) CPUMSetInInterruptShadow(PCPUMCTX pCtx)
     1970{
     1971    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
     1972    pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
     1973    pCtx->uRipInhibitInt = pCtx->rip;
     1974}
     1975
     1976/**
     1977 * Sets the "interrupt shadow" flag, after a STI, POPF or MOV SS instruction,
     1978 * extended version.
     1979 *
     1980 * @param   pCtx    Current guest CPU context.
     1981 * @param   rip     The RIP for which it is inhibited.
     1982 */
     1983DECLINLINE(void) CPUMSetInInterruptShadowEx(PCPUMCTX pCtx, uint64_t rip)
     1984{
     1985    pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
     1986    pCtx->uRipInhibitInt = rip;
     1987}
     1988
     1989/**
     1990 * Clears the "interrupt shadow" flag.
     1991 *
     1992 * @param   pCtx    Current guest CPU context.
     1993 */
     1994DECLINLINE(void) CPUMClearInterruptShadow(PCPUMCTX pCtx)
     1995{
     1996    pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
     1997}
     1998
     1999/**
     2000 * Update the "interrupt shadow" flag.
     2001 *
     2002 * @param   pCtx        Current guest CPU context.
     2003 * @param   fInhibited  The new state.
     2004 * @note    Requires pCtx->rip to be up to date.
     2005 */
     2006DECLINLINE(void) CPUMUpdateInterruptShadow(PCPUMCTX pCtx, bool fInhibited)
     2007{
     2008    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
     2009    if (!fInhibited)
     2010        pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
     2011    else
     2012    {
     2013        pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
     2014        pCtx->uRipInhibitInt = pCtx->rip;
     2015    }
     2016}
     2017
     2018/**
     2019 * Update the "interrupt shadow" flag, extended version.
     2020 *
     2021 * @returns fInhibited.
     2022 * @param   pCtx        Current guest CPU context.
     2023 * @param   fInhibited  The new state.
     2024 * @param   rip         The RIP for which it is inhibited.
     2025 */
     2026DECLINLINE(bool) CPUMUpdateInterruptShadowEx(PCPUMCTX pCtx, bool fInhibited, uint64_t rip)
     2027{
     2028    if (!fInhibited)
     2029        pCtx->fInhibit &= ~CPUMCTX_INHIBIT_SHADOW;
     2030    else
     2031    {
     2032        pCtx->fInhibit |= CPUMCTX_INHIBIT_SHADOW;
     2033        pCtx->uRipInhibitInt = rip;
     2034    }
     2035    return fInhibited;
     2036}
     2037
     2038/* VMX forward declarations used by extended function versions: */
     2039DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx);
     2040DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCCPUMCTX pCtx, uint32_t uPinCtls);
     2041DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCCPUMCTX pCtx);
     2042DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCPUMCTX pCtx, bool fBlocking);
     2043
     2044/**
     2045 * Checks whether interrupts, include NMIs, are inhibited by pending NMI
     2046 * delivery.
     2047 *
     2048 * This only checks the inhibit mask.
     2049 *
     2050 * @retval  true if interrupts are inhibited by NMI handling.
     2051 * @retval  false if interrupts are not inhibited by NMI handling.
     2052 * @param   pCtx        Current guest CPU context.
     2053 */
     2054DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmi(PCCPUMCTX pCtx)
     2055{
     2056    return (pCtx->fInhibit & CPUMCTX_INHIBIT_NMI) != 0;
     2057}
     2058
     2059/**
     2060 * Extended version of CPUMAreInterruptsInhibitedByNmi() that takes VMX non-root
     2061 * mode into account when check whether interrupts are inhibited by NMI.
     2062 *
     2063 * @retval  true if interrupts are inhibited by NMI handling.
     2064 * @retval  false if interrupts are not inhibited by NMI handling.
     2065 * @param   pCtx        Current guest CPU context.
     2066 */
     2067DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmiEx(PCCPUMCTX pCtx)
     2068{
     2069    /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
     2070    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
     2071        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
     2072        return CPUMAreInterruptsInhibitedByNmi(pCtx);
     2073    return CPUMIsGuestVmxVirtNmiBlocking(pCtx);
     2074}
     2075
     2076/**
     2077 * Marks interrupts, include NMIs, as inhibited by pending NMI delivery.
     2078 *
     2079 * @param   pCtx        Current guest CPU context.
     2080 */
     2081DECLINLINE(void) CPUMSetInterruptInhibitingByNmi(PCPUMCTX pCtx)
     2082{
     2083    pCtx->fInhibit |= CPUMCTX_INHIBIT_NMI;
     2084}
     2085
     2086/**
     2087 * Extended version of CPUMSetInterruptInhibitingByNmi() that takes VMX non-root
     2088 * mode into account when marking interrupts as inhibited by NMI.
     2089 *
     2090 * @param   pVCpu       The cross context virtual CPU structure.
     2091 */
     2092DECLINLINE(void) CPUMSetInterruptInhibitingByNmiEx(PCPUMCTX pCtx)
     2093{
     2094    /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
     2095    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
     2096        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
     2097        CPUMSetInterruptInhibitingByNmi(pCtx);
     2098    else
     2099        CPUMSetGuestVmxVirtNmiBlocking(pCtx, true);
     2100}
     2101
     2102/**
     2103 * Marks interrupts, include NMIs, as no longer inhibited by pending NMI
     2104 * delivery.
     2105 *
     2106 * @param   pCtx        Current guest CPU context.
     2107 */
     2108DECLINLINE(void) CPUMClearInterruptInhibitingByNmi(PCPUMCTX pCtx)
     2109{
     2110    pCtx->fInhibit &= ~CPUMCTX_INHIBIT_NMI;
     2111}
     2112
     2113/**
     2114 * Extended version of CPUMClearInterruptInhibitingByNmi() that takes VMX
     2115 * non-root mode into account when doing the updating.
     2116 *
     2117 * @param   pVCpu       The cross context virtual CPU structure.
     2118 * @param   fInhibited  The new state.
     2119 */
     2120DECLINLINE(void) CPUMClearInterruptInhibitingByNmiEx(PCPUMCTX pCtx)
     2121{
     2122    /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
     2123    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
     2124        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
     2125        CPUMClearInterruptInhibitingByNmi(pCtx);
     2126    else
     2127        CPUMSetGuestVmxVirtNmiBlocking(pCtx, false);
     2128}
     2129
     2130/**
     2131 * Update whether interrupts, include NMIs, are inhibited by pending NMI
     2132 * delivery.
     2133 *
     2134 * @param   pCtx        Current guest CPU context.
     2135 * @param   fInhibited  The new state.
     2136 */
     2137DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmi(PCPUMCTX pCtx, bool fInhibited)
     2138{
     2139    if (!fInhibited)
     2140        pCtx->fInhibit &= ~CPUMCTX_INHIBIT_NMI;
     2141    else
     2142        pCtx->fInhibit |= CPUMCTX_INHIBIT_NMI;
     2143}
     2144
     2145/**
     2146 * Extended version of CPUMUpdateInterruptInhibitingByNmi() that takes VMX
     2147 * non-root mode into account when doing the updating.
     2148 *
     2149 * @param   pVCpu       The cross context virtual CPU structure.
     2150 * @param   fInhibited  The new state.
     2151 */
     2152DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmiEx(PCPUMCTX pCtx, bool fInhibited)
     2153{
     2154    /*
     2155     * Set the state of guest-NMI blocking in any of the following cases:
     2156     *   - We're not executing a nested-guest.
     2157     *   - We're executing an SVM nested-guest[1].
     2158     *   - We're executing a VMX nested-guest without virtual-NMIs enabled.
     2159     *
     2160     * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
     2161     *        SVM hypervisors must track NMI blocking themselves by intercepting
     2162     *        the IRET instruction after injection of an NMI.
     2163     */
     2164    if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
     2165        || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
     2166        CPUMUpdateInterruptInhibitingByNmi(pCtx, fInhibited);
     2167    /*
     2168     * Set the state of virtual-NMI blocking, if we are executing a
     2169     * VMX nested-guest with virtual-NMIs enabled.
     2170     */
     2171    else
     2172        CPUMSetGuestVmxVirtNmiBlocking(pCtx, fInhibited);
     2173}
     2174
    19092175
    19102176/**
     
    27673033    CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED,
    27683034    CPUMINTERRUPTIBILITY_INT_DISABLED,
    2769     CPUMINTERRUPTIBILITY_INT_INHIBITED,
     3035    CPUMINTERRUPTIBILITY_INT_INHIBITED, /**< @todo rename as it inhibits NMIs too. */
    27703036    CPUMINTERRUPTIBILITY_NMI_INHIBIT,
    27713037    CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT,
     
    27753041
    27763042VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu);
    2777 VMM_INT_DECL(bool)                 CPUMIsGuestNmiBlocking(PCVMCPU pVCpu);
    2778 VMM_INT_DECL(void)                 CPUMSetGuestNmiBlocking(PVMCPU pVCpu, bool fBlock);
    27793043
    27803044/** @name Typical scalable bus frequency values.
  • trunk/include/VBox/vmm/cpum.mac

    r97150 r97178  
    150150    .gs.u32Limit        resd    1
    151151    .gs.Attr            resd    1
     152
     153    .ldtr.Sel           resw    1
     154    .ldtr.PaddingSel    resw    1
     155    .ldtr.ValidSel      resw    1
     156    .ldtr.fFlags        resw    1
     157    .ldtr.u64Base       resq    1
     158    .ldtr.u32Limit      resd    1
     159    .ldtr.Attr          resd    1
     160    .tr.Sel             resw    1
     161    .tr.PaddingSel      resw    1
     162    .tr.ValidSel        resw    1
     163    .tr.fFlags          resw    1
     164    .tr.u64Base         resq    1
     165    .tr.u32Limit        resd    1
     166    .tr.Attr            resd    1
     167
    152168    .eip                resq    1
    153169    .eflags             resq    1
     
    217233    .gs.u32Limit        resd    1
    218234    .gs.Attr            resd    1
    219     .eip                resq    1
    220     .eflags             resq    1
    221     .cr0                resq    1
    222     .cr2                resq    1
    223     .cr3                resq    1
    224     .cr4                resq    1
    225     .dr                 resq    8
    226     .gdtrPadding        resw    3
    227     .gdtr               resw    0
    228     .gdtr.cbGdt         resw    1
    229     .gdtr.pGdt          resq    1
    230     .idtrPadding        resw    3
    231     .idtr               resw    0
    232     .idtr.cbIdt         resw    1
    233     .idtr.pIdt          resq    1
    234235    .ldtr.Sel           resw    1
    235236    .ldtr.PaddingSel    resw    1
     
    246247    .tr.u32Limit        resd    1
    247248    .tr.Attr            resd    1
     249    .eip                resq    1
     250    .eflags             resq    1
     251    .fInhibit           resb    1
     252    alignb 8
     253    .uRipInhibitInt     resq    1
     254    .cr0                resq    1
     255    .cr2                resq    1
     256    .cr3                resq    1
     257    .cr4                resq    1
     258    .dr                 resq    8
     259    .gdtrPadding        resw    3
     260    .gdtr               resw    0
     261    .gdtr.cbGdt         resw    1
     262    .gdtr.pGdt          resq    1
     263    .idtrPadding        resw    3
     264    .idtr               resw    0
     265    .idtr.cbIdt         resw    1
     266    .idtr.pIdt          resq    1
    248267    .SysEnter.cs        resb    8
    249268    .SysEnter.eip       resb    8
     
    256275    .msrSFMASK          resb    8
    257276    .msrKERNELGSBASE    resb    8
    258     .uMsrPadding0       resb    8
    259277
    260278    alignb 8
     
    323341    .hwvirt.enmHwvirt                   resd        1
    324342    .hwvirt.fGif                        resb        1
    325     alignb 8
    326     .hwvirt.fLocalForcedActions         resd        1
     343    alignb 4
     344    .hwvirt.fSavedInhibit               resd        1
    327345    alignb 64
    328346endstruc
  • trunk/include/VBox/vmm/cpumctx.h

    r97150 r97178  
    266266    CPUMSELREG          gs;
    267267    /** @} */
     268
     269    CPUMSELREG          ldtr;
     270    CPUMSELREG          tr;
    268271
    269272    /** The program counter. */
     
    399402    } CPUM_UNION_NM(s);
    400403
     404    /** The task register.
     405     * Only the guest context uses all the members. */
     406    CPUMSELREG          ldtr;
     407    /** The task register.
     408     * Only the guest context uses all the members. */
     409    CPUMSELREG          tr;
     410
    401411    /** The program counter. */
    402412    union
     
    416426    /** @} */ /*(CPUMCTXCORE)*/
    417427
     428    /** Interrupt & exception inhibiting (CPUMCTX_INHIBIT_XXX). */
     429    uint8_t             fInhibit;
     430    uint8_t             abPadding[7];
     431    /** The RIP value fInhibit is/was valid for. */
     432    uint64_t            uRipInhibitInt;
    418433
    419434    /** @name Control registers.
     
    422437    uint64_t            cr2;
    423438    uint64_t            cr3;
    424     /** @todo the 4 PAE PDPE registers. See PGMCPU::aGstPaePdpeRegs. */
    425439    uint64_t            cr4;
     440    /** @todo Add the 4 PAE PDPE registers. See PGMCPU::aGstPaePdpeRegs. */
    426441    /** @} */
    427442
     
    445460    /** Interrupt Descriptor Table register. */
    446461    VBOXIDTR            idtr;
    447 
    448     /** The task register.
    449      * Only the guest context uses all the members. */
    450     CPUMSELREG          ldtr;
    451     /** The task register.
    452      * Only the guest context uses all the members. */
    453     CPUMSELREG          tr;
    454462
    455463    /** The sysenter msr registers.
     
    466474    uint64_t            msrSFMASK;          /**< syscall flag mask. */
    467475    uint64_t            msrKERNELGSBASE;    /**< swapgs exchange value. */
    468     uint64_t            uMsrPadding0;       /**< no longer used (used to hold a copy of APIC base MSR). */
    469476    /** @} */
    470477
    471     /** 0x228 - Externalized state tracker, CPUMCTX_EXTRN_XXX. */
     478    /** 0x230 - Externalized state tracker, CPUMCTX_EXTRN_XXX.
     479     * @todo Move up after uRipInhibitInt after fInhibit moves into RFLAGS.
     480     *       That will put this in the same cacheline as RIP, RFLAGS and CR0
     481     *       which are typically always imported and exported again during an
     482     *       VM exit. */
    472483    uint64_t            fExtrn;
    473484
    474     uint64_t            au64Unused[2];
     485    uint64_t            u64Unused;
    475486
    476487    /** 0x240 - PAE PDPTEs. */
     
    634645        /** 0x11134 - Global interrupt flag - AMD only (always true on Intel). */
    635646        bool                    fGif;
    636         bool                    afPadding1[3];
    637         /** 0x11138 - A subset of guest force flags that are saved while running the
    638          *  nested-guest. */
    639 #ifdef VMCPU_WITH_64_BIT_FFS
    640         uint64_t                fLocalForcedActions;
    641 #else
    642         uint32_t                fLocalForcedActions;
    643         uint32_t                fPadding;
    644 #endif
    645 #if 0
    646         /** 0x11140 - Pad to 64 byte boundary. */
    647         uint8_t                 abPadding0[8+16+32];
    648 #endif
     647        /** 0x11135 - Padding. */
     648        bool                    afPadding0[3];
     649        /** 0x11138 - A subset of guest inhibit flags (CPUMCTX_INHIBIT_XXX) that are
     650         *  saved while running the nested-guest. */
     651        uint32_t                fSavedInhibit;
     652        /** 0x1113c - Pad to 64 byte boundary. */
     653        uint8_t                 abPadding1[4];
    649654    } hwvirt;
    650655} CPUMCTX;
     
    656661AssertCompileSizeAlignment(CPUMCTX, 16);
    657662AssertCompileSizeAlignment(CPUMCTX, 8);
    658 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax,   0);
    659 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rcx,   8);
    660 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdx,  16);
    661 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbx,  24);
    662 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsp,  32);
    663 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbp,  40);
    664 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsi,  48);
    665 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdi,  56);
    666 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.)  r8,  64);
    667 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.)  r9,  72);
    668 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r10,  80);
    669 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r11,  88);
    670 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r12,  96);
    671 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r13, 104);
    672 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r14, 112);
    673 AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r15, 120);
    674 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, 128);
    675 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) cs, 152);
    676 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ss, 176);
    677 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ds, 200);
    678 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) fs, 224);
    679 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) gs, 248);
    680 AssertCompileMemberOffset(CPUMCTX,                        rip, 272);
    681 AssertCompileMemberOffset(CPUMCTX,                     rflags, 280);
    682 AssertCompileMemberOffset(CPUMCTX,                        cr0, 288);
    683 AssertCompileMemberOffset(CPUMCTX,                        cr2, 296);
    684 AssertCompileMemberOffset(CPUMCTX,                        cr3, 304);
    685 AssertCompileMemberOffset(CPUMCTX,                        cr4, 312);
    686 AssertCompileMemberOffset(CPUMCTX,                         dr, 320);
    687 AssertCompileMemberOffset(CPUMCTX,                       gdtr, 384+6);
    688 AssertCompileMemberOffset(CPUMCTX,                       idtr, 400+6);
    689 AssertCompileMemberOffset(CPUMCTX,                       ldtr, 416);
    690 AssertCompileMemberOffset(CPUMCTX,                         tr, 440);
    691 AssertCompileMemberOffset(CPUMCTX,                   SysEnter, 464);
    692 AssertCompileMemberOffset(CPUMCTX,                    msrEFER, 488);
    693 AssertCompileMemberOffset(CPUMCTX,                    msrSTAR, 496);
    694 AssertCompileMemberOffset(CPUMCTX,                     msrPAT, 504);
    695 AssertCompileMemberOffset(CPUMCTX,                   msrLSTAR, 512);
    696 AssertCompileMemberOffset(CPUMCTX,                   msrCSTAR, 520);
    697 AssertCompileMemberOffset(CPUMCTX,                  msrSFMASK, 528);
    698 AssertCompileMemberOffset(CPUMCTX,            msrKERNELGSBASE, 536);
    699 AssertCompileMemberOffset(CPUMCTX,                  aPaePdpes, 0x240);
    700 AssertCompileMemberOffset(CPUMCTX,                       aXcr, 0x260);
    701 AssertCompileMemberOffset(CPUMCTX,                fXStateMask, 0x270);
    702 AssertCompileMemberOffset(CPUMCTX,                fUsedFpuGuest, 0x278);
    703 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(u.) XState, 0x300);
    704 AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(u.) abXState, 0x300);
    705 AssertCompileMemberAlignment(CPUMCTX, CPUM_UNION_NM(u.) XState, 0x100);
     663AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rax, 0x0000);
     664AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rcx, 0x0008);
     665AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdx, 0x0010);
     666AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbx, 0x0018);
     667AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsp, 0x0020);
     668AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rbp, 0x0028);
     669AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rsi, 0x0030);
     670AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) rdi, 0x0038);
     671AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.)  r8, 0x0040);
     672AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.)  r9, 0x0048);
     673AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r10, 0x0050);
     674AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r11, 0x0058);
     675AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r12, 0x0060);
     676AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r13, 0x0068);
     677AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r14, 0x0070);
     678AssertCompileMemberOffset(CPUMCTX, CPUM_UNION_NM(g.) CPUM_STRUCT_NM(qw.) r15, 0x0078);
     679AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) es, 0x0080);
     680AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) cs, 0x0098);
     681AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ss, 0x00b0);
     682AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) ds, 0x00c8);
     683AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) fs, 0x00e0);
     684AssertCompileMemberOffset(CPUMCTX,   CPUM_UNION_NM(s.) CPUM_STRUCT_NM(n.) gs, 0x00f8);
     685AssertCompileMemberOffset(CPUMCTX,                                      ldtr, 0x0110);
     686AssertCompileMemberOffset(CPUMCTX,                                        tr, 0x0128);
     687AssertCompileMemberOffset(CPUMCTX,                                       rip, 0x0140);
     688AssertCompileMemberOffset(CPUMCTX,                                    rflags, 0x0148);
     689AssertCompileMemberOffset(CPUMCTX,                                  fInhibit, 0x0150);
     690AssertCompileMemberOffset(CPUMCTX,                            uRipInhibitInt, 0x0158);
     691AssertCompileMemberOffset(CPUMCTX,                                       cr0, 0x0160);
     692AssertCompileMemberOffset(CPUMCTX,                                       cr2, 0x0168);
     693AssertCompileMemberOffset(CPUMCTX,                                       cr3, 0x0170);
     694AssertCompileMemberOffset(CPUMCTX,                                       cr4, 0x0178);
     695AssertCompileMemberOffset(CPUMCTX,                                        dr, 0x0180);
     696AssertCompileMemberOffset(CPUMCTX,                                      gdtr, 0x01c0+6);
     697AssertCompileMemberOffset(CPUMCTX,                                      idtr, 0x01d0+6);
     698AssertCompileMemberOffset(CPUMCTX,                                  SysEnter, 0x01e0);
     699AssertCompileMemberOffset(CPUMCTX,                                   msrEFER, 0x01f8);
     700AssertCompileMemberOffset(CPUMCTX,                                   msrSTAR, 0x0200);
     701AssertCompileMemberOffset(CPUMCTX,                                    msrPAT, 0x0208);
     702AssertCompileMemberOffset(CPUMCTX,                                  msrLSTAR, 0x0210);
     703AssertCompileMemberOffset(CPUMCTX,                                  msrCSTAR, 0x0218);
     704AssertCompileMemberOffset(CPUMCTX,                                 msrSFMASK, 0x0220);
     705AssertCompileMemberOffset(CPUMCTX,                           msrKERNELGSBASE, 0x0228);
     706AssertCompileMemberOffset(CPUMCTX,                                 aPaePdpes, 0x0240);
     707AssertCompileMemberOffset(CPUMCTX,                                      aXcr, 0x0260);
     708AssertCompileMemberOffset(CPUMCTX,                               fXStateMask, 0x0270);
     709AssertCompileMemberOffset(CPUMCTX,                             fUsedFpuGuest, 0x0278);
     710AssertCompileMemberOffset(CPUMCTX,                  CPUM_UNION_NM(u.) XState, 0x0300);
     711AssertCompileMemberOffset(CPUMCTX,                CPUM_UNION_NM(u.) abXState, 0x0300);
     712AssertCompileMemberAlignment(CPUMCTX,               CPUM_UNION_NM(u.) XState, 0x0100);
    706713/* Only do spot checks for hwvirt */
    707 AssertCompileMemberAlignment(CPUMCTX,                   hwvirt, 0x1000);
     714AssertCompileMemberAlignment(CPUMCTX,                                 hwvirt, 0x1000);
    708715AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.Vmcb,                  X86_PAGE_SIZE);
    709716AssertCompileMemberAlignment(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.abMsrBitmap,           X86_PAGE_SIZE);
     
    725732AssertCompileMemberOffset(CPUMCTX,    hwvirt.enmHwvirt,                                   0x11130);
    726733AssertCompileMemberOffset(CPUMCTX,    hwvirt.fGif,                                        0x11134);
    727 AssertCompileMemberOffset(CPUMCTX,    hwvirt.fLocalForcedActions,                         0x11138);
     734AssertCompileMemberOffset(CPUMCTX,    hwvirt.fSavedInhibit,                               0x11138);
    728735AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
    729736AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.)  r0);
     
    10211028
    10221029
     1030/** @name CPUMCTX_INHIBIT_XXX - Interrupt inhibiting flags.
     1031 * @{ */
     1032/** Interrupt shadow following MOV SS or POP SS.
     1033 *
     1034 * When this in effect, both maskable and non-maskable interrupts are blocked
     1035 * from delivery for one instruction.  Same for certain debug exceptions too,
     1036 * unlike the STI variant.
     1037 *
     1038 * It is implementation specific whether a sequence of two or more of these
     1039 * instructions will have any effect on the instruction following the last one
     1040 * of them. */
     1041#define CPUMCTX_INHIBIT_SHADOW_SS   UINT8_C(0x01)
     1042/** Interrupt shadow following STI.
     1043 * Same as CPUMCTX_INHIBIT_SHADOW_SS but without blocking any debug exceptions. */
     1044#define CPUMCTX_INHIBIT_SHADOW_STI  UINT8_C(0x02)
     1045/** Mask combining STI and SS shadowing. */
     1046#define CPUMCTX_INHIBIT_SHADOW      (CPUMCTX_INHIBIT_SHADOW_SS | CPUMCTX_INHIBIT_SHADOW_STI)
     1047
     1048/** Interrupts blocked by NMI delivery.  This condition is cleared by IRET.
     1049 *
     1050 * Section "6.7 NONMASKABLE INTERRUPT (NMI)" in Intel SDM Vol 3A states that
     1051 * "The processor also invokes certain hardware conditions to ensure that no
     1052 * other interrupts, including NMI interrupts, are received until the NMI
     1053 * handler has completed executing."  This flag indicates that these
     1054 * conditions are currently active.  */
     1055#define CPUMCTX_INHIBIT_NMI         UINT8_C(0x04)
     1056/** @} */
     1057
     1058
    10231059/**
    10241060 * Additional guest MSRs (i.e. not part of the CPU context structure).
  • trunk/include/VBox/vmm/em.h

    r96999 r97178  
    138138/** @}  */
    139139
    140 VMMDECL(void)                   EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC);
    141 VMMDECL(RTGCUINTPTR)            EMGetInhibitInterruptsPC(PVMCPU pVCpu);
    142 VMMDECL(bool)                   EMIsInhibitInterruptsActive(PVMCPU pVCpu);
    143140VMMDECL(void)                   EMSetHypercallInstructionsEnabled(PVMCPU pVCpu, bool fEnabled);
    144141VMMDECL(bool)                   EMAreHypercallInstructionsEnabled(PVMCPU pVCpu);
  • trunk/include/VBox/vmm/vm.h

    r96811 r97178  
    490490/** The bit number for VMCPU_FF_DBGF. */
    491491#define VMCPU_FF_DBGF_BIT                   10
    492 /** This action forces the VM to service any pending updates to CR3 (used only
    493  *  by HM). */
    494492/** Hardware virtualized nested-guest interrupt pending. */
    495493#define VMCPU_FF_INTERRUPT_NESTED_GUEST     RT_BIT_64(VMCPU_FF_INTERRUPT_NESTED_GUEST_BIT)
     
    523521/* 22 used to be VMCPU_FF_SELM_SYNC_GDT (raw-mode only). */
    524522/* 23 used to be VMCPU_FF_SELM_SYNC_LDT (raw-mode only). */
    525 /** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
    526 #define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_64(VMCPU_FF_INHIBIT_INTERRUPTS_BIT)
    527 #define VMCPU_FF_INHIBIT_INTERRUPTS_BIT     24
    528 /** Block injection of non-maskable interrupts to the guest. */
    529 #define VMCPU_FF_BLOCK_NMIS                 RT_BIT_64(VMCPU_FF_BLOCK_NMIS_BIT)
    530 #define VMCPU_FF_BLOCK_NMIS_BIT             25
     523/* 24 used to be VMCPU_FF_INHIBIT_INTERRUPTS, which moved to CPUMCTX::fInhibit in v7.0.4. */
     524/* 25 used to be VMCPU_FF_BLOCK_NMIS, which moved to CPUMCTX::fInhibit in v7.0.4. */
    531525/** Force return to Ring-3. */
    532526#define VMCPU_FF_TO_R3                      RT_BIT_64(VMCPU_FF_TO_R3_BIT)
     
    576570/** High priority VMCPU pre-execution actions. */
    577571#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK         (  VMCPU_FF_TIMER        | VMCPU_FF_INTERRUPT_APIC     | VMCPU_FF_INTERRUPT_PIC \
    578                                                  | VMCPU_FF_UPDATE_APIC  | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF \
     572                                                 | VMCPU_FF_UPDATE_APIC  | VMCPU_FF_DBGF \
    579573                                                 | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
    580574                                                 | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF  | VMCPU_FF_VMX_APIC_WRITE \
     
    584578#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK        (  VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY )
    585579/** High priority VMCPU pre raw-mode execution mask. */
    586 #define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL \
    587                                                  | VMCPU_FF_INHIBIT_INTERRUPTS )
     580#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL )
    588581
    589582/** High priority post-execution actions. */
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette