VirtualBox

Changeset 98970 in vbox for trunk


Timestamp: Mar 15, 2023 8:56:57 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 156319
Message: VMM: More ARMv8 x86/amd64 separation work, bugref:10385

Location: trunk
Files: 4 added, 5 edited, 1 copied

Legend: unchanged lines are prefixed with a space, removed lines with "-", added lines with "+"; each hunk header "@@ -old +new @@" gives the starting line number in the old and new revision of the file.
  • trunk/include/VBox/vmm/cpum-x86-amd64.h

    r98959 → r98970
    @@ -34 +34 @@
      */
    
    -#ifndef VBOX_INCLUDED_vmm_cpum_h
    -#define VBOX_INCLUDED_vmm_cpum_h
    +#ifndef VBOX_INCLUDED_vmm_cpum_x86_amd64_h
    +#define VBOX_INCLUDED_vmm_cpum_x86_amd64_h
     #ifndef RT_WITHOUT_PRAGMA_ONCE
     # pragma once
     
    @@ -41 +41 @@
    
     #include <iprt/x86.h>
    -#include <VBox/types.h>
    -#include <VBox/vmm/cpumctx.h>
    -#include <VBox/vmm/stam.h>
    -#include <VBox/vmm/vmapi.h>
     #include <VBox/vmm/hm_svm.h>
     #include <VBox/vmm/hm_vmx.h>
     
    @@ -1568 +1564 @@
     VMMDECL(RTSEL)          CPUMGetGuestGS(PCVMCPU pVCpu);
     VMMDECL(RTSEL)          CPUMGetGuestSS(PCVMCPU pVCpu);
    -VMMDECL(uint64_t)       CPUMGetGuestFlatPC(PVMCPU pVCpu);
    -VMMDECL(uint64_t)       CPUMGetGuestFlatSP(PVMCPU pVCpu);
     VMMDECL(uint64_t)       CPUMGetGuestDR0(PCVMCPU pVCpu);
     VMMDECL(uint64_t)       CPUMGetGuestDR1(PCVMCPU pVCpu);
     
    @@ -1589 +1583 @@
     VMMDECL(CPUMMICROARCH)  CPUMGetGuestMicroarch(PCVM pVM);
     VMMDECL(void)           CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth);
    -VMMDECL(CPUMCPUVENDOR)  CPUMGetHostCpuVendor(PVM pVM);
    -VMMDECL(CPUMMICROARCH)  CPUMGetHostMicroarch(PCVM pVM);
     /** @} */
    
     
    @@ -1645 +1637 @@
     /** @name Misc Guest Predicate Functions.
      * @{  */
    -VMMDECL(bool)       CPUMIsGuestIn64BitCode(PVMCPU pVCpu);
     VMMDECL(bool)       CPUMIsGuestNXEnabled(PCVMCPU pVCpu);
     VMMDECL(bool)       CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu);
     
    @@ -1687 +1678 @@
     VMM_INT_DECL(bool)      CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu);
     VMM_INT_DECL(uint64_t)  CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu);
    -/** @} */
    -
    -/** @name Externalized State Helpers.
    - * @{ */
    -/** @def CPUM_ASSERT_NOT_EXTRN
    - * Macro for asserting that @a a_fNotExtrn are present.
    - *
    - * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
    - * @param   a_fNotExtrn     Mask of CPUMCTX_EXTRN_XXX bits to check.
    - *
    - * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
    - */
    -#define CPUM_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
    -    AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fNotExtrn)), \
    -              ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))
    -
    -/** @def CPUMCTX_ASSERT_NOT_EXTRN
    - * Macro for asserting that @a a_fNotExtrn are present in @a a_pCtx.
    - *
    - * @param   a_pCtx          The CPU context of the calling EMT.
    - * @param   a_fNotExtrn     Mask of CPUMCTX_EXTRN_XXX bits to check.
    - */
    -#define CPUMCTX_ASSERT_NOT_EXTRN(a_pCtx, a_fNotExtrn) \
    -    AssertMsg(!((a_pCtx)->fExtrn & (a_fNotExtrn)), \
    -              ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pCtx)->fExtrn, (a_fNotExtrn)))
    -
    -/** @def CPUM_IMPORT_EXTRN_RET
    - * Macro for making sure the state specified by @a fExtrnImport is present,
    - * calling CPUMImportGuestStateOnDemand() to get it if necessary.
    - *
    - * Will return if CPUMImportGuestStateOnDemand() fails.
    - *
    - * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
    - * @param   a_fExtrnImport  Mask of CPUMCTX_EXTRN_XXX bits to get.
    - * @thread  EMT(a_pVCpu)
    - *
    - * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
    - */
    -#define CPUM_IMPORT_EXTRN_RET(a_pVCpu, a_fExtrnImport) \
    -    do { \
    -        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
    -        { /* already present, consider this likely */ } \
    -        else \
    -        { \
    -            int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
    -            AssertRCReturn(rcCpumImport, rcCpumImport); \
    -        } \
    -    } while (0)
    -
    -/** @def CPUM_IMPORT_EXTRN_RCSTRICT
    - * Macro for making sure the state specified by @a fExtrnImport is present,
    - * calling CPUMImportGuestStateOnDemand() to get it if necessary.
    - *
    - * Will update a_rcStrict if CPUMImportGuestStateOnDemand() fails.
    - *
    - * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
    - * @param   a_fExtrnImport  Mask of CPUMCTX_EXTRN_XXX bits to get.
    - * @param   a_rcStrict      Strict status code variable to update on failure.
    - * @thread  EMT(a_pVCpu)
    - *
    - * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
    - */
    -#define CPUM_IMPORT_EXTRN_RCSTRICT(a_pVCpu, a_fExtrnImport, a_rcStrict) \
    -    do { \
    -        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
    -        { /* already present, consider this likely */ } \
    -        else \
    -        { \
    -            int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
    -            AssertStmt(RT_SUCCESS(rcCpumImport) || RT_FAILURE_NP(a_rcStrict), a_rcStrict = rcCpumImport); \
    -        } \
    -    } while (0)
    -
    -VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport);
     /** @} */
    
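    The externalized-state helpers removed above are the on-demand guest-register import machinery used by the hardware-assisted execution code; this changeset relocates them into cpum-x86-amd64.h. A minimal usage sketch (the exit handler and the two-byte instruction skip are hypothetical; the macros and the CPUMCTX_EXTRN_XXX flags come from the header):

        #define VMCPU_INCL_CPUM_GST_CTX     /* required by the CPUM_* macros above */
        #include <VBox/vmm/cpum.h>

        static int hypotheticalExitHandler(PVMCPUCC pVCpu)
        {
            /* Import RIP and RFLAGS from the hardware state if they are still
               marked external, returning the status code on failure. */
            CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);

            /* From here on both fields are guaranteed to be present. */
            CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
            pVCpu->cpum.GstCtx.rip += 2;    /* e.g. skip a two-byte instruction */
            return VINF_SUCCESS;
        }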
     
    @@ -3171 +3088 @@
      */
    
    -VMMR3DECL(int)          CPUMR3Init(PVM pVM);
    -VMMR3DECL(int)          CPUMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
    -VMMR3DECL(void)         CPUMR3LogCpuIdAndMsrFeatures(PVM pVM);
    -VMMR3DECL(void)         CPUMR3Relocate(PVM pVM);
    -VMMR3DECL(int)          CPUMR3Term(PVM pVM);
    -VMMR3DECL(void)         CPUMR3Reset(PVM pVM);
    -VMMR3DECL(void)         CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
    -VMMDECL(bool)           CPUMR3IsStateRestorePending(PVM pVM);
     VMMR3DECL(int)          CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd);
    
     
    @@ -3189 +3098 @@
     VMMR3DECL(int)              CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
     VMMR3DECL(const char *)     CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
    -VMMR3DECL(const char *)     CPUMCpuVendorName(CPUMCPUVENDOR enmVendor);
     #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
     VMMR3DECL(uint32_t)         CPUMR3DeterminHostMxCsrMask(void);
     
    @@ -3249 +3157 @@
    
    
    -#endif /* !VBOX_INCLUDED_vmm_cpum_h */
    -
    +#endif /* !VBOX_INCLUDED_vmm_cpum_x86_amd64_h */
    +
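    With its include guard renamed, cpum-x86-amd64.h stands on its own; as the cpum.h diff below shows, it is meant to be pulled in through the umbrella header rather than included directly. A consumer-side sketch (the helper is hypothetical; CPUMGetGuestMicroarch and the predicate macro are declared in the headers touched by this changeset):

        #include <VBox/vmm/cpum.h>      /* umbrella header, selects the per-target API */

        /* Hypothetical helper: true when the guest profile is a NetBurst-era Intel CPU. */
        static bool isGuestNetburst(PVM pVM)
        {
            CPUMMICROARCH const enmMicroarch = CPUMGetGuestMicroarch(pVM);
            return CPUMMICROARCH_IS_INTEL_NETBURST(enmMicroarch);
        }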
  • trunk/include/VBox/vmm/cpum.h

    r98827 → r98970
    @@ -40 +40 @@
     #endif
    
    -#include <iprt/x86.h>
     #include <VBox/types.h>
     #include <VBox/vmm/cpumctx.h>
     #include <VBox/vmm/stam.h>
     #include <VBox/vmm/vmapi.h>
    -#include <VBox/vmm/hm_svm.h>
    -#include <VBox/vmm/hm_vmx.h>
    +#include <VBox/vmm/cpum-common.h>
    +
    +#ifndef VBOX_VMM_TARGET_ARMV8
    +# include <VBox/vmm/cpum-x86-amd64.h>
    +#else
    +# include <VBox/vmm/cpum-armv8.h>
    +#endif
    
     RT_C_DECLS_BEGIN
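    This hunk is the heart of the split: cpum.h becomes a thin umbrella that always provides the common API (cpum-common.h) and picks the per-target API via VBOX_VMM_TARGET_ARMV8. Code using x86-only definitions, such as the CPUID feature enum removed in the next hunk, must sit behind the same guard. A hedged sketch (the function is illustrative; CPUMR3SetGuestCpuIdFeature is assumed to be the existing ring-3 API for toggling guest CPUID features):

        #include <VBox/vmm/cpum.h>

        #ifndef VBOX_VMM_TARGET_ARMV8
        /* Only meaningful for x86/amd64 guests: expose the NX feature bit in CPUID. */
        static void hypotheticalEnableNx(PVM pVM)
        {
            CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
        }
        #endif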
     
    5559 */
    5660
    57 /**
    58  * CPUID feature to set or clear.
    59  */
    60 typedef enum CPUMCPUIDFEATURE
    61 {
    62     CPUMCPUIDFEATURE_INVALID = 0,
    63     /** The APIC feature bit. (Std+Ext)
    64      * Note! There is a per-cpu flag for masking this CPUID feature bit when the
    65      *       APICBASE.ENABLED bit is zero.  So, this feature is only set/cleared
    66      *       at VM construction time like all the others.  This didn't used to be
    67      *       that way, this is new with 5.1. */
    68     CPUMCPUIDFEATURE_APIC,
    69     /** The sysenter/sysexit feature bit. (Std) */
    70     CPUMCPUIDFEATURE_SEP,
    71     /** The SYSCALL/SYSEXIT feature bit (64 bits mode only for Intel CPUs). (Ext) */
    72     CPUMCPUIDFEATURE_SYSCALL,
    73     /** The PAE feature bit. (Std+Ext) */
    74     CPUMCPUIDFEATURE_PAE,
    75     /** The NX feature bit. (Ext) */
    76     CPUMCPUIDFEATURE_NX,
    77     /** The LAHF/SAHF feature bit (64 bits mode only). (Ext) */
    78     CPUMCPUIDFEATURE_LAHF,
    79     /** The LONG MODE feature bit. (Ext) */
    80     CPUMCPUIDFEATURE_LONG_MODE,
    81     /** The x2APIC  feature bit. (Std) */
    82     CPUMCPUIDFEATURE_X2APIC,
    83     /** The RDTSCP feature bit. (Ext) */
    84     CPUMCPUIDFEATURE_RDTSCP,
    85     /** The Hypervisor Present bit. (Std) */
    86     CPUMCPUIDFEATURE_HVP,
    87     /** The speculation control feature bits. (StExt) */
    88     CPUMCPUIDFEATURE_SPEC_CTRL,
    89     /** 32bit hackishness. */
    90     CPUMCPUIDFEATURE_32BIT_HACK = 0x7fffffff
    91 } CPUMCPUIDFEATURE;
    92 
    93 /**
    94  * CPU Vendor.
    95  */
    96 typedef enum CPUMCPUVENDOR
    97 {
    98     CPUMCPUVENDOR_INVALID = 0,
    99     CPUMCPUVENDOR_INTEL,
    100     CPUMCPUVENDOR_AMD,
    101     CPUMCPUVENDOR_VIA,
    102     CPUMCPUVENDOR_CYRIX,
    103     CPUMCPUVENDOR_SHANGHAI,
    104     CPUMCPUVENDOR_HYGON,
    105     CPUMCPUVENDOR_UNKNOWN,
    106     /** 32bit hackishness. */
    107     CPUMCPUVENDOR_32BIT_HACK = 0x7fffffff
    108 } CPUMCPUVENDOR;
    109 
    110 
    111 /**
    112  * X86 and AMD64 CPU microarchitectures and in processor generations.
    113  *
    114  * @remarks The separation here is sometimes a little bit too finely grained,
    115  *          and the differences is more like processor generation than micro
    116  *          arch.  This can be useful, so we'll provide functions for getting at
    117  *          more coarse grained info.
    118  */
    119 typedef enum CPUMMICROARCH
    120 {
    121     kCpumMicroarch_Invalid = 0,
    122 
    123     kCpumMicroarch_Intel_First,
    124 
    125     kCpumMicroarch_Intel_8086 = kCpumMicroarch_Intel_First,
    126     kCpumMicroarch_Intel_80186,
    127     kCpumMicroarch_Intel_80286,
    128     kCpumMicroarch_Intel_80386,
    129     kCpumMicroarch_Intel_80486,
    130     kCpumMicroarch_Intel_P5,
    131 
    132     kCpumMicroarch_Intel_P6_Core_Atom_First,
    133     kCpumMicroarch_Intel_P6 = kCpumMicroarch_Intel_P6_Core_Atom_First,
    134     kCpumMicroarch_Intel_P6_II,
    135     kCpumMicroarch_Intel_P6_III,
    136 
    137     kCpumMicroarch_Intel_P6_M_Banias,
    138     kCpumMicroarch_Intel_P6_M_Dothan,
    139     kCpumMicroarch_Intel_Core_Yonah,        /**< Core, also known as Enhanced Pentium M. */
    140 
    141     kCpumMicroarch_Intel_Core2_First,
    142     kCpumMicroarch_Intel_Core2_Merom = kCpumMicroarch_Intel_Core2_First,    /**< 65nm, Merom/Conroe/Kentsfield/Tigerton */
    143     kCpumMicroarch_Intel_Core2_Penryn,      /**< 45nm, Penryn/Wolfdale/Yorkfield/Harpertown */
    144     kCpumMicroarch_Intel_Core2_End,
    145 
    146     kCpumMicroarch_Intel_Core7_First,
    147     kCpumMicroarch_Intel_Core7_Nehalem = kCpumMicroarch_Intel_Core7_First,
    148     kCpumMicroarch_Intel_Core7_Westmere,
    149     kCpumMicroarch_Intel_Core7_SandyBridge,
    150     kCpumMicroarch_Intel_Core7_IvyBridge,
    151     kCpumMicroarch_Intel_Core7_Haswell,
    152     kCpumMicroarch_Intel_Core7_Broadwell,
    153     kCpumMicroarch_Intel_Core7_Skylake,
    154     kCpumMicroarch_Intel_Core7_KabyLake,
    155     kCpumMicroarch_Intel_Core7_CoffeeLake,
    156     kCpumMicroarch_Intel_Core7_WhiskeyLake,
    157     kCpumMicroarch_Intel_Core7_CascadeLake,
    158     kCpumMicroarch_Intel_Core7_CannonLake,  /**< Limited 10nm. */
    159     kCpumMicroarch_Intel_Core7_CometLake,   /**< 10th gen, 14nm desktop + high power mobile.  */
    160     kCpumMicroarch_Intel_Core7_IceLake,     /**< 10th gen, 10nm mobile and some Xeons.  Actually 'Sunny Cove' march. */
    161     kCpumMicroarch_Intel_Core7_SunnyCove = kCpumMicroarch_Intel_Core7_IceLake,
    162     kCpumMicroarch_Intel_Core7_RocketLake,  /**< 11th gen, 14nm desktop + high power mobile.  Aka 'Cypress Cove', backport of 'Willow Cove' to 14nm. */
    163     kCpumMicroarch_Intel_Core7_CypressCove = kCpumMicroarch_Intel_Core7_RocketLake,
    164     kCpumMicroarch_Intel_Core7_TigerLake,   /**< 11th gen, 10nm mobile.  Actually 'Willow Cove' march. */
    165     kCpumMicroarch_Intel_Core7_WillowCove = kCpumMicroarch_Intel_Core7_TigerLake,
    166     kCpumMicroarch_Intel_Core7_AlderLake,   /**< 12th gen, 10nm all platforms(?). */
    167     kCpumMicroarch_Intel_Core7_SapphireRapids, /**< 12th? gen, 10nm server? */
    168     kCpumMicroarch_Intel_Core7_End,
    169 
    170     kCpumMicroarch_Intel_Atom_First,
    171     kCpumMicroarch_Intel_Atom_Bonnell = kCpumMicroarch_Intel_Atom_First,
    172     kCpumMicroarch_Intel_Atom_Lincroft,     /**< Second generation bonnell (44nm). */
    173     kCpumMicroarch_Intel_Atom_Saltwell,     /**< 32nm shrink of Bonnell. */
    174     kCpumMicroarch_Intel_Atom_Silvermont,   /**< 22nm */
    175     kCpumMicroarch_Intel_Atom_Airmount,     /**< 14nm */
    176     kCpumMicroarch_Intel_Atom_Goldmont,     /**< 14nm */
    177     kCpumMicroarch_Intel_Atom_GoldmontPlus, /**< 14nm */
    178     kCpumMicroarch_Intel_Atom_Unknown,
    179     kCpumMicroarch_Intel_Atom_End,
    180 
    181 
    182     kCpumMicroarch_Intel_Phi_First,
    183     kCpumMicroarch_Intel_Phi_KnightsFerry = kCpumMicroarch_Intel_Phi_First,
    184     kCpumMicroarch_Intel_Phi_KnightsCorner,
    185     kCpumMicroarch_Intel_Phi_KnightsLanding,
    186     kCpumMicroarch_Intel_Phi_KnightsHill,
    187     kCpumMicroarch_Intel_Phi_KnightsMill,
    188     kCpumMicroarch_Intel_Phi_End,
    189 
    190     kCpumMicroarch_Intel_P6_Core_Atom_End,
    191 
    192     kCpumMicroarch_Intel_NB_First,
    193     kCpumMicroarch_Intel_NB_Willamette = kCpumMicroarch_Intel_NB_First, /**< 180nm */
    194     kCpumMicroarch_Intel_NB_Northwood,      /**< 130nm */
    195     kCpumMicroarch_Intel_NB_Prescott,       /**< 90nm */
    196     kCpumMicroarch_Intel_NB_Prescott2M,     /**< 90nm */
    197     kCpumMicroarch_Intel_NB_CedarMill,      /**< 65nm */
    198     kCpumMicroarch_Intel_NB_Gallatin,       /**< 90nm Xeon, Pentium 4 Extreme Edition ("Emergency Edition"). */
    199     kCpumMicroarch_Intel_NB_Unknown,
    200     kCpumMicroarch_Intel_NB_End,
    201 
    202     kCpumMicroarch_Intel_Unknown,
    203     kCpumMicroarch_Intel_End,
    204 
    205     kCpumMicroarch_AMD_First,
    206     kCpumMicroarch_AMD_Am286 = kCpumMicroarch_AMD_First,
    207     kCpumMicroarch_AMD_Am386,
    208     kCpumMicroarch_AMD_Am486,
    209     kCpumMicroarch_AMD_Am486Enh,            /**< Covers Am5x86 as well. */
    210     kCpumMicroarch_AMD_K5,
    211     kCpumMicroarch_AMD_K6,
    212 
    213     kCpumMicroarch_AMD_K7_First,
    214     kCpumMicroarch_AMD_K7_Palomino = kCpumMicroarch_AMD_K7_First,
    215     kCpumMicroarch_AMD_K7_Spitfire,
    216     kCpumMicroarch_AMD_K7_Thunderbird,
    217     kCpumMicroarch_AMD_K7_Morgan,
    218     kCpumMicroarch_AMD_K7_Thoroughbred,
    219     kCpumMicroarch_AMD_K7_Barton,
    220     kCpumMicroarch_AMD_K7_Unknown,
    221     kCpumMicroarch_AMD_K7_End,
    222 
    223     kCpumMicroarch_AMD_K8_First,
    224     kCpumMicroarch_AMD_K8_130nm = kCpumMicroarch_AMD_K8_First, /**< 130nm Clawhammer, Sledgehammer, Newcastle, Paris, Odessa, Dublin */
    225     kCpumMicroarch_AMD_K8_90nm,             /**< 90nm shrink */
    226     kCpumMicroarch_AMD_K8_90nm_DualCore,    /**< 90nm with two cores. */
    227     kCpumMicroarch_AMD_K8_90nm_AMDV,        /**< 90nm with AMD-V (usually) and two cores (usually). */
    228     kCpumMicroarch_AMD_K8_65nm,             /**< 65nm shrink. */
    229     kCpumMicroarch_AMD_K8_End,
    230 
    231     kCpumMicroarch_AMD_K10,
    232     kCpumMicroarch_AMD_K10_Lion,
    233     kCpumMicroarch_AMD_K10_Llano,
    234     kCpumMicroarch_AMD_Bobcat,
    235     kCpumMicroarch_AMD_Jaguar,
    236 
    237     kCpumMicroarch_AMD_15h_First,
    238     kCpumMicroarch_AMD_15h_Bulldozer = kCpumMicroarch_AMD_15h_First,
    239     kCpumMicroarch_AMD_15h_Piledriver,
    240     kCpumMicroarch_AMD_15h_Steamroller,     /**< Yet to be released, might have different family.  */
    241     kCpumMicroarch_AMD_15h_Excavator,       /**< Yet to be released, might have different family.  */
    242     kCpumMicroarch_AMD_15h_Unknown,
    243     kCpumMicroarch_AMD_15h_End,
    244 
    245     kCpumMicroarch_AMD_16h_First,
    246     kCpumMicroarch_AMD_16h_End,
    247 
    248     kCpumMicroarch_AMD_Zen_First,
    249     kCpumMicroarch_AMD_Zen_Ryzen = kCpumMicroarch_AMD_Zen_First,
    250     kCpumMicroarch_AMD_Zen_End,
    251 
    252     kCpumMicroarch_AMD_Unknown,
    253     kCpumMicroarch_AMD_End,
    254 
    255     kCpumMicroarch_Hygon_First,
    256     kCpumMicroarch_Hygon_Dhyana = kCpumMicroarch_Hygon_First,
    257     kCpumMicroarch_Hygon_Unknown,
    258     kCpumMicroarch_Hygon_End,
    259 
    260     kCpumMicroarch_VIA_First,
    261     kCpumMicroarch_Centaur_C6 = kCpumMicroarch_VIA_First,
    262     kCpumMicroarch_Centaur_C2,
    263     kCpumMicroarch_Centaur_C3,
    264     kCpumMicroarch_VIA_C3_M2,
    265     kCpumMicroarch_VIA_C3_C5A,          /**< 180nm Samuel - Cyrix III, C3, 1GigaPro. */
    266     kCpumMicroarch_VIA_C3_C5B,          /**< 150nm Samuel 2 - Cyrix III, C3, 1GigaPro, Eden ESP, XP 2000+. */
    267     kCpumMicroarch_VIA_C3_C5C,          /**< 130nm Ezra - C3, Eden ESP. */
    268     kCpumMicroarch_VIA_C3_C5N,          /**< 130nm Ezra-T - C3. */
    269     kCpumMicroarch_VIA_C3_C5XL,         /**< 130nm Nehemiah - C3, Eden ESP, Eden-N. */
    270     kCpumMicroarch_VIA_C3_C5P,          /**< 130nm Nehemiah+ - C3. */
    271     kCpumMicroarch_VIA_C7_C5J,          /**< 90nm Esther - C7, C7-D, C7-M, Eden, Eden ULV. */
    272     kCpumMicroarch_VIA_Isaiah,
    273     kCpumMicroarch_VIA_Unknown,
    274     kCpumMicroarch_VIA_End,
    275 
    276     kCpumMicroarch_Shanghai_First,
    277     kCpumMicroarch_Shanghai_Wudaokou = kCpumMicroarch_Shanghai_First,
    278     kCpumMicroarch_Shanghai_Unknown,
    279     kCpumMicroarch_Shanghai_End,
    280 
    281     kCpumMicroarch_Cyrix_First,
    282     kCpumMicroarch_Cyrix_5x86 = kCpumMicroarch_Cyrix_First,
    283     kCpumMicroarch_Cyrix_M1,
    284     kCpumMicroarch_Cyrix_MediaGX,
    285     kCpumMicroarch_Cyrix_MediaGXm,
    286     kCpumMicroarch_Cyrix_M2,
    287     kCpumMicroarch_Cyrix_Unknown,
    288     kCpumMicroarch_Cyrix_End,
    289 
    290     kCpumMicroarch_NEC_First,
    291     kCpumMicroarch_NEC_V20 = kCpumMicroarch_NEC_First,
    292     kCpumMicroarch_NEC_V30,
    293     kCpumMicroarch_NEC_End,
    294 
    295     kCpumMicroarch_Unknown,
    296 
    297     kCpumMicroarch_32BitHack = 0x7fffffff
    298 } CPUMMICROARCH;
    299 
    300 
    301 /** Predicate macro for catching netburst CPUs. */
    302 #define CPUMMICROARCH_IS_INTEL_NETBURST(a_enmMicroarch) \
    303     ((a_enmMicroarch) >= kCpumMicroarch_Intel_NB_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_NB_End)
    304 
    305 /** Predicate macro for catching Core7 CPUs. */
    306 #define CPUMMICROARCH_IS_INTEL_CORE7(a_enmMicroarch) \
    307     ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core7_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core7_End)
    308 
    309 /** Predicate macro for catching Core 2 CPUs. */
    310 #define CPUMMICROARCH_IS_INTEL_CORE2(a_enmMicroarch) \
    311     ((a_enmMicroarch) >= kCpumMicroarch_Intel_Core2_First && (a_enmMicroarch) <= kCpumMicroarch_Intel_Core2_End)
    312 
    313 /** Predicate macro for catching Atom CPUs, Silvermont and upwards. */
    314 #define CPUMMICROARCH_IS_INTEL_SILVERMONT_PLUS(a_enmMicroarch) \
    315     ((a_enmMicroarch) >= kCpumMicroarch_Intel_Atom_Silvermont && (a_enmMicroarch) <= kCpumMicroarch_Intel_Atom_End)
    316 
    317 /** Predicate macro for catching AMD Family OFh CPUs (aka K8).    */
    318 #define CPUMMICROARCH_IS_AMD_FAM_0FH(a_enmMicroarch) \
    319     ((a_enmMicroarch) >= kCpumMicroarch_AMD_K8_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_K8_End)
    320 
    321 /** Predicate macro for catching AMD Family 10H CPUs (aka K10).    */
    322 #define CPUMMICROARCH_IS_AMD_FAM_10H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10)
    323 
    324 /** Predicate macro for catching AMD Family 11H CPUs (aka Lion).    */
    325 #define CPUMMICROARCH_IS_AMD_FAM_11H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Lion)
    326 
    327 /** Predicate macro for catching AMD Family 12H CPUs (aka Llano).    */
    328 #define CPUMMICROARCH_IS_AMD_FAM_12H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_K10_Llano)
    329 
    330 /** Predicate macro for catching AMD Family 14H CPUs (aka Bobcat).    */
    331 #define CPUMMICROARCH_IS_AMD_FAM_14H(a_enmMicroarch) ((a_enmMicroarch) == kCpumMicroarch_AMD_Bobcat)
    332 
    333 /** Predicate macro for catching AMD Family 15H CPUs (bulldozer and it's
    334  * decendants). */
    335 #define CPUMMICROARCH_IS_AMD_FAM_15H(a_enmMicroarch) \
    336     ((a_enmMicroarch) >= kCpumMicroarch_AMD_15h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_15h_End)
    337 
    338 /** Predicate macro for catching AMD Family 16H CPUs. */
    339 #define CPUMMICROARCH_IS_AMD_FAM_16H(a_enmMicroarch) \
    340     ((a_enmMicroarch) >= kCpumMicroarch_AMD_16h_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_16h_End)
    341 
    342 /** Predicate macro for catching AMD Zen Family CPUs. */
    343 #define CPUMMICROARCH_IS_AMD_FAM_ZEN(a_enmMicroarch) \
    344     ((a_enmMicroarch) >= kCpumMicroarch_AMD_Zen_First && (a_enmMicroarch) <= kCpumMicroarch_AMD_Zen_End)
    345 
    346 
    347 /**
    348  * CPUID leaf.
    349  *
    350  * @remarks This structure is used by the patch manager and is therefore
    351  *          more or less set in stone.
    352  */
    353 typedef struct CPUMCPUIDLEAF
    354 {
    355     /** The leaf number. */
    356     uint32_t    uLeaf;
    357     /** The sub-leaf number. */
    358     uint32_t    uSubLeaf;
    359     /** Sub-leaf mask.  This is 0 when sub-leaves aren't used. */
    360     uint32_t    fSubLeafMask;
    361 
    362     /** The EAX value. */
    363     uint32_t    uEax;
    364     /** The EBX value. */
    365     uint32_t    uEbx;
    366     /** The ECX value. */
    367     uint32_t    uEcx;
    368     /** The EDX value. */
    369     uint32_t    uEdx;
    370 
    371     /** Flags. */
    372     uint32_t    fFlags;
    373 } CPUMCPUIDLEAF;
    37461#ifndef VBOX_FOR_DTRACE_LIB
    375 AssertCompileSize(CPUMCPUIDLEAF, 32);
    376 #endif
    377 /** Pointer to a CPUID leaf. */
    378 typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
    379 /** Pointer to a const CPUID leaf. */
    380 typedef CPUMCPUIDLEAF const *PCCPUMCPUIDLEAF;
    381 
    382 /** @name CPUMCPUIDLEAF::fFlags
    383  * @{ */
    384 /** Indicates working intel leaf 0xb where the lower 8 ECX bits are not modified
    385  * and EDX containing the extended APIC ID. */
    386 #define CPUMCPUIDLEAF_F_INTEL_TOPOLOGY_SUBLEAVES    RT_BIT_32(0)
    387 /** The leaf contains an APIC ID that needs changing to that of the current CPU. */
    388 #define CPUMCPUIDLEAF_F_CONTAINS_APIC_ID            RT_BIT_32(1)
    389 /** The leaf contains an OSXSAVE which needs individual handling on each CPU. */
    390 #define CPUMCPUIDLEAF_F_CONTAINS_OSXSAVE            RT_BIT_32(2)
    391 /** The leaf contains an APIC feature bit which is tied to APICBASE.EN. */
    392 #define CPUMCPUIDLEAF_F_CONTAINS_APIC               RT_BIT_32(3)
    393 /** Mask of the valid flags. */
    394 #define CPUMCPUIDLEAF_F_VALID_MASK                  UINT32_C(0xf)
    395 /** @} */
    396 
    397 /**
    398  * Method used to deal with unknown CPUID leaves.
    399  * @remarks Used in patch code.
    400  */
    401 typedef enum CPUMUNKNOWNCPUID
    402 {
    403     /** Invalid zero value. */
    404     CPUMUNKNOWNCPUID_INVALID = 0,
    405     /** Use given default values (DefCpuId). */
    406     CPUMUNKNOWNCPUID_DEFAULTS,
    407     /** Return the last standard leaf.
    408      * Intel Sandy Bridge has been observed doing this. */
    409     CPUMUNKNOWNCPUID_LAST_STD_LEAF,
    410     /** Return the last standard leaf, with ecx observed.
    411      * Intel Sandy Bridge has been observed doing this. */
    412     CPUMUNKNOWNCPUID_LAST_STD_LEAF_WITH_ECX,
    413     /** The register values are passed thru unmodified. */
    414     CPUMUNKNOWNCPUID_PASSTHRU,
    415     /** End of valid value. */
    416     CPUMUNKNOWNCPUID_END,
    417     /** Ensure 32-bit type. */
    418     CPUMUNKNOWNCPUID_32BIT_HACK = 0x7fffffff
    419 } CPUMUNKNOWNCPUID;
    420 /** Pointer to unknown CPUID leaf method. */
    421 typedef CPUMUNKNOWNCPUID *PCPUMUNKNOWNCPUID;
    422 
    423 
    424 /**
    425  * The register set returned by a CPUID operation.
    426  */
    427 typedef struct CPUMCPUID
    428 {
    429     uint32_t uEax;
    430     uint32_t uEbx;
    431     uint32_t uEcx;
    432     uint32_t uEdx;
    433 } CPUMCPUID;
    434 /** Pointer to a CPUID leaf. */
    435 typedef CPUMCPUID *PCPUMCPUID;
    436 /** Pointer to a const CPUID leaf. */
    437 typedef const CPUMCPUID *PCCPUMCPUID;
    438 
    439 
    440 /**
    441  * MSR read functions.
    442  */
    443 typedef enum CPUMMSRRDFN
    444 {
    445     /** Invalid zero value. */
    446     kCpumMsrRdFn_Invalid = 0,
    447     /** Return the CPUMMSRRANGE::uValue. */
    448     kCpumMsrRdFn_FixedValue,
    449     /** Alias to the MSR range starting at the MSR given by
    450      * CPUMMSRRANGE::uValue.  Must be used in pair with
    451      * kCpumMsrWrFn_MsrAlias. */
    452     kCpumMsrRdFn_MsrAlias,
    453     /** Write only register, GP all read attempts. */
    454     kCpumMsrRdFn_WriteOnly,
    455 
    456     kCpumMsrRdFn_Ia32P5McAddr,
    457     kCpumMsrRdFn_Ia32P5McType,
    458     kCpumMsrRdFn_Ia32TimestampCounter,
    459     kCpumMsrRdFn_Ia32PlatformId,            /**< Takes real CPU value for reference. */
    460     kCpumMsrRdFn_Ia32ApicBase,
    461     kCpumMsrRdFn_Ia32FeatureControl,
    462     kCpumMsrRdFn_Ia32BiosSignId,            /**< Range value returned. */
    463     kCpumMsrRdFn_Ia32SmmMonitorCtl,
    464     kCpumMsrRdFn_Ia32PmcN,
    465     kCpumMsrRdFn_Ia32MonitorFilterLineSize,
    466     kCpumMsrRdFn_Ia32MPerf,
    467     kCpumMsrRdFn_Ia32APerf,
    468     kCpumMsrRdFn_Ia32MtrrCap,               /**< Takes real CPU value for reference.  */
    469     kCpumMsrRdFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    470     kCpumMsrRdFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    471     kCpumMsrRdFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    472     kCpumMsrRdFn_Ia32MtrrDefType,
    473     kCpumMsrRdFn_Ia32Pat,
    474     kCpumMsrRdFn_Ia32SysEnterCs,
    475     kCpumMsrRdFn_Ia32SysEnterEsp,
    476     kCpumMsrRdFn_Ia32SysEnterEip,
    477     kCpumMsrRdFn_Ia32McgCap,
    478     kCpumMsrRdFn_Ia32McgStatus,
    479     kCpumMsrRdFn_Ia32McgCtl,
    480     kCpumMsrRdFn_Ia32DebugCtl,
    481     kCpumMsrRdFn_Ia32SmrrPhysBase,
    482     kCpumMsrRdFn_Ia32SmrrPhysMask,
    483     kCpumMsrRdFn_Ia32PlatformDcaCap,
    484     kCpumMsrRdFn_Ia32CpuDcaCap,
    485     kCpumMsrRdFn_Ia32Dca0Cap,
    486     kCpumMsrRdFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    487     kCpumMsrRdFn_Ia32PerfStatus,            /**< Range value returned. */
    488     kCpumMsrRdFn_Ia32PerfCtl,               /**< Range value returned. */
    489     kCpumMsrRdFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    490     kCpumMsrRdFn_Ia32PerfCapabilities,      /**< Takes reference value. */
    491     kCpumMsrRdFn_Ia32FixedCtrCtrl,
    492     kCpumMsrRdFn_Ia32PerfGlobalStatus,      /**< Takes reference value. */
    493     kCpumMsrRdFn_Ia32PerfGlobalCtrl,
    494     kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
    495     kCpumMsrRdFn_Ia32PebsEnable,
    496     kCpumMsrRdFn_Ia32ClockModulation,       /**< Range value returned. */
    497     kCpumMsrRdFn_Ia32ThermInterrupt,        /**< Range value returned. */
    498     kCpumMsrRdFn_Ia32ThermStatus,           /**< Range value returned. */
    499     kCpumMsrRdFn_Ia32Therm2Ctl,             /**< Range value returned. */
    500     kCpumMsrRdFn_Ia32MiscEnable,            /**< Range value returned. */
    501     kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    502     kCpumMsrRdFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    503     kCpumMsrRdFn_Ia32DsArea,
    504     kCpumMsrRdFn_Ia32TscDeadline,
    505     kCpumMsrRdFn_Ia32X2ApicN,
    506     kCpumMsrRdFn_Ia32DebugInterface,
    507     kCpumMsrRdFn_Ia32VmxBasic,              /**< Takes real value as reference. */
    508     kCpumMsrRdFn_Ia32VmxPinbasedCtls,       /**< Takes real value as reference. */
    509     kCpumMsrRdFn_Ia32VmxProcbasedCtls,      /**< Takes real value as reference. */
    510     kCpumMsrRdFn_Ia32VmxExitCtls,           /**< Takes real value as reference. */
    511     kCpumMsrRdFn_Ia32VmxEntryCtls,          /**< Takes real value as reference. */
    512     kCpumMsrRdFn_Ia32VmxMisc,               /**< Takes real value as reference. */
    513     kCpumMsrRdFn_Ia32VmxCr0Fixed0,          /**< Takes real value as reference. */
    514     kCpumMsrRdFn_Ia32VmxCr0Fixed1,          /**< Takes real value as reference. */
    515     kCpumMsrRdFn_Ia32VmxCr4Fixed0,          /**< Takes real value as reference. */
    516     kCpumMsrRdFn_Ia32VmxCr4Fixed1,          /**< Takes real value as reference. */
    517     kCpumMsrRdFn_Ia32VmxVmcsEnum,           /**< Takes real value as reference. */
    518     kCpumMsrRdFn_Ia32VmxProcBasedCtls2,     /**< Takes real value as reference. */
    519     kCpumMsrRdFn_Ia32VmxEptVpidCap,         /**< Takes real value as reference. */
    520     kCpumMsrRdFn_Ia32VmxTruePinbasedCtls,   /**< Takes real value as reference. */
    521     kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls,  /**< Takes real value as reference. */
    522     kCpumMsrRdFn_Ia32VmxTrueExitCtls,       /**< Takes real value as reference. */
    523     kCpumMsrRdFn_Ia32VmxTrueEntryCtls,      /**< Takes real value as reference. */
    524     kCpumMsrRdFn_Ia32VmxVmFunc,             /**< Takes real value as reference. */
    525     kCpumMsrRdFn_Ia32SpecCtrl,
    526     kCpumMsrRdFn_Ia32ArchCapabilities,
    527 
    528     kCpumMsrRdFn_Amd64Efer,
    529     kCpumMsrRdFn_Amd64SyscallTarget,
    530     kCpumMsrRdFn_Amd64LongSyscallTarget,
    531     kCpumMsrRdFn_Amd64CompSyscallTarget,
    532     kCpumMsrRdFn_Amd64SyscallFlagMask,
    533     kCpumMsrRdFn_Amd64FsBase,
    534     kCpumMsrRdFn_Amd64GsBase,
    535     kCpumMsrRdFn_Amd64KernelGsBase,
    536     kCpumMsrRdFn_Amd64TscAux,
    537 
    538     kCpumMsrRdFn_IntelEblCrPowerOn,
    539     kCpumMsrRdFn_IntelI7CoreThreadCount,
    540     kCpumMsrRdFn_IntelP4EbcHardPowerOn,
    541     kCpumMsrRdFn_IntelP4EbcSoftPowerOn,
    542     kCpumMsrRdFn_IntelP4EbcFrequencyId,
    543     kCpumMsrRdFn_IntelP6FsbFrequency,       /**< Takes real value as reference. */
    544     kCpumMsrRdFn_IntelPlatformInfo,
    545     kCpumMsrRdFn_IntelFlexRatio,            /**< Takes real value as reference. */
    546     kCpumMsrRdFn_IntelPkgCStConfigControl,
    547     kCpumMsrRdFn_IntelPmgIoCaptureBase,
    548     kCpumMsrRdFn_IntelLastBranchFromToN,
    549     kCpumMsrRdFn_IntelLastBranchFromN,
    550     kCpumMsrRdFn_IntelLastBranchToN,
    551     kCpumMsrRdFn_IntelLastBranchTos,
    552     kCpumMsrRdFn_IntelBblCrCtl,
    553     kCpumMsrRdFn_IntelBblCrCtl3,
    554     kCpumMsrRdFn_IntelI7TemperatureTarget,  /**< Range value returned. */
    555     kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */
    556     kCpumMsrRdFn_IntelI7MiscPwrMgmt,
    557     kCpumMsrRdFn_IntelP6CrN,
    558     kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
    559     kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
    560     kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
    561     kCpumMsrRdFn_IntelI7SandyAesNiCtl,
    562     kCpumMsrRdFn_IntelI7TurboRatioLimit,    /**< Returns range value. */
    563     kCpumMsrRdFn_IntelI7LbrSelect,
    564     kCpumMsrRdFn_IntelI7SandyErrorControl,
    565     kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,/**< Returns range value. */
    566     kCpumMsrRdFn_IntelI7PowerCtl,
    567     kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
    568     kCpumMsrRdFn_IntelI7PebsLdLat,
    569     kCpumMsrRdFn_IntelI7PkgCnResidencyN,     /**< Takes C-state number. */
    570     kCpumMsrRdFn_IntelI7CoreCnResidencyN,    /**< Takes C-state number. */
    571     kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,/**< Takes real value as reference. */
    572     kCpumMsrRdFn_IntelI7SandyVrMiscConfig,   /**< Takes real value as reference. */
    573     kCpumMsrRdFn_IntelI7SandyRaplPowerUnit,  /**< Takes real value as reference. */
    574     kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN,     /**< Takes real value as reference. */
    575     kCpumMsrRdFn_IntelI7SandyPkgC2Residency, /**< Takes real value as reference. */
    576     kCpumMsrRdFn_IntelI7RaplPkgPowerLimit,   /**< Takes real value as reference. */
    577     kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus, /**< Takes real value as reference. */
    578     kCpumMsrRdFn_IntelI7RaplPkgPerfStatus,   /**< Takes real value as reference. */
    579     kCpumMsrRdFn_IntelI7RaplPkgPowerInfo,    /**< Takes real value as reference. */
    580     kCpumMsrRdFn_IntelI7RaplDramPowerLimit,  /**< Takes real value as reference. */
    581     kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,/**< Takes real value as reference. */
    582     kCpumMsrRdFn_IntelI7RaplDramPerfStatus,  /**< Takes real value as reference. */
    583     kCpumMsrRdFn_IntelI7RaplDramPowerInfo,   /**< Takes real value as reference. */
    584     kCpumMsrRdFn_IntelI7RaplPp0PowerLimit,   /**< Takes real value as reference. */
    585     kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus, /**< Takes real value as reference. */
    586     kCpumMsrRdFn_IntelI7RaplPp0Policy,       /**< Takes real value as reference. */
    587     kCpumMsrRdFn_IntelI7RaplPp0PerfStatus,   /**< Takes real value as reference. */
    588     kCpumMsrRdFn_IntelI7RaplPp1PowerLimit,   /**< Takes real value as reference. */
    589     kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus, /**< Takes real value as reference. */
    590     kCpumMsrRdFn_IntelI7RaplPp1Policy,       /**< Takes real value as reference. */
    591     kCpumMsrRdFn_IntelI7IvyConfigTdpNominal, /**< Takes real value as reference. */
    592     kCpumMsrRdFn_IntelI7IvyConfigTdpLevel1,  /**< Takes real value as reference. */
    593     kCpumMsrRdFn_IntelI7IvyConfigTdpLevel2,  /**< Takes real value as reference. */
    594     kCpumMsrRdFn_IntelI7IvyConfigTdpControl,
    595     kCpumMsrRdFn_IntelI7IvyTurboActivationRatio,
    596     kCpumMsrRdFn_IntelI7UncPerfGlobalCtrl,
    597     kCpumMsrRdFn_IntelI7UncPerfGlobalStatus,
    598     kCpumMsrRdFn_IntelI7UncPerfGlobalOvfCtrl,
    599     kCpumMsrRdFn_IntelI7UncPerfFixedCtrCtrl,
    600     kCpumMsrRdFn_IntelI7UncPerfFixedCtr,
    601     kCpumMsrRdFn_IntelI7UncCBoxConfig,
    602     kCpumMsrRdFn_IntelI7UncArbPerfCtrN,
    603     kCpumMsrRdFn_IntelI7UncArbPerfEvtSelN,
    604     kCpumMsrRdFn_IntelI7SmiCount,
    605     kCpumMsrRdFn_IntelCore2EmttmCrTablesN,  /**< Range value returned. */
    606     kCpumMsrRdFn_IntelCore2SmmCStMiscInfo,
    607     kCpumMsrRdFn_IntelCore1ExtConfig,
    608     kCpumMsrRdFn_IntelCore1DtsCalControl,
    609     kCpumMsrRdFn_IntelCore2PeciControl,
    610     kCpumMsrRdFn_IntelAtSilvCoreC1Recidency,
    611 
    612     kCpumMsrRdFn_P6LastBranchFromIp,
    613     kCpumMsrRdFn_P6LastBranchToIp,
    614     kCpumMsrRdFn_P6LastIntFromIp,
    615     kCpumMsrRdFn_P6LastIntToIp,
    616 
    617     kCpumMsrRdFn_AmdFam15hTscRate,
    618     kCpumMsrRdFn_AmdFam15hLwpCfg,
    619     kCpumMsrRdFn_AmdFam15hLwpCbAddr,
    620     kCpumMsrRdFn_AmdFam10hMc4MiscN,
    621     kCpumMsrRdFn_AmdK8PerfCtlN,
    622     kCpumMsrRdFn_AmdK8PerfCtrN,
    623     kCpumMsrRdFn_AmdK8SysCfg,               /**< Range value returned. */
    624     kCpumMsrRdFn_AmdK8HwCr,
    625     kCpumMsrRdFn_AmdK8IorrBaseN,
    626     kCpumMsrRdFn_AmdK8IorrMaskN,
    627     kCpumMsrRdFn_AmdK8TopOfMemN,
    628     kCpumMsrRdFn_AmdK8NbCfg1,
    629     kCpumMsrRdFn_AmdK8McXcptRedir,
    630     kCpumMsrRdFn_AmdK8CpuNameN,
    631     kCpumMsrRdFn_AmdK8HwThermalCtrl,        /**< Range value returned. */
    632     kCpumMsrRdFn_AmdK8SwThermalCtrl,
    633     kCpumMsrRdFn_AmdK8FidVidControl,        /**< Range value returned. */
    634     kCpumMsrRdFn_AmdK8FidVidStatus,         /**< Range value returned. */
    635     kCpumMsrRdFn_AmdK8McCtlMaskN,
    636     kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
    637     kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
    638     kCpumMsrRdFn_AmdK8IntPendingMessage,
    639     kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
    640     kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
    641     kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
    642     kCpumMsrRdFn_AmdFam10hPStateCurLimit,   /**< Returns range value. */
    643     kCpumMsrRdFn_AmdFam10hPStateControl,    /**< Returns range value. */
    644     kCpumMsrRdFn_AmdFam10hPStateStatus,     /**< Returns range value. */
    645     kCpumMsrRdFn_AmdFam10hPStateN,          /**< Returns range value. This isn't an register index! */
    646     kCpumMsrRdFn_AmdFam10hCofVidControl,    /**< Returns range value. */
    647     kCpumMsrRdFn_AmdFam10hCofVidStatus,     /**< Returns range value. */
    648     kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
    649     kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
    650     kCpumMsrRdFn_AmdK8SmmBase,
    651     kCpumMsrRdFn_AmdK8SmmAddr,
    652     kCpumMsrRdFn_AmdK8SmmMask,
    653     kCpumMsrRdFn_AmdK8VmCr,
    654     kCpumMsrRdFn_AmdK8IgnNe,
    655     kCpumMsrRdFn_AmdK8SmmCtl,
    656     kCpumMsrRdFn_AmdK8VmHSavePa,
    657     kCpumMsrRdFn_AmdFam10hVmLockKey,
    658     kCpumMsrRdFn_AmdFam10hSmmLockKey,
    659     kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
    660     kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
    661     kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
    662     kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
    663     kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
    664     kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
    665     kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
    666     kCpumMsrRdFn_AmdK7MicrocodeCtl,         /**< Returns range value. */
    667     kCpumMsrRdFn_AmdK7ClusterIdMaybe,       /**< Returns range value. */
    668     kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
    669     kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
    670     kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
    671     kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
    672     kCpumMsrRdFn_AmdK8PatchLevel,           /**< Returns range value. */
    673     kCpumMsrRdFn_AmdK7DebugStatusMaybe,
    674     kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
    675     kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
    676     kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
    677     kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
    678     kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
    679     kCpumMsrRdFn_AmdK7NodeId,
    680     kCpumMsrRdFn_AmdK7DrXAddrMaskN,      /**< Takes register index. */
    681     kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
    682     kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
    683     kCpumMsrRdFn_AmdK7LoadStoreCfg,
    684     kCpumMsrRdFn_AmdK7InstrCacheCfg,
    685     kCpumMsrRdFn_AmdK7DataCacheCfg,
    686     kCpumMsrRdFn_AmdK7BusUnitCfg,
    687     kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
    688     kCpumMsrRdFn_AmdFam15hFpuCfg,
    689     kCpumMsrRdFn_AmdFam15hDecoderCfg,
    690     kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
    691     kCpumMsrRdFn_AmdFam15hCombUnitCfg,
    692     kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
    693     kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
    694     kCpumMsrRdFn_AmdFam15hExecUnitCfg,
    695     kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
    696     kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
    697     kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
    698     kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
    699     kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
    700     kCpumMsrRdFn_AmdFam10hIbsOpRip,
    701     kCpumMsrRdFn_AmdFam10hIbsOpData,
    702     kCpumMsrRdFn_AmdFam10hIbsOpData2,
    703     kCpumMsrRdFn_AmdFam10hIbsOpData3,
    704     kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
    705     kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
    706     kCpumMsrRdFn_AmdFam10hIbsCtl,
    707     kCpumMsrRdFn_AmdFam14hIbsBrTarget,
    708 
    709     kCpumMsrRdFn_Gim,
    710 
    711     /** End of valid MSR read function indexes. */
    712     kCpumMsrRdFn_End
    713 } CPUMMSRRDFN;
    714 
    715 /**
    716  * MSR write functions.
    717  */
    718 typedef enum CPUMMSRWRFN
    719 {
    720     /** Invalid zero value. */
    721     kCpumMsrWrFn_Invalid = 0,
    722     /** Writes are ignored, the fWrGpMask is observed though. */
    723     kCpumMsrWrFn_IgnoreWrite,
    724     /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */
    725     kCpumMsrWrFn_ReadOnly,
    726     /** Alias to the MSR range starting at the MSR given by
    727      * CPUMMSRRANGE::uValue.  Must be used in pair with
    728      * kCpumMsrRdFn_MsrAlias. */
    729     kCpumMsrWrFn_MsrAlias,
    730 
    731     kCpumMsrWrFn_Ia32P5McAddr,
    732     kCpumMsrWrFn_Ia32P5McType,
    733     kCpumMsrWrFn_Ia32TimestampCounter,
    734     kCpumMsrWrFn_Ia32ApicBase,
    735     kCpumMsrWrFn_Ia32FeatureControl,
    736     kCpumMsrWrFn_Ia32BiosSignId,
    737     kCpumMsrWrFn_Ia32BiosUpdateTrigger,
    738     kCpumMsrWrFn_Ia32SmmMonitorCtl,
    739     kCpumMsrWrFn_Ia32PmcN,
    740     kCpumMsrWrFn_Ia32MonitorFilterLineSize,
    741     kCpumMsrWrFn_Ia32MPerf,
    742     kCpumMsrWrFn_Ia32APerf,
    743     kCpumMsrWrFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    744     kCpumMsrWrFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    745     kCpumMsrWrFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    746     kCpumMsrWrFn_Ia32MtrrDefType,
    747     kCpumMsrWrFn_Ia32Pat,
    748     kCpumMsrWrFn_Ia32SysEnterCs,
    749     kCpumMsrWrFn_Ia32SysEnterEsp,
    750     kCpumMsrWrFn_Ia32SysEnterEip,
    751     kCpumMsrWrFn_Ia32McgStatus,
    752     kCpumMsrWrFn_Ia32McgCtl,
    753     kCpumMsrWrFn_Ia32DebugCtl,
    754     kCpumMsrWrFn_Ia32SmrrPhysBase,
    755     kCpumMsrWrFn_Ia32SmrrPhysMask,
    756     kCpumMsrWrFn_Ia32PlatformDcaCap,
    757     kCpumMsrWrFn_Ia32Dca0Cap,
    758     kCpumMsrWrFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    759     kCpumMsrWrFn_Ia32PerfStatus,
    760     kCpumMsrWrFn_Ia32PerfCtl,
    761     kCpumMsrWrFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    762     kCpumMsrWrFn_Ia32PerfCapabilities,
    763     kCpumMsrWrFn_Ia32FixedCtrCtrl,
    764     kCpumMsrWrFn_Ia32PerfGlobalStatus,
    765     kCpumMsrWrFn_Ia32PerfGlobalCtrl,
    766     kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
    767     kCpumMsrWrFn_Ia32PebsEnable,
    768     kCpumMsrWrFn_Ia32ClockModulation,
    769     kCpumMsrWrFn_Ia32ThermInterrupt,
    770     kCpumMsrWrFn_Ia32ThermStatus,
    771     kCpumMsrWrFn_Ia32Therm2Ctl,
    772     kCpumMsrWrFn_Ia32MiscEnable,
    773     kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    774     kCpumMsrWrFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    775     kCpumMsrWrFn_Ia32DsArea,
    776     kCpumMsrWrFn_Ia32TscDeadline,
    777     kCpumMsrWrFn_Ia32X2ApicN,
    778     kCpumMsrWrFn_Ia32DebugInterface,
    779     kCpumMsrWrFn_Ia32SpecCtrl,
    780     kCpumMsrWrFn_Ia32PredCmd,
    781     kCpumMsrWrFn_Ia32FlushCmd,
    782 
    783     kCpumMsrWrFn_Amd64Efer,
    784     kCpumMsrWrFn_Amd64SyscallTarget,
    785     kCpumMsrWrFn_Amd64LongSyscallTarget,
    786     kCpumMsrWrFn_Amd64CompSyscallTarget,
    787     kCpumMsrWrFn_Amd64SyscallFlagMask,
    788     kCpumMsrWrFn_Amd64FsBase,
    789     kCpumMsrWrFn_Amd64GsBase,
    790     kCpumMsrWrFn_Amd64KernelGsBase,
    791     kCpumMsrWrFn_Amd64TscAux,
    792     kCpumMsrWrFn_IntelEblCrPowerOn,
    793     kCpumMsrWrFn_IntelP4EbcHardPowerOn,
    794     kCpumMsrWrFn_IntelP4EbcSoftPowerOn,
    795     kCpumMsrWrFn_IntelP4EbcFrequencyId,
    796     kCpumMsrWrFn_IntelFlexRatio,
    797     kCpumMsrWrFn_IntelPkgCStConfigControl,
    798     kCpumMsrWrFn_IntelPmgIoCaptureBase,
    799     kCpumMsrWrFn_IntelLastBranchFromToN,
    800     kCpumMsrWrFn_IntelLastBranchFromN,
    801     kCpumMsrWrFn_IntelLastBranchToN,
    802     kCpumMsrWrFn_IntelLastBranchTos,
    803     kCpumMsrWrFn_IntelBblCrCtl,
    804     kCpumMsrWrFn_IntelBblCrCtl3,
    805     kCpumMsrWrFn_IntelI7TemperatureTarget,
    806     kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
    807     kCpumMsrWrFn_IntelI7MiscPwrMgmt,
    808     kCpumMsrWrFn_IntelP6CrN,
    809     kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
    810     kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
    811     kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
    812     kCpumMsrWrFn_IntelI7SandyAesNiCtl,
    813     kCpumMsrWrFn_IntelI7TurboRatioLimit,
    814     kCpumMsrWrFn_IntelI7LbrSelect,
    815     kCpumMsrWrFn_IntelI7SandyErrorControl,
    816     kCpumMsrWrFn_IntelI7PowerCtl,
    817     kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
    818     kCpumMsrWrFn_IntelI7PebsLdLat,
    819     kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
    820     kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
    821     kCpumMsrWrFn_IntelI7SandyRaplPowerUnit,  /**< R/O but found writable bits on a Silvermont CPU here. */
    822     kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
    823     kCpumMsrWrFn_IntelI7SandyPkgC2Residency, /**< R/O but found writable bits on a Silvermont CPU here. */
    824     kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
    825     kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
    826     kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
    827     kCpumMsrWrFn_IntelI7RaplPp0Policy,
    828     kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
    829     kCpumMsrWrFn_IntelI7RaplPp1Policy,
    830     kCpumMsrWrFn_IntelI7IvyConfigTdpControl,
    831     kCpumMsrWrFn_IntelI7IvyTurboActivationRatio,
    832     kCpumMsrWrFn_IntelI7UncPerfGlobalCtrl,
    833     kCpumMsrWrFn_IntelI7UncPerfGlobalStatus,
    834     kCpumMsrWrFn_IntelI7UncPerfGlobalOvfCtrl,
    835     kCpumMsrWrFn_IntelI7UncPerfFixedCtrCtrl,
    836     kCpumMsrWrFn_IntelI7UncPerfFixedCtr,
    837     kCpumMsrWrFn_IntelI7UncArbPerfCtrN,
    838     kCpumMsrWrFn_IntelI7UncArbPerfEvtSelN,
    839     kCpumMsrWrFn_IntelCore2EmttmCrTablesN,
    840     kCpumMsrWrFn_IntelCore2SmmCStMiscInfo,
    841     kCpumMsrWrFn_IntelCore1ExtConfig,
    842     kCpumMsrWrFn_IntelCore1DtsCalControl,
    843     kCpumMsrWrFn_IntelCore2PeciControl,
    844 
    845     kCpumMsrWrFn_P6LastIntFromIp,
    846     kCpumMsrWrFn_P6LastIntToIp,
    847 
    848     kCpumMsrWrFn_AmdFam15hTscRate,
    849     kCpumMsrWrFn_AmdFam15hLwpCfg,
    850     kCpumMsrWrFn_AmdFam15hLwpCbAddr,
    851     kCpumMsrWrFn_AmdFam10hMc4MiscN,
    852     kCpumMsrWrFn_AmdK8PerfCtlN,
    853     kCpumMsrWrFn_AmdK8PerfCtrN,
    854     kCpumMsrWrFn_AmdK8SysCfg,
    855     kCpumMsrWrFn_AmdK8HwCr,
    856     kCpumMsrWrFn_AmdK8IorrBaseN,
    857     kCpumMsrWrFn_AmdK8IorrMaskN,
    858     kCpumMsrWrFn_AmdK8TopOfMemN,
    859     kCpumMsrWrFn_AmdK8NbCfg1,
    860     kCpumMsrWrFn_AmdK8McXcptRedir,
    861     kCpumMsrWrFn_AmdK8CpuNameN,
    862     kCpumMsrWrFn_AmdK8HwThermalCtrl,
    863     kCpumMsrWrFn_AmdK8SwThermalCtrl,
    864     kCpumMsrWrFn_AmdK8FidVidControl,
    865     kCpumMsrWrFn_AmdK8McCtlMaskN,
    866     kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
    867     kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
    868     kCpumMsrWrFn_AmdK8IntPendingMessage,
    869     kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
    870     kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
    871     kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
    872     kCpumMsrWrFn_AmdFam10hPStateControl,
    873     kCpumMsrWrFn_AmdFam10hPStateStatus,
    874     kCpumMsrWrFn_AmdFam10hPStateN,
    875     kCpumMsrWrFn_AmdFam10hCofVidControl,
    876     kCpumMsrWrFn_AmdFam10hCofVidStatus,
    877     kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
    878     kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
    879     kCpumMsrWrFn_AmdK8SmmBase,
    880     kCpumMsrWrFn_AmdK8SmmAddr,
    881     kCpumMsrWrFn_AmdK8SmmMask,
    882     kCpumMsrWrFn_AmdK8VmCr,
    883     kCpumMsrWrFn_AmdK8IgnNe,
    884     kCpumMsrWrFn_AmdK8SmmCtl,
    885     kCpumMsrWrFn_AmdK8VmHSavePa,
    886     kCpumMsrWrFn_AmdFam10hVmLockKey,
    887     kCpumMsrWrFn_AmdFam10hSmmLockKey,
    888     kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
    889     kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
    890     kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
    891     kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
    892     kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
    893     kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
    894     kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
    895     kCpumMsrWrFn_AmdK7MicrocodeCtl,
    896     kCpumMsrWrFn_AmdK7ClusterIdMaybe,
    897     kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
    898     kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
    899     kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
    900     kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
    901     kCpumMsrWrFn_AmdK8PatchLoader,
    902     kCpumMsrWrFn_AmdK7DebugStatusMaybe,
    903     kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
    904     kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
    905     kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
    906     kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
    907     kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
    908     kCpumMsrWrFn_AmdK7NodeId,
    909     kCpumMsrWrFn_AmdK7DrXAddrMaskN,      /**< Takes register index. */
    910     kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
    911     kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
    912     kCpumMsrWrFn_AmdK7LoadStoreCfg,
    913     kCpumMsrWrFn_AmdK7InstrCacheCfg,
    914     kCpumMsrWrFn_AmdK7DataCacheCfg,
    915     kCpumMsrWrFn_AmdK7BusUnitCfg,
    916     kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
    917     kCpumMsrWrFn_AmdFam15hFpuCfg,
    918     kCpumMsrWrFn_AmdFam15hDecoderCfg,
    919     kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
    920     kCpumMsrWrFn_AmdFam15hCombUnitCfg,
    921     kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
    922     kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
    923     kCpumMsrWrFn_AmdFam15hExecUnitCfg,
    924     kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
    925     kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
    926     kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
    927     kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
    928     kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
    929     kCpumMsrWrFn_AmdFam10hIbsOpRip,
    930     kCpumMsrWrFn_AmdFam10hIbsOpData,
    931     kCpumMsrWrFn_AmdFam10hIbsOpData2,
    932     kCpumMsrWrFn_AmdFam10hIbsOpData3,
    933     kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
    934     kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
    935     kCpumMsrWrFn_AmdFam10hIbsCtl,
    936     kCpumMsrWrFn_AmdFam14hIbsBrTarget,
    937 
    938     kCpumMsrWrFn_Gim,
    939 
    940     /** End of valid MSR write function indexes. */
    941     kCpumMsrWrFn_End
    942 } CPUMMSRWRFN;
    943 
    944 /**
    945  * MSR range.
    946  */
    947 typedef struct CPUMMSRRANGE
    948 {
    949     /** The first MSR. [0] */
    950     uint32_t    uFirst;
    951     /** The last MSR. [4] */
    952     uint32_t    uLast;
    953     /** The read function (CPUMMSRRDFN). [8] */
    954     uint16_t    enmRdFn;
    955     /** The write function (CPUMMSRWRFN). [10] */
    956     uint16_t    enmWrFn;
    957     /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
    958      * UINT16_MAX if not used by the read and write functions.  [12] */
    959     uint32_t    offCpumCpu : 24;
    960     /** Reserved for future hacks. [15] */
    961     uint32_t    fReserved : 8;
    962     /** The init/read value. [16]
    963      * When enmRdFn is kCpumMsrRdFn_INIT_VALUE, this is the value returned on RDMSR.
    964      * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
    965      * offset into CPUM. */
    966     uint64_t    uValue;
    967     /** The bits to ignore when writing. [24]   */
    968     uint64_t    fWrIgnMask;
    969     /** The bits that will cause a GP(0) when writing. [32]
    970      * This is always checked prior to calling the write function.  Using
    971      * UINT64_MAX effectively marks the MSR as read-only. */
    972     uint64_t    fWrGpMask;
    973     /** The register name, if applicable. [40] */
    974     char        szName[56];
    975 
    976     /** The number of reads. */
    977     STAMCOUNTER cReads;
    978     /** The number of writes. */
    979     STAMCOUNTER cWrites;
    980     /** The number of times ignored bits were written. */
    981     STAMCOUNTER cIgnoredBits;
    982     /** The number of GPs generated. */
    983     STAMCOUNTER cGps;
    984 } CPUMMSRRANGE;
    985 #ifndef VBOX_FOR_DTRACE_LIB
    986 AssertCompileSize(CPUMMSRRANGE, 128);
    987 #endif
    988 /** Pointer to an MSR range. */
    989 typedef CPUMMSRRANGE *PCPUMMSRRANGE;
    990 /** Pointer to a const MSR range. */
    991 typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
    992 
    993 
    994 /**
    995  * MSRs which are required while exploding features.
    996  */
    997 typedef struct CPUMMSRS
    998 {
    999     union
    1000     {
    1001         VMXMSRS         vmx;
    1002         SVMMSRS         svm;
    1003     } hwvirt;
    1004 } CPUMMSRS;
    1005 /** Pointer to an CPUMMSRS struct. */
    1006 typedef CPUMMSRS *PCPUMMSRS;
    1007 /** Pointer to a const CPUMMSRS struct. */
    1008 typedef CPUMMSRS const *PCCPUMMSRS;
    1009 
    1010 
    1011 /**
    1012  * CPU features and quirks.
    1013  * This is mostly exploded CPUID info.
    1014  */
    1015 typedef struct CPUMFEATURES
    1016 {
    1017     /** The CPU vendor (CPUMCPUVENDOR). */
    1018     uint8_t         enmCpuVendor;
    1019     /** The CPU family. */
    1020     uint8_t         uFamily;
    1021     /** The CPU model. */
    1022     uint8_t         uModel;
    1023     /** The CPU stepping. */
    1024     uint8_t         uStepping;
    1025     /** The microarchitecture. */
    1026 #ifndef VBOX_FOR_DTRACE_LIB
    1027     CPUMMICROARCH   enmMicroarch;
    1028 #else
    1029     uint32_t        enmMicroarch;
    1030 #endif
    1031     /** The maximum physical address width of the CPU. */
    1032     uint8_t         cMaxPhysAddrWidth;
    1033     /** The maximum linear address width of the CPU. */
    1034     uint8_t         cMaxLinearAddrWidth;
    1035     /** Max size of the extended state (or FPU state if no XSAVE). */
    1036     uint16_t        cbMaxExtendedState;
    1037 
    1038     /** Supports MSRs. */
    1039     uint32_t        fMsr : 1;
    1040     /** Supports the page size extension (4/2 MB pages). */
    1041     uint32_t        fPse : 1;
    1042     /** Supports 36-bit page size extension (4 MB pages can map memory above
    1043      *  4GB). */
    1044     uint32_t        fPse36 : 1;
    1045     /** Supports physical address extension (PAE). */
    1046     uint32_t        fPae : 1;
    1047     /** Supports page-global extension (PGE). */
    1048     uint32_t        fPge : 1;
    1049     /** Page attribute table (PAT) support (page level cache control). */
    1050     uint32_t        fPat : 1;
    1051     /** Supports the FXSAVE and FXRSTOR instructions. */
    1052     uint32_t        fFxSaveRstor : 1;
    1053     /** Supports the XSAVE and XRSTOR instructions. */
    1054     uint32_t        fXSaveRstor : 1;
    1055     /** Supports the XSAVEOPT instruction. */
    1056     uint32_t        fXSaveOpt : 1;
    1057     /** The XSAVE/XRSTOR bit in CR4 has been set (only applicable for host!). */
    1058     uint32_t        fOpSysXSaveRstor : 1;
    1059     /** Supports MMX. */
    1060     uint32_t        fMmx : 1;
    1061     /** Supports AMD extensions to MMX instructions. */
    1062     uint32_t        fAmdMmxExts : 1;
    1063     /** Supports SSE. */
    1064     uint32_t        fSse : 1;
    1065     /** Supports SSE2. */
    1066     uint32_t        fSse2 : 1;
    1067     /** Supports SSE3. */
    1068     uint32_t        fSse3 : 1;
    1069     /** Supports SSSE3. */
    1070     uint32_t        fSsse3 : 1;
    1071     /** Supports SSE4.1. */
    1072     uint32_t        fSse41 : 1;
    1073     /** Supports SSE4.2. */
    1074     uint32_t        fSse42 : 1;
    1075     /** Supports AVX. */
    1076     uint32_t        fAvx : 1;
    1077     /** Supports AVX2. */
    1078     uint32_t        fAvx2 : 1;
    1079     /** Supports AVX512 foundation. */
    1080     uint32_t        fAvx512Foundation : 1;
    1081     /** Supports RDTSC. */
    1082     uint32_t        fTsc : 1;
     1083     /** Intel SYSENTER/SYSEXIT support. */
    1084     uint32_t        fSysEnter : 1;
    1085     /** First generation APIC. */
    1086     uint32_t        fApic : 1;
    1087     /** Second generation APIC. */
    1088     uint32_t        fX2Apic : 1;
    1089     /** Hypervisor present. */
    1090     uint32_t        fHypervisorPresent : 1;
    1091     /** MWAIT & MONITOR instructions supported. */
    1092     uint32_t        fMonitorMWait : 1;
    1093     /** MWAIT Extensions present. */
    1094     uint32_t        fMWaitExtensions : 1;
    1095     /** Supports CMPXCHG16B in 64-bit mode. */
    1096     uint32_t        fMovCmpXchg16b : 1;
    1097     /** Supports CLFLUSH. */
    1098     uint32_t        fClFlush : 1;
    1099     /** Supports CLFLUSHOPT. */
    1100     uint32_t        fClFlushOpt : 1;
    1101     /** Supports IA32_PRED_CMD.IBPB. */
    1102     uint32_t        fIbpb : 1;
    1103     /** Supports IA32_SPEC_CTRL.IBRS. */
    1104     uint32_t        fIbrs : 1;
    1105     /** Supports IA32_SPEC_CTRL.STIBP. */
    1106     uint32_t        fStibp : 1;
    1107     /** Supports IA32_FLUSH_CMD. */
    1108     uint32_t        fFlushCmd : 1;
    1109     /** Supports IA32_ARCH_CAP. */
    1110     uint32_t        fArchCap : 1;
    1111     /** Supports MD_CLEAR functionality (VERW, IA32_FLUSH_CMD). */
    1112     uint32_t        fMdsClear : 1;
    1113     /** Supports PCID. */
    1114     uint32_t        fPcid : 1;
    1115     /** Supports INVPCID. */
    1116     uint32_t        fInvpcid : 1;
    1117     /** Supports read/write FSGSBASE instructions. */
    1118     uint32_t        fFsGsBase : 1;
    1119     /** Supports BMI1 instructions (ANDN, BEXTR, BLSI, BLSMSK, BLSR, and TZCNT). */
    1120     uint32_t        fBmi1 : 1;
    1121     /** Supports BMI2 instructions (BZHI, MULX, PDEP, PEXT, RORX, SARX, SHRX,
    1122      * and SHLX). */
    1123     uint32_t        fBmi2 : 1;
    1124     /** Supports POPCNT instruction. */
    1125     uint32_t        fPopCnt : 1;
    1126     /** Supports RDRAND instruction. */
    1127     uint32_t        fRdRand : 1;
    1128     /** Supports RDSEED instruction. */
    1129     uint32_t        fRdSeed : 1;
    1130     /** Supports Hardware Lock Elision (HLE). */
    1131     uint32_t        fHle : 1;
    1132     /** Supports Restricted Transactional Memory (RTM - XBEGIN, XEND, XABORT). */
    1133     uint32_t        fRtm : 1;
    1134     /** Supports PCLMULQDQ instruction. */
    1135     uint32_t        fPclMul : 1;
    1136     /** Supports AES-NI (six AESxxx instructions). */
    1137     uint32_t        fAesNi : 1;
    1138     /** Support MOVBE instruction. */
    1139     uint32_t        fMovBe : 1;
    1140     /** Support SHA instructions. */
    1141     uint32_t        fSha : 1;
    1142     /** Support ADX instructions. */
    1143     uint32_t        fAdx : 1;
    1144 
    1145     /** Supports AMD 3DNow instructions. */
    1146     uint32_t        f3DNow : 1;
    1147     /** Supports the 3DNow/AMD64 prefetch instructions (could be nops). */
    1148     uint32_t        f3DNowPrefetch : 1;
    1149 
    1150     /** AMD64: Supports long mode. */
    1151     uint32_t        fLongMode : 1;
    1152     /** AMD64: SYSCALL/SYSRET support. */
    1153     uint32_t        fSysCall : 1;
    1154     /** AMD64: No-execute page table bit. */
    1155     uint32_t        fNoExecute : 1;
    1156     /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
    1157     uint32_t        fLahfSahf : 1;
    1158     /** AMD64: Supports RDTSCP. */
    1159     uint32_t        fRdTscP : 1;
    1160     /** AMD64: Supports MOV CR8 in 32-bit code (lock prefix hack). */
    1161     uint32_t        fMovCr8In32Bit : 1;
    1162     /** AMD64: Supports XOP (similar to VEX3/AVX). */
    1163     uint32_t        fXop : 1;
    1164     /** AMD64: Supports ABM, i.e. the LZCNT instruction. */
    1165     uint32_t        fAbm : 1;
    1166     /** AMD64: Supports TBM (BEXTR, BLCFILL, BLCI, BLCIC, BLCMSK, BLCS,
    1167      *  BLSFILL, BLSIC, T1MSKC, and TZMSK). */
    1168     uint32_t        fTbm : 1;
    1169 
    1170     /** Indicates that FPU instruction and data pointers may leak.
     1171      * This generally applies to recent AMD CPUs, where the FPU IP and DP pointers
     1172      * are only saved and restored if an exception is pending. */
    1173     uint32_t        fLeakyFxSR : 1;
    1174 
    1175     /** AMD64: Supports AMD SVM. */
    1176     uint32_t        fSvm : 1;
    1177 
    1178     /** Support for Intel VMX. */
    1179     uint32_t        fVmx : 1;
    1180 
    1181     /** Indicates that speculative execution control CPUID bits and MSRs are exposed.
    1182      * The details are different for Intel and AMD but both have similar
    1183      * functionality. */
    1184     uint32_t        fSpeculationControl : 1;
    1185 
     1186     /** MSR_IA32_ARCH_CAPABILITIES: RDCL_NO (bit 0).
     1187      * @remarks Only safe to use after CPUM ring-0 init! */
     1188     uint32_t        fArchRdclNo : 1;
     1189     /** MSR_IA32_ARCH_CAPABILITIES: IBRS_ALL (bit 1).
     1190      * @remarks Only safe to use after CPUM ring-0 init! */
     1191     uint32_t        fArchIbrsAll : 1;
     1192     /** MSR_IA32_ARCH_CAPABILITIES: RSB Override (bit 2).
     1193      * @remarks Only safe to use after CPUM ring-0 init! */
     1194     uint32_t        fArchRsbOverride : 1;
     1195     /** MSR_IA32_ARCH_CAPABILITIES: SKIP_L1DFL_VMENTRY (bit 3).
     1196      * @remarks Only safe to use after CPUM ring-0 init! */
     1197     uint32_t        fArchVmmNeedNotFlushL1d : 1;
     1198     /** MSR_IA32_ARCH_CAPABILITIES: MDS_NO (bit 4).
     1199      * @remarks Only safe to use after CPUM ring-0 init! */
     1200     uint32_t        fArchMdsNo : 1;
    1201 
    1202     /** Alignment padding / reserved for future use (96 bits total, plus 12 bytes
    1203      *  prior to the bit fields -> total of 24 bytes) */
    1204     uint32_t        fPadding0 : 24;
    1205 
    1206 
    1207     /** @name SVM
    1208      * @{ */
    1209     /** SVM: Supports Nested-paging. */
    1210     uint32_t        fSvmNestedPaging : 1;
    1211     /** SVM: Support LBR (Last Branch Record) virtualization. */
    1212     uint32_t        fSvmLbrVirt : 1;
    1213     /** SVM: Supports SVM lock. */
    1214     uint32_t        fSvmSvmLock : 1;
    1215     /** SVM: Supports Next RIP save. */
    1216     uint32_t        fSvmNextRipSave : 1;
    1217     /** SVM: Supports TSC rate MSR. */
    1218     uint32_t        fSvmTscRateMsr : 1;
    1219     /** SVM: Supports VMCB clean bits. */
    1220     uint32_t        fSvmVmcbClean : 1;
    1221     /** SVM: Supports Flush-by-ASID. */
    1222     uint32_t        fSvmFlusbByAsid : 1;
    1223     /** SVM: Supports decode assist. */
    1224     uint32_t        fSvmDecodeAssists : 1;
    1225     /** SVM: Supports Pause filter. */
    1226     uint32_t        fSvmPauseFilter : 1;
    1227     /** SVM: Supports Pause filter threshold. */
    1228     uint32_t        fSvmPauseFilterThreshold : 1;
    1229     /** SVM: Supports AVIC (Advanced Virtual Interrupt Controller). */
    1230     uint32_t        fSvmAvic : 1;
    1231     /** SVM: Supports Virtualized VMSAVE/VMLOAD. */
    1232     uint32_t        fSvmVirtVmsaveVmload : 1;
    1233     /** SVM: Supports VGIF (Virtual Global Interrupt Flag). */
    1234     uint32_t        fSvmVGif : 1;
    1235     /** SVM: Supports GMET (Guest Mode Execute Trap Extension). */
    1236     uint32_t        fSvmGmet : 1;
    1237     /** SVM: Supports SSSCheck (SVM Supervisor Shadow Stack). */
    1238     uint32_t        fSvmSSSCheck : 1;
    1239     /** SVM: Supports SPEC_CTRL virtualization. */
    1240     uint32_t        fSvmSpecCtrl : 1;
    1241     /** SVM: Supports HOST_MCE_OVERRIDE. */
    1242     uint32_t        fSvmHostMceOverride : 1;
    1243     /** SVM: Supports TlbiCtl (INVLPGB/TLBSYNC in VMCB and TLBSYNC intercept). */
    1244     uint32_t        fSvmTlbiCtl : 1;
    1245     /** SVM: Padding / reserved for future features (64 bits total w/ max ASID). */
    1246     uint32_t        fSvmPadding0 : 14;
    1247     /** SVM: Maximum supported ASID. */
    1248     uint32_t        uSvmMaxAsid;
    1249     /** @} */
    1250 
    1251 
    1252     /** VMX: Maximum physical address width. */
    1253     uint32_t        cVmxMaxPhysAddrWidth : 8;
    1254 
    1255     /** @name VMX basic controls.
    1256      * @{ */
    1257     /** VMX: Supports INS/OUTS VM-exit instruction info. */
    1258     uint32_t        fVmxInsOutInfo : 1;
    1259     /** @} */
    1260 
    1261     /** @name VMX Pin-based controls.
    1262      * @{ */
    1263     /** VMX: Supports external interrupt VM-exit. */
    1264     uint32_t        fVmxExtIntExit : 1;
    1265     /** VMX: Supports NMI VM-exit. */
    1266     uint32_t        fVmxNmiExit : 1;
    1267     /** VMX: Supports Virtual NMIs. */
    1268     uint32_t        fVmxVirtNmi : 1;
    1269     /** VMX: Supports preemption timer. */
    1270     uint32_t        fVmxPreemptTimer : 1;
    1271     /** VMX: Supports posted interrupts. */
    1272     uint32_t        fVmxPostedInt : 1;
    1273     /** @} */
    1274 
    1275     /** @name VMX Processor-based controls.
    1276      * @{ */
    1277     /** VMX: Supports Interrupt-window exiting. */
    1278     uint32_t        fVmxIntWindowExit : 1;
    1279     /** VMX: Supports TSC offsetting. */
    1280     uint32_t        fVmxTscOffsetting : 1;
    1281     /** VMX: Supports HLT exiting. */
    1282     uint32_t        fVmxHltExit : 1;
    1283     /** VMX: Supports INVLPG exiting. */
    1284     uint32_t        fVmxInvlpgExit : 1;
    1285     /** VMX: Supports MWAIT exiting. */
    1286     uint32_t        fVmxMwaitExit : 1;
    1287     /** VMX: Supports RDPMC exiting. */
    1288     uint32_t        fVmxRdpmcExit : 1;
    1289     /** VMX: Supports RDTSC exiting. */
    1290     uint32_t        fVmxRdtscExit : 1;
    1291     /** VMX: Supports CR3-load exiting. */
    1292     uint32_t        fVmxCr3LoadExit : 1;
    1293     /** VMX: Supports CR3-store exiting. */
    1294     uint32_t        fVmxCr3StoreExit : 1;
    1295     /** VMX: Supports tertiary processor-based VM-execution controls. */
    1296     uint32_t        fVmxTertiaryExecCtls : 1;
    1297     /** VMX: Supports CR8-load exiting. */
    1298     uint32_t        fVmxCr8LoadExit : 1;
    1299     /** VMX: Supports CR8-store exiting. */
    1300     uint32_t        fVmxCr8StoreExit : 1;
    1301     /** VMX: Supports TPR shadow. */
    1302     uint32_t        fVmxUseTprShadow : 1;
    1303     /** VMX: Supports NMI-window exiting. */
    1304     uint32_t        fVmxNmiWindowExit : 1;
    1305     /** VMX: Supports Mov-DRx exiting. */
    1306     uint32_t        fVmxMovDRxExit : 1;
    1307     /** VMX: Supports Unconditional I/O exiting. */
    1308     uint32_t        fVmxUncondIoExit : 1;
     1309     /** VMX: Supports I/O bitmaps. */
    1310     uint32_t        fVmxUseIoBitmaps : 1;
    1311     /** VMX: Supports Monitor Trap Flag. */
    1312     uint32_t        fVmxMonitorTrapFlag : 1;
    1313     /** VMX: Supports MSR bitmap. */
    1314     uint32_t        fVmxUseMsrBitmaps : 1;
    1315     /** VMX: Supports MONITOR exiting. */
    1316     uint32_t        fVmxMonitorExit : 1;
    1317     /** VMX: Supports PAUSE exiting. */
    1318     uint32_t        fVmxPauseExit : 1;
    1319     /** VMX: Supports secondary processor-based VM-execution controls. */
    1320     uint32_t        fVmxSecondaryExecCtls : 1;
    1321     /** @} */
    1322 
    1323     /** @name VMX Secondary processor-based controls.
    1324      * @{ */
    1325     /** VMX: Supports virtualize-APIC access. */
    1326     uint32_t        fVmxVirtApicAccess : 1;
    1327     /** VMX: Supports EPT (Extended Page Tables). */
    1328     uint32_t        fVmxEpt : 1;
    1329     /** VMX: Supports descriptor-table exiting. */
    1330     uint32_t        fVmxDescTableExit : 1;
    1331     /** VMX: Supports RDTSCP. */
    1332     uint32_t        fVmxRdtscp : 1;
    1333     /** VMX: Supports virtualize-x2APIC mode. */
    1334     uint32_t        fVmxVirtX2ApicMode : 1;
    1335     /** VMX: Supports VPID. */
    1336     uint32_t        fVmxVpid : 1;
     1337     /** VMX: Supports WBINVD exiting. */
    1338     uint32_t        fVmxWbinvdExit : 1;
    1339     /** VMX: Supports Unrestricted guest. */
    1340     uint32_t        fVmxUnrestrictedGuest : 1;
    1341     /** VMX: Supports APIC-register virtualization. */
    1342     uint32_t        fVmxApicRegVirt : 1;
    1343     /** VMX: Supports virtual-interrupt delivery. */
    1344     uint32_t        fVmxVirtIntDelivery : 1;
    1345     /** VMX: Supports Pause-loop exiting. */
    1346     uint32_t        fVmxPauseLoopExit : 1;
    1347     /** VMX: Supports RDRAND exiting. */
    1348     uint32_t        fVmxRdrandExit : 1;
    1349     /** VMX: Supports INVPCID. */
    1350     uint32_t        fVmxInvpcid : 1;
    1351     /** VMX: Supports VM functions. */
    1352     uint32_t        fVmxVmFunc : 1;
    1353     /** VMX: Supports VMCS shadowing. */
    1354     uint32_t        fVmxVmcsShadowing : 1;
    1355     /** VMX: Supports RDSEED exiting. */
    1356     uint32_t        fVmxRdseedExit : 1;
    1357     /** VMX: Supports PML. */
    1358     uint32_t        fVmxPml : 1;
    1359     /** VMX: Supports EPT-violations \#VE. */
    1360     uint32_t        fVmxEptXcptVe : 1;
    1361     /** VMX: Supports conceal VMX from PT. */
    1362     uint32_t        fVmxConcealVmxFromPt : 1;
    1363     /** VMX: Supports XSAVES/XRSTORS. */
    1364     uint32_t        fVmxXsavesXrstors : 1;
    1365     /** VMX: Supports mode-based execute control for EPT. */
    1366     uint32_t        fVmxModeBasedExecuteEpt : 1;
    1367     /** VMX: Supports sub-page write permissions for EPT. */
    1368     uint32_t        fVmxSppEpt : 1;
    1369     /** VMX: Supports Intel PT to output guest-physical addresses for EPT. */
    1370     uint32_t        fVmxPtEpt : 1;
    1371     /** VMX: Supports TSC scaling. */
    1372     uint32_t        fVmxUseTscScaling : 1;
    1373     /** VMX: Supports TPAUSE, UMONITOR, or UMWAIT. */
    1374     uint32_t        fVmxUserWaitPause : 1;
    1375     /** VMX: Supports enclave (ENCLV) exiting. */
    1376     uint32_t        fVmxEnclvExit : 1;
    1377     /** @} */
    1378 
    1379     /** @name VMX Tertiary processor-based controls.
    1380      * @{ */
    1381     /** VMX: Supports LOADIWKEY exiting. */
    1382     uint32_t        fVmxLoadIwKeyExit : 1;
    1383     /** @} */
    1384 
    1385     /** @name VMX VM-entry controls.
    1386      * @{ */
    1387     /** VMX: Supports load-debug controls on VM-entry. */
    1388     uint32_t        fVmxEntryLoadDebugCtls : 1;
    1389     /** VMX: Supports IA32e mode guest. */
    1390     uint32_t        fVmxIa32eModeGuest : 1;
    1391     /** VMX: Supports load guest EFER MSR on VM-entry. */
    1392     uint32_t        fVmxEntryLoadEferMsr : 1;
    1393     /** VMX: Supports load guest PAT MSR on VM-entry. */
    1394     uint32_t        fVmxEntryLoadPatMsr : 1;
    1395     /** @} */
    1396 
    1397     /** @name VMX VM-exit controls.
    1398      * @{ */
    1399     /** VMX: Supports save debug controls on VM-exit. */
    1400     uint32_t        fVmxExitSaveDebugCtls : 1;
    1401     /** VMX: Supports host-address space size. */
    1402     uint32_t        fVmxHostAddrSpaceSize : 1;
    1403     /** VMX: Supports acknowledge external interrupt on VM-exit. */
    1404     uint32_t        fVmxExitAckExtInt : 1;
    1405     /** VMX: Supports save guest PAT MSR on VM-exit. */
    1406     uint32_t        fVmxExitSavePatMsr : 1;
     1407     /** VMX: Supports load host PAT MSR on VM-exit. */
    1408     uint32_t        fVmxExitLoadPatMsr : 1;
    1409     /** VMX: Supports save guest EFER MSR on VM-exit. */
    1410     uint32_t        fVmxExitSaveEferMsr : 1;
    1411     /** VMX: Supports load host EFER MSR on VM-exit. */
    1412     uint32_t        fVmxExitLoadEferMsr : 1;
    1413     /** VMX: Supports save VMX preemption timer on VM-exit. */
    1414     uint32_t        fVmxSavePreemptTimer : 1;
    1415     /** VMX: Supports secondary VM-exit controls. */
    1416     uint32_t        fVmxSecondaryExitCtls : 1;
    1417     /** @} */
    1418 
    1419     /** @name VMX Miscellaneous data.
    1420      * @{ */
    1421     /** VMX: Supports storing EFER.LMA into IA32e-mode guest field on VM-exit. */
    1422     uint32_t        fVmxExitSaveEferLma : 1;
    1423     /** VMX: Whether Intel PT (Processor Trace) is supported in VMX mode or not. */
    1424     uint32_t        fVmxPt : 1;
    1425     /** VMX: Supports VMWRITE to any valid VMCS field incl. read-only fields, otherwise
    1426      *  VMWRITE cannot modify read-only VM-exit information fields. */
    1427     uint32_t        fVmxVmwriteAll : 1;
     1428     /** VMX: Supports injection of software interrupts and ICEBP on VM-entry for
     1429      *  zero-length instructions. */
    1430     uint32_t        fVmxEntryInjectSoftInt : 1;
    1431     /** @} */
    1432 
    1433     /** VMX: Padding / reserved for future features. */
    1434     uint32_t        fVmxPadding0 : 16;
    1435     /** VMX: Padding / reserved for future, making it a total of 128 bits.  */
    1436     uint32_t        fVmxPadding1;
    1437 } CPUMFEATURES;
    1438 #ifndef VBOX_FOR_DTRACE_LIB
    1439 AssertCompileSize(CPUMFEATURES, 48);
    1440 #endif
    1441 /** Pointer to a CPU feature structure. */
    1442 typedef CPUMFEATURES *PCPUMFEATURES;
    1443 /** Pointer to a const CPU feature structure. */
    1444 typedef CPUMFEATURES const *PCCPUMFEATURES;
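
As a usage sketch, consumers test the exploded bits instead of re-issuing CPUID. The helper below is hypothetical and only combines flags declared above:

/* Sketch: XSAVEOPT is only usable when XSAVE itself is present and the OS
 * has enabled it in CR4 (fOpSysXSaveRstor tracks the latter for the host). */
DECLINLINE(bool) cpumSketchCanUseXSaveOpt(PCCPUMFEATURES pFeatures)
{
    return pFeatures->fXSaveRstor
        && pFeatures->fOpSysXSaveRstor
        && pFeatures->fXSaveOpt;
}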
    1445 
    1446 /**
    1447  * Chameleon wrapper structure for the host CPU features.
    1448  *
    1449  * This is used for the globally readable g_CpumHostFeatures variable, which is
    1450  * initialized once during VMMR0 load for ring-0 and during CPUMR3Init in
    1451  * ring-3.  To reflect this immutability after load/init, we use this wrapper
    1452  * structure to switch it between const and non-const depending on the context.
     1453  * Only two files see it as non-const (CPUMR0.cpp and CPUM.cpp).
    1454  */
    1455 typedef struct CPUHOSTFEATURES
    1456 {
    1457     CPUMFEATURES
    1458 #ifndef CPUM_WITH_NONCONST_HOST_FEATURES
    1459     const
    1460 #endif
    1461                     s;
    1462 } CPUHOSTFEATURES;
    1463 /** Pointer to a const host CPU feature structure. */
    1464 typedef CPUHOSTFEATURES const *PCCPUHOSTFEATURES;
    1465 
    1466 /** Host CPU features.
    1467  * @note In ring-3, only valid after CPUMR3Init.  In ring-0, valid after
    1468  *       module init. */
    1469 extern CPUHOSTFEATURES g_CpumHostFeatures;
    1470 
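
A hedged example of the intended read-only access pattern, going through the .s member of the chameleon wrapper (ring-3, after CPUMR3Init); the helper name is hypothetical:

/* Sketch: read-only access goes through the const view via the .s member. */
static bool cpumSketchHostHasIbpb(void)
{
    return g_CpumHostFeatures.s.fIbpb;
}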
    1471 
    1472 /**
    1473  * CPU database entry.
    1474  */
    1475 typedef struct CPUMDBENTRY
    1476 {
    1477     /** The CPU name. */
    1478     const char     *pszName;
    1479     /** The full CPU name. */
    1480     const char     *pszFullName;
    1481     /** The CPU vendor (CPUMCPUVENDOR). */
    1482     uint8_t         enmVendor;
    1483     /** The CPU family. */
    1484     uint8_t         uFamily;
    1485     /** The CPU model. */
    1486     uint8_t         uModel;
    1487     /** The CPU stepping. */
    1488     uint8_t         uStepping;
    1489     /** The microarchitecture. */
    1490     CPUMMICROARCH   enmMicroarch;
    1491     /** Scalable bus frequency used for reporting other frequencies. */
    1492     uint64_t        uScalableBusFreq;
    1493     /** Flags - CPUMDB_F_XXX. */
    1494     uint32_t        fFlags;
     1495     /** The maximum physical address width of the CPU.  This should correspond to
    1496      * the value in CPUID leaf 0x80000008 when present. */
    1497     uint8_t         cMaxPhysAddrWidth;
    1498     /** The MXCSR mask. */
    1499     uint32_t        fMxCsrMask;
    1500     /** Pointer to an array of CPUID leaves.  */
    1501     PCCPUMCPUIDLEAF paCpuIdLeaves;
    1502     /** The number of CPUID leaves in the array paCpuIdLeaves points to. */
    1503     uint32_t        cCpuIdLeaves;
    1504     /** The method used to deal with unknown CPUID leaves. */
    1505     CPUMUNKNOWNCPUID enmUnknownCpuId;
    1506     /** The default unknown CPUID value. */
    1507     CPUMCPUID       DefUnknownCpuId;
    1508 
    1509     /** MSR mask.  Several microarchitectures ignore the higher bits of ECX in
    1510      *  the RDMSR and WRMSR instructions. */
    1511     uint32_t        fMsrMask;
    1512 
     1513     /** The number of ranges in the table pointed to by paMsrRanges. */
    1514     uint32_t        cMsrRanges;
    1515     /** MSR ranges for this CPU. */
    1516     PCCPUMMSRRANGE  paMsrRanges;
    1517 } CPUMDBENTRY;
    1518 /** Pointer to a const CPU database entry. */
    1519 typedef CPUMDBENTRY const *PCCPUMDBENTRY;
    1520 
    1521 /** @name CPUMDB_F_XXX - CPUDBENTRY::fFlags
    1522  * @{ */
    1523 /** Should execute all in IEM.
    1524  * @todo Implement this - currently done in Main...  */
    1525 #define CPUMDB_F_EXECUTE_ALL_IN_IEM         RT_BIT_32(0)
    1526 /** @} */
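
For illustration, a linear lookup over a hypothetical array of database entries; the helper, papEntries, and cEntries are assumptions, not declarations made in this header:

/* Sketch: find a database entry by its short CPU name. */
static PCCPUMDBENTRY cpumSketchDbLookup(PCCPUMDBENTRY const *papEntries, uint32_t cEntries, const char *pszName)
{
    for (uint32_t i = 0; i < cEntries; i++)
        if (!RTStrCmp(papEntries[i]->pszName, pszName))  /* iprt/string.h */
            return papEntries[i];
    return NULL;
}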
    1527 
    1528 
    1529 
    1530 #ifndef VBOX_FOR_DTRACE_LIB
    1531 
    1532 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    1533 VMMDECL(int)            CPUMCpuIdCollectLeavesX86(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
    1534 VMMDECL(CPUMCPUVENDOR)  CPUMCpuIdDetectX86VendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
    1535 #endif
    1536 
    1537 VMM_INT_DECL(bool)      CPUMAssertGuestRFlagsCookie(PVM pVM, PVMCPU pVCpu);
    1538 
     1539 62
     1540 63 /** @name Guest Register Getters.
     1541 64  * @{ */
    1542 VMMDECL(void)           CPUMGetGuestGDTR(PCVMCPU pVCpu, PVBOXGDTR pGDTR);
    1543 VMMDECL(RTGCPTR)        CPUMGetGuestIDTR(PCVMCPU pVCpu, uint16_t *pcbLimit);
    1544 VMMDECL(RTSEL)          CPUMGetGuestTR(PCVMCPU pVCpu, PCPUMSELREGHID pHidden);
    1545 VMMDECL(RTSEL)          CPUMGetGuestLDTR(PCVMCPU pVCpu);
    1546 VMMDECL(RTSEL)          CPUMGetGuestLdtrEx(PCVMCPU pVCpu, uint64_t *pGCPtrBase, uint32_t *pcbLimit);
    1547 VMMDECL(uint64_t)       CPUMGetGuestCR0(PCVMCPU pVCpu);
    1548 VMMDECL(uint64_t)       CPUMGetGuestCR2(PCVMCPU pVCpu);
    1549 VMMDECL(uint64_t)       CPUMGetGuestCR3(PCVMCPU pVCpu);
    1550 VMMDECL(uint64_t)       CPUMGetGuestCR4(PCVMCPU pVCpu);
    1551 VMMDECL(uint64_t)       CPUMGetGuestCR8(PCVMCPUCC pVCpu);
    1552 VMMDECL(int)            CPUMGetGuestCRx(PCVMCPUCC pVCpu, unsigned iReg, uint64_t *pValue);
    1553 VMMDECL(uint32_t)       CPUMGetGuestEFlags(PCVMCPU pVCpu);
    1554 VMMDECL(uint32_t)       CPUMGetGuestEIP(PCVMCPU pVCpu);
    1555 VMMDECL(uint64_t)       CPUMGetGuestRIP(PCVMCPU pVCpu);
    1556 VMMDECL(uint32_t)       CPUMGetGuestEAX(PCVMCPU pVCpu);
    1557 VMMDECL(uint32_t)       CPUMGetGuestEBX(PCVMCPU pVCpu);
    1558 VMMDECL(uint32_t)       CPUMGetGuestECX(PCVMCPU pVCpu);
    1559 VMMDECL(uint32_t)       CPUMGetGuestEDX(PCVMCPU pVCpu);
    1560 VMMDECL(uint32_t)       CPUMGetGuestESI(PCVMCPU pVCpu);
    1561 VMMDECL(uint32_t)       CPUMGetGuestEDI(PCVMCPU pVCpu);
    1562 VMMDECL(uint32_t)       CPUMGetGuestESP(PCVMCPU pVCpu);
    1563 VMMDECL(uint32_t)       CPUMGetGuestEBP(PCVMCPU pVCpu);
    1564 VMMDECL(RTSEL)          CPUMGetGuestCS(PCVMCPU pVCpu);
    1565 VMMDECL(RTSEL)          CPUMGetGuestDS(PCVMCPU pVCpu);
    1566 VMMDECL(RTSEL)          CPUMGetGuestES(PCVMCPU pVCpu);
    1567 VMMDECL(RTSEL)          CPUMGetGuestFS(PCVMCPU pVCpu);
    1568 VMMDECL(RTSEL)          CPUMGetGuestGS(PCVMCPU pVCpu);
    1569 VMMDECL(RTSEL)          CPUMGetGuestSS(PCVMCPU pVCpu);
     1570 65 VMMDECL(uint64_t)       CPUMGetGuestFlatPC(PVMCPU pVCpu);
     1571 66 VMMDECL(uint64_t)       CPUMGetGuestFlatSP(PVMCPU pVCpu);
    1572 VMMDECL(uint64_t)       CPUMGetGuestDR0(PCVMCPU pVCpu);
    1573 VMMDECL(uint64_t)       CPUMGetGuestDR1(PCVMCPU pVCpu);
    1574 VMMDECL(uint64_t)       CPUMGetGuestDR2(PCVMCPU pVCpu);
    1575 VMMDECL(uint64_t)       CPUMGetGuestDR3(PCVMCPU pVCpu);
    1576 VMMDECL(uint64_t)       CPUMGetGuestDR6(PCVMCPU pVCpu);
    1577 VMMDECL(uint64_t)       CPUMGetGuestDR7(PCVMCPU pVCpu);
    1578 VMMDECL(int)            CPUMGetGuestDRx(PCVMCPU pVCpu, uint32_t iReg, uint64_t *pValue);
    1579 VMMDECL(void)           CPUMGetGuestCpuId(PVMCPUCC pVCpu, uint32_t iLeaf, uint32_t iSubLeaf, int f64BitMode,
    1580                                           uint32_t *pEax, uint32_t *pEbx, uint32_t *pEcx, uint32_t *pEdx);
    1581 VMMDECL(uint64_t)       CPUMGetGuestEFER(PCVMCPU pVCpu);
    1582 VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32FeatCtrl(PCVMCPUCC pVCpu);
    1583 VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32MtrrCap(PCVMCPU pVCpu);
    1584 VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32SmmMonitorCtl(PCVMCPUCC pVCpu);
    1585 VMM_INT_DECL(uint64_t)  CPUMGetGuestIa32VmxEptVpidCap(PCVMCPUCC pVCpu);
    1586 VMMDECL(VBOXSTRICTRC)   CPUMQueryGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *puValue);
    1587 VMMDECL(VBOXSTRICTRC)   CPUMSetGuestMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t uValue);
     1588 67 VMMDECL(CPUMCPUVENDOR)  CPUMGetGuestCpuVendor(PVM pVM);
     1589 68 VMMDECL(CPUMMICROARCH)  CPUMGetGuestMicroarch(PCVM pVM);
     1590 69 VMMDECL(void)           CPUMGetGuestAddrWidths(PCVM pVM, uint8_t *pcPhysAddrWidth, uint8_t *pcLinearAddrWidth);
    1591 VMMDECL(CPUMCPUVENDOR)  CPUMGetHostCpuVendor(PVM pVM);
    1592 VMMDECL(CPUMMICROARCH)  CPUMGetHostMicroarch(PCVM pVM);
     1593 70 /** @} */
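
A small sketch combining the getters above to classify the guest paging mode; it mirrors the inline predicates defined further down in this header, and the helper name is hypothetical:

/* Sketch: rough paging-mode classification from the control-register getters. */
static const char *cpumSketchPagingMode(PCVMCPU pVCpu)
{
    uint64_t const uCr0  = CPUMGetGuestCR0(pVCpu);
    uint64_t const uCr4  = CPUMGetGuestCR4(pVCpu);
    uint64_t const uEfer = CPUMGetGuestEFER(pVCpu);
    if (!(uCr0 & X86_CR0_PG))
        return "none";              /* paging disabled */
    if (uEfer & MSR_K6_EFER_LMA)
        return "long mode";         /* 4-level (or 5-level) paging */
    if (uCr4 & X86_CR4_PAE)
        return "PAE";
    return "legacy 32-bit";
}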
    1594 
    1595 /** @name Guest Register Setters.
    1596  * @{ */
    1597 VMMDECL(int)           CPUMSetGuestGDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
    1598 VMMDECL(int)           CPUMSetGuestIDTR(PVMCPU pVCpu, uint64_t GCPtrBase, uint16_t cbLimit);
    1599 VMMDECL(int)           CPUMSetGuestTR(PVMCPU pVCpu, uint16_t tr);
    1600 VMMDECL(int)           CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr);
    1601 VMMDECL(int)           CPUMSetGuestCR0(PVMCPUCC pVCpu, uint64_t cr0);
    1602 VMMDECL(int)           CPUMSetGuestCR2(PVMCPU pVCpu, uint64_t cr2);
    1603 VMMDECL(int)           CPUMSetGuestCR3(PVMCPU pVCpu, uint64_t cr3);
    1604 VMMDECL(int)           CPUMSetGuestCR4(PVMCPU pVCpu, uint64_t cr4);
    1605 VMMDECL(int)           CPUMSetGuestDR0(PVMCPUCC pVCpu, uint64_t uDr0);
    1606 VMMDECL(int)           CPUMSetGuestDR1(PVMCPUCC pVCpu, uint64_t uDr1);
    1607 VMMDECL(int)           CPUMSetGuestDR2(PVMCPUCC pVCpu, uint64_t uDr2);
    1608 VMMDECL(int)           CPUMSetGuestDR3(PVMCPUCC pVCpu, uint64_t uDr3);
    1609 VMMDECL(int)           CPUMSetGuestDR6(PVMCPU pVCpu, uint64_t uDr6);
    1610 VMMDECL(int)           CPUMSetGuestDR7(PVMCPUCC pVCpu, uint64_t uDr7);
    1611 VMMDECL(int)           CPUMSetGuestDRx(PVMCPUCC pVCpu, uint32_t iReg, uint64_t Value);
    1612 VMM_INT_DECL(int)      CPUMSetGuestXcr0(PVMCPUCC pVCpu, uint64_t uNewValue);
    1613 VMMDECL(int)           CPUMSetGuestEFlags(PVMCPU pVCpu, uint32_t eflags);
    1614 VMMDECL(int)           CPUMSetGuestEIP(PVMCPU pVCpu, uint32_t eip);
    1615 VMMDECL(int)           CPUMSetGuestEAX(PVMCPU pVCpu, uint32_t eax);
    1616 VMMDECL(int)           CPUMSetGuestEBX(PVMCPU pVCpu, uint32_t ebx);
    1617 VMMDECL(int)           CPUMSetGuestECX(PVMCPU pVCpu, uint32_t ecx);
    1618 VMMDECL(int)           CPUMSetGuestEDX(PVMCPU pVCpu, uint32_t edx);
    1619 VMMDECL(int)           CPUMSetGuestESI(PVMCPU pVCpu, uint32_t esi);
    1620 VMMDECL(int)           CPUMSetGuestEDI(PVMCPU pVCpu, uint32_t edi);
    1621 VMMDECL(int)           CPUMSetGuestESP(PVMCPU pVCpu, uint32_t esp);
    1622 VMMDECL(int)           CPUMSetGuestEBP(PVMCPU pVCpu, uint32_t ebp);
    1623 VMMDECL(int)           CPUMSetGuestCS(PVMCPU pVCpu, uint16_t cs);
    1624 VMMDECL(int)           CPUMSetGuestDS(PVMCPU pVCpu, uint16_t ds);
    1625 VMMDECL(int)           CPUMSetGuestES(PVMCPU pVCpu, uint16_t es);
    1626 VMMDECL(int)           CPUMSetGuestFS(PVMCPU pVCpu, uint16_t fs);
    1627 VMMDECL(int)           CPUMSetGuestGS(PVMCPU pVCpu, uint16_t gs);
    1628 VMMDECL(int)           CPUMSetGuestSS(PVMCPU pVCpu, uint16_t ss);
    1629 VMMDECL(void)          CPUMSetGuestEFER(PVMCPU pVCpu, uint64_t val);
    1630 VMMR3_INT_DECL(void)   CPUMR3SetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
    1631 VMMR3_INT_DECL(void)   CPUMR3ClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
    1632 VMMR3_INT_DECL(bool)   CPUMR3GetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature);
    1633 VMMDECL(bool)          CPUMSetGuestCpuIdPerCpuApicFeature(PVMCPU pVCpu, bool fVisible);
    1634 VMMDECL(void)          CPUMSetGuestCtx(PVMCPU pVCpu, const PCPUMCTX pCtx);
    1635 VMM_INT_DECL(void)     CPUMSetGuestTscAux(PVMCPUCC pVCpu, uint64_t uValue);
    1636 VMM_INT_DECL(uint64_t) CPUMGetGuestTscAux(PVMCPUCC pVCpu);
    1637 VMM_INT_DECL(void)     CPUMSetGuestSpecCtrl(PVMCPUCC pVCpu, uint64_t uValue);
    1638 VMM_INT_DECL(uint64_t) CPUMGetGuestSpecCtrl(PVMCPUCC pVCpu);
    1639 VMM_INT_DECL(uint64_t) CPUMGetGuestCR4ValidMask(PVM pVM);
    1640 VMM_INT_DECL(void)     CPUMSetGuestPaePdpes(PVMCPU pVCpu, PCX86PDPE paPaePdpes);
    1641 VMM_INT_DECL(void)     CPUMGetGuestPaePdpes(PVMCPU pVCpu, PX86PDPE paPaePdpes);
    1642 /** @} */
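
The MSR accessors return VBOXSTRICTRC so a failure can be translated into a guest #GP(0). A hedged sketch using MSR_K6_EFER and MSR_K6_EFER_NXE from iprt/x86.h; the helper name is hypothetical:

/* Sketch: read-modify-write an MSR through the CPUM accessors. */
static VBOXSTRICTRC cpumSketchSetNxe(PVMCPUCC pVCpu)
{
    uint64_t uEfer = 0;
    VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(pVCpu, MSR_K6_EFER, &uEfer);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = CPUMSetGuestMsr(pVCpu, MSR_K6_EFER, uEfer | MSR_K6_EFER_NXE);
    /* Any other status is the caller's cue to raise an exception for the guest. */
    return rcStrict;
}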
    1643 
     1644 71
     1645 72 /** @name Misc Guest Predicate Functions.
     1646 73  * @{  */
    1647 VMMDECL(bool)       CPUMIsGuestIn64BitCode(PVMCPU pVCpu);
    1648 VMMDECL(bool)       CPUMIsGuestNXEnabled(PCVMCPU pVCpu);
    1649 VMMDECL(bool)       CPUMIsGuestPageSizeExtEnabled(PCVMCPU pVCpu);
    1650 VMMDECL(bool)       CPUMIsGuestPagingEnabled(PCVMCPU pVCpu);
    1651 VMMDECL(bool)       CPUMIsGuestR0WriteProtEnabled(PCVMCPU pVCpu);
    1652 VMMDECL(bool)       CPUMIsGuestInRealMode(PCVMCPU pVCpu);
    1653 VMMDECL(bool)       CPUMIsGuestInRealOrV86Mode(PCVMCPU pVCpu);
    1654 VMMDECL(bool)       CPUMIsGuestInProtectedMode(PCVMCPU pVCpu);
    1655 VMMDECL(bool)       CPUMIsGuestInPagedProtectedMode(PCVMCPU pVCpu);
    1656 VMMDECL(bool)       CPUMIsGuestInLongMode(PCVMCPU pVCpu);
    1657 VMMDECL(bool)       CPUMIsGuestInPAEMode(PCVMCPU pVCpu);
     74 VMMDECL(bool)           CPUMIsGuestIn64BitCode(PVMCPU pVCpu);
     1658 75 /** @} */
     1659 76
    1660 /** @name Nested Hardware-Virtualization Helpers.
    1661  * @{  */
    1662 VMM_INT_DECL(bool)      CPUMIsGuestPhysIntrEnabled(PVMCPU pVCpu);
    1663 VMM_INT_DECL(bool)      CPUMIsGuestVirtIntrEnabled(PVMCPU pVCpu);
    1664 VMM_INT_DECL(uint64_t)  CPUMApplyNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
    1665 VMM_INT_DECL(uint64_t)  CPUMRemoveNestedGuestTscOffset(PCVMCPU pVCpu, uint64_t uTscValue);
    1666 
    1667 /* SVM helpers. */
    1668 VMM_INT_DECL(bool)      CPUMIsGuestSvmPhysIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
    1669 VMM_INT_DECL(bool)      CPUMIsGuestSvmVirtIntrEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx);
    1670 VMM_INT_DECL(uint8_t)   CPUMGetGuestSvmVirtIntrVector(PCCPUMCTX pCtx);
    1671 VMM_INT_DECL(void)      CPUMSvmVmExitRestoreHostState(PVMCPUCC pVCpu, PCPUMCTX pCtx);
    1672 VMM_INT_DECL(void)      CPUMSvmVmRunSaveHostState(PCPUMCTX pCtx, uint8_t cbInstr);
    1673 VMM_INT_DECL(bool)      CPUMIsSvmIoInterceptSet(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
    1674                                                 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
    1675                                                 PSVMIOIOEXITINFO pIoExitInfo);
    1676 VMM_INT_DECL(int)       CPUMGetSvmMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit);
    1677 
    1678 /* VMX helpers. */
    1679 VMM_INT_DECL(bool)      CPUMIsGuestVmxVmcsFieldValid(PVMCC pVM, uint64_t u64VmcsField);
    1680 VMM_INT_DECL(bool)      CPUMIsGuestVmxIoInterceptSet(PCVMCPU pVCpu, uint16_t u16Port, uint8_t cbAccess);
    1681 VMM_INT_DECL(bool)      CPUMIsGuestVmxMovToCr3InterceptSet(PVMCPU pVCpu, uint64_t uNewCr3);
    1682 VMM_INT_DECL(bool)      CPUMIsGuestVmxVmreadVmwriteInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint64_t u64FieldEnc);
    1683 VMM_INT_DECL(int)       CPUMStartGuestVmxPremptTimer(PVMCPUCC pVCpu, uint32_t uTimer, uint8_t cShift, uint64_t *pu64EntryTick);
    1684 VMM_INT_DECL(int)       CPUMStopGuestVmxPremptTimer(PVMCPUCC pVCpu);
    1685 VMM_INT_DECL(uint32_t)  CPUMGetVmxMsrPermission(void const *pvMsrBitmap, uint32_t idMsr);
    1686 VMM_INT_DECL(bool)      CPUMIsGuestVmxEptPagingEnabled(PCVMCPUCC pVCpu);
    1687 VMM_INT_DECL(bool)      CPUMIsGuestVmxEptPaePagingEnabled(PCVMCPUCC pVCpu);
    1688 VMM_INT_DECL(uint64_t)  CPUMGetGuestVmxApicAccessPageAddr(PCVMCPUCC pVCpu);
    1689 /** @} */
    1690 
    1691 /** @name Externalized State Helpers.
    1692  * @{ */
    1693 /** @def CPUM_ASSERT_NOT_EXTRN
    1694  * Macro for asserting that @a a_fNotExtrn are present.
    1695  *
    1696  * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
    1697  * @param   a_fNotExtrn     Mask of CPUMCTX_EXTRN_XXX bits to check.
    1698  *
    1699  * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
    1700  */
    1701 #define CPUM_ASSERT_NOT_EXTRN(a_pVCpu, a_fNotExtrn) \
    1702     AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fNotExtrn)), \
    1703               ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fNotExtrn)))
    1704 
    1705 /** @def CPUMCTX_ASSERT_NOT_EXTRN
    1706  * Macro for asserting that @a a_fNotExtrn are present in @a a_pCtx.
    1707  *
    1708  * @param   a_pCtx          The CPU context of the calling EMT.
    1709  * @param   a_fNotExtrn     Mask of CPUMCTX_EXTRN_XXX bits to check.
    1710  */
    1711 #define CPUMCTX_ASSERT_NOT_EXTRN(a_pCtx, a_fNotExtrn) \
    1712     AssertMsg(!((a_pCtx)->fExtrn & (a_fNotExtrn)), \
    1713               ("%#RX64; a_fNotExtrn=%#RX64\n", (a_pCtx)->fExtrn, (a_fNotExtrn)))
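
A usage sketch for the assertion macros above; the helper name is hypothetical:

/* Sketch: any helper reading RIP should assert the register was imported. */
DECLINLINE(uint64_t) cpumSketchGetRip(PCCPUMCTX pCtx)
{
    CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
    return pCtx->rip;
}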
    1714 
    1715 /** @def CPUM_IMPORT_EXTRN_RET
    1716  * Macro for making sure the state specified by @a fExtrnImport is present,
    1717  * calling CPUMImportGuestStateOnDemand() to get it if necessary.
    1718  *
    1719  * Will return if CPUMImportGuestStateOnDemand() fails.
    1720  *
    1721  * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
    1722  * @param   a_fExtrnImport  Mask of CPUMCTX_EXTRN_XXX bits to get.
    1723  * @thread  EMT(a_pVCpu)
    1724  *
    1725  * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
    1726  */
    1727 #define CPUM_IMPORT_EXTRN_RET(a_pVCpu, a_fExtrnImport) \
    1728     do { \
    1729         if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
    1730         { /* already present, consider this likely */ } \
    1731         else \
    1732         { \
    1733             int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
    1734             AssertRCReturn(rcCpumImport, rcCpumImport); \
    1735         } \
    1736     } while (0)
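
A usage sketch; the handler name is hypothetical, and CPUMCTX_EXTRN_CR0 is assumed to be among the CPUMCTX_EXTRN_XXX bits defined elsewhere:

/* Sketch: typical use at the top of an EMT-side handler. */
static int cpumSketchHandler(PVMCPUCC pVCpu)
{
    CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CR0);
    /* pVCpu->cpum.GstCtx.rip and .cr0 are now safe to read. */
    return VINF_SUCCESS;
}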
    1737 
    1738 /** @def CPUM_IMPORT_EXTRN_RCSTRICT
    1739  * Macro for making sure the state specified by @a fExtrnImport is present,
    1740  * calling CPUMImportGuestStateOnDemand() to get it if necessary.
    1741  *
    1742  * Will update a_rcStrict if CPUMImportGuestStateOnDemand() fails.
    1743  *
    1744  * @param   a_pVCpu         The cross context virtual CPU structure of the calling EMT.
    1745  * @param   a_fExtrnImport  Mask of CPUMCTX_EXTRN_XXX bits to get.
    1746  * @param   a_rcStrict      Strict status code variable to update on failure.
    1747  * @thread  EMT(a_pVCpu)
    1748  *
    1749  * @remarks Requires VMCPU_INCL_CPUM_GST_CTX to be defined.
    1750  */
    1751 #define CPUM_IMPORT_EXTRN_RCSTRICT(a_pVCpu, a_fExtrnImport, a_rcStrict) \
    1752     do { \
    1753         if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
    1754         { /* already present, consider this likely */ } \
    1755         else \
    1756         { \
    1757             int rcCpumImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
    1758             AssertStmt(RT_SUCCESS(rcCpumImport) || RT_FAILURE_NP(a_rcStrict), a_rcStrict = rcCpumImport); \
    1759         } \
    1760     } while (0)
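
By contrast, the strict variant folds an import failure into an existing status variable instead of returning immediately; a minimal sketch with a hypothetical helper name:

/* Sketch: accumulate an import failure into an existing strict status code. */
static VBOXSTRICTRC cpumSketchStrictHandler(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_RIP, rcStrict);
    return rcStrict;
}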
    1761 
    1762 VMM_INT_DECL(int) CPUMImportGuestStateOnDemand(PVMCPUCC pVCpu, uint64_t fExtrnImport);
    1763 /** @} */
    1764 
    1765 #if !defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS) || defined(DOXYGEN_RUNNING)
    1766 /** @name Inlined Guest Getters and predicates Functions.
    1767  * @{ */
    1768 
    1769 /**
    1770  * Gets valid CR0 bits for the guest.
    1771  *
    1772  * @returns Valid CR0 bits.
    1773  */
    1774 DECLINLINE(uint64_t) CPUMGetGuestCR0ValidMask(void)
    1775 {
    1776     return (  X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
    1777             | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
    1778             | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG);
    1779 }
    1780 
    1781 /**
    1782  * Tests if the guest is running in real mode or not.
    1783  *
    1784  * @returns true if in real mode, otherwise false.
    1785  * @param   pCtx    Current CPU context.
    1786  */
    1787 DECLINLINE(bool) CPUMIsGuestInRealModeEx(PCCPUMCTX pCtx)
    1788 {
    1789     return !(pCtx->cr0 & X86_CR0_PE);
    1790 }
    1791 
    1792 /**
    1793  * Tests if the guest is running in real or virtual 8086 mode.
    1794  *
    1795  * @returns @c true if it is, @c false if not.
    1796  * @param   pCtx    Current CPU context.
    1797  */
    1798 DECLINLINE(bool) CPUMIsGuestInRealOrV86ModeEx(PCCPUMCTX pCtx)
    1799 {
    1800     return !(pCtx->cr0 & X86_CR0_PE)
    1801         || pCtx->eflags.Bits.u1VM;  /* Cannot be set in long mode. Intel spec 2.3.1 "System Flags and Fields in IA-32e Mode". */
    1802 }
    1803 
    1804 /**
    1805  * Tests if the guest is running in virtual 8086 mode.
    1806  *
    1807  * @returns @c true if it is, @c false if not.
    1808  * @param   pCtx    Current CPU context.
    1809  */
    1810 DECLINLINE(bool) CPUMIsGuestInV86ModeEx(PCCPUMCTX pCtx)
    1811 {
    1812     return (pCtx->eflags.Bits.u1VM == 1);
    1813 }
    1814 
    1815 /**
     1816  * Tests if the guest is running in paged protected mode or not.
    1817  *
    1818  * @returns true if in paged protected mode, otherwise false.
    1819  * @param   pCtx    Current CPU context.
    1820  */
    1821 DECLINLINE(bool) CPUMIsGuestInPagedProtectedModeEx(PCPUMCTX pCtx)
    1822 {
    1823     return (pCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
    1824 }
    1825 
    1826 /**
    1827  * Tests if the guest is running in long mode or not.
    1828  *
    1829  * @returns true if in long mode, otherwise false.
    1830  * @param   pCtx    Current CPU context.
    1831  */
    1832 DECLINLINE(bool) CPUMIsGuestInLongModeEx(PCCPUMCTX pCtx)
    1833 {
    1834     return (pCtx->msrEFER & MSR_K6_EFER_LMA) == MSR_K6_EFER_LMA;
    1835 }
    1836 
    1837 VMM_INT_DECL(bool) CPUMIsGuestIn64BitCodeSlow(PCPUMCTX pCtx);
    1838 
    1839 /**
     1840  * Tests if the guest is running in 64-bit mode or not.
     1841  *
     1842  * @returns true if in 64-bit protected mode, otherwise false.
    1843  * @param   pCtx    Current CPU context.
    1844  */
    1845 DECLINLINE(bool) CPUMIsGuestIn64BitCodeEx(PCPUMCTX pCtx)
    1846 {
    1847     if (!(pCtx->msrEFER & MSR_K6_EFER_LMA))
    1848         return false;
    1849     if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(NULL, &pCtx->cs))
    1850         return CPUMIsGuestIn64BitCodeSlow(pCtx);
    1851     return pCtx->cs.Attr.n.u1Long;
    1852 }
    1853 
    1854 /**
    1855  * Tests if the guest has paging enabled or not.
    1856  *
    1857  * @returns true if paging is enabled, otherwise false.
    1858  * @param   pCtx    Current CPU context.
    1859  */
    1860 DECLINLINE(bool) CPUMIsGuestPagingEnabledEx(PCCPUMCTX pCtx)
    1861 {
    1862     return !!(pCtx->cr0 & X86_CR0_PG);
    1863 }
    1864 
    1865 /**
    1866  * Tests if PAE paging is enabled given the relevant control registers.
    1867  *
    1868  * @returns @c true if in PAE mode, @c false otherwise.
    1869  * @param   uCr0        The CR0 value.
    1870  * @param   uCr4        The CR4 value.
    1871  * @param   uEferMsr    The EFER value.
    1872  */
    1873 DECLINLINE(bool) CPUMIsPaePagingEnabled(uint64_t uCr0, uint64_t uCr4, uint64_t uEferMsr)
    1874 {
    1875     /* Intel mentions EFER.LMA and EFER.LME in different parts of their spec. We shall use EFER.LMA rather
    1876        than EFER.LME as it reflects if the CPU has entered paging with EFER.LME set.  */
    1877     return (   (uCr4 & X86_CR4_PAE)
    1878             && (uCr0 & X86_CR0_PG)
    1879             && !(uEferMsr & MSR_K6_EFER_LMA));
    1880 }
    1881 
    1882 /**
    1883  * Tests if the guest is running in PAE mode or not.
    1884  *
    1885  * @returns @c true if in PAE mode, @c false otherwise.
    1886  * @param   pCtx    Current CPU context.
    1887  */
    1888 DECLINLINE(bool) CPUMIsGuestInPAEModeEx(PCCPUMCTX pCtx)
    1889 {
    1890     return CPUMIsPaePagingEnabled(pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
    1891 }
    1892 
    1893 /**
    1894  * Tests if the guest has AMD SVM enabled or not.
    1895  *
     1896  * @returns true if SVM is enabled, otherwise false.
    1897  * @param   pCtx    Current CPU context.
    1898  */
    1899 DECLINLINE(bool) CPUMIsGuestSvmEnabled(PCCPUMCTX pCtx)
    1900 {
    1901     return RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_SVME);
    1902 }
    1903 
    1904 /**
    1905  * Tests if the guest has Intel VT-x enabled or not.
    1906  *
    1907  * @returns true if VMX is enabled, otherwise false.
    1908  * @param   pCtx    Current CPU context.
    1909  */
    1910 DECLINLINE(bool) CPUMIsGuestVmxEnabled(PCCPUMCTX pCtx)
    1911 {
    1912     return RT_BOOL(pCtx->cr4 & X86_CR4_VMXE);
    1913 }
    1914 
    1915 /**
    1916  * Returns the guest's global-interrupt (GIF) flag.
    1917  *
    1918  * @returns true when global-interrupts are enabled, otherwise false.
    1919  * @param   pCtx    Current CPU context.
    1920  */
    1921 DECLINLINE(bool) CPUMGetGuestGif(PCCPUMCTX pCtx)
    1922 {
    1923     return pCtx->hwvirt.fGif;
    1924 }
    1925 
    1926 /**
    1927  * Sets the guest's global-interrupt flag (GIF).
    1928  *
    1929  * @param   pCtx    Current CPU context.
    1930  * @param   fGif    The value to set.
    1931  */
    1932 DECLINLINE(void) CPUMSetGuestGif(PCPUMCTX pCtx, bool fGif)
    1933 {
    1934     pCtx->hwvirt.fGif = fGif;
    1935 }
    1936 
    1937 /**
    1938  * Checks if we're in an "interrupt shadow", i.e. after a STI, POP SS or MOV SS.
    1939  *
     1940  * This also inhibits NMIs, except perhaps for nested guests.
    1941  *
    1942  * @returns true if interrupts are inhibited by interrupt shadow, false if not.
    1943  * @param   pCtx    Current guest CPU context.
    1944  * @note    Requires pCtx->rip to be up to date.
    1945  * @note    Does NOT clear CPUMCTX_INHIBIT_SHADOW when CPUMCTX::uRipInhibitInt
    1946  *          differs from CPUMCTX::rip.
    1947  */
    1948 DECLINLINE(bool) CPUMIsInInterruptShadow(PCCPUMCTX pCtx)
    1949 {
    1950     if (!(pCtx->eflags.uBoth & CPUMCTX_INHIBIT_SHADOW))
    1951         return false;
    1952 
    1953     CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
    1954     return pCtx->uRipInhibitInt == pCtx->rip;
    1955 }
    1956 
    1957 /**
    1958  * Checks if we're in an "interrupt shadow", i.e. after a STI, POP SS or MOV SS,
    1959  * updating the state if stale.
    1960  *
     1961  * This also inhibits NMIs, except perhaps for nested guests.
    1962  *
    1963  * @retval  true if interrupts are inhibited by interrupt shadow.
    1964  * @retval  false if not.
    1965  * @param   pCtx    Current guest CPU context.
    1966  * @note    Requires pCtx->rip to be up to date.
    1967  */
    1968 DECLINLINE(bool) CPUMIsInInterruptShadowWithUpdate(PCPUMCTX pCtx)
    1969 {
    1970     if (!(pCtx->eflags.uBoth & CPUMCTX_INHIBIT_SHADOW))
    1971         return false;
    1972 
    1973     CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
    1974     if (pCtx->uRipInhibitInt == pCtx->rip)
    1975         return true;
    1976 
    1977     pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
    1978     return false;
    1979 }
    1980 
    1981 /**
    1982  * Checks if we're in an "interrupt shadow" due to a POP SS or MOV SS
    1983  * instruction.
    1984  *
     1985  * This also inhibits NMIs, except perhaps for nested guests.
    1986  *
    1987  * @retval  true if interrupts are inhibited due to POP/MOV SS.
    1988  * @retval  false if not.
    1989  * @param   pCtx    Current guest CPU context.
    1990  * @note    Requires pCtx->rip to be up to date.
    1991  * @note    Does NOT clear CPUMCTX_INHIBIT_SHADOW when CPUMCTX::uRipInhibitInt
    1992  *          differs from CPUMCTX::rip.
    1993  * @note    Both CPUMIsInInterruptShadowAfterSti() and this function may return
    1994  *          true depending on the execution engine being used.
    1995  */
    1996 DECLINLINE(bool) CPUMIsInInterruptShadowAfterSs(PCCPUMCTX pCtx)
    1997 {
    1998     if (!(pCtx->eflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS))
    1999         return false;
    2000 
    2001     CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
    2002     return pCtx->uRipInhibitInt == pCtx->rip;
    2003 }
    2004 
    2005 /**
    2006  * Checks if we're in an "interrupt shadow" due to an STI instruction.
    2007  *
     2008  * This also inhibits NMIs, except perhaps for nested guests.
    2009  *
    2010  * @retval  true if interrupts are inhibited due to STI.
    2011  * @retval  false if not.
    2012  * @param   pCtx    Current guest CPU context.
    2013  * @note    Requires pCtx->rip to be up to date.
    2014  * @note    Does NOT clear CPUMCTX_INHIBIT_SHADOW when CPUMCTX::uRipInhibitInt
    2015  *          differs from CPUMCTX::rip.
    2016  * @note    Both CPUMIsInInterruptShadowAfterSs() and this function may return
    2017  *          true depending on the execution engine being used.
    2018  */
    2019 DECLINLINE(bool) CPUMIsInInterruptShadowAfterSti(PCCPUMCTX pCtx)
    2020 {
    2021     if (!(pCtx->eflags.uBoth & CPUMCTX_INHIBIT_SHADOW_STI))
    2022         return false;
    2023 
    2024     CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
    2025     return pCtx->uRipInhibitInt == pCtx->rip;
    2026 }
    2027 
    2028 /**
    2029  * Sets the "interrupt shadow" flag, after a STI, POP SS or MOV SS instruction.
    2030  *
    2031  * @param   pCtx    Current guest CPU context.
    2032  * @note    Requires pCtx->rip to be up to date.
    2033  */
    2034 DECLINLINE(void) CPUMSetInInterruptShadow(PCPUMCTX pCtx)
    2035 {
    2036     CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
    2037     pCtx->eflags.uBoth |= CPUMCTX_INHIBIT_SHADOW;
    2038     pCtx->uRipInhibitInt = pCtx->rip;
    2039 }
    2040 
    2041 /**
    2042  * Sets the "interrupt shadow" flag, after a STI, POP SS or MOV SS instruction,
    2043  * extended version.
    2044  *
    2045  * @param   pCtx    Current guest CPU context.
    2046  * @param   rip     The RIP for which it is inhibited.
    2047  */
    2048 DECLINLINE(void) CPUMSetInInterruptShadowEx(PCPUMCTX pCtx, uint64_t rip)
    2049 {
    2050     pCtx->eflags.uBoth  |= CPUMCTX_INHIBIT_SHADOW;
    2051     pCtx->uRipInhibitInt = rip;
    2052 }
    2053 
    2054 /**
    2055  * Sets the "interrupt shadow" flag after a POP SS or MOV SS instruction.
    2056  *
    2057  * @param   pCtx    Current guest CPU context.
    2058  * @note    Requires pCtx->rip to be up to date.
    2059  */
    2060 DECLINLINE(void) CPUMSetInInterruptShadowSs(PCPUMCTX pCtx)
    2061 {
    2062     CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
    2063     pCtx->eflags.uBoth  |= CPUMCTX_INHIBIT_SHADOW_SS;
    2064     pCtx->uRipInhibitInt = pCtx->rip;
    2065 }
    2066 
    2067 /**
    2068  * Sets the "interrupt shadow" flag after an STI instruction.
    2069  *
    2070  * @param   pCtx    Current guest CPU context.
    2071  * @note    Requires pCtx->rip to be up to date.
    2072  */
    2073 DECLINLINE(void) CPUMSetInInterruptShadowSti(PCPUMCTX pCtx)
    2074 {
    2075     CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
    2076     pCtx->eflags.uBoth  |= CPUMCTX_INHIBIT_SHADOW_STI;
    2077     pCtx->uRipInhibitInt = pCtx->rip;
    2078 }
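
A simplified sketch of how these setters are meant to be used when emulating STI (the real emulation lives elsewhere, e.g. in IEM). Note that RIP is advanced first, so the shadow covers the instruction after the STI:

/* Sketch: STI keeps interrupts inhibited for exactly one more instruction. */
static void cpumSketchEmulateSti(PCPUMCTX pCtx)
{
    pCtx->eflags.uBoth |= X86_EFL_IF;   /* STI sets IF... */
    pCtx->rip          += 1;            /* ...advance past the one-byte opcode... */
    CPUMSetInInterruptShadowSti(pCtx);  /* ...and arm the shadow for the new RIP. */
}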
    2079 
    2080 /**
    2081  * Clears the "interrupt shadow" flag.
    2082  *
    2083  * @param   pCtx    Current guest CPU context.
    2084  */
    2085 DECLINLINE(void) CPUMClearInterruptShadow(PCPUMCTX pCtx)
    2086 {
    2087     pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
    2088 }
    2089 
    2090 /**
    2091  * Update the "interrupt shadow" flag.
    2092  *
    2093  * @param   pCtx        Current guest CPU context.
    2094  * @param   fInhibited  The new state.
    2095  * @note    Requires pCtx->rip to be up to date.
    2096  */
    2097 DECLINLINE(void) CPUMUpdateInterruptShadow(PCPUMCTX pCtx, bool fInhibited)
    2098 {
    2099     CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RIP);
    2100     if (!fInhibited)
    2101         pCtx->eflags.uBoth  &= ~CPUMCTX_INHIBIT_SHADOW;
    2102     else
    2103     {
    2104         pCtx->eflags.uBoth  |= CPUMCTX_INHIBIT_SHADOW;
    2105         pCtx->uRipInhibitInt = pCtx->rip;
    2106     }
    2107 }
    2108 
    2109 /**
    2110  * Update the "interrupt shadow" flag, extended version.
    2111  *
    2112  * @returns fInhibited.
    2113  * @param   pCtx        Current guest CPU context.
    2114  * @param   fInhibited  The new state.
    2115  * @param   rip         The RIP for which it is inhibited.
    2116  */
    2117 DECLINLINE(bool) CPUMUpdateInterruptShadowEx(PCPUMCTX pCtx, bool fInhibited, uint64_t rip)
    2118 {
    2119     if (!fInhibited)
    2120         pCtx->eflags.uBoth  &= ~CPUMCTX_INHIBIT_SHADOW;
    2121     else
    2122     {
    2123         pCtx->eflags.uBoth  |= CPUMCTX_INHIBIT_SHADOW;
    2124         pCtx->uRipInhibitInt = rip;
    2125     }
    2126     return fInhibited;
    2127 }
    2128 
    2129 /**
    2130  * Update the two "interrupt shadow" flags separately, extended version.
    2131  *
    2132  * @param   pCtx            Current guest CPU context.
    2133  * @param   fInhibitedBySs  The new state for the MOV SS & POP SS aspect.
    2134  * @param   fInhibitedBySti The new state for the STI aspect.
    2135  * @param   rip             The RIP for which it is inhibited.
    2136  */
    2137 DECLINLINE(void) CPUMUpdateInterruptShadowSsStiEx(PCPUMCTX pCtx, bool fInhibitedBySs, bool fInhibitedBySti, uint64_t rip)
    2138 {
    2139     if (!(fInhibitedBySs | fInhibitedBySti))
    2140         pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_SHADOW;
    2141     else
    2142     {
    2143         pCtx->eflags.uBoth |= (fInhibitedBySs  ? CPUMCTX_INHIBIT_SHADOW_SS  : UINT32_C(0))
    2144                            |  (fInhibitedBySti ? CPUMCTX_INHIBIT_SHADOW_STI : UINT32_C(0));
    2145         pCtx->uRipInhibitInt = rip;
    2146     }
    2147 }
    2148 
    2149 /* VMX forward declarations used by extended function versions: */
    2150 DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx);
    2151 DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCCPUMCTX pCtx, uint32_t uPinCtls);
    2152 DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCCPUMCTX pCtx);
    2153 DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCPUMCTX pCtx, bool fBlocking);
    2154 
    2155 /**
     2156  * Checks whether interrupts, including NMIs, are inhibited by pending NMI
    2157  * delivery.
    2158  *
    2159  * This only checks the inhibit mask.
    2160  *
    2161  * @retval  true if interrupts are inhibited by NMI handling.
    2162  * @retval  false if interrupts are not inhibited by NMI handling.
    2163  * @param   pCtx        Current guest CPU context.
    2164  */
    2165 DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmi(PCCPUMCTX pCtx)
    2166 {
    2167     return (pCtx->eflags.uBoth & CPUMCTX_INHIBIT_NMI) != 0;
    2168 }
    2169 
    2170 /**
    2171  * Extended version of CPUMAreInterruptsInhibitedByNmi() that takes VMX non-root
     2172  * mode into account when checking whether interrupts are inhibited by NMI.
    2173  *
    2174  * @retval  true if interrupts are inhibited by NMI handling.
    2175  * @retval  false if interrupts are not inhibited by NMI handling.
    2176  * @param   pCtx        Current guest CPU context.
    2177  */
    2178 DECLINLINE(bool) CPUMAreInterruptsInhibitedByNmiEx(PCCPUMCTX pCtx)
    2179 {
    2180     /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
    2181     if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
    2182         || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
    2183         return CPUMAreInterruptsInhibitedByNmi(pCtx);
    2184     return CPUMIsGuestVmxVirtNmiBlocking(pCtx);
    2185 }
    2186 
    2187 /**
     2188  * Marks interrupts, including NMIs, as inhibited by pending NMI delivery.
    2189  *
    2190  * @param   pCtx        Current guest CPU context.
    2191  */
    2192 DECLINLINE(void) CPUMSetInterruptInhibitingByNmi(PCPUMCTX pCtx)
    2193 {
    2194     pCtx->eflags.uBoth |= CPUMCTX_INHIBIT_NMI;
    2195 }
    2196 
    2197 /**
    2198  * Extended version of CPUMSetInterruptInhibitingByNmi() that takes VMX non-root
    2199  * mode into account when marking interrupts as inhibited by NMI.
    2200  *
    2201  * @param   pCtx        Current guest CPU context.
    2202  */
    2203 DECLINLINE(void) CPUMSetInterruptInhibitingByNmiEx(PCPUMCTX pCtx)
    2204 {
    2205     /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
    2206     if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
    2207         || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
    2208         CPUMSetInterruptInhibitingByNmi(pCtx);
    2209     else
    2210         CPUMSetGuestVmxVirtNmiBlocking(pCtx, true);
    2211 }
    2212 
    2213 /**
     2214  * Marks interrupts, including NMIs, as no longer inhibited by pending NMI
    2215  * delivery.
    2216  *
    2217  * @param   pCtx        Current guest CPU context.
    2218  */
    2219 DECLINLINE(void) CPUMClearInterruptInhibitingByNmi(PCPUMCTX pCtx)
    2220 {
    2221     pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_NMI;
    2222 }
    2223 
    2224 /**
    2225  * Extended version of CPUMClearInterruptInhibitingByNmi() that takes VMX
    2226  * non-root mode into account when doing the updating.
    2227  *
    2228  * @param   pCtx        Current guest CPU context.
    2229  */
    2230 DECLINLINE(void) CPUMClearInterruptInhibitingByNmiEx(PCPUMCTX pCtx)
    2231 {
    2232     /* See CPUMUpdateInterruptInhibitingByNmiEx for comments. */
    2233     if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
    2234         || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
    2235         CPUMClearInterruptInhibitingByNmi(pCtx);
    2236     else
    2237         CPUMSetGuestVmxVirtNmiBlocking(pCtx, false);
    2238 }
    2239 
    2240 /**
     2241  * Update whether interrupts, including NMIs, are inhibited by pending NMI
    2242  * delivery.
    2243  *
    2244  * @param   pCtx        Current guest CPU context.
    2245  * @param   fInhibited  The new state.
    2246  */
    2247 DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmi(PCPUMCTX pCtx, bool fInhibited)
    2248 {
    2249     if (!fInhibited)
    2250         pCtx->eflags.uBoth &= ~CPUMCTX_INHIBIT_NMI;
    2251     else
    2252         pCtx->eflags.uBoth |= CPUMCTX_INHIBIT_NMI;
    2253 }
    2254 
    2255 /**
    2256  * Extended version of CPUMUpdateInterruptInhibitingByNmi() that takes VMX
    2257  * non-root mode into account when updating the state.
    2258  *
    2259  * @param   pCtx        Current guest CPU context.
    2260  * @param   fInhibited  The new state.
    2261  */
    2262 DECLINLINE(void) CPUMUpdateInterruptInhibitingByNmiEx(PCPUMCTX pCtx, bool fInhibited)
    2263 {
    2264     /*
    2265      * Set the state of guest-NMI blocking in any of the following cases:
    2266      *   - We're not executing a nested-guest.
    2267      *   - We're executing an SVM nested-guest[1].
    2268      *   - We're executing a VMX nested-guest without virtual-NMIs enabled.
    2269      *
    2270      * [1] -- SVM does not support virtual-NMIs or virtual-NMI blocking.
    2271      *        SVM hypervisors must track NMI blocking themselves by intercepting
    2272      *        the IRET instruction after injection of an NMI.
    2273      */
    2274     if (   !CPUMIsGuestInVmxNonRootMode(pCtx)
    2275         || !CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI))
    2276         CPUMUpdateInterruptInhibitingByNmi(pCtx, fInhibited);
    2277     /*
    2278      * Set the state of virtual-NMI blocking, if we are executing a
    2279      * VMX nested-guest with virtual-NMIs enabled.
    2280      */
    2281     else
    2282         CPUMSetGuestVmxVirtNmiBlocking(pCtx, fInhibited);
    2283 }
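A minimal usage sketch of this helper family (illustrative only, not part of the changeset; the emu* wrappers are hypothetical, only the CPUM calls are from this header). An emulator marks the inhibition when it delivers an NMI and lifts it again on the matching IRET:

    static void emuDeliverNmi(PCPUMCTX pCtx)            /* hypothetical */
    {
        /* Block further NMIs (and interrupts) until the handler IRETs; for
           a VMX nested-guest with VMX_PIN_CTLS_VIRT_NMI this tracks
           virtual-NMI blocking instead of the architectural flag. */
        CPUMSetInterruptInhibitingByNmiEx(pCtx);
        /* ... inject the NMI via the event-injection path ... */
    }

    static void emuEmulateIretFromNmi(PCPUMCTX pCtx)    /* hypothetical */
    {
        /* ... pop the return frame, restore RIP/RSP/RFLAGS ... */
        CPUMClearInterruptInhibitingByNmiEx(pCtx);      /* unblock NMIs */
    }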
    2284 
    2285 
    2286 /**
    2287  * Checks if we are executing inside an SVM nested hardware-virtualized guest.
    2288  *
    2289  * @returns @c true if in SVM nested-guest mode, @c false otherwise.
    2290  * @param   pCtx    Current CPU context.
    2291  */
    2292 DECLINLINE(bool) CPUMIsGuestInSvmNestedHwVirtMode(PCCPUMCTX pCtx)
    2293 {
    2294     /*
    2295      * With AMD-V, the VMRUN intercept is a prerequisite to entering SVM guest-mode.
    2296      * See AMD spec. 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
    2297      */
    2298 #ifndef IN_RC
    2299     if (   pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM
    2300         || !(pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN))
    2301         return false;
    2302     return true;
    2303 #else
    2304     NOREF(pCtx);
    2305     return false;
    2306 #endif
    2307 }
    2308 
    2309 /**
    2310  * Checks if the guest is in VMX non-root operation.
    2311  *
    2312  * @returns @c true if in VMX non-root operation, @c false otherwise.
    2313  * @param   pCtx    Current CPU context.
    2314  */
    2315 DECLINLINE(bool) CPUMIsGuestInVmxNonRootMode(PCCPUMCTX pCtx)
    2316 {
    2317 #ifndef IN_RC
    2318     if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
    2319         return false;
    2320     Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
    2321     return pCtx->hwvirt.vmx.fInVmxNonRootMode;
    2322 #else
    2323     NOREF(pCtx);
    2324     return false;
    2325 #endif
    2326 }
    2327 
    2328 /**
    2329  * Checks if we are executing inside an SVM or VMX nested hardware-virtualized
    2330  * guest.
    2331  *
    2332  * @returns @c true if in nested-guest mode, @c false otherwise.
    2333  * @param   pCtx    Current CPU context.
    2334  */
    2335 DECLINLINE(bool) CPUMIsGuestInNestedHwvirtMode(PCCPUMCTX pCtx)
    2336 {
    2337 #if 0
    2338     return CPUMIsGuestInVmxNonRootMode(pCtx) || CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
    2339 #else
    2340     if (pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_NONE)
    2341         return false;
    2342     if (pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX)
    2343     {
    2344         Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
    2345         return pCtx->hwvirt.vmx.fInVmxNonRootMode;
    2346     }
    2347     Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
    2348     return RT_BOOL(pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN);
    2349 #endif
    2350 }
    2351 
    2352 /**
    2353  * Gets the nested hardware-virtualization mode (SVM or VMX) we are
    2354  * executing in, if any.
    2355  *
    2356  * @retval  CPUMHWVIRT_NONE if not in SVM or VMX non-root mode.
    2357  * @retval  CPUMHWVIRT_VMX if in VMX non-root mode.
    2358  * @retval  CPUMHWVIRT_SVM if in SVM non-root mode.
    2359  * @param   pCtx    Current CPU context.
    2360  */
    2361 DECLINLINE(CPUMHWVIRT) CPUMGetGuestInNestedHwvirtMode(PCCPUMCTX pCtx)
    2362 {
    2363     if (pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_NONE)
    2364         return CPUMHWVIRT_NONE;
    2365     if (pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_VMX)
    2366     {
    2367         Assert(!pCtx->hwvirt.vmx.fInVmxNonRootMode || pCtx->hwvirt.vmx.fInVmxRootMode);
    2368         return pCtx->hwvirt.vmx.fInVmxNonRootMode ? CPUMHWVIRT_VMX : CPUMHWVIRT_NONE;
    2369     }
    2370     Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
    2371     return pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN ? CPUMHWVIRT_SVM : CPUMHWVIRT_NONE;
    2372 }
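A sketch of dispatching on the returned mode (illustrative only; the comments stand in for real handling):

    switch (CPUMGetGuestInNestedHwvirtMode(pCtx))
    {
        case CPUMHWVIRT_VMX:    /* VMX non-root operation. */
            /* Consult the nested VMCS controls before emulating. */
            break;
        case CPUMHWVIRT_SVM:    /* Inside VMRUN with the VMRUN intercept set. */
            /* Consult the VMCB intercept bitmaps. */
            break;
        default:                /* CPUMHWVIRT_NONE: ordinary guest execution. */
            break;
    }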
    2373 
    2374 /**
    2375  * Checks if the guest is in VMX root operation.
    2376  *
    2377  * @returns @c true if in VMX root operation, @c false otherwise.
    2378  * @param   pCtx    Current CPU context.
    2379  */
    2380 DECLINLINE(bool) CPUMIsGuestInVmxRootMode(PCCPUMCTX pCtx)
    2381 {
    2382 #ifndef IN_RC
    2383     if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_VMX)
    2384         return false;
    2385     return pCtx->hwvirt.vmx.fInVmxRootMode;
    2386 #else
    2387     NOREF(pCtx);
    2388     return false;
    2389 #endif
    2390 }
    2391 
    2392 # ifndef IN_RC
    2393 
    2394 /**
    2395  * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
    2396  * active.
    2397  *
    2398  * @returns @c true if the intercept is set, @c false otherwise.
    2399  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    2400  * @param   pCtx        Current CPU context.
    2401  * @param   fIntercept  The SVM control/instruction intercept, see
    2402  *                      SVM_CTRL_INTERCEPT_*.
    2403  */
    2404 DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint64_t fIntercept)
    2405 {
    2406     if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
    2407         return false;
    2408     uint64_t u64Intercepts;
    2409     if (!HMGetGuestSvmCtrlIntercepts(pVCpu, &u64Intercepts))
    2410         u64Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl;
    2411     return RT_BOOL(u64Intercepts & fIntercept);
    2412 }
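An illustrative caller, sketching CPUID emulation for an SVM nested-guest (emuSvmVmexit is a hypothetical stand-in for the real #VMEXIT machinery; SVM_CTRL_INTERCEPT_CPUID and SVM_EXIT_CPUID are the hm_svm.h constants):

    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_CPUID))
        return emuSvmVmexit(pVCpu, SVM_EXIT_CPUID); /* hand the exit to L1 */
    /* No intercept: emulate CPUID directly for the nested-guest. */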
    2413 
    2414 /**
    2415  * Checks if the nested-guest VMCB has the specified CR read intercept active.
    2416  *
    2417  * @returns @c true if the intercept is set, @c false otherwise.
    2418  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    2419  * @param   pCtx    Current CPU context.
    2420  * @param   uCr     The CR register number (0 to 15).
    2421  */
    2422 DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
    2423 {
    2424     Assert(uCr < 16);
    2425     if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
    2426         return false;
    2427     uint16_t u16Intercepts;
    2428     if (!HMGetGuestSvmReadCRxIntercepts(pVCpu, &u16Intercepts))
    2429         u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptRdCRx;
    2430     return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
    2431 }
    2432 
    2433 /**
    2434  * Checks if the nested-guest VMCB has the specified CR write intercept active.
    2435  *
    2436  * @returns @c true if the intercept is set, @c false otherwise.
    2437  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    2438  * @param   pCtx    Current CPU context.
    2439  * @param   uCr     The CR register number (0 to 15).
    2440  */
    2441 DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
    2442 {
    2443     Assert(uCr < 16);
    2444     if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
    2445         return false;
    2446     uint16_t u16Intercepts;
    2447     if (!HMGetGuestSvmWriteCRxIntercepts(pVCpu, &u16Intercepts))
    2448         u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptWrCRx;
    2449     return RT_BOOL(u16Intercepts & (UINT16_C(1) << uCr));
    2450 }
    2451 
    2452 /**
    2453  * Checks if the nested-guest VMCB has the specified DR read intercept active.
    2454  *
    2455  * @returns @c true if the intercept is set, @c false otherwise.
    2456  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    2457  * @param   pCtx    Current CPU context.
    2458  * @param   uDr     The DR register number (0 to 15).
    2459  */
    2460 DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
    2461 {
    2462     Assert(uDr < 16);
    2463     if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
    2464         return false;
    2465     uint16_t u16Intercepts;
    2466     if (!HMGetGuestSvmReadDRxIntercepts(pVCpu, &u16Intercepts))
    2467         u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptRdDRx;
    2468     return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
    2469 }
    2470 
    2471 /**
    2472  * Checks if the nested-guest VMCB has the specified DR write intercept active.
    2473  *
    2474  * @returns @c true if the intercept is set, @c false otherwise.
    2475  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    2476  * @param   pCtx    Current CPU context.
    2477  * @param   uDr     The DR register number (0 to 15).
    2478  */
    2479 DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
    2480 {
    2481     Assert(uDr < 16);
    2482     if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
    2483         return false;
    2484     uint16_t u16Intercepts;
    2485     if (!HMGetGuestSvmWriteDRxIntercepts(pVCpu, &u16Intercepts))
    2486         u16Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u16InterceptWrDRx;
    2487     return RT_BOOL(u16Intercepts & (UINT16_C(1) << uDr));
    2488 }
    2489 
    2490 /**
    2491  * Checks if the nested-guest VMCB has the specified exception intercept active.
    2492  *
    2493  * @returns @c true if the intercept is active, @c false otherwise.
    2494  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    2495  * @param   pCtx        Current CPU context.
    2496  * @param   uVector     The exception / interrupt vector.
    2497  */
    2498 DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
    2499 {
    2500     Assert(uVector <= X86_XCPT_LAST);
    2501     if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
    2502         return false;
    2503     uint32_t u32Intercepts;
    2504     if (!HMGetGuestSvmXcptIntercepts(pVCpu, &u32Intercepts))
    2505         u32Intercepts = pCtx->hwvirt.svm.Vmcb.ctrl.u32InterceptXcpt;
    2506     return RT_BOOL(u32Intercepts & RT_BIT(uVector));
    2507 }
    2508 
    2509 /**
    2510  * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
    2511  *
    2512  * @returns @c true if virtual-interrupts are masked, @c false otherwise.
    2513  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    2514  * @param   pCtx    Current CPU context.
    2515  *
    2516  * @remarks Should only be called when SVM feature is exposed to the guest.
    2517  */
    2518 DECLINLINE(bool) CPUMIsGuestSvmVirtIntrMasking(PCVMCPU pVCpu, PCCPUMCTX pCtx)
    2519 {
    2520     if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
    2521         return false;
    2522     bool fVIntrMasking;
    2523     if (!HMGetGuestSvmVirtIntrMasking(pVCpu, &fVIntrMasking))
    2524         fVIntrMasking = pCtx->hwvirt.svm.Vmcb.ctrl.IntCtrl.n.u1VIntrMasking;
    2525     return fVIntrMasking;
    2526 }
    2527 
    2528 /**
    2529  * Checks if the nested-guest VMCB has nested-paging enabled.
    2530  *
    2531  * @returns @c true if nested-paging is enabled, @c false otherwise.
    2532  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    2533  * @param   pCtx    Current CPU context.
    2534  *
    2535  * @remarks Should only be called when SVM feature is exposed to the guest.
    2536  */
    2537 DECLINLINE(bool) CPUMIsGuestSvmNestedPagingEnabled(PCVMCPU pVCpu, PCCPUMCTX pCtx)
    2538 {
    2539     if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
    2540         return false;
    2541     bool fNestedPaging;
    2542     if (!HMGetGuestSvmNestedPaging(pVCpu, &fNestedPaging))
    2543         fNestedPaging = pCtx->hwvirt.svm.Vmcb.ctrl.NestedPagingCtrl.n.u1NestedPaging;
    2544     return fNestedPaging;
    2545 }
    2546 
    2547 /**
    2548  * Gets the nested-guest VMCB pause-filter count.
    2549  *
    2550  * @returns The pause-filter count.
    2551  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    2552  * @param   pCtx    Current CPU context.
    2553  *
    2554  * @remarks Should only be called when SVM feature is exposed to the guest.
    2555  */
    2556 DECLINLINE(uint16_t) CPUMGetGuestSvmPauseFilterCount(PCVMCPU pVCpu, PCCPUMCTX pCtx)
    2557 {
    2558     if (pCtx->hwvirt.enmHwvirt != CPUMHWVIRT_SVM)
    2559         return 0;
    2560     uint16_t u16PauseFilterCount;
    2561     if (!HMGetGuestSvmPauseFilterCount(pVCpu, &u16PauseFilterCount))
    2562         u16PauseFilterCount = pCtx->hwvirt.svm.Vmcb.ctrl.u16PauseFilterCount;
    2563     return u16PauseFilterCount;
    2564 }
    2565 
    2566 /**
    2567  * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
    2568  *
    2569  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    2570  * @param   pCtx        Current CPU context.
    2571  * @param   cbInstr     The length of the current instruction in bytes.
    2572  *
    2573  * @remarks Should only be called when SVM feature is exposed to the guest.
    2574  */
    2575 DECLINLINE(void) CPUMGuestSvmUpdateNRip(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr)
    2576 {
    2577     RT_NOREF(pVCpu);
    2578     Assert(pCtx->hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
    2579     pCtx->hwvirt.svm.Vmcb.ctrl.u64NextRIP = pCtx->rip + cbInstr;
    2580 }
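A sketch of the decode-assist pattern this helper supports (emuSvmVmexit again a hypothetical stand-in): NRIP must point past the intercepted instruction so the L1 hypervisor can skip it after handling the exit.

    CPUMGuestSvmUpdateNRip(pVCpu, pCtx, cbInstr);   /* NextRIP = RIP + cbInstr */
    return emuSvmVmexit(pVCpu, SVM_EXIT_MSR);       /* e.g. an RDMSR/WRMSR exit */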
    2581 
    2582 /**
    2583  * Checks whether any of the given Pin-based VM-execution controls are set when
    2584  * executing a nested-guest.
    2585  *
    2586  * @returns @c true if set, @c false otherwise.
    2587  * @param   pCtx        Current CPU context.
    2588  * @param   uPinCtls    The Pin-based VM-execution controls to check.
    2589  *
    2590  * @remarks This does not check if all given controls are set if more than one
    2591  *          control is passed in @a uPinCtls.
    2592  */
    2593 DECLINLINE(bool) CPUMIsGuestVmxPinCtlsSet(PCCPUMCTX pCtx, uint32_t uPinCtls)
    2594 {
    2595     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2596     return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & uPinCtls);
    2597 }
    2598 
    2599 /**
    2600  * Checks whether any of the given Processor-based VM-execution controls are set
    2601  * when executing a nested-guest.
    2602  *
    2603  * @returns @c true if set, @c false otherwise.
    2604  * @param   pCtx        Current CPU context.
    2605  * @param   uProcCtls   The Processor-based VM-execution controls to check.
    2606  *
    2607  * @remarks This does not check if all given controls are set if more than one
    2608  *          control is passed in @a uProcCtls.
    2609  */
    2610 DECLINLINE(bool) CPUMIsGuestVmxProcCtlsSet(PCCPUMCTX pCtx, uint32_t uProcCtls)
    2611 {
    2612     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2613     return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls & uProcCtls);
    2614 }
    2615 
    2616 /**
    2617  * Checks whether any of the given Secondary Processor-based VM-execution controls
    2618  * are set when executing a nested-guest.
    2619  *
    2620  * @returns @c true if set, @c false otherwise.
    2621  * @param   pCtx        Current CPU context.
    2622  * @param   uProcCtls2  The Secondary Processor-based VM-execution controls to
    2623  *                      check.
    2624  *
    2625  * @remarks This does not check if all given controls are set if more than one
    2626  *          control is passed in @a uProcCtls2.
    2627  */
    2628 DECLINLINE(bool) CPUMIsGuestVmxProcCtls2Set(PCCPUMCTX pCtx, uint32_t uProcCtls2)
    2629 {
    2630     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2631     return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ProcCtls2 & uProcCtls2);
    2632 }
    2633 
    2634 /**
    2635  * Checks whether any of the given Tertiary Processor-based VM-execution controls
    2636  * are set when executing a nested-guest.
    2637  *
    2638  * @returns @c true if set, @c false otherwise.
    2639  * @param   pCtx        Current CPU context.
    2640  * @param   uProcCtls3  The Tertiary Processor-based VM-execution controls to
    2641  *                      check.
    2642  *
    2643  * @remarks This does not check if all given controls are set if more than one
    2644  *          control is passed in @a uProcCtls3.
    2645  */
    2646 DECLINLINE(bool) CPUMIsGuestVmxProcCtls3Set(PCCPUMCTX pCtx, uint64_t uProcCtls3)
    2647 {
    2648     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2649     return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u64ProcCtls3.u & uProcCtls3);
    2650 }
    2651 
    2652 /**
    2653  * Checks whether any of the given VM-exit controls are set when executing a
    2654  * nested-guest.
    2655  *
    2656  * @returns @c true if set, @c false otherwise.
    2657  * @param   pCtx        Current CPU context.
    2658  * @param   uExitCtls   The VM-exit controls to check.
    2659  *
    2660  * @remarks This does not check if all given controls are set if more than one
    2661  *          control is passed in @a uExitCtls.
    2662  */
    2663 DECLINLINE(bool) CPUMIsGuestVmxExitCtlsSet(PCCPUMCTX pCtx, uint32_t uExitCtls)
    2664 {
    2665     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2666     return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32ExitCtls & uExitCtls);
    2667 }
    2668 
    2669 /**
    2670  * Checks whether any of the given VM-entry controls are set when executing a
    2671  * nested-guest.
    2672  *
    2673  * @returns @c true if set, @c false otherwise.
    2674  * @param   pCtx        Current CPU context.
    2675  * @param   uEntryCtls  The VM-entry controls to check.
    2676  *
    2677  * @remarks This does not check if all given controls are set if more than one
    2678  *          control is passed in @a uEntryCtls.
    2679  */
    2680 DECLINLINE(bool) CPUMIsGuestVmxEntryCtlsSet(PCCPUMCTX pCtx, uint32_t uEntryCtls)
    2681 {
    2682     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2683     return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32EntryCtls & uEntryCtls);
    2684 }
    2685 
    2686 /**
    2687  * Checks whether events injected in the nested-guest are subject to VM-exit checks.
    2688  *
    2689  * @returns @c true if set, @c false otherwise.
    2690  * @param   pCtx    Current CPU context.
    2691  */
    2692 DECLINLINE(bool) CPUMIsGuestVmxInterceptEvents(PCCPUMCTX pCtx)
    2693 {
    2694     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2695     return pCtx->hwvirt.vmx.fInterceptEvents;
    2696 }
    2697 
    2698 /**
    2699  * Sets whether events injected in the nested-guest are subject to VM-exit checks.
    2700  *
    2701  * @param   pCtx        Current CPU context.
    2702  * @param   fInterceptEvents  Whether to subject injected events to VM-exits or not.
    2703  */
    2704 DECLINLINE(void) CPUMSetGuestVmxInterceptEvents(PCPUMCTX pCtx, bool fInterceptEvents)
    2705 {
    2706     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2707     pCtx->hwvirt.vmx.fInterceptEvents = fInterceptEvents;
    2708 }
    2709 
    2710 /**
    2711  * Checks whether the given exception causes a VM-exit.
    2712  *
    2713  * The exception types include hardware exceptions, software exceptions (#BP, #OF)
    2714  * and privileged software exceptions (#DB generated by INT1/ICEBP).
    2715  *
    2716  * Software interrupts do -not- cause VM-exits and hence must not be used with this
    2717  * function.
    2718  *
    2719  * @returns @c true if the exception causes a VM-exit, @c false otherwise.
    2720  * @param   pCtx        Current CPU context.
    2721  * @param   uVector     The exception vector.
    2722  * @param   uErrCode    The error code associated with the exception. Pass 0 if not
    2723  *                      applicable.
    2724  */
    2725 DECLINLINE(bool) CPUMIsGuestVmxXcptInterceptSet(PCCPUMCTX pCtx, uint8_t uVector, uint32_t uErrCode)
    2726 {
    2727     Assert(uVector <= X86_XCPT_LAST);
    2728 
    2729     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2730 
    2731     /* NMIs have a dedicated VM-execution control for causing VM-exits. */
    2732     if (uVector == X86_XCPT_NMI)
    2733         return RT_BOOL(pCtx->hwvirt.vmx.Vmcs.u32PinCtls & VMX_PIN_CTLS_NMI_EXIT);
    2734 
    2735     /* Page-faults are subject to masking using their error code. */
    2736     uint32_t fXcptBitmap = pCtx->hwvirt.vmx.Vmcs.u32XcptBitmap;
    2737     if (uVector == X86_XCPT_PF)
    2738     {
    2739         uint32_t const fXcptPFMask  = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMask;
    2740         uint32_t const fXcptPFMatch = pCtx->hwvirt.vmx.Vmcs.u32XcptPFMatch;
    2741         if ((uErrCode & fXcptPFMask) != fXcptPFMatch)
    2742             fXcptBitmap ^= RT_BIT(X86_XCPT_PF);
    2743     }
    2744 
    2745     /* Consult the exception bitmap for all other exceptions. */
    2746     if (fXcptBitmap & RT_BIT(uVector))
    2747         return true;
    2748     return false;
    2749 }
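The #PF mask/match handling is easiest to see with concrete values (illustrative assignments only; X86_TRAP_PF_P and X86_TRAP_PF_RW are the error-code bits from iprt/x86.h, and VMX non-root mode is assumed). If L1 sets the PF bitmap bit with mask = match = P, only present-page faults exit; for every other error code the XOR flips the PF bit off:

    pCtx->hwvirt.vmx.Vmcs.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
    pCtx->hwvirt.vmx.Vmcs.u32XcptPFMask  = X86_TRAP_PF_P;   /* examine P only  */
    pCtx->hwvirt.vmx.Vmcs.u32XcptPFMatch = X86_TRAP_PF_P;   /* ... require P=1 */

    /* Write to a not-present page: the error code has P clear, so
       (uErrCode & mask) != match and the PF bitmap bit is flipped off. */
    bool const fExit = CPUMIsGuestVmxXcptInterceptSet(pCtx, X86_XCPT_PF,
                                                      X86_TRAP_PF_RW); /* false */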
    2750 
    2751 
    2752 /**
    2753  * Checks whether the guest is in VMX non-root mode and using EPT paging.
    2754  *
    2755  * @returns @c true if in VMX non-root operation with EPT, @c false otherwise.
    2756  * @param   pCtx   Current CPU context.
    2757  */
    2758 DECLINLINE(bool) CPUMIsGuestVmxEptPagingEnabledEx(PCCPUMCTX pCtx)
    2759 {
    2760     return    CPUMIsGuestInVmxNonRootMode(pCtx)
    2761            && CPUMIsGuestVmxProcCtls2Set(pCtx, VMX_PROC_CTLS2_EPT);
    2762 }
    2763 
    2764 
    2765 /**
    2766  * Implements VMSucceed for VMX instruction success.
    2767  *
    2768  * @param   pCtx    Current CPU context.
    2769  */
    2770 DECLINLINE(void) CPUMSetGuestVmxVmSucceed(PCPUMCTX pCtx)
    2771 {
    2772     pCtx->eflags.uBoth &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
    2773 }
    2774 
    2775 /**
    2776  * Implements VMFailInvalid for VMX instruction failure.
    2777  *
    2778  * @param   pCtx    Current CPU context.
    2779  */
    2780 DECLINLINE(void) CPUMSetGuestVmxVmFailInvalid(PCPUMCTX pCtx)
    2781 {
    2782     pCtx->eflags.uBoth &= ~(X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
    2783     pCtx->eflags.uBoth |= X86_EFL_CF;
    2784 }
    2785 
    2786 /**
    2787  * Implements VMFailValid for VMX instruction failure.
    2788  *
    2789  * @param   pCtx        Current CPU context.
    2790  * @param   enmInsErr   The VM instruction error.
    2791  */
    2792 DECLINLINE(void) CPUMSetGuestVmxVmFailValid(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
    2793 {
    2794     pCtx->eflags.uBoth &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
    2795     pCtx->eflags.uBoth |= X86_EFL_ZF;
    2796     pCtx->hwvirt.vmx.Vmcs.u32RoVmInstrError = enmInsErr;
    2797 }
    2798 
    2799 /**
    2800  * Implements VMFail for VMX instruction failure.
    2801  *
    2802  * @param   pCtx        Current CPU context.
    2803  * @param   enmInsErr   The VM instruction error.
    2804  */
    2805 DECLINLINE(void) CPUMSetGuestVmxVmFail(PCPUMCTX pCtx, VMXINSTRERR enmInsErr)
    2806 {
    2807     if (pCtx->hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
    2808         CPUMSetGuestVmxVmFailValid(pCtx, enmInsErr);
    2809     else
    2810         CPUMSetGuestVmxVmFailInvalid(pCtx);
    2811 }
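An illustrative emulation tail for a VMX instruction, showing how these helpers report status through RFLAGS (fCheckOk and enmInsErr stand in for real validation results):

    if (!fCheckOk)                                  /* a consistency check failed  */
        CPUMSetGuestVmxVmFail(pCtx, enmInsErr);     /* ZF or CF set per VMCS state */
    else
    {
        /* ... perform the operation ... */
        CPUMSetGuestVmxVmSucceed(pCtx);             /* all arithmetic flags cleared */
    }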
    2812 
    2813 /**
    2814  * Returns the guest-physical address of the APIC-access page when executing a
    2815  * nested-guest.
    2816  *
    2817  * @returns The APIC-access page guest-physical address.
    2818  * @param   pCtx    Current CPU context.
    2819  */
    2820 DECLINLINE(uint64_t) CPUMGetGuestVmxApicAccessPageAddrEx(PCCPUMCTX pCtx)
    2821 {
    2822     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2823     return pCtx->hwvirt.vmx.Vmcs.u64AddrApicAccess.u;
    2824 }
    2825 
    2826 /**
    2827  * Gets the nested-guest CR0 subject to the guest/host mask and the read-shadow.
    2828  *
    2829  * @returns The nested-guest CR0.
    2830  * @param   pCtx            Current CPU context.
    2831  * @param   fGstHostMask    The CR0 guest/host mask to use.
    2832  */
    2833 DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr0(PCCPUMCTX pCtx, uint64_t fGstHostMask)
    2834 {
    2835     /*
    2836      * For each CR0 bit owned by the host, the corresponding bit from the
    2837      * CR0 read shadow is loaded. For each CR0 bit that is not owned by the host,
    2838      * the corresponding bit from the guest CR0 is loaded.
    2839      *
    2840      * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
    2841      */
    2842     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2843     uint64_t const uGstCr0      = pCtx->cr0;
    2844     uint64_t const fReadShadow  = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
    2845     return (fReadShadow & fGstHostMask) | (uGstCr0 & ~fGstHostMask);
    2846 }
    2847 
    2848 /**
    2849  * Gets the nested-guest CR4 subject to the guest/host mask and the read-shadow.
    2850  *
    2851  * @returns The nested-guest CR4.
    2852  * @param   pCtx            Current CPU context.
    2853  * @param   fGstHostMask    The CR4 guest/host mask to use.
    2854  */
    2855 DECLINLINE(uint64_t) CPUMGetGuestVmxMaskedCr4(PCCPUMCTX pCtx, uint64_t fGstHostMask)
    2856 {
    2857     /*
    2858      * For each CR4 bit owned by the host, the corresponding bit from the
    2859      * CR4 read shadow is loaded. For each CR4 bit that is not owned by the host,
    2860      * the corresponding bit from the guest CR4 is loaded.
    2861      *
    2862      * See Intel Spec. 25.3 "Changes To Instruction Behavior In VMX Non-root Operation".
    2863      */
    2864     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2865     uint64_t const uGstCr4      = pCtx->cr4;
    2866     uint64_t const fReadShadow  = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u;
    2867     return (fReadShadow & fGstHostMask) | (uGstCr4 & ~fGstHostMask);
    2868 }
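A worked example of the mask/shadow combine (illustrative state): if the host owns only CR0.TS and the read-shadow has TS clear, the nested-guest observes TS=0 even when the real guest CR0 has TS=1.

    /* Assumed: pCtx->cr0 has X86_CR0_TS set, Vmcs.u64Cr0ReadShadow has it clear. */
    uint64_t const fGstHostMask = X86_CR0_TS;
    uint64_t const uMaskedCr0   = CPUMGetGuestVmxMaskedCr0(pCtx, fGstHostMask);
    /* TS comes from the shadow (0); every other bit is the guest's own CR0. */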
    2869 
    2870 /**
    2871  * Checks whether the LMSW access causes a VM-exit or not.
    2872  *
    2873  * @returns @c true if the LMSW access causes a VM-exit, @c false otherwise.
    2874  * @param   pCtx        Current CPU context.
    2875  * @param   uNewMsw     The LMSW source operand (the Machine Status Word).
    2876  */
    2877 DECLINLINE(bool) CPUMIsGuestVmxLmswInterceptSet(PCCPUMCTX pCtx, uint16_t uNewMsw)
    2878 {
    2879     /*
    2880      * LMSW VM-exits are subject to the CR0 guest/host mask and the CR0 read shadow.
    2881      *
    2882      * See Intel spec. 24.6.6 "Guest/Host Masks and Read Shadows for CR0 and CR4".
    2883      * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
    2884      */
    2885     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2886 
    2887     uint32_t const fGstHostMask = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u;
    2888     uint32_t const fReadShadow  = (uint32_t)pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
    2889 
    2890     /*
    2891      * LMSW can never clear CR0.PE but it may set it. Hence, we handle the
    2892      * CR0.PE case first, before the rest of the bits in the MSW.
    2893      *
    2894      * If CR0.PE is owned by the host and CR0.PE differs between the
    2895      * MSW (source operand) and the read-shadow, we must cause a VM-exit.
    2896      */
    2897     if (    (fGstHostMask & X86_CR0_PE)
    2898         &&  (uNewMsw      & X86_CR0_PE)
    2899         && !(fReadShadow  & X86_CR0_PE))
    2900         return true;
    2901 
    2902     /*
    2903      * If CR0.MP, CR0.EM or CR0.TS is owned by the host, and the corresponding
    2904      * bits differ between the MSW (source operand) and the read-shadow, we must
    2905      * cause a VM-exit.
    2906      */
    2907     uint32_t const fGstHostLmswMask = fGstHostMask & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    2908     if ((fReadShadow & fGstHostLmswMask) != (uNewMsw & fGstHostLmswMask))
    2909         return true;
    2910 
    2911     return false;
    2912 }
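A concrete instance of the rule above (illustrative VMCS state): with CR0.TS host-owned and the read-shadow showing TS=0, an LMSW source operand that sets TS differs from the shadow in a host-owned bit and must exit.

    /* Assumed: Vmcs.u64Cr0Mask = X86_CR0_TS, read-shadow TS = 0. */
    bool const fExit = CPUMIsGuestVmxLmswInterceptSet(pCtx, /* uNewMsw= */ X86_CR0_TS);
    /* fExit == true: a host-owned MSW bit differs between operand and shadow. */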
    2913 
    2914 /**
    2915  * Checks whether the Mov-to-CR0/CR4 access causes a VM-exit or not.
    2916  *
    2917  * @returns @c true if the Mov CRX access causes a VM-exit, @c false otherwise.
    2918  * @param   pCtx        Current CPU context.
    2919  * @param   iCrReg      The control register number (must be 0 or 4).
    2920  * @param   uNewCrX     The CR0/CR4 value being written.
    2921  */
    2922 DECLINLINE(bool) CPUMIsGuestVmxMovToCr0Cr4InterceptSet(PCCPUMCTX pCtx, uint8_t iCrReg, uint64_t uNewCrX)
    2923 {
    2924     /*
    2925      * For any CR0/CR4 bit owned by the host (in the CR0/CR4 guest/host mask), if the
    2926      * corresponding bits differ between the source operand and the read-shadow,
    2927      * we must cause a VM-exit.
    2928      *
    2929      * See Intel spec. 25.1.3 "Instructions That Cause VM Exits Conditionally".
    2930      */
    2931     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2932     Assert(iCrReg == 0 || iCrReg == 4);
    2933 
    2934     uint64_t fGstHostMask;
    2935     uint64_t fReadShadow;
    2936     if (iCrReg == 0)
    2937     {
    2938         fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr0Mask.u;
    2939         fReadShadow  = pCtx->hwvirt.vmx.Vmcs.u64Cr0ReadShadow.u;
    2940     }
    2941     else
    2942     {
    2943         fGstHostMask = pCtx->hwvirt.vmx.Vmcs.u64Cr4Mask.u;
    2944         fReadShadow  = pCtx->hwvirt.vmx.Vmcs.u64Cr4ReadShadow.u;
    2945     }
    2946 
    2947     if ((fReadShadow & fGstHostMask) != (uNewCrX & fGstHostMask))
    2948     {
    2949         Assert(fGstHostMask != 0);
    2950         return true;
    2951     }
    2952 
    2953     return false;
    2954 }
    2955 
    2956 /**
    2957  * Returns whether the guest has an active, current VMCS.
    2958  *
    2959  * @returns @c true if the guest has an active, current VMCS, @c false otherwise.
    2960  * @param   pCtx    Current CPU context.
    2961  */
    2962 DECLINLINE(bool) CPUMIsGuestVmxCurrentVmcsValid(PCCPUMCTX pCtx)
    2963 {
    2964     return pCtx->hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS;
    2965 }
    2966 
    2967 # endif /* !IN_RC */
    2968 
    2969 /**
    2970  * Checks whether the VMX nested-guest is in a state to receive physical (APIC)
    2971  * interrupts.
    2972  *
    2973  * @returns @c true if it's ready, @c false otherwise.
    2974  * @param   pCtx    The guest-CPU context.
    2975  */
    2976 DECLINLINE(bool) CPUMIsGuestVmxPhysIntrEnabled(PCCPUMCTX pCtx)
    2977 {
    2978 #ifdef IN_RC
    2979     RT_NOREF(pCtx); AssertReleaseFailedReturn(false);
    2980 #else
    2981     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    2982     if (CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
    2983         return true;
    2984     return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
    2985 #endif
    2986 }
    2987 
    2988 /**
    2989  * Checks whether the VMX nested-guest is blocking virtual-NMIs.
    2990  *
    2991  * @returns @c true if it's blocked, @c false otherwise.
    2992  * @param   pCtx    The guest-CPU context.
    2993  */
    2994 DECLINLINE(bool) CPUMIsGuestVmxVirtNmiBlocking(PCCPUMCTX pCtx)
    2995 {
    2996 #ifdef IN_RC
    2997     RT_NOREF(pCtx);
    2998     AssertReleaseFailedReturn(false);
    2999 #else
    3000     /*
    3001      * Return the state of virtual-NMI blocking, if we are executing a
    3002      * VMX nested-guest with virtual-NMIs enabled.
    3003      */
    3004     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    3005     Assert(CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI));
    3006     return pCtx->hwvirt.vmx.fVirtNmiBlocking;
    3007 #endif
    3008 }
    3009 
    3010 /**
    3011  * Sets or clears VMX nested-guest virtual-NMI blocking.
    3012  *
    3013  * @param   pCtx        The guest-CPU context.
    3014  * @param   fBlocking   Whether virtual-NMI blocking is in effect or not.
    3015  */
    3016 DECLINLINE(void) CPUMSetGuestVmxVirtNmiBlocking(PCPUMCTX pCtx, bool fBlocking)
    3017 {
    3018 #ifdef IN_RC
    3019     RT_NOREF2(pCtx, fBlocking);
    3020     AssertReleaseFailedReturnVoid();
    3021 #else
    3022     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    3023     Assert(CPUMIsGuestVmxPinCtlsSet(pCtx, VMX_PIN_CTLS_VIRT_NMI));
    3024     pCtx->hwvirt.vmx.fVirtNmiBlocking = fBlocking;
    3025 #endif
    3026 }
    3027 
    3028 /**
    3029  * Checks whether the VMX nested-guest is in a state to receive virtual interrupts
    3030  * (those injected with the "virtual-interrupt delivery" feature).
    3031  *
    3032  * @returns @c true if it's ready, @c false otherwise.
    3033  * @param   pCtx    The guest-CPU context.
    3034  */
    3035 DECLINLINE(bool) CPUMIsGuestVmxVirtIntrEnabled(PCCPUMCTX pCtx)
    3036 {
    3037 #ifdef IN_RC
    3038     RT_NOREF(pCtx);
    3039     AssertReleaseFailedReturn(false);
    3040 #else
    3041     Assert(CPUMIsGuestInVmxNonRootMode(pCtx));
    3042     return RT_BOOL(pCtx->eflags.u & X86_EFL_IF);
    3043 #endif
    3044 }
    3045 
    3046 /** @} */
    3047 #endif /* !IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS || DOXYGEN_RUNNING */
    3048 
    3049 
    3050 
    3051 /** @name Hypervisor Register Getters.
    3052  * @{ */
    3053 VMMDECL(RTGCUINTREG)    CPUMGetHyperDR0(PVMCPU pVCpu);
    3054 VMMDECL(RTGCUINTREG)    CPUMGetHyperDR1(PVMCPU pVCpu);
    3055 VMMDECL(RTGCUINTREG)    CPUMGetHyperDR2(PVMCPU pVCpu);
    3056 VMMDECL(RTGCUINTREG)    CPUMGetHyperDR3(PVMCPU pVCpu);
    3057 VMMDECL(RTGCUINTREG)    CPUMGetHyperDR6(PVMCPU pVCpu);
    3058 VMMDECL(RTGCUINTREG)    CPUMGetHyperDR7(PVMCPU pVCpu);
    3059 VMMDECL(uint32_t)       CPUMGetHyperCR3(PVMCPU pVCpu);
    3060 /** @} */
    3061 
    3062 /** @name Hypervisor Register Setters.
    3063  * @{ */
    3064 VMMDECL(void)           CPUMSetHyperCR3(PVMCPU pVCpu, uint32_t cr3);
    3065 VMMDECL(void)           CPUMSetHyperDR0(PVMCPU pVCpu, RTGCUINTREG uDr0);
    3066 VMMDECL(void)           CPUMSetHyperDR1(PVMCPU pVCpu, RTGCUINTREG uDr1);
    3067 VMMDECL(void)           CPUMSetHyperDR2(PVMCPU pVCpu, RTGCUINTREG uDr2);
    3068 VMMDECL(void)           CPUMSetHyperDR3(PVMCPU pVCpu, RTGCUINTREG uDr3);
    3069 VMMDECL(void)           CPUMSetHyperDR6(PVMCPU pVCpu, RTGCUINTREG uDr6);
    3070 VMMDECL(void)           CPUMSetHyperDR7(PVMCPU pVCpu, RTGCUINTREG uDr7);
    3071 VMMDECL(int)            CPUMRecalcHyperDRx(PVMCPUCC pVCpu, uint8_t iGstReg);
    3072 /** @} */
    3073 
    3074 VMMDECL(PCPUMCTX)       CPUMQueryGuestCtxPtr(PVMCPU pVCpu);
    3075 #ifdef VBOX_INCLUDED_vmm_cpumctx_h
    3076 VMM_INT_DECL(PCPUMCTXMSRS) CPUMQueryGuestCtxMsrsPtr(PVMCPU pVCpu);
    3077 #endif
    3078 
    3079 /** @name Changed flags.
    3080  * These flags are used to keep track of which important registers have
    3081  * been changed since they were last reset. The only component allowed
    3082  * to clear them is REM!
    3083  *
    3084  * @todo This is obsolete, but remains as it will be refactored for coordinating
    3085  *       IEM and NEM/HM later. Probably.
    3086  * @{
    3087  */
    3088 #define CPUM_CHANGED_FPU_REM                    RT_BIT(0)
    3089 #define CPUM_CHANGED_CR0                        RT_BIT(1)
    3090 #define CPUM_CHANGED_CR4                        RT_BIT(2)
    3091 #define CPUM_CHANGED_GLOBAL_TLB_FLUSH           RT_BIT(3)
    3092 #define CPUM_CHANGED_CR3                        RT_BIT(4)
    3093 #define CPUM_CHANGED_GDTR                       RT_BIT(5)
    3094 #define CPUM_CHANGED_IDTR                       RT_BIT(6)
    3095 #define CPUM_CHANGED_LDTR                       RT_BIT(7)
    3096 #define CPUM_CHANGED_TR                         RT_BIT(8)  /**< Currently unused. */
    3097 #define CPUM_CHANGED_SYSENTER_MSR               RT_BIT(9)
    3098 #define CPUM_CHANGED_HIDDEN_SEL_REGS            RT_BIT(10) /**< Currently unused. */
    3099 #define CPUM_CHANGED_CPUID                      RT_BIT(11)
    3100 #define CPUM_CHANGED_ALL                        (  CPUM_CHANGED_FPU_REM \
    3101                                                  | CPUM_CHANGED_CR0 \
    3102                                                  | CPUM_CHANGED_CR4 \
    3103                                                  | CPUM_CHANGED_GLOBAL_TLB_FLUSH \
    3104                                                  | CPUM_CHANGED_CR3 \
    3105                                                  | CPUM_CHANGED_GDTR \
    3106                                                  | CPUM_CHANGED_IDTR \
    3107                                                  | CPUM_CHANGED_LDTR \
    3108                                                  | CPUM_CHANGED_TR \
    3109                                                  | CPUM_CHANGED_SYSENTER_MSR \
    3110                                                  | CPUM_CHANGED_HIDDEN_SEL_REGS \
    3111                                                  | CPUM_CHANGED_CPUID )
    3112 /** @} */
    3113 
    3114 VMMDECL(void)           CPUMSetChangedFlags(PVMCPU pVCpu, uint32_t fChangedAdd);
    3115 VMMDECL(bool)           CPUMSupportsXSave(PVM pVM);
    3116 VMMDECL(bool)           CPUMIsHostUsingSysEnter(PVM pVM);
    3117 VMMDECL(bool)           CPUMIsHostUsingSysCall(PVM pVM);
    3118 VMMDECL(bool)           CPUMIsGuestFPUStateActive(PVMCPU pVCpu);
    3119 VMMDECL(bool)           CPUMIsGuestFPUStateLoaded(PVMCPU pVCpu);
    3120 VMMDECL(bool)           CPUMIsHostFPUStateSaved(PVMCPU pVCpu);
    3121 VMMDECL(bool)           CPUMIsGuestDebugStateActive(PVMCPU pVCpu);
    3122 VMMDECL(void)           CPUMDeactivateGuestDebugState(PVMCPU pVCpu);
    3123 VMMDECL(bool)           CPUMIsHyperDebugStateActive(PVMCPU pVCpu);
    3124 VMMDECL(uint32_t)       CPUMGetGuestCPL(PVMCPU pVCpu);
    3125 VMMDECL(CPUMMODE)       CPUMGetGuestMode(PVMCPU pVCpu);
    3126 VMMDECL(uint32_t)       CPUMGetGuestCodeBits(PVMCPU pVCpu);
    3127 VMMDECL(DISCPUMODE)     CPUMGetGuestDisMode(PVMCPU pVCpu);
    3128 VMMDECL(uint32_t)       CPUMGetGuestMxCsrMask(PVM pVM);
    3129 VMMDECL(uint64_t)       CPUMGetGuestScalableBusFrequency(PVM pVM);
    3130 VMMDECL(uint64_t)       CPUMGetGuestEferMsrValidMask(PVM pVM);
    3131 VMMDECL(int)            CPUMIsGuestEferMsrWriteValid(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer,
    3132                                                      uint64_t *puValidEfer);
    3133 VMMDECL(void)           CPUMSetGuestEferMsrNoChecks(PVMCPUCC pVCpu, uint64_t uOldEfer, uint64_t uValidEfer);
    3134 VMMDECL(bool)           CPUMIsPatMsrValid(uint64_t uValue);
    3135 
    3136 
    3137 /** Guest CPU interruptibility level, see CPUMGetGuestInterruptibility(). */
    3138 typedef enum CPUMINTERRUPTIBILITY
    3139 {
    3140     CPUMINTERRUPTIBILITY_INVALID = 0,
    3141     CPUMINTERRUPTIBILITY_UNRESTRAINED,
    3142     CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED,
    3143     CPUMINTERRUPTIBILITY_INT_DISABLED,
    3144     CPUMINTERRUPTIBILITY_INT_INHIBITED, /**< @todo rename as it inhibits NMIs too. */
    3145     CPUMINTERRUPTIBILITY_NMI_INHIBIT,
    3146     CPUMINTERRUPTIBILITY_GLOBAL_INHIBIT,
    3147     CPUMINTERRUPTIBILITY_END,
    3148     CPUMINTERRUPTIBILITY_32BIT_HACK = 0x7fffffff
    3149 } CPUMINTERRUPTIBILITY;
    3150 
    3151 VMM_INT_DECL(CPUMINTERRUPTIBILITY) CPUMGetGuestInterruptibility(PVMCPU pVCpu);
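A sketch of gating external-interrupt injection on the summarized state (example policy only; real callers also distinguish the inhibited/disabled cases):

    CPUMINTERRUPTIBILITY const enmItr = CPUMGetGuestInterruptibility(pVCpu);
    bool const fCanInjectExtInt = enmItr == CPUMINTERRUPTIBILITY_UNRESTRAINED;
    /* The *_DISABLED / *_INHIBITED states require waiting or an intercept. */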
    3152 
    3153 /** @name Typical scalable bus frequency values.
    3154  * @{ */
    3155 /** Special internal value indicating that we don't know the frequency.
    3156  *  @internal  */
    3157 #define CPUM_SBUSFREQ_UNKNOWN       UINT64_C(1)
    3158 #define CPUM_SBUSFREQ_100MHZ        UINT64_C(100000000)
    3159 #define CPUM_SBUSFREQ_133MHZ        UINT64_C(133333333)
    3160 #define CPUM_SBUSFREQ_167MHZ        UINT64_C(166666666)
    3161 #define CPUM_SBUSFREQ_200MHZ        UINT64_C(200000000)
    3162 #define CPUM_SBUSFREQ_267MHZ        UINT64_C(266666666)
    3163 #define CPUM_SBUSFREQ_333MHZ        UINT64_C(333333333)
    3164 #define CPUM_SBUSFREQ_400MHZ        UINT64_C(400000000)
    3165 /** @} */
    3166 
  • trunk/include/VBox/vmm/cpum.h

    r98959 r98970  
     77VMMDECL(CPUMCPUVENDOR)  CPUMGetHostCpuVendor(PVM pVM);
     78VMMDECL(CPUMMICROARCH)  CPUMGetHostMicroarch(PCVM pVM);
    316779
    316880#ifdef IN_RING3
     
    317991VMMR3DECL(void)         CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
    318092VMMDECL(bool)           CPUMR3IsStateRestorePending(PVM pVM);
    3181 VMMR3DECL(int)          CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd);
    3182 
    3183 VMMR3DECL(int)              CPUMR3CpuIdInsert(PVM pVM, PCPUMCPUIDLEAF pNewLeaf);
    3184 VMMR3DECL(int)              CPUMR3CpuIdGetLeaf(PVM pVM, PCPUMCPUIDLEAF pLeaf, uint32_t uLeaf, uint32_t uSubLeaf);
    3185 VMMR3_INT_DECL(PCCPUMCPUIDLEAF) CPUMR3CpuIdGetPtr(PVM pVM, uint32_t *pcLeaves);
    3186 VMMDECL(CPUMMICROARCH)      CPUMCpuIdDetermineX86MicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
    3187                                                              uint8_t bModel, uint8_t bStepping);
    318893VMMDECL(const char *)       CPUMMicroarchName(CPUMMICROARCH enmMicroarch);
    3189 VMMR3DECL(int)              CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
    3190 VMMR3DECL(const char *)     CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
    319194VMMR3DECL(const char *)     CPUMCpuVendorName(CPUMCPUVENDOR enmVendor);
    3192 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    3193 VMMR3DECL(uint32_t)         CPUMR3DeterminHostMxCsrMask(void);
    3194 #endif
    3195 
    3196 VMMR3DECL(int)              CPUMR3MsrRangesInsert(PVM pVM, PCCPUMMSRRANGE pNewRange);
    319795
    319896VMMR3DECL(uint32_t)         CPUMR3DbGetEntries(void);
     
    3211109#endif /* IN_RING3 */
    3212110
    3213 #ifdef IN_RING0
    3214 /** @defgroup grp_cpum_r0    The CPUM ring-0 API
    3215  * @{
    3216  */
    3217 VMMR0_INT_DECL(int)     CPUMR0ModuleInit(void);
    3218 VMMR0_INT_DECL(int)     CPUMR0ModuleTerm(void);
    3219 VMMR0_INT_DECL(void)    CPUMR0InitPerVMData(PGVM pGVM);
    3220 VMMR0_INT_DECL(int)     CPUMR0InitVM(PVMCC pVM);
    3221 DECLASM(void)           CPUMR0RegisterVCpuThread(PVMCPUCC pVCpu);
    3222 DECLASM(void)           CPUMR0TouchHostFpu(void);
    3223 VMMR0_INT_DECL(int)     CPUMR0Trap07Handler(PVMCC pVM, PVMCPUCC pVCpu);
    3224 VMMR0_INT_DECL(int)     CPUMR0LoadGuestFPU(PVMCC pVM, PVMCPUCC pVCpu);
    3225 VMMR0_INT_DECL(bool)    CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu);
    3226 VMMR0_INT_DECL(int)     CPUMR0SaveHostDebugState(PVMCC pVM, PVMCPUCC pVCpu);
    3227 VMMR0_INT_DECL(bool)    CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(PVMCPUCC pVCpu, bool fDr6);
    3228 VMMR0_INT_DECL(bool)    CPUMR0DebugStateMaybeSaveGuest(PVMCPUCC pVCpu, bool fDr6);
    3229 
    3230 VMMR0_INT_DECL(void)    CPUMR0LoadGuestDebugState(PVMCPUCC pVCpu, bool fDr6);
    3231 VMMR0_INT_DECL(void)    CPUMR0LoadHyperDebugState(PVMCPUCC pVCpu, bool fDr6);
    3232 /** @} */
    3233 #endif /* IN_RING0 */
    3234 
    3235 /** @defgroup grp_cpum_rz    The CPUM raw-mode and ring-0 context API
    3236  * @{
    3237  */
    3238 VMMRZ_INT_DECL(void)    CPUMRZFpuStatePrepareHostCpuForUse(PVMCPUCC pVCpu);
    3239 VMMRZ_INT_DECL(void)    CPUMRZFpuStateActualizeForRead(PVMCPUCC pVCpu);
    3240 VMMRZ_INT_DECL(void)    CPUMRZFpuStateActualizeForChange(PVMCPUCC pVCpu);
    3241 VMMRZ_INT_DECL(void)    CPUMRZFpuStateActualizeSseForRead(PVMCPUCC pVCpu);
    3242 VMMRZ_INT_DECL(void)    CPUMRZFpuStateActualizeAvxForRead(PVMCPUCC pVCpu);
    3243 /** @} */
    3244 
    3245 
    3246111#endif /* !VBOX_FOR_DTRACE_LIB */
    3247112/** @} */
  • trunk/include/VBox/vmm/cpumctx-armv8.h

    r98955 r98970  
    4141
    4242#ifndef VBOX_FOR_DTRACE_LIB
     43# include <iprt/assertcompile.h>
    4344# include <VBox/types.h>
    4445#else
     
    144145    /** Floating point status register. */
    145146    uint64_t        fpsr;
     147    /** The internal PSTATE value (accessible in CPSR with AARCH32 and through
     148     * NZCV and DAIF special purpose registers. */
     149    uint32_t        fPState;
     150
     151    uint32_t        fPadding0;
    146152
    147153    /** Externalized state tracker, CPUMCTX_EXTRN_XXX. */
    148154    uint64_t        fExtrn;
    149155
     156    uint64_t        au64Padding1[6];
    150157} CPUMCTX;
    151158
  • trunk/include/VBox/vmm/vm.h

    r98122 r98970  
    306306    union VMCPUUNIONCPUM
    307307    {
    308 #ifdef VMM_INCLUDED_SRC_include_CPUMInternal_h
     308#if defined(VMM_INCLUDED_SRC_include_CPUMInternal_h) || defined(VMM_INCLUDED_SRC_include_CPUMInternal_armv8_h)
    309309        struct CPUMCPU      s;
    310310#endif
     
    12621262    union
    12631263    {
    1264 #ifdef VMM_INCLUDED_SRC_include_CPUMInternal_h
     1264#if defined(VMM_INCLUDED_SRC_include_CPUMInternal_h) || defined(VMM_INCLUDED_SRC_include_CPUMInternal_armv8_h)
    12651265        struct CPUM s;
    12661266#endif
  • trunk/src/VBox/VMM/Makefile.kmk

    r98960 r98970  
    347347        VBoxVMM.d \
    348348        VMMR3/VMMR3VTable.cpp \
    349         VMMR3/APIC.cpp \
    350349        VMMR3/CFGM.cpp \
    351         VMMR3/CPUM.cpp \
    352         VMMR3/CPUMR3CpuId.cpp \
    353         VMMR3/CPUMR3Db.cpp \
    354         VMMR3/CPUMDbg.cpp \
     350        VMMR3/CPUM-armv8.cpp \
    355351        VMMR3/DBGF.cpp \
    356352        VMMR3/DBGFAddr.cpp \
     
    358354        VMMR3/DBGFR3Bp.cpp \
    359355        VMMR3/DBGFR3BugCheck.cpp \
    360         VMMR3/DBGFCoreWrite.cpp \
    361356        VMMR3/DBGFCpu.cpp \
    362357        VMMR3/DBGFDisas.cpp \
  • trunk/src/VBox/VMM/VMMR3/DBGFR3Bp.cpp

    r98103 r98970  
    162162#define LOG_GROUP LOG_GROUP_DBGF
    163163#define VMCPU_INCL_CPUM_GST_CTX
     164#include <VBox/vmm/cpum.h>
    164165#include <VBox/vmm/dbgf.h>
    165166#include <VBox/vmm/selm.h>
     
    17751776    RT_NOREF(pvUser);
    17761777
     1778#if defined(VBOX_VMM_TARGET_ARMV8)
     1779    RT_NOREF(pVM, pVCpu);
     1780    AssertReleaseFailed();
     1781    return VERR_NOT_IMPLEMENTED;
     1782#else
    17771783    /*
    17781784     * CPU 0 updates the enabled hardware breakpoint counts.
     
    17941800
    17951801    return CPUMRecalcHyperDRx(pVCpu, UINT8_MAX);
     1802#endif
    17961803}
    17971804
     
    19801987            {
    19811988                uint8_t abInstr[DBGF_BP_INSN_MAX];
    1982                 RTGCPTR const GCPtrInstr = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
     1989                RTGCPTR const GCPtrInstr = CPUMGetGuestFlatPC(pVCpu);
    19831990                int rc = PGMPhysSimpleReadGCPtr(pVCpu, &abInstr[0], GCPtrInstr, sizeof(abInstr));
    19841991                AssertRC(rc);