VirtualBox

Changeset 107650 in vbox


Ignore:
Timestamp:
Jan 10, 2025 1:42:28 PM (8 days ago)
Author:
vboxsync
Message:

VMM/CPUM,++: Made the HostFeatures match the host when targeting x86 guests on arm64 hosts. Merged and deduplicated code targeting x86 & amd64. jiraref:VBP-1470

Location:
trunk
Files:
25 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/VBox/vmm/cpum-armv8.h

    r107389 r107650  
    197197
    198198
    199 /**
    200  * CPU ID registers.
    201  */
    202 typedef struct CPUMIDREGS
    203 {
    204     /** Content of the ID_AA64PFR0_EL1 register. */
    205     uint64_t        u64RegIdAa64Pfr0El1;
    206     /** Content of the ID_AA64PFR1_EL1 register. */
    207     uint64_t        u64RegIdAa64Pfr1El1;
    208     /** Content of the ID_AA64DFR0_EL1 register. */
    209     uint64_t        u64RegIdAa64Dfr0El1;
    210     /** Content of the ID_AA64DFR1_EL1 register. */
    211     uint64_t        u64RegIdAa64Dfr1El1;
    212     /** Content of the ID_AA64AFR0_EL1 register. */
    213     uint64_t        u64RegIdAa64Afr0El1;
    214     /** Content of the ID_AA64AFR1_EL1 register. */
    215     uint64_t        u64RegIdAa64Afr1El1;
    216     /** Content of the ID_AA64ISAR0_EL1 register. */
    217     uint64_t        u64RegIdAa64Isar0El1;
    218     /** Content of the ID_AA64ISAR1_EL1 register. */
    219     uint64_t        u64RegIdAa64Isar1El1;
    220     /** Content of the ID_AA64ISAR2_EL1 register. */
    221     uint64_t        u64RegIdAa64Isar2El1;
    222     /** Content of the ID_AA64MMFR0_EL1 register. */
    223     uint64_t        u64RegIdAa64Mmfr0El1;
    224     /** Content of the ID_AA64MMFR1_EL1 register. */
    225     uint64_t        u64RegIdAa64Mmfr1El1;
    226     /** Content of the ID_AA64MMFR2_EL1 register. */
    227     uint64_t        u64RegIdAa64Mmfr2El1;
    228     /** Content of the CLIDR_EL1 register. */
    229     uint64_t        u64RegClidrEl1;
    230     /** Content of the CTR_EL0 register. */
    231     uint64_t        u64RegCtrEl0;
    232     /** Content of the DCZID_EL0 register. */
    233     uint64_t        u64RegDczidEl0;
    234 } CPUMIDREGS;
    235 /** Pointer to CPU ID registers. */
    236 typedef CPUMIDREGS *PCPUMIDREGS;
    237 /** Pointer to a const CPU ID registers structure. */
    238 typedef CPUMIDREGS const *PCCPUMIDREGS;
    239 
    240199
    241200/** @name Changed flags.
     
    280239
    281240VMMR3DECL(int)          CPUMR3SysRegRangesInsert(PVM pVM, PCCPUMSYSREGRANGE pNewRange);
    282 VMMR3DECL(int)          CPUMR3PopulateFeaturesByIdRegisters(PVM pVM, PCCPUMIDREGS pIdRegs);
    283 
    284 VMMR3_INT_DECL(int)     CPUMR3QueryGuestIdRegs(PVM pVM, PCCPUMIDREGS *ppIdRegs);
     241VMMR3DECL(int)          CPUMR3PopulateFeaturesByIdRegisters(PVM pVM, PCCPUMARMV8IDREGS pIdRegs);
     242
     243VMMR3_INT_DECL(int)     CPUMR3QueryGuestIdRegs(PVM pVM, PCCPUMARMV8IDREGS *ppIdRegs);
    285244
    286245/** @} */
  • trunk/include/VBox/vmm/cpum-x86-amd64.h

    r107389 r107650  
    812812#ifndef VBOX_FOR_DTRACE_LIB
    813813
    814 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    815 VMMDECL(int)            CPUMCpuIdCollectLeavesX86(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
    816 VMMDECL(CPUMCPUVENDOR)  CPUMCpuIdDetectX86VendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
    817 #endif
    818 
    819814VMM_INT_DECL(bool)      CPUMAssertGuestRFlagsCookie(PVM pVM, PVMCPU pVCpu);
    820815
     
    23292324/** @} */
    23302325
    2331 VMMDECL(bool)           CPUMSupportsXSave(PVM pVM);
    23322326VMMDECL(bool)           CPUMIsHostUsingSysEnter(PVM pVM);
    23332327VMMDECL(bool)           CPUMIsHostUsingSysCall(PVM pVM);
     
    23882382VMMDECL(CPUMMICROARCH)      CPUMCpuIdDetermineX86MicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
    23892383                                                             uint8_t bModel, uint8_t bStepping);
    2390 VMMDECL(const char *)       CPUMMicroarchName(CPUMMICROARCH enmMicroarch);
    23912384VMMR3DECL(int)              CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
    23922385VMMR3DECL(const char *)     CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
  • trunk/include/VBox/vmm/cpum.h

    r107389 r107650  
    354354typedef struct CPUMFEATURESCOMMON
    355355{
    356     /** The CPU vendor (CPUMCPUVENDOR). */
    357     uint8_t         enmCpuVendor;
    358     /** The CPU family. */
    359     uint8_t         uFamily;
    360     /** The CPU model. */
    361     uint8_t         uModel;
    362     /** The CPU stepping. */
    363     uint8_t         uStepping;
    364356    /** The microarchitecture. */
    365357#ifndef VBOX_FOR_DTRACE_LIB
     
    368360    uint32_t        enmMicroarch;
    369361#endif
     362    /** The CPU vendor (CPUMCPUVENDOR). */
     363    uint8_t         enmCpuVendor;
    370364    /** The maximum physical address width of the CPU. */
    371365    uint8_t         cMaxPhysAddrWidth;
     
    381375typedef struct CPUMFEATURESX86
    382376{
     377    /** The microarchitecture. */
     378#ifndef VBOX_FOR_DTRACE_LIB
     379    CPUMMICROARCH   enmMicroarch;
     380#else
     381    uint32_t        enmMicroarch;
     382#endif
    383383    /** The CPU vendor (CPUMCPUVENDOR). */
    384384    uint8_t         enmCpuVendor;
     385    /** The maximum physical address width of the CPU. */
     386    uint8_t         cMaxPhysAddrWidth;
     387    /** The maximum linear address width of the CPU. */
     388    uint8_t         cMaxLinearAddrWidth;
     389
    385390    /** The CPU family. */
    386391    uint8_t         uFamily;
     
    389394    /** The CPU stepping. */
    390395    uint8_t         uStepping;
    391     /** The microarchitecture. */
    392 #ifndef VBOX_FOR_DTRACE_LIB
    393     CPUMMICROARCH   enmMicroarch;
    394 #else
    395     uint32_t        enmMicroarch;
    396 #endif
    397     /** The maximum physical address width of the CPU. */
    398     uint8_t         cMaxPhysAddrWidth;
    399     /** The maximum linear address width of the CPU. */
    400     uint8_t         cMaxLinearAddrWidth;
    401396    /** Max size of the extended state (or FPU state if no XSAVE). */
    402397    uint16_t        cbMaxExtendedState;
     
    844839    /** VMX: Padding / reserved for future, making it a total of 128 bits.  */
    845840    uint32_t        fVmxPadding1;
     841    uint32_t        auPadding[4];
    846842} CPUMFEATURESX86;
    847843#ifndef VBOX_FOR_DTRACE_LIB
    848 AssertCompileSize(CPUMFEATURESX86, 48);
     844AssertCompileSize(CPUMFEATURESX86, 64);
    849845AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, enmCpuVendor,          CPUMFEATURESX86, enmCpuVendor);
    850 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uFamily,               CPUMFEATURESX86, uFamily);
    851 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uModel,                CPUMFEATURESX86, uModel);
    852 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uStepping,             CPUMFEATURESX86, uStepping);
    853846AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, enmMicroarch,          CPUMFEATURESX86, enmMicroarch);
    854847AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, cMaxPhysAddrWidth,     CPUMFEATURESX86, cMaxPhysAddrWidth);
     
    863856typedef struct CPUMFEATURESARMV8
    864857{
    865     /** The CPU vendor (CPUMCPUVENDOR). */
    866     uint8_t         enmCpuVendor;
    867     /** The CPU family. */
    868     uint8_t         uFamily;
    869     /** The CPU model. */
    870     uint8_t         uModel;
    871     /** The CPU stepping. */
    872     uint8_t         uStepping;
    873858    /** The microarchitecture. */
    874859#ifndef VBOX_FOR_DTRACE_LIB
     
    877862    uint32_t        enmMicroarch;
    878863#endif
     864    /** The CPU vendor (CPUMCPUVENDOR). */
     865    uint8_t         enmCpuVendor;
    879866    /** The maximum physical address width of the CPU. */
    880867    uint8_t         cMaxPhysAddrWidth;
    881868    /** The maximum linear address width of the CPU. */
    882869    uint8_t         cMaxLinearAddrWidth;
    883     uint16_t        uPadding;
     870
     871    /** The CPU implementer value (from MIDR_EL1). */
     872    uint8_t         uImplementeter;
     873    /** The CPU part number (from MIDR_EL1). */
     874    uint16_t        uPartNum;
     875    /** The CPU variant (from MIDR_EL1). */
     876    uint8_t         uVariant;
     877    /** The CPU revision (from MIDR_EL1). */
     878    uint8_t         uRevision;
    884879
    885880    /** @name Granule sizes supported.
     
    13951390    /** @} */
    13961391
    1397     /** Padding to the required size to match CPUMFEATURES for x86/amd64. */
    1398     uint8_t         abPadding[4];
     1392    /** Padding to the required size to match CPUMFEATURESX86. */
     1393    uint32_t        auPadding[5];
    13991394} CPUMFEATURESARMV8;
    14001395#ifndef VBOX_FOR_DTRACE_LIB
    1401 AssertCompileSize(CPUMFEATURESARMV8, 48);
     1396AssertCompileSize(CPUMFEATURESARMV8, 64);
     1397AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, enmMicroarch,          CPUMFEATURESARMV8, enmMicroarch);
    14021398AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, enmCpuVendor,          CPUMFEATURESARMV8, enmCpuVendor);
    1403 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uFamily,               CPUMFEATURESARMV8, uFamily);
    1404 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uModel,                CPUMFEATURESARMV8, uModel);
    1405 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uStepping,             CPUMFEATURESARMV8, uStepping);
    1406 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, enmMicroarch,          CPUMFEATURESARMV8, enmMicroarch);
    14071399AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, cMaxPhysAddrWidth,     CPUMFEATURESARMV8, cMaxPhysAddrWidth);
    14081400AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, cMaxLinearAddrWidth,   CPUMFEATURESARMV8, cMaxLinearAddrWidth);
     
    14361428                    s;
    14371429} CPUHOSTFEATURES;
     1430#ifndef VBOX_FOR_DTRACE_LIB
     1431AssertCompileSize(CPUHOSTFEATURES, 64);
     1432#endif
    14381433/** Pointer to a const host CPU feature structure. */
    14391434typedef CPUHOSTFEATURES const *PCCPUHOSTFEATURES;
     
    14581453
    14591454
     1455
     1456/**
     1457 * ARMv8 CPU ID registers.
     1458 */
     1459typedef struct CPUMARMV8IDREGS
     1460{
     1461    /** Content of the ID_AA64PFR0_EL1 register. */
     1462    uint64_t        u64RegIdAa64Pfr0El1;
     1463    /** Content of the ID_AA64PFR1_EL1 register. */
     1464    uint64_t        u64RegIdAa64Pfr1El1;
     1465    /** Content of the ID_AA64DFR0_EL1 register. */
     1466    uint64_t        u64RegIdAa64Dfr0El1;
     1467    /** Content of the ID_AA64DFR1_EL1 register. */
     1468    uint64_t        u64RegIdAa64Dfr1El1;
     1469    /** Content of the ID_AA64AFR0_EL1 register. */
     1470    uint64_t        u64RegIdAa64Afr0El1;
     1471    /** Content of the ID_AA64AFR1_EL1 register. */
     1472    uint64_t        u64RegIdAa64Afr1El1;
     1473    /** Content of the ID_AA64ISAR0_EL1 register. */
     1474    uint64_t        u64RegIdAa64Isar0El1;
     1475    /** Content of the ID_AA64ISAR1_EL1 register. */
     1476    uint64_t        u64RegIdAa64Isar1El1;
     1477    /** Content of the ID_AA64ISAR2_EL1 register. */
     1478    uint64_t        u64RegIdAa64Isar2El1;
     1479    /** Content of the ID_AA64MMFR0_EL1 register. */
     1480    uint64_t        u64RegIdAa64Mmfr0El1;
     1481    /** Content of the ID_AA64MMFR1_EL1 register. */
     1482    uint64_t        u64RegIdAa64Mmfr1El1;
     1483    /** Content of the ID_AA64MMFR2_EL1 register. */
     1484    uint64_t        u64RegIdAa64Mmfr2El1;
     1485    /** Content of the CLIDR_EL1 register. */
     1486    uint64_t        u64RegClidrEl1;
     1487    /** Content of the CTR_EL0 register. */
     1488    uint64_t        u64RegCtrEl0;
     1489    /** Content of the DCZID_EL0 register. */
     1490    uint64_t        u64RegDczidEl0;
     1491    /** @todo we need MIDR_EL1 here, possibly also MPIDR_EL1 and REVIDR_EL1. */
     1492} CPUMARMV8IDREGS;
     1493/** Pointer to CPU ID registers. */
     1494typedef CPUMARMV8IDREGS *PCPUMARMV8IDREGS;
     1495/** Pointer to a const CPU ID registers structure. */
     1496typedef CPUMARMV8IDREGS const *PCCPUMARMV8IDREGS;
     1497
     1498
    14601499/*
    14611500 * Include the target specific header.
     
    14991538VMMDECL(CPUMMICROARCH)  CPUMGetHostMicroarch(PCVM pVM);
    15001539
     1540VMMDECL(const char *)   CPUMMicroarchName(CPUMMICROARCH enmMicroarch);
     1541VMMDECL(const char *)   CPUMCpuVendorName(CPUMCPUVENDOR enmVendor);
     1542
     1543VMMDECL(CPUMCPUVENDOR)  CPUMCpuIdDetectX86VendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
     1544#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
     1545VMMDECL(int)            CPUMCpuIdCollectLeavesFromX86Host(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
     1546#endif
     1547#if defined(RT_ARCH_ARM64)
     1548VMMDECL(int)            CPUMCpuIdCollectIdRegistersFromArmV8Host(PCPUMARMV8IDREGS pIdRegs);
     1549#endif
     1550
    15011551#ifdef IN_RING3
    15021552/** @defgroup grp_cpum_r3    The CPUM ring-3 API
     
    15121562VMMR3DECL(void)         CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
    15131563VMMDECL(bool)           CPUMR3IsStateRestorePending(PVM pVM);
    1514 VMMDECL(const char *)       CPUMMicroarchName(CPUMMICROARCH enmMicroarch);
    1515 VMMR3DECL(const char *)     CPUMCpuVendorName(CPUMCPUVENDOR enmVendor);
    15161564
    15171565VMMR3DECL(uint32_t)         CPUMR3DbGetEntries(void);
  • trunk/include/VBox/vmm/vm.h

    r107227 r107650  
    13601360        struct
    13611361        {
    1362             /** Padding for hidden fields. */
    1363             uint8_t                 abHidden0[64 + 48];
    13641362            /** Guest CPU feature information. */
    13651363            CPUMFEATURES            GuestFeatures;
  • trunk/include/iprt/armv8.h

    r106463 r107650  
    581581/** CSSELR_EL1 register - RW. */
    582582#define ARMV8_AARCH64_SYSREG_CSSELR_EL1             ARMV8_AARCH64_SYSREG_ID_CREATE(3, 2,  0,  0, 0)
     583
     584/** CTR_EL0 - Cache Type Register - RO. */
     585#define ARMV8_AARCH64_SYSREG_CTR_EL0                ARMV8_AARCH64_SYSREG_ID_CREATE(3, 3, 0, 0, 1)
     586/** DCZID_EL0 - Data Cache Zero ID Register - RO. */
     587#define ARMV8_AARCH64_SYSREG_DCZID_EL0              ARMV8_AARCH64_SYSREG_ID_CREATE(3, 3, 0, 0, 7)
     588
    583589
    584590/** NZCV - Status Flags - ??. */
  • trunk/src/VBox/VMM/Makefile.kmk

    r107308 r107650  
    416416        VMMR3/CPUM-armv8.cpp \
    417417        VMMR3/CPUMDbg-armv8.cpp \
     418        VMMAll/CPUMAllCpuId.cpp \
    418419        VMMR3/CPUMR3CpuId-armv8.cpp \
    419420        VMMR3/CPUMR3Db-armv8.cpp \
     
    10741075 ##
    10751076 # Turn the header $2 into the DTrace library script $1.
     1077 # @todo ARM: this needs adjusting for the non-native VBoxVMM variant!
    10761078 #
    10771079 define def_vmm_lib_dtrace_preprocess
     
    10831085                -D VBOX_FOR_DTRACE_LIB \
    10841086                -D VBOX_FOR_DTRACE_LIB_$(toupper $(KBUILD_TARGET_ARCH)) \
     1087                -D $(if-expr "$(KBUILD_TARGET_ARCH)" == "amd64",VBOX_VMM_TARGET_X86,VBOX_VMM_TARGET_ARMV8) \
    10851088                -D IN_RING0 \
    10861089                -D RT_C_DECLS_BEGIN= \
  • trunk/src/VBox/VMM/VMMAll/CPUMAllCpuId.cpp

    r106061 r107650  
    4646#include <iprt/string.h>
    4747#include <iprt/x86-helpers.h>
     48#if defined(RT_ARCH_ARM64) || defined(VBOX_VMM_TARGET_ARMV8)
     49# include <iprt/armv8.h>
     50#endif
    4851
    4952
     
    5154*   Global Variables                                                                                                             *
    5255*********************************************************************************************************************************/
     56#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)
    5357/**
    5458 * The intel pentium family.
     
    505509}
    506510
     511#endif /* if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */
     512
     513
    507514
    508515/**
     
    685692}
    686693
     694#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)
    687695
    688696/**
     
    750758    else
    751759    {
    752 #ifdef IN_VBOX_CPU_REPORT
     760# ifdef IN_VBOX_CPU_REPORT
    753761        AssertReleaseFailed();
    754 #else
    755 # ifdef IN_RING3
     762# else
     763#  ifdef IN_RING3
    756764        Assert(ppaLeaves == &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
    757765        Assert(*ppaLeaves == pVM->cpum.s.GuestInfo.aCpuIdLeaves);
     
    761769        { }
    762770        else
    763 # endif
     771#  endif
    764772        {
    765773            *ppaLeaves = NULL;
    766774            LogRel(("CPUM: cpumR3CpuIdEnsureSpace: Out of CPUID space!\n"));
    767775        }
    768 #endif
     776# endif
    769777    }
    770778    return *ppaLeaves;
     
    772780
    773781
    774 #ifdef VBOX_STRICT
     782# ifdef VBOX_STRICT
    775783/**
    776784 * Checks that we've updated the CPUID leaves array correctly.
     
    796804        }
    797805}
    798 #endif
     806# endif
     807
     808#endif /* defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */
    799809
    800810#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
     
    982992 *                              success.
    983993 */
    984 VMMDECL(int) CPUMCpuIdCollectLeavesX86(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
     994VMMDECL(int) CPUMCpuIdCollectLeavesFromX86Host(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
    985995{
    986996    *ppaLeaves = NULL;
     
    11501160    return VINF_SUCCESS;
    11511161}
     1162
    11521163#endif /* RT_ARCH_X86 || RT_ARCH_AMD64 */
    11531164
    1154 
     1165#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)
    11551166/**
    11561167 * Detect the CPU vendor given the
     
    11931204    return CPUMCPUVENDOR_UNKNOWN;
    11941205}
     1206#endif /* defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */
    11951207
    11961208
     
    12241236}
    12251237
     1238#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)
    12261239
    12271240static PCCPUMCPUIDLEAF cpumCpuIdFindLeaf(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf)
     
    12601273
    12611274
    1262 static void cpumExplodeVmxFeatures(PCVMXMSRS pVmxMsrs, PCPUMFEATURES pFeatures)
     1275static void cpumExplodeVmxFeatures(PCVMXMSRS pVmxMsrs, CPUMFEATURESX86 *pFeatures)
    12631276{
    12641277    Assert(pVmxMsrs);
     
    13901403
    13911404
    1392 int cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, PCPUMFEATURES pFeatures)
     1405int cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, CPUMFEATURESX86 *pFeatures)
    13931406{
    13941407    Assert(pMsrs);
     
    16311644}
    16321645
     1646#endif /* defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */
     1647
     1648#if defined(RT_ARCH_ARM64)
     1649/**
     1650 * Collects the ID registers from an ARMv8 host.
     1651 *
     1652 * This isn't trivial on all hosts when running in userland and there is no
     1653 * support driver handy.
     1654 */
     1655VMMDECL(int) CPUMCpuIdCollectIdRegistersFromArmV8Host(PCPUMARMV8IDREGS pIdRegs)
     1656{
     1657#  ifdef _MSC_VER
     1658#   define READ_SYS_REG(a_u64Dst, a_SysRegName) do { \
     1659            (a_u64Dst) = (uint64_t)_ReadStatusReg(RT_CONCAT(ARMV8_AARCH64_SYSREG_,a_SysRegName) & 0x7fff); \
     1660        } while (0)
     1661#  else
     1662#   define READ_SYS_REG(a_u64Dst, a_SysRegName) do { \
     1663            __asm__ __volatile__ ("mrs %0, " #a_SysRegName : "=r" (a_u64Dst)); \
     1664        } while (0)
     1665#  endif
     1666
     1667    RT_ZERO(*pIdRegs);
     1668
     1669    /*
     1670     * CTR_EL0 can be trapped when executed in L0 (SCTLR_EL0.UCT) and macOS
     1671     * & Windows does so by default.  Linux OTOH typically exposes all the
     1672     * feature registers to user land with some sanitizing.
     1673     */
     1674# if !defined(IN_RING3) || defined(RT_OS_LINUX)
     1675    READ_SYS_REG(pIdRegs->u64RegCtrEl0,         CTR_EL0);
     1676# endif
     1677    READ_SYS_REG(pIdRegs->u64RegDczidEl0,       DCZID_EL0);
     1678
     1679# if defined(IN_RING0) || defined(RT_OS_LINUX)
     1680#  ifdef IN_RING3
     1681    if (getauxval(AT_HWCAP) & HWCAP_CPUID)
     1682#  endif
     1683    {
     1684        READ_SYS_REG(pIdRegs->u64RegIdAa64Pfr0El1,  ID_AA64PFR0_EL1);
     1685        READ_SYS_REG(pIdRegs->u64RegIdAa64Pfr1El1,  ID_AA64PFR1_EL1);
     1686        READ_SYS_REG(pIdRegs->u64RegIdAa64Dfr0El1,  ID_AA64DFR0_EL1);
     1687        READ_SYS_REG(pIdRegs->u64RegIdAa64Dfr1El1,  ID_AA64DFR1_EL1);
     1688        /// @todo READ_SYS_REG(pIdRegs->u64RegIdAa64Dfr2El1,  ID_AA64DFR2_EL1);
     1689        READ_SYS_REG(pIdRegs->u64RegIdAa64Afr0El1,  ID_AA64AFR0_EL1);
     1690        READ_SYS_REG(pIdRegs->u64RegIdAa64Afr1El1,  ID_AA64AFR1_EL1);
     1691        READ_SYS_REG(pIdRegs->u64RegIdAa64Isar0El1, ID_AA64ISAR0_EL1);
     1692        READ_SYS_REG(pIdRegs->u64RegIdAa64Isar1El1, ID_AA64ISAR1_EL1);
     1693        READ_SYS_REG(pIdRegs->u64RegIdAa64Isar2El1, ID_AA64ISAR2_EL1);
     1694        /// @todo READ_SYS_REG(pIdRegs->u64RegIdAa64Isar3El1, ID_AA64ISAR3_EL1);
     1695        READ_SYS_REG(pIdRegs->u64RegIdAa64Mmfr0El1, ID_AA64MMFR0_EL1);
     1696        READ_SYS_REG(pIdRegs->u64RegIdAa64Mmfr1El1, ID_AA64MMFR1_EL1);
     1697        READ_SYS_REG(pIdRegs->u64RegIdAa64Mmfr2El1, ID_AA64MMFR2_EL1);
     1698        /// @todo READ_SYS_REG(pIdRegs->u64RegIdAa64Mmfr3El1, ID_AA64MMFR3_EL1);
     1699        /// @todo READ_SYS_REG(pIdRegs->u64RegIdAa64Mmfr4El1, ID_AA64MMFR4_EL1);
     1700        READ_SYS_REG(pIdRegs->u64RegClidrEl1,       CLIDR_EL1);
     1701
     1702        /// @todo READ_SYS_REG(pIdRegs->uMainIdRegEl1,        MIDR_EL1);
     1703        /// @todo READ_SYS_REG(pIdRegs->uMpIdRegEl1,          MPIDR_EL1);
     1704        /// @todo READ_SYS_REG(pIdRegs->uRevIdRegEl1,         REVIDR_EL1);
     1705        return VINF_SUCCESS;
     1706    }
     1707# endif
     1708# ifndef IN_RING0
     1709    /** @todo  On darwin we should just cache the information (CPU DB) and figure
     1710     *         out which Apple Mx we're running on. */
     1711    /** @todo  Make the info available via the support driver...   */
     1712    return VINF_SUCCESS;
     1713# endif
     1714}
     1715#endif /* defined(RT_ARCH_ARM64) */
     1716
     1717#if defined(RT_ARCH_ARM64) || defined(VBOX_VMM_TARGET_ARMV8)
     1718/**
     1719 * Explode the CPU features from the given ID registers.
     1720 *
     1721 * @returns VBox status code.
     1722 * @param   pIdRegs             The ID registers to explode the features from.
     1723 * @param   pFeatures           Where to store the features to.
     1724 */
     1725int cpumCpuIdExplodeFeaturesArmV8(PCCPUMARMV8IDREGS pIdRegs, CPUMFEATURESARMV8 *pFeatures)
     1726{
     1727    uint64_t u64IdReg = pIdRegs->u64RegIdAa64Mmfr0El1;
     1728
     1729    static uint8_t s_aPaRange[] = { 32, 36, 40, 42, 44, 48, 52 };
     1730    AssertLogRelMsgReturn(RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE) < RT_ELEMENTS(s_aPaRange),
     1731                          ("CPUM: Invalid/Unsupported PARange value in ID_AA64MMFR0_EL1 register: %u\n",
     1732                          RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE)),
     1733                          VERR_CPUM_IPE_1);
     1734
     1735    pFeatures->cMaxPhysAddrWidth = s_aPaRange[RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE)];
     1736    pFeatures->fTGran4K          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN4)  != ARMV8_ID_AA64MMFR0_EL1_TGRAN4_NOT_IMPL;
     1737    pFeatures->fTGran16K         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN16) != ARMV8_ID_AA64MMFR0_EL1_TGRAN16_NOT_IMPL;
     1738    pFeatures->fTGran64K         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN64) != ARMV8_ID_AA64MMFR0_EL1_TGRAN64_NOT_IMPL;
     1739
     1740    /* ID_AA64ISAR0_EL1 features. */
     1741    u64IdReg = pIdRegs->u64RegIdAa64Isar0El1;
     1742    pFeatures->fAes              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_AES)     >= ARMV8_ID_AA64ISAR0_EL1_AES_SUPPORTED;
     1743    pFeatures->fPmull            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_AES)     >= ARMV8_ID_AA64ISAR0_EL1_AES_SUPPORTED_PMULL;
     1744    pFeatures->fSha1             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA1)    >= ARMV8_ID_AA64ISAR0_EL1_SHA1_SUPPORTED;
     1745    pFeatures->fSha256           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA2)    >= ARMV8_ID_AA64ISAR0_EL1_SHA2_SUPPORTED_SHA256;
     1746    pFeatures->fSha512           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA2)    >= ARMV8_ID_AA64ISAR0_EL1_SHA2_SUPPORTED_SHA256_SHA512;
     1747    pFeatures->fCrc32            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_CRC32)   >= ARMV8_ID_AA64ISAR0_EL1_CRC32_SUPPORTED;
     1748    pFeatures->fLse              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_ATOMIC)  >= ARMV8_ID_AA64ISAR0_EL1_ATOMIC_SUPPORTED;
     1749    pFeatures->fTme              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TME)     >= ARMV8_ID_AA64ISAR0_EL1_TME_SUPPORTED;
     1750    pFeatures->fRdm              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_RDM)     >= ARMV8_ID_AA64ISAR0_EL1_RDM_SUPPORTED;
     1751    pFeatures->fSha3             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA3)    >= ARMV8_ID_AA64ISAR0_EL1_SHA3_SUPPORTED;
     1752    pFeatures->fSm3              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SM3)     >= ARMV8_ID_AA64ISAR0_EL1_SM3_SUPPORTED;
     1753    pFeatures->fSm4              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SM4)     >= ARMV8_ID_AA64ISAR0_EL1_SM4_SUPPORTED;
     1754    pFeatures->fDotProd          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_DP)      >= ARMV8_ID_AA64ISAR0_EL1_DP_SUPPORTED;
     1755    pFeatures->fFhm              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_FHM)     >= ARMV8_ID_AA64ISAR0_EL1_FHM_SUPPORTED;
     1756    pFeatures->fFlagM            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TS)      >= ARMV8_ID_AA64ISAR0_EL1_TS_SUPPORTED;
     1757    pFeatures->fFlagM2           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TS)      >= ARMV8_ID_AA64ISAR0_EL1_TS_SUPPORTED_2;
     1758    pFeatures->fTlbios           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TLB)     >= ARMV8_ID_AA64ISAR0_EL1_TLB_SUPPORTED;
     1759    pFeatures->fTlbirange        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TLB)     >= ARMV8_ID_AA64ISAR0_EL1_TLB_SUPPORTED_RANGE;
     1760    pFeatures->fRng              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_RNDR)    >= ARMV8_ID_AA64ISAR0_EL1_RNDR_SUPPORTED;
     1761
     1762    /* ID_AA64ISAR1_EL1 features. */
     1763    u64IdReg = pIdRegs->u64RegIdAa64Isar1El1;
     1764    pFeatures->fDpb              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DPB)     >= ARMV8_ID_AA64ISAR1_EL1_DPB_SUPPORTED;
     1765    pFeatures->fDpb2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DPB)     >= ARMV8_ID_AA64ISAR1_EL1_DPB_SUPPORTED_2;
     1766
     1767    /* PAuth using QARMA5. */
     1768    pFeatures->fPacQarma5        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     != ARMV8_ID_AA64ISAR1_EL1_APA_NOT_IMPL;
     1769    if (pFeatures->fPacQarma5)
     1770    {
     1771        pFeatures->fPAuth        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_PAUTH;
     1772        pFeatures->fEpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_EPAC;
     1773        pFeatures->fPAuth2       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_PAUTH2;
     1774        pFeatures->fFpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_FPAC;
     1775        pFeatures->fFpacCombine  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_FPACCOMBINE;
     1776    }
     1777
     1778    /* PAuth using implementation defined algorithm. */
     1779    pFeatures->fPacImp           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     != ARMV8_ID_AA64ISAR1_EL1_API_NOT_IMPL;
     1780    if (pFeatures->fPacQarma5)
     1781    {
     1782        pFeatures->fPAuth        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_PAUTH;
     1783        pFeatures->fEpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_EPAC;
     1784        pFeatures->fPAuth2       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_PAUTH2;
     1785        pFeatures->fFpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_FPAC;
     1786        pFeatures->fFpacCombine  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_FPACCOMBINE;
     1787    }
     1788
     1789    pFeatures->fJscvt            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FJCVTZS) >= ARMV8_ID_AA64ISAR1_EL1_FJCVTZS_SUPPORTED;
     1790    pFeatures->fFcma             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FCMA)    >= ARMV8_ID_AA64ISAR1_EL1_FCMA_SUPPORTED;
     1791    pFeatures->fLrcpc            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LRCPC)   >= ARMV8_ID_AA64ISAR1_EL1_LRCPC_SUPPORTED;
     1792    pFeatures->fLrcpc2           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LRCPC)   >= ARMV8_ID_AA64ISAR1_EL1_LRCPC_SUPPORTED_2;
     1793    pFeatures->fFrintts          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FRINTTS) >= ARMV8_ID_AA64ISAR1_EL1_FRINTTS_SUPPORTED;
     1794    pFeatures->fSb               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_SB)      >= ARMV8_ID_AA64ISAR1_EL1_SB_SUPPORTED;
     1795    pFeatures->fSpecres          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_SPECRES) >= ARMV8_ID_AA64ISAR1_EL1_SPECRES_SUPPORTED;
     1796    pFeatures->fBf16             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_BF16)    >= ARMV8_ID_AA64ISAR1_EL1_BF16_SUPPORTED_BF16;
     1797    pFeatures->fEbf16            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_BF16)    >= ARMV8_ID_AA64ISAR1_EL1_BF16_SUPPORTED_EBF16;
     1798    pFeatures->fDgh              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DGH)     >= ARMV8_ID_AA64ISAR1_EL1_DGH_SUPPORTED;
     1799    pFeatures->fI8mm             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_I8MM)    >= ARMV8_ID_AA64ISAR1_EL1_I8MM_SUPPORTED;
     1800    pFeatures->fXs               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_XS)      >= ARMV8_ID_AA64ISAR1_EL1_XS_SUPPORTED;
     1801    pFeatures->fLs64             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED;
     1802    pFeatures->fLs64V            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED_V;
     1803    pFeatures->fLs64Accdata      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED_ACCDATA;
     1804
     1805    /* ID_AA64ISAR2_EL1 features. */
     1806    u64IdReg = pIdRegs->u64RegIdAa64Isar2El1;
     1807    pFeatures->fWfxt             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_WFXT)    >= ARMV8_ID_AA64ISAR2_EL1_WFXT_SUPPORTED;
     1808    pFeatures->fRpres            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_RPRES)   >= ARMV8_ID_AA64ISAR2_EL1_RPRES_SUPPORTED;
     1809
     1810    /* PAuth using QARMA3. */
     1811    pFeatures->fPacQarma3        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_GPA3)    >= ARMV8_ID_AA64ISAR2_EL1_GPA3_SUPPORTED;
     1812    pFeatures->fPacQarma3        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    != ARMV8_ID_AA64ISAR2_EL1_APA3_NOT_IMPL;
     1813    if (pFeatures->fPacQarma5)
     1814    {
     1815        pFeatures->fPAuth        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_PAUTH;
     1816        pFeatures->fEpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_EPAC;
     1817        pFeatures->fPAuth2       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_PAUTH2;
     1818        pFeatures->fFpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_FPAC;
     1819        pFeatures->fFpacCombine  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_FPACCOMBINE;
     1820    }
     1821
     1822    pFeatures->fMops             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_MOPS)    >= ARMV8_ID_AA64ISAR2_EL1_MOPS_SUPPORTED;
     1823    pFeatures->fHbc              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_BC)      >= ARMV8_ID_AA64ISAR2_EL1_BC_SUPPORTED;
     1824    pFeatures->fConstPacField    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_PACFRAC) >= ARMV8_ID_AA64ISAR2_EL1_PACFRAC_TRUE;
     1825
     1826    /* ID_AA64PFR0_EL1 */
     1827    u64IdReg = pIdRegs->u64RegIdAa64Pfr0El1;
     1828    /* The FP and AdvSIMD field must have the same value. */
     1829    Assert(RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP) == RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD));
     1830    pFeatures->fFp               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP)       != ARMV8_ID_AA64PFR0_EL1_FP_NOT_IMPL;
     1831    pFeatures->fFp16             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP)       == ARMV8_ID_AA64PFR0_EL1_FP_IMPL_SP_DP_HP;
     1832    pFeatures->fAdvSimd          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD)  != ARMV8_ID_AA64PFR0_EL1_ADVSIMD_NOT_IMPL;
     1833    pFeatures->fFp16             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD)  == ARMV8_ID_AA64PFR0_EL1_ADVSIMD_IMPL_SP_DP_HP;
     1834    pFeatures->fRas              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RAS)      >= ARMV8_ID_AA64PFR0_EL1_RAS_SUPPORTED;
     1835    pFeatures->fRasV1p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RAS)      >= ARMV8_ID_AA64PFR0_EL1_RAS_V1P1;
     1836    pFeatures->fSve              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_SVE)      >= ARMV8_ID_AA64PFR0_EL1_SVE_SUPPORTED;
     1837    pFeatures->fSecEl2           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_SEL2)     >= ARMV8_ID_AA64PFR0_EL1_SEL2_SUPPORTED;
     1838    pFeatures->fAmuV1            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_AMU)      >= ARMV8_ID_AA64PFR0_EL1_AMU_V1;
     1839    pFeatures->fAmuV1p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_AMU)      >= ARMV8_ID_AA64PFR0_EL1_AMU_V1P1;
     1840    pFeatures->fDit              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_DIT)      >= ARMV8_ID_AA64PFR0_EL1_DIT_SUPPORTED;
     1841    pFeatures->fRme              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RME)      >= ARMV8_ID_AA64PFR0_EL1_RME_SUPPORTED;
     1842    pFeatures->fCsv2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_CSV2)     >= ARMV8_ID_AA64PFR0_EL1_CSV2_SUPPORTED;
     1843    pFeatures->fCsv2v3           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_CSV2)     >= ARMV8_ID_AA64PFR0_EL1_CSV2_3_SUPPORTED;
     1844
     1845    /* ID_AA64PFR1_EL1 */
     1846    u64IdReg = pIdRegs->u64RegIdAa64Pfr1El1;
     1847    pFeatures->fBti              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_BT)       >= ARMV8_ID_AA64PFR1_EL1_BT_SUPPORTED;
     1848    pFeatures->fSsbs             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SSBS)     >= ARMV8_ID_AA64PFR1_EL1_SSBS_SUPPORTED;
     1849    pFeatures->fSsbs2            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SSBS)     >= ARMV8_ID_AA64PFR1_EL1_SSBS_SUPPORTED_MSR_MRS;
     1850    pFeatures->fMte              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_INSN_ONLY;
     1851    pFeatures->fMte2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_FULL;
     1852    pFeatures->fMte3             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_FULL_ASYM_TAG_FAULT_CHK;
     1853    /** @todo RAS_frac, MPAM_frac, CSV2_frac. */
     1854    pFeatures->fSme              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SME)      >= ARMV8_ID_AA64PFR1_EL1_SME_SUPPORTED;
     1855    pFeatures->fSme2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SME)      >= ARMV8_ID_AA64PFR1_EL1_SME_SME2;
     1856    pFeatures->fRngTrap          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_RNDRTRAP) >= ARMV8_ID_AA64PFR1_EL1_RNDRTRAP_SUPPORTED;
     1857    pFeatures->fNmi              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_NMI)      >= ARMV8_ID_AA64PFR1_EL1_NMI_SUPPORTED;
     1858
     1859    /* ID_AA64MMFR0_EL1 */
     1860    u64IdReg = pIdRegs->u64RegIdAa64Mmfr0El1;
     1861    pFeatures->fExs              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_EXS)     >= ARMV8_ID_AA64MMFR0_EL1_EXS_SUPPORTED;
     1862    pFeatures->fFgt              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_FGT)     >= ARMV8_ID_AA64MMFR0_EL1_FGT_SUPPORTED;
     1863    pFeatures->fEcv              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_ECV)     >= ARMV8_ID_AA64MMFR0_EL1_ECV_SUPPORTED;
     1864
     1865    /* ID_AA64MMFR1_EL1 */
     1866    u64IdReg = pIdRegs->u64RegIdAa64Mmfr1El1;
     1867    pFeatures->fHafdbs           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HAFDBS)  >= ARMV8_ID_AA64MMFR1_EL1_HAFDBS_SUPPORTED;
     1868    pFeatures->fVmid16           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_VMIDBITS) >= ARMV8_ID_AA64MMFR1_EL1_VMIDBITS_16;
     1869    pFeatures->fVhe              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_VHE)     >= ARMV8_ID_AA64MMFR1_EL1_VHE_SUPPORTED;
     1870    pFeatures->fHpds             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HPDS)    >= ARMV8_ID_AA64MMFR1_EL1_HPDS_SUPPORTED;
     1871    pFeatures->fHpds2            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HPDS)    >= ARMV8_ID_AA64MMFR1_EL1_HPDS_SUPPORTED_2;
     1872    pFeatures->fLor              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_LO)      >= ARMV8_ID_AA64MMFR1_EL1_LO_SUPPORTED;
     1873    pFeatures->fPan              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)     >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED;
     1874    pFeatures->fPan2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)     >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED_2;
     1875    pFeatures->fPan3             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)     >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED_3;
     1876    pFeatures->fXnx              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_XNX)     >= ARMV8_ID_AA64MMFR1_EL1_XNX_SUPPORTED;
     1877    pFeatures->fTwed             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_TWED)    >= ARMV8_ID_AA64MMFR1_EL1_TWED_SUPPORTED;
     1878    pFeatures->fEts2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_ETS)     >= ARMV8_ID_AA64MMFR1_EL1_ETS_SUPPORTED;
     1879    pFeatures->fHcx              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HCX)     >= ARMV8_ID_AA64MMFR1_EL1_HCX_SUPPORTED;
     1880    pFeatures->fAfp              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_AFP)     >= ARMV8_ID_AA64MMFR1_EL1_AFP_SUPPORTED;
     1881    pFeatures->fNTlbpa           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_NTLBPA)  >= ARMV8_ID_AA64MMFR1_EL1_NTLBPA_INCLUDE_COHERENT_ONLY;
     1882    pFeatures->fTidcp1           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_TIDCP1)  >= ARMV8_ID_AA64MMFR1_EL1_TIDCP1_SUPPORTED;
     1883    pFeatures->fCmow             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_CMOW)    >= ARMV8_ID_AA64MMFR1_EL1_CMOW_SUPPORTED;
     1884
     1885    /* ID_AA64MMFR2_EL1 */
     1886    u64IdReg = pIdRegs->u64RegIdAa64Mmfr2El1;
     1887    pFeatures->fTtcnp            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_CNP)     >= ARMV8_ID_AA64MMFR2_EL1_CNP_SUPPORTED;
     1888    pFeatures->fUao              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_UAO)     >= ARMV8_ID_AA64MMFR2_EL1_UAO_SUPPORTED;
     1889    pFeatures->fLsmaoc           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_LSM)     >= ARMV8_ID_AA64MMFR2_EL1_LSM_SUPPORTED;
     1890    pFeatures->fIesb             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_IESB)    >= ARMV8_ID_AA64MMFR2_EL1_IESB_SUPPORTED;
     1891    pFeatures->fLva              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_VARANGE) >= ARMV8_ID_AA64MMFR2_EL1_VARANGE_52BITS_64KB_GRAN;
     1892    pFeatures->fCcidx            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_CCIDX)   >= ARMV8_ID_AA64MMFR2_EL1_CCIDX_64BIT;
     1893    pFeatures->fNv               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_NV)      >= ARMV8_ID_AA64MMFR2_EL1_NV_SUPPORTED;
     1894    pFeatures->fNv2              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_NV)      >= ARMV8_ID_AA64MMFR2_EL1_NV_SUPPORTED_2;
     1895    pFeatures->fTtst             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_ST)      >= ARMV8_ID_AA64MMFR2_EL1_ST_SUPPORTED;
     1896    pFeatures->fLse2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_AT)      >= ARMV8_ID_AA64MMFR2_EL1_AT_SUPPORTED;
     1897    pFeatures->fIdst             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_IDS)     >= ARMV8_ID_AA64MMFR2_EL1_IDS_EC_18H;
     1898    pFeatures->fS2Fwb            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_FWB)     >= ARMV8_ID_AA64MMFR2_EL1_FWB_SUPPORTED;
     1899    pFeatures->fTtl              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_TTL)     >= ARMV8_ID_AA64MMFR2_EL1_TTL_SUPPORTED;
     1900    pFeatures->fEvt              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_EVT)     >= ARMV8_ID_AA64MMFR2_EL1_EVT_SUPPORTED;
     1901    pFeatures->fE0Pd             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_E0PD)    >= ARMV8_ID_AA64MMFR2_EL1_E0PD_SUPPORTED;
     1902
     1903    /* ID_AA64DFR0_EL1 */
     1904    u64IdReg = pIdRegs->u64RegIdAa64Dfr0El1;
     1905    pFeatures->fDebugV8p1        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8_VHE;
     1906    pFeatures->fDebugV8p2        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p2;
     1907    pFeatures->fDebugV8p4        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p4;
     1908    pFeatures->fDebugV8p8        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p8;
     1909    pFeatures->fPmuV3            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3;
     1910    pFeatures->fPmuV3p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P1;
     1911    pFeatures->fPmuV3p4          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P4;
     1912    pFeatures->fPmuV3p5          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P5;
     1913    pFeatures->fPmuV3p7          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P7;
     1914    pFeatures->fPmuV3p8          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P8;
     1915    pFeatures->fSpe              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED;
     1916    pFeatures->fSpeV1p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P1;
     1917    pFeatures->fSpeV1p2          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P2;
     1918    pFeatures->fSpeV1p3          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P3;
     1919    pFeatures->fDoubleLock       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DOUBLELOCK)  == ARMV8_ID_AA64DFR0_EL1_DOUBLELOCK_SUPPORTED;
     1920    pFeatures->fTrf              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_TRACEFILT)   >= ARMV8_ID_AA64DFR0_EL1_TRACEFILT_SUPPORTED;
     1921    pFeatures->fTrbe             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_TRACEBUFFER) >= ARMV8_ID_AA64DFR0_EL1_TRACEBUFFER_SUPPORTED;
     1922    pFeatures->fMtPmu            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_MTPMU)    == ARMV8_ID_AA64DFR0_EL1_MTPMU_SUPPORTED;
     1923    pFeatures->fBrbe             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_BRBE)     >= ARMV8_ID_AA64DFR0_EL1_BRBE_SUPPORTED;
     1924    pFeatures->fBrbeV1p1         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_BRBE)     >= ARMV8_ID_AA64DFR0_EL1_BRBE_SUPPORTED_V1P1;
     1925    pFeatures->fHpmn0            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_HPMN0)    >= ARMV8_ID_AA64DFR0_EL1_HPMN0_SUPPORTED;
     1926
     1927    return VINF_SUCCESS;
     1928}
     1929#endif /* defined(RT_ARCH_ARM64) || defined(VBOX_VMM_TARGET_ARMV8) */
     1930
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs-armv8.cpp

    r107113 r107650  
    268268}
    269269
     270#if 0 /* unused atm */
    270271
    271272/**
     
    306307}
    307308
     309#endif
    308310
    309311/**
     
    491493}
    492494
    493 
    494 /**
    495  * Translates a microarchitecture enum value to the corresponding string
    496  * constant.
    497  *
    498  * @returns Read-only string constant (omits "kCpumMicroarch_" prefix). Returns
    499  *          NULL if the value is invalid.
    500  *
    501  * @param   enmMicroarch    The enum value to convert.
    502  *
    503  * @todo Doesn't really belong here but for now there is no other Armv8 CPUM source file.
    504  */
    505 VMMDECL(const char *) CPUMMicroarchName(CPUMMICROARCH enmMicroarch)
    506 {
    507     switch (enmMicroarch)
    508     {
    509 #define CASE_RET_STR(enmValue)  case enmValue: return #enmValue + (sizeof("kCpumMicroarch_") - 1)
    510         CASE_RET_STR(kCpumMicroarch_Apple_M1);
    511 #undef CASE_RET_STR
    512         default:
    513             break;
    514     }
    515 
    516     return NULL;
    517 }
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r107113 r107650  
    10751075VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
    10761076{
    1077     return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
     1077    return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.Common.enmCpuVendor;
    10781078}
    10791079
     
    10871087VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
    10881088{
    1089     return pVM->cpum.s.HostFeatures.enmMicroarch;
     1089    return pVM->cpum.s.HostFeatures.Common.enmMicroarch;
    10901090}
    10911091
     
    16311631{
    16321632    pVCpu->cpum.s.fChanged |= fChangedAdd;
    1633 }
    1634 
    1635 
    1636 /**
    1637  * Checks if the CPU supports the XSAVE and XRSTOR instruction.
    1638  *
    1639  * @returns true if supported.
    1640  * @returns false if not supported.
    1641  * @param   pVM     The cross context VM structure.
    1642  */
    1643 VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
    1644 {
    1645     return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
    16461633}
    16471634
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r107389 r107650  
    147147    PCPUMCPUIDLEAF  paLeaves;
    148148    uint32_t        cLeaves;
    149     rc = CPUMCpuIdCollectLeavesX86(&paLeaves, &cLeaves);
     149    rc = CPUMCpuIdCollectLeavesFromX86Host(&paLeaves, &cLeaves);
    150150    AssertLogRelRCReturn(rc, rc);
    151151
     
    217217{
    218218    /* Copy the ring-0 host feature set to the shared part so ring-3 can pick it up. */
    219     pGVM->cpum.s.HostFeatures = g_CpumHostFeatures.s;
     219    pGVM->cpum.s.HostFeatures.s = g_CpumHostFeatures.s;
    220220}
    221221
     
    363363         * Note! we assume this happens after the CPUMR3Init is done, so CPUID bits are settled.
    364364         */
    365         pVM->cpum.s.HostFeatures.fArchRdclNo             = 0;
    366         pVM->cpum.s.HostFeatures.fArchIbrsAll            = 0;
    367         pVM->cpum.s.HostFeatures.fArchRsbOverride        = 0;
    368         pVM->cpum.s.HostFeatures.fArchVmmNeedNotFlushL1d = 0;
    369         pVM->cpum.s.HostFeatures.fArchMdsNo              = 0;
     365        pVM->cpum.s.HostFeatures.s.fArchRdclNo             = 0;
     366        pVM->cpum.s.HostFeatures.s.fArchIbrsAll            = 0;
     367        pVM->cpum.s.HostFeatures.s.fArchRsbOverride        = 0;
     368        pVM->cpum.s.HostFeatures.s.fArchVmmNeedNotFlushL1d = 0;
     369        pVM->cpum.s.HostFeatures.s.fArchMdsNo              = 0;
    370370        uint32_t const cStdRange = ASMCpuId_EAX(0);
    371371        if (   RTX86IsValidStdRange(cStdRange)
     
    380380                uint64_t const fHostArchVal = ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES);
    381381                uint64_t fArchVal = fHostArchVal;
    382                 pVM->cpum.s.HostFeatures.fArchRdclNo             = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RDCL_NO);
    383                 pVM->cpum.s.HostFeatures.fArchIbrsAll            = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_IBRS_ALL);
    384                 pVM->cpum.s.HostFeatures.fArchRsbOverride        = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RSBO);
    385                 pVM->cpum.s.HostFeatures.fArchVmmNeedNotFlushL1d = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_VMM_NEED_NOT_FLUSH_L1D);
    386                 pVM->cpum.s.HostFeatures.fArchMdsNo              = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_MDS_NO);
     382                pVM->cpum.s.HostFeatures.s.fArchRdclNo             = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RDCL_NO);
     383                pVM->cpum.s.HostFeatures.s.fArchIbrsAll            = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_IBRS_ALL);
     384                pVM->cpum.s.HostFeatures.s.fArchRsbOverride        = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RSBO);
     385                pVM->cpum.s.HostFeatures.s.fArchVmmNeedNotFlushL1d = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_VMM_NEED_NOT_FLUSH_L1D);
     386                pVM->cpum.s.HostFeatures.s.fArchMdsNo              = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_MDS_NO);
    387387
    388388                /* guest: */
     
    401401            else
    402402            {
    403                 pVM->cpum.s.HostFeatures.fArchCap = 0;
     403                pVM->cpum.s.HostFeatures.s.fArchCap = 0;
    404404                LogRel(("CPUM: IA32_ARCH_CAPABILITIES unsupported\n"));
    405405            }
     
    471471VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVMCC pVM, PVMCPUCC pVCpu)
    472472{
    473     Assert(pVM->cpum.s.HostFeatures.fFxSaveRstor);
     473    Assert(pVM->cpum.s.HostFeatures.s.fFxSaveRstor);
    474474    Assert(ASMGetCR4() & X86_CR4_OSFXSR);
    475475
     
    542542     *        wrt. extended state (linux). */
    543543
    544     if (!pVM->cpum.s.HostFeatures.fLeakyFxSR)
     544    if (!pVM->cpum.s.HostFeatures.s.fLeakyFxSR)
    545545    {
    546546        Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
     
    583583{
    584584    bool fSavedGuest;
    585     Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.HostFeatures.fFxSaveRstor);
     585    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.HostFeatures.s.fFxSaveRstor);
    586586    Assert(ASMGetCR4() & X86_CR4_OSFXSR);
    587587    if (pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST))
     
    614614    else
    615615        fSavedGuest = false;
    616     Assert(!(  pVCpu->cpum.s.fUseFlags
    617              & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST | CPUM_USED_MANUAL_XMM_RESTORE)));
     616    AssertMsg(!(  pVCpu->cpum.s.fUseFlags
     617                & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST | CPUM_USED_MANUAL_XMM_RESTORE)), ("%#x\n", pVCpu->cpum.s.fUseFlags));
    618618    Assert(!pVCpu->cpum.s.Guest.fUsedFpuGuest);
    619619    return fSavedGuest;
  • trunk/src/VBox/VMM/VMMR3/CPUM-armv8.cpp

    r107032 r107650  
    706706
    707707    /* Load CPUID and explode guest features. */
    708     return cpumR3LoadCpuId(pVM, pSSM, uVersion);
     708    return cpumR3LoadCpuIdArmV8(pVM, pSSM, uVersion);
    709709}
    710710
     
    11001100}
    11011101
    1102 
     1102#if 0 /* nobody is using these atm, they are for AMD64/darwin only */
    11031103/**
    11041104 * Marks the guest debug state as active.
     
    11311131    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
    11321132}
     1133#endif
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r107220 r107650  
    220220static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    221221static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
     222#ifdef RT_ARCH_AMD64
    222223static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
     224#endif
    223225
    224226
     
    226228*   Global Variables                                                                                                             *
    227229*********************************************************************************************************************************/
    228 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    229230/** Host CPU features. */
    230231DECL_HIDDEN_DATA(CPUHOSTFEATURES) g_CpumHostFeatures;
    231 #endif
    232232
    233233/** Saved state field descriptors for CPUMCTX. */
     
    12111211{
    12121212    RT_NOREF(pszArgs);
    1213     PCCPUMFEATURES pHostFeatures  = &pVM->cpum.s.HostFeatures;
     1213#ifdef RT_ARCH_AMD64
     1214    PCCPUMFEATURES pHostFeatures  = &pVM->cpum.s.HostFeatures.s;
     1215#else
     1216    PCCPUMFEATURES pHostFeatures  = &pVM->cpum.s.GuestFeatures;
     1217#endif
    12141218    PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
    12151219    if (   pHostFeatures->enmCpuVendor == CPUMCPUVENDOR_INTEL
     
    12171221        || pHostFeatures->enmCpuVendor == CPUMCPUVENDOR_SHANGHAI)
    12181222    {
    1219 #define VMXFEATDUMP(a_szDesc, a_Var) \
     1223#ifdef RT_ARCH_AMD64
     1224# define VMXFEATDUMP(a_szDesc, a_Var) \
    12201225        pHlp->pfnPrintf(pHlp, "  %s = %u (%u)\n", a_szDesc, pGuestFeatures->a_Var, pHostFeatures->a_Var)
     1226#else
     1227# define VMXFEATDUMP(a_szDesc, a_Var) \
     1228        pHlp->pfnPrintf(pHlp, "  %s = %u\n", a_szDesc, pGuestFeatures->a_Var)
     1229#endif
    12211230
    12221231        pHlp->pfnPrintf(pHlp, "Nested hardware virtualization - VMX features\n");
     1232#ifdef RT_ARCH_AMD64
    12231233        pHlp->pfnPrintf(pHlp, "  Mnemonic - Description                                  = guest (host)\n");
     1234#else
     1235        pHlp->pfnPrintf(pHlp, "  Mnemonic - Description                                  = guest\n");
     1236#endif
    12241237        VMXFEATDUMP("VMX - Virtual-Machine Extensions                       ", fVmx);
    12251238        /* Basic. */
     
    18451858        if (!VM_IS_HM_ENABLED(pVM) && !VM_IS_EXEC_ENGINE_IEM(pVM))
    18461859            pszWhy = "execution engine is neither HM nor IEM";
     1860#ifdef RT_ARCH_AMD64
    18471861        else if (VM_IS_HM_ENABLED(pVM) && !HMIsNestedPagingActive(pVM))
    18481862            pszWhy = "nested paging is not enabled for the VM or it is not supported by the host";
    1849         else if (VM_IS_HM_ENABLED(pVM) && !pVM->cpum.s.HostFeatures.fNoExecute)
     1863        else if (VM_IS_HM_ENABLED(pVM) && !pVM->cpum.s.HostFeatures.s.fNoExecute)
    18501864            pszWhy = "NX is not available on the host";
     1865#endif
    18511866        if (pszWhy)
    18521867        {
     
    19581973     * by the hardware, hence we merge our emulated features with the host features below.
    19591974     */
    1960     PCCPUMFEATURES pBaseFeat  = cpumR3IsHwAssistNstGstExecAllowed(pVM) ? &pVM->cpum.s.HostFeatures : &EmuFeat;
    1961     PCPUMFEATURES  pGuestFeat = &pVM->cpum.s.GuestFeatures;
     1975#ifdef RT_ARCH_AMD64
     1976    PCCPUMFEATURES const pBaseFeat  = cpumR3IsHwAssistNstGstExecAllowed(pVM) ? &pVM->cpum.s.HostFeatures.s : &EmuFeat;
     1977#else
     1978    PCCPUMFEATURES const pBaseFeat  = &EmuFeat;
     1979#endif
     1980    PCPUMFEATURES const  pGuestFeat = &pVM->cpum.s.GuestFeatures;
    19621981    Assert(pBaseFeat->fVmx);
    19631982#define CPUMVMX_SET_GST_FEAT(a_Feat) \
     
    21862205    AssertCompileSizeAlignment(CPUMCTX, 64);
    21872206    AssertCompileSizeAlignment(CPUMCTXMSRS, 64);
     2207#ifdef RT_ARCH_AMD64
    21882208    AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
     2209#endif
    21892210    AssertCompileMemberAlignment(VM, cpum, 64);
    21902211    AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
     
    22122233    AssertLogRelRCReturn(rc, rc);
    22132234
     2235    /* Use the host features detected by CPUMR0ModuleInit if available. */
     2236    if (pVM->cpum.s.HostFeatures.Common.enmCpuVendor != CPUMCPUVENDOR_INVALID)
     2237        g_CpumHostFeatures.s = pVM->cpum.s.HostFeatures.s;
     2238    else
     2239    {
    22142240#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    2215     /* Use the host features detected by CPUMR0ModuleInit if available. */
    2216     if (pVM->cpum.s.HostFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID)
    2217         g_CpumHostFeatures.s = pVM->cpum.s.HostFeatures;
    2218     else
    2219     {
    22202241        PCPUMCPUIDLEAF  paLeaves;
    22212242        uint32_t        cLeaves;
    2222         rc = CPUMCpuIdCollectLeavesX86(&paLeaves, &cLeaves);
     2243        rc = CPUMCpuIdCollectLeavesFromX86Host(&paLeaves, &cLeaves);
    22232244        AssertLogRelRCReturn(rc, rc);
    22242245
     
    22262247        RTMemFree(paLeaves);
    22272248        AssertLogRelRCReturn(rc, rc);
    2228     }
    2229     pVM->cpum.s.HostFeatures               = g_CpumHostFeatures.s;
    2230     pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;
    22312249
    22322250#elif defined(RT_ARCH_ARM64)
     2251        CPUMARMV8IDREGS IdRegs = {0};
     2252        rc = CPUMCpuIdCollectIdRegistersFromArmV8Host(&IdRegs);
     2253        AssertLogRelRCReturn(rc, rc);
     2254
     2255        rc = cpumCpuIdExplodeFeaturesArmV8(&IdRegs, &g_CpumHostFeatures.s);
     2256        AssertLogRelRCReturn(rc, rc);
     2257
     2258#else
     2259# error port me
     2260#endif
     2261        AssertLogRelRCReturn(rc, rc);
     2262        pVM->cpum.s.HostFeatures.s = g_CpumHostFeatures.s;
     2263    }
     2264    pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.Common.enmCpuVendor; /* a bit bogus for mismatching host/guest */
     2265
     2266#if 0 /** @todo fix */
    22332267    /** @todo we shouldn't be using the x86/AMD64 CPUMFEATURES for HostFeatures,
    22342268     *        but it's too much work to fix that now.  So, instead we just set
     
    23002334     */
    23012335#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    2302     if (!pVM->cpum.s.HostFeatures.fFxSaveRstor)
     2336    if (!pVM->cpum.s.HostFeatures.s.fFxSaveRstor)
    23032337        return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support the FXSAVE/FXRSTOR instruction.");
    2304     if (!pVM->cpum.s.HostFeatures.fMmx)
     2338    if (!pVM->cpum.s.HostFeatures.s.fMmx)
    23052339        return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support MMX.");
    2306     if (!pVM->cpum.s.HostFeatures.fTsc)
     2340    if (!pVM->cpum.s.HostFeatures.s.fTsc)
    23072341        return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support RDTSC.");
    23082342#endif
     
    23142348    uint64_t fXStateHostMask = 0;
    23152349#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    2316     if (   pVM->cpum.s.HostFeatures.fXSaveRstor
    2317         && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor)
     2350    if (   pVM->cpum.s.HostFeatures.s.fXSaveRstor
     2351        && pVM->cpum.s.HostFeatures.s.fOpSysXSaveRstor)
    23182352    {
    23192353        fXStateHostMask  = fXcr0Host = ASMGetXcr0();
     
    23332367     * Initialize the host XSAVE/XRSTOR mask.
    23342368     */
    2335     uint32_t cbMaxXState = pVM->cpum.s.HostFeatures.cbMaxExtendedState;
     2369#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
     2370    uint32_t cbMaxXState = pVM->cpum.s.HostFeatures.s.cbMaxExtendedState;
    23362371    cbMaxXState = RT_ALIGN(cbMaxXState, 128);
    2337     AssertLogRelReturn(   pVM->cpum.s.HostFeatures.cbMaxExtendedState >= sizeof(X86FXSTATE)
    2338                        && pVM->cpum.s.HostFeatures.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Host.abXState)
    2339                        && pVM->cpum.s.HostFeatures.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Guest.abXState)
     2372    AssertLogRelReturn(   pVM->cpum.s.HostFeatures.s.cbMaxExtendedState >= sizeof(X86FXSTATE)
     2373                       && pVM->cpum.s.HostFeatures.s.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Host.abXState)
     2374                       && pVM->cpum.s.HostFeatures.s.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Guest.abXState)
    23402375                       , VERR_CPUM_IPE_2);
     2376#endif
    23412377
    23422378    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    23432379    {
    23442380        PVMCPU pVCpu = pVM->apCpusR3[i];
    2345 
     2381        RT_NOREF(pVCpu);
     2382
     2383#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    23462384        pVCpu->cpum.s.Host.fXStateMask       = fXStateHostMask;
     2385#endif
     2386#ifdef VBOX_VMM_TARGET_X86
    23472387        pVCpu->cpum.s.hNestedVmxPreemptTimer = NIL_TMTIMERHANDLE;
     2388#endif
    23482389    }
    23492390
     
    23692410    DBGFR3InfoRegisterInternalEx(pVM, "cpumhyper",        "Displays the hypervisor cpu state.",
    23702411                                 &cpumR3InfoHyper, DBGFINFO_FLAGS_ALL_EMTS);
     2412#ifdef RT_ARCH_AMD64
    23712413    DBGFR3InfoRegisterInternalEx(pVM, "cpumhost",         "Displays the host cpu state.",
    23722414                                 &cpumR3InfoHost, DBGFINFO_FLAGS_ALL_EMTS);
     2415#endif
    23732416    DBGFR3InfoRegisterInternalEx(pVM, "cpumguestinstr",   "Displays the current guest instruction.",
    23742417                                 &cpumR3InfoGuestInstr, DBGFINFO_FLAGS_ALL_EMTS);
     
    25732616
    25742617    pCtx->aXcr[0]                   = XSAVE_C_X87;
    2575     if (pVM->cpum.s.HostFeatures.cbMaxExtendedState >= RT_UOFFSETOF(X86XSAVEAREA, Hdr))
     2618#ifdef RT_ARCH_AMD64 /** @todo x86-on-ARM64: recheck this! */
     2619    if (pVM->cpum.s.HostFeatures.s.cbMaxExtendedState >= RT_UOFFSETOF(X86XSAVEAREA, Hdr))
     2620#endif
    25762621    {
    25772622        /* The entire FXSAVE state needs loading when we switch to XSAVE/XRSTOR
     
    32363281
    32373282        /* Load CPUID and explode guest features. */
    3238         rc = cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs);
     3283        rc = cpumR3LoadCpuIdX86(pVM, pSSM, uVersion, &GuestMsrs);
    32393284        if (fVmxGstFeat)
    32403285        {
     
    43804425    cpumR3InfoGuestHwvirt(pVM, pHlp, pszArgs);
    43814426    cpumR3InfoHyper(pVM, pHlp, pszArgs);
     4427#ifdef RT_ARCH_AMD64
    43824428    cpumR3InfoHost(pVM, pHlp, pszArgs);
     4429#endif
    43834430}
    43844431
     
    50485095
    50495096
     5097#ifdef RT_ARCH_AMD64
    50505098/**
    50515099 * Display the host cpu state.
     
    51105158        pCtx->FSbase, pCtx->GSbase, pCtx->efer);
    51115159}
     5160#endif /* RT_ARCH_AMD64 */
     5161
    51125162
    51135163/**
     
    53975447    LogRel(("******************** End of CPUID dump **********************\n"));
    53985448
     5449#ifdef RT_ARCH_AMD64
    53995450    /*
    54005451     * Log VT-x extended features.
     
    54035454     * to do here for SVM.
    54045455     */
    5405     if (pVM->cpum.s.HostFeatures.fVmx)
     5456    if (pVM->cpum.s.HostFeatures.s.fVmx)
    54065457    {
    54075458        LogRel(("*********************** VT-x features ***********************\n"));
     
    54105461        LogRel(("******************* End of VT-x features ********************\n"));
    54115462    }
     5463#endif
    54125464
    54135465    /*
  • trunk/src/VBox/VMM/VMMR3/CPUMDbg.cpp

    r107113 r107650  
    12761276 * @param   pVM                 The cross context VM structure.
    12771277 */
    1278 int cpumR3DbgInit(PVM pVM)
     1278DECLHIDDEN(int) cpumR3DbgInit(PVM pVM)
    12791279{
    12801280    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId-armv8.cpp

    r106061 r107650  
    9393/** Pointer to CPUID config (from CFGM). */
    9494typedef CPUMCPUIDCONFIG *PCPUMCPUIDCONFIG;
    95 
    96 
    97 /**
    98  * Explode the CPU features from the given ID registers.
    99  *
    100  * @returns VBox status code.
    101  * @param   pIdRegs             The ID registers to explode the features from.
    102  * @param   pFeatures           Where to store the features to.
    103  */
    104 static int cpumCpuIdExplodeFeatures(PCCPUMIDREGS pIdRegs, PCPUMFEATURES pFeatures)
    105 {
    106     uint64_t u64IdReg = pIdRegs->u64RegIdAa64Mmfr0El1;
    107 
    108     static uint8_t s_aPaRange[] = { 32, 36, 40, 42, 44, 48, 52 };
    109     AssertLogRelMsgReturn(RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE) < RT_ELEMENTS(s_aPaRange),
    110                           ("CPUM: Invalid/Unsupported PARange value in ID_AA64MMFR0_EL1 register: %u\n",
    111                           RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE)),
    112                           VERR_CPUM_IPE_1);
    113 
    114     pFeatures->cMaxPhysAddrWidth = s_aPaRange[RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE)];
    115     pFeatures->fTGran4K          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN4)  != ARMV8_ID_AA64MMFR0_EL1_TGRAN4_NOT_IMPL;
    116     pFeatures->fTGran16K         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN16) != ARMV8_ID_AA64MMFR0_EL1_TGRAN16_NOT_IMPL;
    117     pFeatures->fTGran64K         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN64) != ARMV8_ID_AA64MMFR0_EL1_TGRAN64_NOT_IMPL;
    118 
    119     /* ID_AA64ISAR0_EL1 features. */
    120     u64IdReg = pIdRegs->u64RegIdAa64Isar0El1;
    121     pFeatures->fAes              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_AES)     >= ARMV8_ID_AA64ISAR0_EL1_AES_SUPPORTED;
    122     pFeatures->fPmull            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_AES)     >= ARMV8_ID_AA64ISAR0_EL1_AES_SUPPORTED_PMULL;
    123     pFeatures->fSha1             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA1)    >= ARMV8_ID_AA64ISAR0_EL1_SHA1_SUPPORTED;
    124     pFeatures->fSha256           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA2)    >= ARMV8_ID_AA64ISAR0_EL1_SHA2_SUPPORTED_SHA256;
    125     pFeatures->fSha512           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA2)    >= ARMV8_ID_AA64ISAR0_EL1_SHA2_SUPPORTED_SHA256_SHA512;
    126     pFeatures->fCrc32            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_CRC32)   >= ARMV8_ID_AA64ISAR0_EL1_CRC32_SUPPORTED;
    127     pFeatures->fLse              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_ATOMIC)  >= ARMV8_ID_AA64ISAR0_EL1_ATOMIC_SUPPORTED;
    128     pFeatures->fTme              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TME)     >= ARMV8_ID_AA64ISAR0_EL1_TME_SUPPORTED;
    129     pFeatures->fRdm              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_RDM)     >= ARMV8_ID_AA64ISAR0_EL1_RDM_SUPPORTED;
    130     pFeatures->fSha3             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA3)    >= ARMV8_ID_AA64ISAR0_EL1_SHA3_SUPPORTED;
    131     pFeatures->fSm3              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SM3)     >= ARMV8_ID_AA64ISAR0_EL1_SM3_SUPPORTED;
    132     pFeatures->fSm4              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SM4)     >= ARMV8_ID_AA64ISAR0_EL1_SM4_SUPPORTED;
    133     pFeatures->fDotProd          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_DP)      >= ARMV8_ID_AA64ISAR0_EL1_DP_SUPPORTED;
    134     pFeatures->fFhm              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_FHM)     >= ARMV8_ID_AA64ISAR0_EL1_FHM_SUPPORTED;
    135     pFeatures->fFlagM            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TS)      >= ARMV8_ID_AA64ISAR0_EL1_TS_SUPPORTED;
    136     pFeatures->fFlagM2           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TS)      >= ARMV8_ID_AA64ISAR0_EL1_TS_SUPPORTED_2;
    137     pFeatures->fTlbios           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TLB)     >= ARMV8_ID_AA64ISAR0_EL1_TLB_SUPPORTED;
    138     pFeatures->fTlbirange        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TLB)     >= ARMV8_ID_AA64ISAR0_EL1_TLB_SUPPORTED_RANGE;
    139     pFeatures->fRng              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_RNDR)    >= ARMV8_ID_AA64ISAR0_EL1_RNDR_SUPPORTED;
    140 
    141     /* ID_AA64ISAR1_EL1 features. */
    142     u64IdReg = pIdRegs->u64RegIdAa64Isar1El1;
    143     pFeatures->fDpb              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DPB)     >= ARMV8_ID_AA64ISAR1_EL1_DPB_SUPPORTED;
    144     pFeatures->fDpb2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DPB)     >= ARMV8_ID_AA64ISAR1_EL1_DPB_SUPPORTED_2;
    145 
    146     /* PAuth using QARMA5. */
    147     pFeatures->fPacQarma5        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     != ARMV8_ID_AA64ISAR1_EL1_APA_NOT_IMPL;
    148     if (pFeatures->fPacQarma5)
    149     {
    150         pFeatures->fPAuth        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_PAUTH;
    151         pFeatures->fEpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_EPAC;
    152         pFeatures->fPAuth2       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_PAUTH2;
    153         pFeatures->fFpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_FPAC;
    154         pFeatures->fFpacCombine  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_FPACCOMBINE;
    155     }
    156 
    157     /* PAuth using implementation defined algorithm. */
    158     pFeatures->fPacImp           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     != ARMV8_ID_AA64ISAR1_EL1_API_NOT_IMPL;
    159     if (pFeatures->fPacQarma5)
    160     {
    161         pFeatures->fPAuth        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_PAUTH;
    162         pFeatures->fEpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_EPAC;
    163         pFeatures->fPAuth2       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_PAUTH2;
    164         pFeatures->fFpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_FPAC;
    165         pFeatures->fFpacCombine  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_FPACCOMBINE;
    166     }
    167 
    168     pFeatures->fJscvt            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FJCVTZS) >= ARMV8_ID_AA64ISAR1_EL1_FJCVTZS_SUPPORTED;
    169     pFeatures->fFcma             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FCMA)    >= ARMV8_ID_AA64ISAR1_EL1_FCMA_SUPPORTED;
    170     pFeatures->fLrcpc            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LRCPC)   >= ARMV8_ID_AA64ISAR1_EL1_LRCPC_SUPPORTED;
    171     pFeatures->fLrcpc2           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LRCPC)   >= ARMV8_ID_AA64ISAR1_EL1_LRCPC_SUPPORTED_2;
    172     pFeatures->fFrintts          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FRINTTS) >= ARMV8_ID_AA64ISAR1_EL1_FRINTTS_SUPPORTED;
    173     pFeatures->fSb               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_SB)      >= ARMV8_ID_AA64ISAR1_EL1_SB_SUPPORTED;
    174     pFeatures->fSpecres          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_SPECRES) >= ARMV8_ID_AA64ISAR1_EL1_SPECRES_SUPPORTED;
    175     pFeatures->fBf16             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_BF16)    >= ARMV8_ID_AA64ISAR1_EL1_BF16_SUPPORTED_BF16;
    176     pFeatures->fEbf16            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_BF16)    >= ARMV8_ID_AA64ISAR1_EL1_BF16_SUPPORTED_EBF16;
    177     pFeatures->fDgh              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DGH)     >= ARMV8_ID_AA64ISAR1_EL1_DGH_SUPPORTED;
    178     pFeatures->fI8mm             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_I8MM)    >= ARMV8_ID_AA64ISAR1_EL1_I8MM_SUPPORTED;
    179     pFeatures->fXs               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_XS)      >= ARMV8_ID_AA64ISAR1_EL1_XS_SUPPORTED;
    180     pFeatures->fLs64             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED;
    181     pFeatures->fLs64V            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED_V;
    182     pFeatures->fLs64Accdata      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED_ACCDATA;
    183 
    184     /* ID_AA64ISAR2_EL1 features. */
    185     u64IdReg = pIdRegs->u64RegIdAa64Isar2El1;
    186     pFeatures->fWfxt             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_WFXT)    >= ARMV8_ID_AA64ISAR2_EL1_WFXT_SUPPORTED;
    187     pFeatures->fRpres            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_RPRES)   >= ARMV8_ID_AA64ISAR2_EL1_RPRES_SUPPORTED;
    188 
    189     /* PAuth using QARMA3. */
    190     pFeatures->fPacQarma3        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_GPA3)    >= ARMV8_ID_AA64ISAR2_EL1_GPA3_SUPPORTED;
    191     pFeatures->fPacQarma3        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    != ARMV8_ID_AA64ISAR2_EL1_APA3_NOT_IMPL;
    192     if (pFeatures->fPacQarma5)
    193     {
    194         pFeatures->fPAuth        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_PAUTH;
    195         pFeatures->fEpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_EPAC;
    196         pFeatures->fPAuth2       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_PAUTH2;
    197         pFeatures->fFpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_FPAC;
    198         pFeatures->fFpacCombine  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_FPACCOMBINE;
    199     }
    200 
    201     pFeatures->fMops             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_MOPS)    >= ARMV8_ID_AA64ISAR2_EL1_MOPS_SUPPORTED;
    202     pFeatures->fHbc              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_BC)      >= ARMV8_ID_AA64ISAR2_EL1_BC_SUPPORTED;
    203     pFeatures->fConstPacField    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_PACFRAC) >= ARMV8_ID_AA64ISAR2_EL1_PACFRAC_TRUE;
    204 
    205     /* ID_AA64PFR0_EL1 */
    206     u64IdReg = pIdRegs->u64RegIdAa64Pfr0El1;
    207     /* The FP and AdvSIMD field must have the same value. */
    208     Assert(RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP) == RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD));
    209     pFeatures->fFp               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP)       != ARMV8_ID_AA64PFR0_EL1_FP_NOT_IMPL;
    210     pFeatures->fFp16             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP)       == ARMV8_ID_AA64PFR0_EL1_FP_IMPL_SP_DP_HP;
    211     pFeatures->fAdvSimd          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD)  != ARMV8_ID_AA64PFR0_EL1_ADVSIMD_NOT_IMPL;
    212     pFeatures->fFp16             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD)  == ARMV8_ID_AA64PFR0_EL1_ADVSIMD_IMPL_SP_DP_HP;
    213     pFeatures->fRas              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RAS)      >= ARMV8_ID_AA64PFR0_EL1_RAS_SUPPORTED;
    214     pFeatures->fRasV1p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RAS)      >= ARMV8_ID_AA64PFR0_EL1_RAS_V1P1;
    215     pFeatures->fSve              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_SVE)      >= ARMV8_ID_AA64PFR0_EL1_SVE_SUPPORTED;
    216     pFeatures->fSecEl2           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_SEL2)     >= ARMV8_ID_AA64PFR0_EL1_SEL2_SUPPORTED;
    217     pFeatures->fAmuV1            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_AMU)      >= ARMV8_ID_AA64PFR0_EL1_AMU_V1;
    218     pFeatures->fAmuV1p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_AMU)      >= ARMV8_ID_AA64PFR0_EL1_AMU_V1P1;
    219     pFeatures->fDit              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_DIT)      >= ARMV8_ID_AA64PFR0_EL1_DIT_SUPPORTED;
    220     pFeatures->fRme              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RME)      >= ARMV8_ID_AA64PFR0_EL1_RME_SUPPORTED;
    221     pFeatures->fCsv2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_CSV2)     >= ARMV8_ID_AA64PFR0_EL1_CSV2_SUPPORTED;
    222     pFeatures->fCsv2v3           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_CSV2)     >= ARMV8_ID_AA64PFR0_EL1_CSV2_3_SUPPORTED;
    223 
    224     /* ID_AA64PFR1_EL1 */
    225     u64IdReg = pIdRegs->u64RegIdAa64Pfr1El1;
    226     pFeatures->fBti              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_BT)       >= ARMV8_ID_AA64PFR1_EL1_BT_SUPPORTED;
    227     pFeatures->fSsbs             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SSBS)     >= ARMV8_ID_AA64PFR1_EL1_SSBS_SUPPORTED;
    228     pFeatures->fSsbs2            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SSBS)     >= ARMV8_ID_AA64PFR1_EL1_SSBS_SUPPORTED_MSR_MRS;
    229     pFeatures->fMte              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_INSN_ONLY;
    230     pFeatures->fMte2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_FULL;
    231     pFeatures->fMte3             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_FULL_ASYM_TAG_FAULT_CHK;
    232     /** @todo RAS_frac, MPAM_frac, CSV2_frac. */
    233     pFeatures->fSme              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SME)      >= ARMV8_ID_AA64PFR1_EL1_SME_SUPPORTED;
    234     pFeatures->fSme2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SME)      >= ARMV8_ID_AA64PFR1_EL1_SME_SME2;
    235     pFeatures->fRngTrap          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_RNDRTRAP) >= ARMV8_ID_AA64PFR1_EL1_RNDRTRAP_SUPPORTED;
    236     pFeatures->fNmi              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_NMI)      >= ARMV8_ID_AA64PFR1_EL1_NMI_SUPPORTED;
    237 
    238     /* ID_AA64MMFR0_EL1 */
    239     u64IdReg = pIdRegs->u64RegIdAa64Mmfr0El1;
    240     pFeatures->fExs              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_EXS)     >= ARMV8_ID_AA64MMFR0_EL1_EXS_SUPPORTED;
    241     pFeatures->fFgt              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_FGT)     >= ARMV8_ID_AA64MMFR0_EL1_FGT_SUPPORTED;
    242     pFeatures->fEcv              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_ECV)     >= ARMV8_ID_AA64MMFR0_EL1_ECV_SUPPORTED;
    243 
    244     /* ID_AA64MMFR1_EL1 */
    245     u64IdReg = pIdRegs->u64RegIdAa64Mmfr1El1;
    246     pFeatures->fHafdbs           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HAFDBS)  >= ARMV8_ID_AA64MMFR1_EL1_HAFDBS_SUPPORTED;
    247     pFeatures->fVmid16           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_VMIDBITS) >= ARMV8_ID_AA64MMFR1_EL1_VMIDBITS_16;
    248     pFeatures->fVhe              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_VHE)     >= ARMV8_ID_AA64MMFR1_EL1_VHE_SUPPORTED;
    249     pFeatures->fHpds             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HPDS)    >= ARMV8_ID_AA64MMFR1_EL1_HPDS_SUPPORTED;
    250     pFeatures->fHpds2            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HPDS)    >= ARMV8_ID_AA64MMFR1_EL1_HPDS_SUPPORTED_2;
    251     pFeatures->fLor              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_LO)      >= ARMV8_ID_AA64MMFR1_EL1_LO_SUPPORTED;
    252     pFeatures->fPan              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)     >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED;
    253     pFeatures->fPan2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)     >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED_2;
    254     pFeatures->fPan3             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)     >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED_3;
    255     pFeatures->fXnx              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_XNX)     >= ARMV8_ID_AA64MMFR1_EL1_XNX_SUPPORTED;
    256     pFeatures->fTwed             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_TWED)    >= ARMV8_ID_AA64MMFR1_EL1_TWED_SUPPORTED;
    257     pFeatures->fEts2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_ETS)     >= ARMV8_ID_AA64MMFR1_EL1_ETS_SUPPORTED;
    258     pFeatures->fHcx              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HCX)     >= ARMV8_ID_AA64MMFR1_EL1_HCX_SUPPORTED;
    259     pFeatures->fAfp              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_AFP)     >= ARMV8_ID_AA64MMFR1_EL1_AFP_SUPPORTED;
    260     pFeatures->fNTlbpa           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_NTLBPA)  >= ARMV8_ID_AA64MMFR1_EL1_NTLBPA_INCLUDE_COHERENT_ONLY;
    261     pFeatures->fTidcp1           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_TIDCP1)  >= ARMV8_ID_AA64MMFR1_EL1_TIDCP1_SUPPORTED;
    262     pFeatures->fCmow             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_CMOW)    >= ARMV8_ID_AA64MMFR1_EL1_CMOW_SUPPORTED;
    263 
    264     /* ID_AA64MMFR2_EL1 */
    265     u64IdReg = pIdRegs->u64RegIdAa64Mmfr2El1;
    266     pFeatures->fTtcnp            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_CNP)     >= ARMV8_ID_AA64MMFR2_EL1_CNP_SUPPORTED;
    267     pFeatures->fUao              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_UAO)     >= ARMV8_ID_AA64MMFR2_EL1_UAO_SUPPORTED;
    268     pFeatures->fLsmaoc           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_LSM)     >= ARMV8_ID_AA64MMFR2_EL1_LSM_SUPPORTED;
    269     pFeatures->fIesb             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_IESB)    >= ARMV8_ID_AA64MMFR2_EL1_IESB_SUPPORTED;
    270     pFeatures->fLva              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_VARANGE) >= ARMV8_ID_AA64MMFR2_EL1_VARANGE_52BITS_64KB_GRAN;
    271     pFeatures->fCcidx            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_CCIDX)   >= ARMV8_ID_AA64MMFR2_EL1_CCIDX_64BIT;
    272     pFeatures->fNv               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_NV)      >= ARMV8_ID_AA64MMFR2_EL1_NV_SUPPORTED;
    273     pFeatures->fNv2              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_NV)      >= ARMV8_ID_AA64MMFR2_EL1_NV_SUPPORTED_2;
    274     pFeatures->fTtst             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_ST)      >= ARMV8_ID_AA64MMFR2_EL1_ST_SUPPORTED;
    275     pFeatures->fLse2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_AT)      >= ARMV8_ID_AA64MMFR2_EL1_AT_SUPPORTED;
    276     pFeatures->fIdst             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_IDS)     >= ARMV8_ID_AA64MMFR2_EL1_IDS_EC_18H;
    277     pFeatures->fS2Fwb            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_FWB)     >= ARMV8_ID_AA64MMFR2_EL1_FWB_SUPPORTED;
    278     pFeatures->fTtl              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_TTL)     >= ARMV8_ID_AA64MMFR2_EL1_TTL_SUPPORTED;
    279     pFeatures->fEvt              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_EVT)     >= ARMV8_ID_AA64MMFR2_EL1_EVT_SUPPORTED;
    280     pFeatures->fE0Pd             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_E0PD)    >= ARMV8_ID_AA64MMFR2_EL1_E0PD_SUPPORTED;
    281 
    282     /* ID_AA64DFR0_EL1 */
    283     u64IdReg = pIdRegs->u64RegIdAa64Dfr0El1;
    284     pFeatures->fDebugV8p1        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8_VHE;
    285     pFeatures->fDebugV8p2        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p2;
    286     pFeatures->fDebugV8p4        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p4;
    287     pFeatures->fDebugV8p8        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p8;
    288     pFeatures->fPmuV3            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3;
    289     pFeatures->fPmuV3p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P1;
    290     pFeatures->fPmuV3p4          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P4;
    291     pFeatures->fPmuV3p5          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P5;
    292     pFeatures->fPmuV3p7          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P7;
    293     pFeatures->fPmuV3p8          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P8;
    294     pFeatures->fSpe              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED;
    295     pFeatures->fSpeV1p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P1;
    296     pFeatures->fSpeV1p2          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P2;
    297     pFeatures->fSpeV1p3          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P3;
    298     pFeatures->fDoubleLock       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DOUBLELOCK)  == ARMV8_ID_AA64DFR0_EL1_DOUBLELOCK_SUPPORTED;
    299     pFeatures->fTrf              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_TRACEFILT)   >= ARMV8_ID_AA64DFR0_EL1_TRACEFILT_SUPPORTED;
    300     pFeatures->fTrbe             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_TRACEBUFFER) >= ARMV8_ID_AA64DFR0_EL1_TRACEBUFFER_SUPPORTED;
    301     pFeatures->fMtPmu            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_MTPMU)    == ARMV8_ID_AA64DFR0_EL1_MTPMU_SUPPORTED;
    302     pFeatures->fBrbe             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_BRBE)     >= ARMV8_ID_AA64DFR0_EL1_BRBE_SUPPORTED;
    303     pFeatures->fBrbeV1p1         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_BRBE)     >= ARMV8_ID_AA64DFR0_EL1_BRBE_SUPPORTED_V1P1;
    304     pFeatures->fHpmn0            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_HPMN0)    >= ARMV8_ID_AA64DFR0_EL1_HPMN0_SUPPORTED;
    305 
    306     return VINF_SUCCESS;
    307 }
    30895
    30996
     
    348135    /* The CPUID entries we start with here isn't necessarily the ones of the host, so we
    349136       must consult HostFeatures when processing CPUMISAEXTCFG variables. */
    350     PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures;
     137    PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures.s;
    351138#define PASSTHRU_FEATURE(a_IdReg, enmConfig, fHostFeature, a_IdRegNm, a_IdRegValSup, a_IdRegValNotSup) \
    352139    (a_IdReg) =   ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) \
     
    588375 *       on the VM config.
    589376 */
    590 VMMR3DECL(int) CPUMR3PopulateFeaturesByIdRegisters(PVM pVM, PCCPUMIDREGS pIdRegs)
     377VMMR3DECL(int) CPUMR3PopulateFeaturesByIdRegisters(PVM pVM, PCCPUMARMV8IDREGS pIdRegs)
    591378{
    592379    /* Set the host features from the given ID registers. */
    593     int rc = cpumCpuIdExplodeFeatures(pIdRegs, &g_CpumHostFeatures.s);
     380    int rc = cpumCpuIdExplodeFeaturesArmV8(pIdRegs, &g_CpumHostFeatures.s);
    594381    AssertRCReturn(rc, rc);
    595382
    596     pVM->cpum.s.HostFeatures               = g_CpumHostFeatures.s;
    597     pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;
     383    pVM->cpum.s.HostFeatures.s             = g_CpumHostFeatures.s;
     384    pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.Common.enmCpuVendor;
    598385    pVM->cpum.s.HostIdRegs                 = *pIdRegs;
    599386    pVM->cpum.s.GuestIdRegs                = *pIdRegs;
     
    631418     */
    632419    if (RT_SUCCESS(rc))
    633         rc = cpumCpuIdExplodeFeatures(pIdRegs, &pCpum->GuestFeatures);
     420        rc = cpumCpuIdExplodeFeaturesArmV8(pIdRegs, &pCpum->GuestFeatures);
    634421
    635422    /*
     
    650437 * @param   ppIdRegs            Where to store the pointer to the guest ID register struct.
    651438 */
    652 VMMR3_INT_DECL(int) CPUMR3QueryGuestIdRegs(PVM pVM, PCCPUMIDREGS *ppIdRegs)
     439VMMR3_INT_DECL(int) CPUMR3QueryGuestIdRegs(PVM pVM, PCCPUMARMV8IDREGS *ppIdRegs)
    653440{
    654441    AssertPtrReturn(ppIdRegs, VERR_INVALID_POINTER);
     
    668455 *
    669456 */
    670 /** Saved state field descriptors for CPUMIDREGS. */
    671 static const SSMFIELD g_aCpumIdRegsFields[] =
    672 {
    673     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Pfr0El1),
    674     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Pfr1El1),
    675     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Dfr0El1),
    676     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Dfr1El1),
    677     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Afr0El1),
    678     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Afr1El1),
    679     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Isar0El1),
    680     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Isar1El1),
    681     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Isar2El1),
    682     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Mmfr0El1),
    683     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Mmfr1El1),
    684     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Mmfr2El1),
    685     SSMFIELD_ENTRY(CPUMIDREGS, u64RegClidrEl1),
    686     SSMFIELD_ENTRY(CPUMIDREGS, u64RegCtrEl0),
    687     SSMFIELD_ENTRY(CPUMIDREGS, u64RegDczidEl0),
     457/** Saved state field descriptors for CPUMARMV8IDREGS. */
     458static const SSMFIELD g_aCpumArmV8IdRegsFields[] =
     459{
     460    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Pfr0El1),
     461    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Pfr1El1),
     462    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Dfr0El1),
     463    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Dfr1El1),
     464    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Afr0El1),
     465    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Afr1El1),
     466    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Isar0El1),
     467    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Isar1El1),
     468    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Isar2El1),
     469    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Mmfr0El1),
     470    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Mmfr1El1),
     471    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Mmfr2El1),
     472    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegClidrEl1),
     473    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegCtrEl0),
     474    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegDczidEl0),
    688475    SSMFIELD_ENTRY_TERM()
    689476};
     
    701488     * Save all the CPU ID leaves.
    702489     */
    703     SSMR3PutStructEx(pSSM, &pVM->cpum.s.GuestIdRegs, sizeof(pVM->cpum.s.GuestIdRegs), 0, g_aCpumIdRegsFields, NULL);
     490    SSMR3PutStructEx(pSSM, &pVM->cpum.s.GuestIdRegs, sizeof(pVM->cpum.s.GuestIdRegs), 0, g_aCpumArmV8IdRegsFields, NULL);
    704491}
    705492
     
    714501 * @param   pGuestIdRegs        The guest ID register as loaded from the saved state.
    715502 */
    716 static int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMIDREGS pGuestIdRegs)
     503static int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMARMV8IDREGS pGuestIdRegs)
    717504{
    718505    /*
     
    926713
    927714/**
    928  * Loads the CPU ID leaves saved by pass 0.
     715 * Loads the CPU ID leaves saved by pass 0, ARMv8 targets.
    929716 *
    930717 * @returns VBox status code.
     
    933720 * @param   uVersion            The format version.
    934721 */
    935 int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
    936 {
    937     CPUMIDREGS GuestIdRegs;
    938     int rc = SSMR3GetStructEx(pSSM, &GuestIdRegs, sizeof(GuestIdRegs), 0, g_aCpumIdRegsFields, NULL);
     722int cpumR3LoadCpuIdArmV8(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
     723{
     724    CPUMARMV8IDREGS GuestIdRegs;
     725    int rc = SSMR3GetStructEx(pSSM, &GuestIdRegs, sizeof(GuestIdRegs), 0, g_aCpumArmV8IdRegsFields, NULL);
    939726    AssertRCReturn(rc, rc);
    940727
     
    14341221    do { \
    14351222        if (fVerbose) \
    1436             pHlp->pfnPrintf(pHlp, "  %*s = %u (%u)\n", 41, #a_FeatNm, pVM->cpum.s.GuestFeatures.a_Flag, pVM->cpum.s.HostFeatures.a_Flag); \
     1223            pHlp->pfnPrintf(pHlp, "  %*s = %u (%u)\n", 41, #a_FeatNm, pVM->cpum.s.GuestFeatures.a_Flag, pVM->cpum.s.HostFeatures.s.a_Flag); \
    14371224        else \
    14381225            pHlp->pfnPrintf(pHlp, "  %*s = %u\n", 41, #a_FeatNm, pVM->cpum.s.GuestFeatures.a_Flag); \
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r107570 r107650  
    13351335    /* The CPUID entries we start with here isn't necessarily the ones of the host, so we
    13361336       must consult HostFeatures when processing CPUMISAEXTCFG variables. */
    1337     PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures;
     1337#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
     1338    PCCPUMFEATURES const pHstFeat = &pCpum->HostFeatures.s;
     1339#else
     1340    PCCPUMFEATURES const pHstFeat = &pCpum->GuestFeatures;
     1341#endif
    13381342#define PASSTHRU_FEATURE(enmConfig, fHostFeature, fConst) \
    13391343    ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) ? (fConst) : 0)
    13401344#define PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, fAndExpr, fConst) \
    13411345    ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) && (fAndExpr) ? (fConst) : 0)
     1346#define PASSTHRU_FEATURE_NOT_IEM(enmConfig, fHostFeature, fConst) \
     1347    PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, !VM_IS_EXEC_ENGINE_IEM(pVM), fConst)
    13421348#define PASSTHRU_FEATURE_TODO(enmConfig, fConst) ((enmConfig) ? (fConst) : 0)
    13431349
     
    14061412                           //| X86_CPUID_FEATURE_ECX_TPRUPDATE
    14071413                           //| X86_CPUID_FEATURE_ECX_PDCM  - not implemented yet.
    1408                            | PASSTHRU_FEATURE(pConfig->enmPcid, pHstFeat->fPcid, X86_CPUID_FEATURE_ECX_PCID)
     1414                           | PASSTHRU_FEATURE_NOT_IEM(pConfig->enmPcid, pHstFeat->fPcid, X86_CPUID_FEATURE_ECX_PCID)
    14091415                           //| X86_CPUID_FEATURE_ECX_DCA   - not implemented yet.
    14101416                           | PASSTHRU_FEATURE(pConfig->enmSse41, pHstFeat->fSse41, X86_CPUID_FEATURE_ECX_SSE4_1)
     
    18661872                               | X86_CPUID_STEXT_FEATURE_EBX_BMI2
    18671873                               //| X86_CPUID_STEXT_FEATURE_EBX_ERMS              RT_BIT(9)
    1868                                | PASSTHRU_FEATURE(pConfig->enmInvpcid, pHstFeat->fInvpcid, X86_CPUID_STEXT_FEATURE_EBX_INVPCID)
     1874                               | PASSTHRU_FEATURE_NOT_IEM(pConfig->enmInvpcid, pHstFeat->fInvpcid, X86_CPUID_STEXT_FEATURE_EBX_INVPCID)
    18691875                               //| X86_CPUID_STEXT_FEATURE_EBX_RTM               RT_BIT(11)
    18701876                               //| X86_CPUID_STEXT_FEATURE_EBX_PQM               RT_BIT(12)
     
    27912797    AssertLogRelRCReturn(rc, rc);
    27922798
     2799#ifdef RT_ARCH_AMD64 /** @todo next VT-x/AMD-V on non-AMD64 hosts */
    27932800    bool fQueryNestedHwvirt = false
    27942801#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    2795                            || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
    2796                            || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON
     2802                           || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_AMD
     2803                           || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_HYGON
    27972804#endif
    27982805#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2799                            || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
    2800                            || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA
     2806                           || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
     2807                           || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_VIA
    28012808#endif
    28022809                           ;
     
    28232830        }
    28242831    }
     2832#endif /** @todo */
    28252833
    28262834    /*
     
    28982906    AssertLogRelRCReturn(rc, rc);
    28992907
    2900     bool const fMayHaveXSave = pVM->cpum.s.HostFeatures.fXSaveRstor
    2901                             && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor
     2908#ifdef RT_ARCH_AMD64
     2909    bool const fMayHaveXSave = pVM->cpum.s.HostFeatures.s.fXSaveRstor
     2910                            && pVM->cpum.s.HostFeatures.s.fOpSysXSaveRstor
    29022911                            && (  VM_IS_NEM_ENABLED(pVM)
    29032912                                ? NEMHCGetFeatures(pVM) & NEM_FEAT_F_XSAVE_XRSTOR
     
    29062915                                : fNestedPagingAndFullGuestExec);
    29072916    uint64_t const fXStateHostMask = pVM->cpum.s.fXStateHostMask;
     2917#else
     2918    bool const     fMayHaveXSave   = true;
     2919    uint64_t const fXStateHostMask = XSAVE_C_YMM | XSAVE_C_SSE | XSAVE_C_X87;
     2920#endif
    29082921
    29092922    /** @cfgm{/CPUM/IsaExts/XSAVE, boolean, depends}
     
    32893302{
    32903303#ifdef RT_ARCH_AMD64
    3291     Assert(pVM->cpum.s.HostFeatures.fMtrr);
     3304    Assert(pVM->cpum.s.HostFeatures.s.fMtrr);
    32923305#endif
    32933306
     
    36953708#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    36963709# define CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) \
    3697     if (!pVM->cpum.s.HostFeatures. a_fFeature) \
     3710    if (!pVM->cpum.s.HostFeatures.s. a_fFeature) \
    36983711    { \
    36993712        LogRel(("CPUM: WARNING! Can't turn on " a_szFeature " when the host doesn't support it!\n")); \
     
    38753888            /* Valid for both Intel and AMD. */
    38763889            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
    3877             pVM->cpum.s.HostFeatures.fRdTscP = 1;
     3890            pVM->cpum.s.GuestFeatures.fRdTscP = 1;
    38783891            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
    38793892            break;
     
    39003913#ifdef RT_ARCH_AMD64
    39013914                if (   !pLeaf
    3902                     || !(pVM->cpum.s.HostFeatures.fIbpb || pVM->cpum.s.HostFeatures.fIbrs))
     3915                    || !(pVM->cpum.s.HostFeatures.s.fIbpb || pVM->cpum.s.HostFeatures.s.fIbrs))
    39033916                {
    39043917                    LogRel(("CPUM: WARNING! Can't turn on Speculation Control when the host doesn't support it!\n"));
     
    39183931#ifdef RT_ARCH_AMD64
    39193932                /* We will only expose STIBP if IBRS is present to keep things simpler (simple is not an option). */
    3920                 if (pVM->cpum.s.HostFeatures.fIbrs)
     3933                if (pVM->cpum.s.HostFeatures.s.fIbrs)
    39213934#endif
    39223935                {
     
    39243937                    pVM->cpum.s.GuestFeatures.fIbrs = 1;
    39253938#ifdef RT_ARCH_AMD64
    3926                     if (pVM->cpum.s.HostFeatures.fStibp)
     3939                    if (pVM->cpum.s.HostFeatures.s.fStibp)
    39273940#endif
    39283941                    {
     
    39653978
    39663979#ifdef RT_ARCH_AMD64
    3967                 if (pVM->cpum.s.HostFeatures.fArchCap)
     3980                if (pVM->cpum.s.HostFeatures.s.fArchCap)
    39683981#endif
    39693982                {
     
    39843997
    39853998                    /* Advertise IBRS_ALL if present at this point... */
    3986                     if (pVM->cpum.s.HostFeatures.fArchCap & MSR_IA32_ARCH_CAP_F_IBRS_ALL)
     3999#ifdef RT_ARCH_AMD64
     4000                    if (pVM->cpum.s.HostFeatures.s.fArchCap & MSR_IA32_ARCH_CAP_F_IBRS_ALL)
     4001#endif
    39874002                        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps |= MSR_IA32_ARCH_CAP_F_IBRS_ALL);
    39884003                }
     
    49564971
    49574972/**
    4958  * Loads the CPU ID leaves saved by pass 0.
     4973 * Loads the CPU ID leaves saved by pass 0, x86 targets.
    49594974 *
    49604975 * @returns VBox status code.
     
    49644979 * @param   pMsrs               The guest MSRs.
    49654980 */
    4966 int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs)
     4981int cpumR3LoadCpuIdX86(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs)
    49674982{
    49684983    AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
  • trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp

    r106630 r107650  
    901901        if (RT_FAILURE(rc))
    902902            return rc;
    903         rc = CPUMCpuIdCollectLeavesX86(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
     903        rc = CPUMCpuIdCollectLeavesFromX86Host(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
    904904        if (RT_FAILURE(rc))
    905905            return rc;
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp

    r107316 r107650  
    572572} s_aIdRegs[] =
    573573{
    574     { HV_FEATURE_REG_ID_AA64DFR0_EL1,       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1)  },
    575     { HV_FEATURE_REG_ID_AA64DFR1_EL1,       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1)  },
    576     { HV_FEATURE_REG_ID_AA64ISAR0_EL1,      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },
    577     { HV_FEATURE_REG_ID_AA64ISAR1_EL1,      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },
    578     { HV_FEATURE_REG_ID_AA64MMFR0_EL1,      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },
    579     { HV_FEATURE_REG_ID_AA64MMFR1_EL1,      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },
    580     { HV_FEATURE_REG_ID_AA64MMFR2_EL1,      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },
    581     { HV_FEATURE_REG_ID_AA64PFR0_EL1,       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1)  },
    582     { HV_FEATURE_REG_ID_AA64PFR1_EL1,       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1)  },
    583     { HV_FEATURE_REG_CLIDR_EL1,             RT_UOFFSETOF(CPUMIDREGS, u64RegClidrEl1)       },
    584     { HV_FEATURE_REG_CTR_EL0,               RT_UOFFSETOF(CPUMIDREGS, u64RegCtrEl0)         },
    585     { HV_FEATURE_REG_DCZID_EL0,             RT_UOFFSETOF(CPUMIDREGS, u64RegDczidEl0)       }
     574    { HV_FEATURE_REG_ID_AA64DFR0_EL1,       RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr0El1)  },
     575    { HV_FEATURE_REG_ID_AA64DFR1_EL1,       RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr1El1)  },
     576    { HV_FEATURE_REG_ID_AA64ISAR0_EL1,      RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar0El1) },
     577    { HV_FEATURE_REG_ID_AA64ISAR1_EL1,      RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar1El1) },
     578    { HV_FEATURE_REG_ID_AA64MMFR0_EL1,      RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr0El1) },
     579    { HV_FEATURE_REG_ID_AA64MMFR1_EL1,      RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr1El1) },
     580    { HV_FEATURE_REG_ID_AA64MMFR2_EL1,      RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr2El1) },
     581    { HV_FEATURE_REG_ID_AA64PFR0_EL1,       RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr0El1)  },
     582    { HV_FEATURE_REG_ID_AA64PFR1_EL1,       RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr1El1)  },
     583    { HV_FEATURE_REG_CLIDR_EL1,             RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegClidrEl1)       },
     584    { HV_FEATURE_REG_CTR_EL0,               RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegCtrEl0)         },
     585    { HV_FEATURE_REG_DCZID_EL0,             RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegDczidEl0)       }
    586586};
    587587
     
    14501450
    14511451        /* Query ID registers and hand them to CPUM. */
    1452         CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
     1452        CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
    14531453        for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
    14541454        {
     
    25402540        } s_aSysIdRegs[] =
    25412541        {
    2542 #define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg,     RT_UOFFSETOF(CPUMIDREGS, a_CpumIdReg) }
     2542#define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg, RT_UOFFSETOF(CPUMARMV8IDREGS, a_CpumIdReg) }
    25432543            ID_SYS_REG_CREATE(ID_AA64DFR0_EL1,  u64RegIdAa64Dfr0El1),
    25442544            ID_SYS_REG_CREATE(ID_AA64DFR1_EL1,  u64RegIdAa64Dfr1El1),
     
    25532553        };
    25542554
    2555         PCCPUMIDREGS pIdRegsGst = NULL;
     2555        PCCPUMARMV8IDREGS pIdRegsGst = NULL;
    25562556        int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
    25572557        AssertRCReturn(rc, rc);
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux-armv8.cpp

    r107308 r107650  
    292292} s_aIdRegs[] =
    293293{
    294     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR0_EL1),       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1)  },
    295     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR1_EL1),       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1)  },
    296     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR0_EL1),      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },
    297     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR1_EL1),      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },
    298     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR0_EL1),      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },
    299     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR1_EL1),      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },
    300     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR2_EL1),      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },
    301     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR0_EL1),       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1)  },
    302     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR1_EL1),       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1)  }
     294    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR0_EL1),   RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr0El1)  },
     295    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR1_EL1),   RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr1El1)  },
     296    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR0_EL1),  RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar0El1) },
     297    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR1_EL1),  RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar1El1) },
     298    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR0_EL1),  RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr0El1) },
     299    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR1_EL1),  RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr1El1) },
     300    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR2_EL1),  RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr2El1) },
     301    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR0_EL1),   RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr0El1)  },
     302    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR1_EL1),   RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr1El1)  }
    303303};
    304304
     
    480480
    481481            /* Need to query the ID registers and populate CPUM. */
    482             CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
     482            CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
    483483            for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
    484484            {
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp

    r107194 r107650  
    831831             * these are partition wide registers and need to be queried/set with WHV_ANY_VP.
    832832             */
    833             CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
     833            CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
    834834
    835835            WHV_REGISTER_NAME  aenmNames[10];
     
    870870
    871871            /* Apply any overrides to the partition. */
    872             PCCPUMIDREGS pIdRegsGst = NULL;
     872            PCCPUMARMV8IDREGS pIdRegsGst = NULL;
    873873            rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
    874874            AssertRCReturn(rc, rc);
  • trunk/src/VBox/VMM/include/CPUMInternal-armv8.h

    r106061 r107650  
    11/* $Id$ */
    22/** @file
    3  * CPUM - Internal header file, ARMv8 variant.
     3 * CPUM - Internal header file, obsolete.
    44 */
    55
     
    3232#endif
    3333
    34 #ifndef VBOX_FOR_DTRACE_LIB
    35 # include <VBox/cdefs.h>
    36 # include <VBox/types.h>
    37 # include <VBox/vmm/stam.h>
    38 #else
    39 # pragma D depends_on library cpumctx.d
    40 # pragma D depends_on library cpum.d
    41 
    42 /* Some fudging. */
    43 typedef uint64_t STAMCOUNTER;
    44 #endif
    45 
    46 
    47 
    48 
    49 /** @defgroup grp_cpum_int   Internals
    50  * @ingroup grp_cpum
    51  * @internal
    52  * @{
    53  */
    54 
    55 /** Use flags (CPUM::fUseFlags).
    56  * @{ */
    57 /** Set to indicate that we should save host DR0-7 and load the hypervisor debug
    58  * registers in the raw-mode world switchers. (See CPUMRecalcHyperDRx.) */
    59 #define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(0)
    60 /** Used in ring-0 to indicate that we have loaded the hypervisor debug
    61  * registers. */
    62 #define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(1)
    63 /** Used in ring-0 to indicate that we have loaded the guest debug
    64  * registers (DR0-3 and maybe DR6) for direct use by the guest.
    65  * DR7 (and AMD-V DR6) are handled via the VMCB. */
    66 #define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(2)
    67 /** @} */
    68 
    69 
    70 /** @name CPUM Saved State Version.
    71  * @{ */
    72 /** The current saved state version. */
    73 #define CPUM_SAVED_STATE_VERSION                1
    74 /** @} */
    75 
    76 
    77 /**
    78  * CPU info
    79  */
    80 typedef struct CPUMINFO
    81 {
    82     /** The number of system register ranges (CPUMSSREGRANGE) in the array pointed to below. */
    83     uint32_t                    cSysRegRanges;
    84 
     85     /** Pointer to the system register ranges. */
    86     R3PTRTYPE(PCPUMSYSREGRANGE) paSysRegRangesR3;
    87 
    88     /** System register ranges. */
    89     CPUMSYSREGRANGE             aSysRegRanges[128];
    90 } CPUMINFO;
    91 /** Pointer to a CPU info structure. */
    92 typedef CPUMINFO *PCPUMINFO;
    93 /** Pointer to a const CPU info structure. */
    94 typedef CPUMINFO const *CPCPUMINFO;
    95 
    96 
    97 /**
    98  * CPUM Data (part of VM)
    99  */
    100 typedef struct CPUM
    101 {
    102     /** The (more) portable CPUID level. */
    103     uint8_t                 u8PortableCpuIdLevel;
    104     /** Indicates that a state restore is pending.
    105      * This is used to verify load order dependencies (PGM). */
    106     bool                    fPendingRestore;
    107     /** The initial exception level (EL) to start the CPU after a reset,
    108      * should be either ARMV8_AARCH64_EL_1 or ARMV8_AARCH64_EL_2 for nested virtualization. */
    109     uint8_t                 bResetEl;
    110 
    111     uint8_t                 abPadding0[5];
    112 
    113     /** The reset value of the program counter. */
    114     uint64_t                u64ResetPc;
    115 
    116     /** Align to 64-byte boundary. */
    117     uint8_t                 abPadding1[48];
    118 
    119     /** Host CPU feature information.
     120      * Externally visible via the VM structure, aligned on 64-byte boundary. */
    121     CPUMFEATURES            HostFeatures;
    122     /** Guest CPU feature information.
     123      * Externally visible via the VM structure, aligned with HostFeatures. */
    124     CPUMFEATURES            GuestFeatures;
    125     /** Guest CPU info. */
    126     CPUMINFO                GuestInfo;
    127     /** Host CPU ID registers. */
    128     CPUMIDREGS              HostIdRegs;
    129     /** Guest CPU ID registers. */
    130     CPUMIDREGS              GuestIdRegs;
    131 
    132     /** @name System register statistics.
    133      * @{ */
    134     STAMCOUNTER             cSysRegWrites;
    135     STAMCOUNTER             cSysRegWritesToIgnoredBits;
    136     STAMCOUNTER             cSysRegWritesRaiseExcp;
    137     STAMCOUNTER             cSysRegWritesUnknown;
    138     STAMCOUNTER             cSysRegReads;
    139     STAMCOUNTER             cSysRegReadsRaiseExcp;
    140     STAMCOUNTER             cSysRegReadsUnknown;
    141     /** @} */
    142 } CPUM;
    143 #ifndef VBOX_FOR_DTRACE_LIB
    144 AssertCompileMemberOffset(CPUM, HostFeatures, 64);
    145 AssertCompileMemberOffset(CPUM, GuestFeatures, 112);
    146 #endif
    147 /** Pointer to the CPUM instance data residing in the shared VM structure. */
    148 typedef CPUM *PCPUM;
    149 
    150 /**
    151  * CPUM Data (part of VMCPU)
    152  */
    153 typedef struct CPUMCPU
    154 {
    155     /** Guest context.
    156      * Aligned on a 64-byte boundary. */
    157     CPUMCTX                 Guest;
    158 
    159     /** Use flags.
    160      * These flags indicates both what is to be used and what has been used. */
    161     uint32_t                fUseFlags;
    162 
    163     /** Changed flags.
    164      * These flags indicates to REM (and others) which important guest
    165      * registers which has been changed since last time the flags were cleared.
    166      * See the CPUM_CHANGED_* defines for what we keep track of.
    167      *
    168      * @todo Obsolete, but will probably be refactored so keep it for reference. */
    169     uint32_t                fChanged;
    170 } CPUMCPU;
    171 #ifndef VBOX_FOR_DTRACE_LIB
    172 /** @todo Compile time size/alignment assertions. */
    173 #endif
    174 /** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. */
    175 typedef CPUMCPU *PCPUMCPU;
    176 
    177 #ifndef VBOX_FOR_DTRACE_LIB
    178 RT_C_DECLS_BEGIN
    179 
    180 # ifdef IN_RING3
    181 DECLHIDDEN(int)       cpumR3DbgInit(PVM pVM);
    182 DECLHIDDEN(int)       cpumR3SysRegStrictInitChecks(void);
    183 
    184 void                  cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM);
    185 int                   cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
    186 
    187 DECLCALLBACK(void)    cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    188 DECLCALLBACK(void)    cpumR3CpuFeatInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    189 
    190 # endif
    191 
    192 RT_C_DECLS_END
    193 #endif /* !VBOX_FOR_DTRACE_LIB */
    194 
    195 /** @} */
     34#include "CPUMInternal.h"
    19635
    19736#endif /* !VMM_INCLUDED_SRC_include_CPUMInternal_armv8_h */
  • trunk/src/VBox/VMM/include/CPUMInternal.h

    r107570 r107650  
    6060 * @note Was part of saved state (6.1 and earlier).
    6161 * @{ */
     62#if defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
     63
    6264/** Indicates that we've saved the host FPU, SSE, whatever state and that it
    6365 * needs to be restored. */
     
    100102/** Set if the VM supports long-mode. */
    101103#define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)
     104
     105#endif
    102106/** @} */
    103107
     
    105109/** @name CPUM Saved State Version.
    106110 * @{ */
     111
    107112/** The current saved state version.
    108  *  @todo When bumping to next version, add CPUMCTX::enmHwVirt and
     113 *  @todo AMD64:When bumping to next version, add CPUMCTX::enmHwVirt and
    109114 *        uMicrocodeRevision to the saved state. */
    110 #define CPUM_SAVED_STATE_VERSION                CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_4
     115#if defined(VBOX_VMM_TARGET_X86)
     116# define CPUM_SAVED_STATE_VERSION               CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_4
     117#elif defined(VBOX_VMM_TARGET_ARMV8)
     118# define CPUM_SAVED_STATE_VERSION               CPUM_SAVED_STATE_VERSION_ARMV8_V1
     119#endif
     120
     121#if defined(VBOX_VMM_TARGET_X86)
    111122/** The saved state version with u32RestoreProcCtls2 for Nested Microsoft
    112123 *  Hyper-V. */
    113 #define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_4   23
     124# define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_4  23
    114125/** The saved state version with more virtual VMCS fields (HLAT prefix size,
    115126 *  PCONFIG-exiting bitmap, HLAT ptr, VM-exit ctls2) and a CPUMCTX field (VM-exit
    116127 *  ctls2 MSR). */
    117 #define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3   22
     128# define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3  22
    118129/** The saved state version with PAE PDPEs added. */
    119 #define CPUM_SAVED_STATE_VERSION_PAE_PDPES      21
     130# define CPUM_SAVED_STATE_VERSION_PAE_PDPES     21
    120131/** The saved state version with more virtual VMCS fields and CPUMCTX VMX fields. */
    121 #define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2   20
     132# define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2  20
    122133/** The saved state version including VMX hardware virtualization state. */
    123 #define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX     19
     134# define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX    19
    124135/** The saved state version including SVM hardware virtualization state. */
    125 #define CPUM_SAVED_STATE_VERSION_HWVIRT_SVM     18
     136# define CPUM_SAVED_STATE_VERSION_HWVIRT_SVM    18
    126137/** The saved state version including XSAVE state. */
    127 #define CPUM_SAVED_STATE_VERSION_XSAVE          17
     138# define CPUM_SAVED_STATE_VERSION_XSAVE         17
    128139/** The saved state version with good CPUID leaf count. */
    129 #define CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT 16
     140# define CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT 16
    130141/** CPUID changes with explode forgetting to update the leaf count on
    131142 * restore, resulting in garbage being saved restoring+saving old states). */
    132 #define CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT 15
     143# define CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT 15
    133144/** The saved state version before the CPUIDs changes. */
    134 #define CPUM_SAVED_STATE_VERSION_PUT_STRUCT     14
     145# define CPUM_SAVED_STATE_VERSION_PUT_STRUCT    14
    135146/** The saved state version before using SSMR3PutStruct. */
    136 #define CPUM_SAVED_STATE_VERSION_MEM            13
     147# define CPUM_SAVED_STATE_VERSION_MEM           13
    137148/** The saved state version before introducing the MSR size field. */
    138 #define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE    12
     149# define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE   12
    139150/** The saved state version of 3.2, 3.1 and 3.3 trunk before the hidden
    140151 * selector register change (CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID). */
    141 #define CPUM_SAVED_STATE_VERSION_VER3_2         11
     152# define CPUM_SAVED_STATE_VERSION_VER3_2        11
    142153/** The saved state version of 3.0 and 3.1 trunk before the teleportation
    143154 * changes. */
    144 #define CPUM_SAVED_STATE_VERSION_VER3_0         10
     155# define CPUM_SAVED_STATE_VERSION_VER3_0        10
    145156/** The saved state version for the 2.1 trunk before the MSR changes. */
    146 #define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR   9
     157# define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR  9
    147158/** The saved state version of 2.0, used for backwards compatibility. */
    148 #define CPUM_SAVED_STATE_VERSION_VER2_0         8
     159# define CPUM_SAVED_STATE_VERSION_VER2_0        8
    149160/** The saved state version of 1.6, used for backwards compatibility. */
    150 #define CPUM_SAVED_STATE_VERSION_VER1_6         6
     161# define CPUM_SAVED_STATE_VERSION_VER1_6        6
     162#endif
     163
     164#if defined(VBOX_VMM_TARGET_ARMV8)
     165/** The initial ARMv8 saved state. */
     166# define CPUM_SAVED_STATE_VERSION_ARMV8_V1      1
     167#endif
    151168/** @} */
    152169
    153170
    154 /** @name XSAVE limits.
     171#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)
     172/** @name AMD64: XSAVE limits.
    155173 * @{ */
    156174/** Max size we accept for the XSAVE area.
     
    160178#define CPUM_MIN_XSAVE_AREA_SIZE    0x240
    161179/** @} */
    162 
     180#endif
    163181
    164182/**
     
    167185typedef struct CPUMINFO
    168186{
     187#if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
    169188    /** The number of MSR ranges (CPUMMSRRANGE) in the array pointed to below. */
    170189    uint32_t                    cMsrRanges;
     
    207226     *       allocation.  The insanity is mainly for more recent AMD CPUs. */
    208227    CPUMMSRRANGE                aMsrRanges[8192];
     228
     229#elif defined(VBOX_VMM_TARGET_ARMV8)
     230    /** The number of system register ranges (CPUMSSREGRANGE) in the array pointed to below. */
     231    uint32_t                    cSysRegRanges;
     232    uint32_t                    uPadding;
     233
      235    /** Pointer to the system register ranges. */
     235    R3PTRTYPE(PCPUMSYSREGRANGE) paSysRegRangesR3;
     236
     237    /** System register ranges. */
     238    CPUMSYSREGRANGE             aSysRegRanges[128];
     239#else
     240# error "port me"
     241#endif
    209242} CPUMINFO;
    210243/** Pointer to a CPU info structure. */
     
    214247
    215248
     249#ifdef RT_ARCH_AMD64
    216250/**
    217251 * The saved host CPU state.
     
    317351    /* padding to get 64byte aligned size */
    318352    uint8_t         auPadding[24];
    319 #if HC_ARCH_BITS != 64
    320 # error HC_ARCH_BITS not defined or unsupported
    321 #endif
     353# if HC_ARCH_BITS != 64
     354#  error HC_ARCH_BITS not defined or unsupported
     355# endif
    322356} CPUMHOSTCTX;
    323 #ifndef VBOX_FOR_DTRACE_LIB
     357# ifndef VBOX_FOR_DTRACE_LIB
    324358AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
    325 #endif
     359# endif
    326360/** Pointer to the saved host CPU state. */
    327361typedef CPUMHOSTCTX *PCPUMHOSTCTX;
    328 
    329 
     362#endif /* RT_ARCH_AMD64 */
     363
     364
     365#if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
    330366/**
    331367 * The hypervisor context CPU state (just DRx left now).
     
    344380    uint64_t        au64Padding[7];
    345381} CPUMHYPERCTX;
    346 #ifndef VBOX_FOR_DTRACE_LIB
     382# ifndef VBOX_FOR_DTRACE_LIB
    347383AssertCompileSizeAlignment(CPUMHYPERCTX, 64);
    348 #endif
     384# endif
    349385/** Pointer to the hypervisor context CPU state. */
    350386typedef CPUMHYPERCTX *PCPUMHYPERCTX;
     387#endif
    351388
    352389
     
    356393typedef struct CPUM
    357394{
    358     /** Use flags.
    359      * These flags indicates which CPU features the host uses.
    360      */
    361     uint32_t                fHostUseFlags;
     395    /** Guest CPU feature information.
      396     * Externally visible via the VM structure, aligned with HostFeatures. */
     397    CPUMFEATURES            GuestFeatures;
     398    /** Host CPU feature information.
      399     * Externally visible via the VM structure, aligned on 64-byte boundary. */
     400    CPUHOSTFEATURES         HostFeatures;
    362401
    363402    /** The (more) portable CPUID level. */
     
    366405     * This is used to verify load order dependencies (PGM). */
    367406    bool                    fPendingRestore;
     407
     408#if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
    368409    /** Whether MTRR reads report valid memory types for memory regions. */
    369410    bool                    fMtrrRead;
    370411    /** Whether the guest's writes to MTRRs are implemented. */
    371412    bool                    fMtrrWrite;
     413    /** Use flags.
     414     * These flags indicates which CPU features the host uses.
     415     */
     416    uint32_t                fHostUseFlags;
    372417
    373418    /** XSAVE/XRTOR components we can expose to the guest mask. */
     
    377422    uint64_t                fXStateHostMask;
    378423
    379 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    380     /** The host MXCSR mask (determined at init). */
    381     uint32_t                fHostMxCsrMask;
    382 #else
    383     uint32_t                u32UnusedOnNonX86;
    384 #endif
    385     uint8_t                 abPadding1[4];
    386 
    387424    /** Random value we store in the reserved RFLAGS bits we don't use ourselves so
    388425     *  we can detect corruption. */
    389426    uint64_t                fReservedRFlagsCookie;
    390427
    391     /** Align to 64-byte boundary. */
    392     uint8_t                 abPadding2[16+8];
    393 
    394     /** Host CPU feature information.
     395      * Externally visible via the VM structure, aligned on 64-byte boundary. */
    396     CPUMFEATURES            HostFeatures;
    397     /** Guest CPU feature information.
     398      * Externally visible via the VM structure, aligned with HostFeatures. */
    399     CPUMFEATURES            GuestFeatures;
    400428    /** Guest CPU info. */
    401429    CPUMINFO                GuestInfo;
     
    418446    STAMCOUNTER             cMsrReadsUnknown;
    419447    /** @} */
     448
     449#elif defined(VBOX_VMM_TARGET_ARMV8)
     450    /** The initial exception level (EL) to start the CPU after a reset,
     451     * should be either ARMV8_AARCH64_EL_1 or ARMV8_AARCH64_EL_2 for nested virtualization. */
     452    uint8_t                 bResetEl;
     453    uint8_t                 abPadding0[5];
     454
     455    /** The reset value of the program counter. */
     456    uint64_t                u64ResetPc;
     457
     458    /** Guest CPU info. */
     459    CPUMINFO                GuestInfo;
     460    /** Guest CPU ID registers. */
     461    CPUMARMV8IDREGS         GuestIdRegs;
     462
     463    /** @name System register statistics.
     464     * @{ */
     465    STAMCOUNTER             cSysRegWrites;
     466    STAMCOUNTER             cSysRegWritesToIgnoredBits;
     467    STAMCOUNTER             cSysRegWritesRaiseExcp;
     468    STAMCOUNTER             cSysRegWritesUnknown;
     469    STAMCOUNTER             cSysRegReads;
     470    STAMCOUNTER             cSysRegReadsRaiseExcp;
     471    STAMCOUNTER             cSysRegReadsUnknown;
     472    /** @} */
     473#endif
     474
     475#ifdef RT_ARCH_ARM64
     476    /** Host CPU ID registers. */
     477    CPUMARMV8IDREGS         HostIdRegs;
     478
     479#elif defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
     480    /** The host MXCSR mask (determined at init). */
     481    uint32_t                fHostMxCsrMask;
     482#endif
    420483} CPUM;
    421484#ifndef VBOX_FOR_DTRACE_LIB
     485AssertCompileMemberOffset(CPUM, GuestFeatures, 0);
    422486AssertCompileMemberOffset(CPUM, HostFeatures, 64);
    423 AssertCompileMemberOffset(CPUM, GuestFeatures, 112);
     487AssertCompileMemberOffset(CPUM, u8PortableCpuIdLevel, 128);
    424488#endif
    425489/** Pointer to the CPUM instance data residing in the shared VM structure. */
     
    434498     * Aligned on a 64-byte boundary. */
    435499    CPUMCTX                 Guest;
     500#if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
    436501    /** Guest context - misc MSRs
    437502     * Aligned on a 64-byte boundary. */
    438503    CPUMCTXMSRS             GuestMsrs;
    439 
    440     /** Nested VMX: VMX-preemption timer. */
    441     TMTIMERHANDLE           hNestedVmxPreemptTimer;
     504#endif
     505#ifdef RT_ARCH_AMD64
     506    /** Saved host context.  Only valid while inside RC or HM contexts.
     507     * Must be aligned on a 64-byte boundary. */
     508    CPUMHOSTCTX             Host;
     509#endif
    442510
    443511    /** Use flags.
     
    453521    uint32_t                fChanged;
    454522
    455     /** Temporary storage for the return code of the function called in the
    456      * 32-64 switcher. */
    457     uint32_t                u32RetCode;
    458 
     523#if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
     524    /** Nested VMX: VMX-preemption timer. */
     525    TMTIMERHANDLE           hNestedVmxPreemptTimer;
    459526    /** Whether the X86_CPUID_FEATURE_EDX_APIC and X86_CPUID_AMD_FEATURE_EDX_APIC
    460527     *  (?) bits are visible or not.  (The APIC is responsible for setting this
    461528     *  when loading state, so we won't save it.) */
    462529    bool                    fCpuIdApicFeatureVisible;
    463 
    464     /** Align the next member on a 64-byte boundary. */
    465     uint8_t                 abPadding2[64 - 8 - 4*3 - 1];
    466 
    467     /** Saved host context.  Only valid while inside RC or HM contexts.
    468      * Must be aligned on a 64-byte boundary. */
    469     CPUMHOSTCTX             Host;
     530    uint8_t                 abPadding[7];
     531
    470532    /** Old hypervisor context, only used for combined DRx values now.
    471533     * Must be aligned on a 64-byte boundary. */
    472534    CPUMHYPERCTX            Hyper;
     535#endif
    473536
    474537#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    478541} CPUMCPU;
    479542#ifndef VBOX_FOR_DTRACE_LIB
     543# ifdef RT_ARCH_AMD64
    480544AssertCompileMemberAlignment(CPUMCPU, Host, 64);
     545# endif
    481546#endif
    482547/** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. */
     
    486551RT_C_DECLS_BEGIN
    487552
     553# if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
    488554PCPUMCPUIDLEAF      cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf);
    489555PCPUMCPUIDLEAF      cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit);
     556# endif
     557# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)
    490558PCPUMCPUIDLEAF      cpumCpuIdGetLeafInt(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf);
    491559PCPUMCPUIDLEAF      cpumCpuIdEnsureSpace(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t cLeaves);
    492 # ifdef VBOX_STRICT
     560#  ifdef VBOX_STRICT
    493561void                cpumCpuIdAssertOrder(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves);
    494 # endif
     562#  endif
    495563int                 cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs,
    496                                                 PCPUMFEATURES pFeatures);
     564                                                CPUMFEATURESX86 *pFeatures);
     565# endif /* defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */
     566# if defined(RT_ARCH_ARM64) || defined(VBOX_VMM_TARGET_ARMV8)
     567int                 cpumCpuIdExplodeFeaturesArmV8(PCCPUMARMV8IDREGS pIdRegs, CPUMFEATURESARMV8 *pFeatures);
     568# endif
    497569
    498570# ifdef IN_RING3
    499 int                 cpumR3DbgInit(PVM pVM);
     571DECLHIDDEN(int)     cpumR3DbgInit(PVM pVM);
     572#  if defined(VBOX_VMM_TARGET_ARMV8)
     573DECLHIDDEN(int)     cpumR3SysRegStrictInitChecks(void);
     574#  elif defined(VBOX_VMM_TARGET_X86)
    500575int                 cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs);
    501576void                cpumR3InitVmxGuestFeaturesAndMsrs(PVM pVM, PCFGMNODE pCpumCfg, PCVMXMSRS pHostVmxMsrs,
    502577                                                      PVMXMSRS pGuestVmxMsrs);
    503578void                cpumR3CpuIdRing3InitDone(PVM pVM);
     579#  endif
    504580void                cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM);
    505 int                 cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pGuestMsrs);
     581#  ifdef VBOX_VMM_TARGET_X86
     582int                 cpumR3LoadCpuIdX86(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pGuestMsrs);
    506583int                 cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
     584#  elif defined(VBOX_VMM_TARGET_ARMV8)
     585int                 cpumR3LoadCpuIdArmV8(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion);
     586DECLCALLBACK(void)  cpumR3CpuFeatInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
     587#  endif
    507588DECLCALLBACK(void)  cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    508589
    509590int                 cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo);
     591#  ifdef VBOX_VMM_TARGET_X86
    510592int                 cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
    511593int                 cpumR3MsrReconcileWithCpuId(PVM pVM);
     
    514596int                 cpumR3MsrStrictInitChecks(void);
    515597PCPUMMSRRANGE       cpumLookupMsrRange(PVM pVM, uint32_t idMsr);
    516 # endif
    517 
    518 # ifdef IN_RC
    519 DECLASM(int)        cpumHandleLazyFPUAsm(PCPUMCPU pCPUM);
    520 # endif
     598#  endif
     599# endif /* IN_RING3 */
    521600
    522601# ifdef IN_RING0
     602#  if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
    523603DECLASM(int)        cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM);
    524604DECLASM(void)       cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
    525 if ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
     605 if ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    526606DECLASM(void)       cpumR0RestoreHostFPUState(PCPUMCPU pCPUM);
     607#   endif
    527608#  endif
    528609# endif
    529610
    530611# if defined(IN_RC) || defined(IN_RING0)
     612#  if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
    531613DECLASM(int)        cpumRZSaveHostFPUState(PCPUMCPU pCPUM);
    532614DECLASM(void)       cpumRZSaveGuestFpuState(PCPUMCPU pCPUM, bool fLeaveFpuAccessible);
    533615DECLASM(void)       cpumRZSaveGuestSseRegisters(PCPUMCPU pCPUM);
    534616DECLASM(void)       cpumRZSaveGuestAvxRegisters(PCPUMCPU pCPUM);
     617#  endif
    535618# endif
    536619
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r107639 r107650  
    8080
    8181struc CPUM
    82     ;...
    83     .fHostUseFlags              resd    1
     82    .GuestFeatures              resb    64
     83    .HostFeatures               resb    64
    8484
    8585    .u8PortableCpuIdLevel       resb    1
     
    8888    .fMtrrWrite                 resb    1
    8989
     90    .fHostUseFlags              resd    1
     91
    9092    alignb 8
    9193    .fXStateGuestMask           resq    1
    9294    .fXStateHostMask            resq    1
    93 
    94     alignb 64
    95     .HostFeatures               resb    48
    96     .GuestFeatures              resb    48
     95    .fReservedRFlagsCookie      resq    1
     96
     97    alignb 8
    9798    .GuestInfo                  resb    CPUMINFO_size
    9899
     
    110111    .cMsrReadsRaiseGp           resq    1
    111112    .cMsrReadsUnknown           resq    1
     113
     114    .fHostMxCsrMask             resd    1
     115     alignb 8
    112116endstruc
    113117
     
    289293
    290294    ;
    291     ; Other stuff.
    292     ;
    293     .hNestedVmxPreemptTimer     resq    1
    294 
    295     .fUseFlags                  resd    1
    296     .fChanged                   resd    1
    297     .u32RetCode                 resd    1
    298     .fCpuIdApicFeatureVisible   resb    1
    299 
    300     ;
    301295    ; Host context state
    302296    ;
     
    368362    .Host.xcr0                  resq    1
    369363    .Host.fXStateMask           resq    1
     364    alignb 64
     365
     366    ;
     367    ; Other stuff.
     368    ;
     369    .fUseFlags                  resd    1
     370    .fChanged                   resd    1
     371    alignb 8
     372    .hNestedVmxPreemptTimer     resq    1
     373    .fCpuIdApicFeatureVisible   resb    1
    370374
    371375    ;
    372376    ; Hypervisor Context.
    373377    ;
    374     alignb 64
     378    alignb 8
    375379    .Hyper                      resq    0
    376380    .Hyper.dr                   resq    8
    377381    .Hyper.cr3                  resq    1
    378     alignb 64
     382    .Hyper.au64Padding          resq    7
    379383
    380384%ifdef VBOX_WITH_CRASHDUMP_MAGIC
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r107220 r107650  
    5757    GEN_CHECK_OFF(CPUMCPU, fUseFlags);
    5858    GEN_CHECK_OFF(CPUMCPU, fChanged);
    59     GEN_CHECK_OFF(CPUMCPU, u32RetCode);
    6059    GEN_CHECK_OFF(CPUMCPU, fCpuIdApicFeatureVisible);
    6160
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r107178 r107650  
    283283#endif
    284284    CHECK_SIZE_ALIGNMENT(CPUMCTX, 64);
     285#ifdef RT_ARCH_AMD64
    285286    CHECK_SIZE_ALIGNMENT(CPUMHOSTCTX, 64);
     287#endif
    286288    CHECK_SIZE_ALIGNMENT(CPUMCTXMSRS, 64);
    287289
  • trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp

    r106061 r107650  
    45334533    PCPUMCPUIDLEAF  paLeaves;
    45344534    uint32_t        cLeaves;
    4535     int rc = CPUMCpuIdCollectLeavesX86(&paLeaves, &cLeaves);
     4535    int rc = CPUMCpuIdCollectLeavesFromX86Host(&paLeaves, &cLeaves);
    45364536    if (RT_FAILURE(rc))
    45374537        return RTMsgErrorRc(rc, "CPUMR3CollectCpuIdInfo failed: %Rrc\n", rc);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette