VirtualBox

Changeset 107650 in vbox for trunk/src/VBox/VMM/VMMR3


Ignore:
Timestamp:
Jan 10, 2025 1:42:28 PM (4 months ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
166740
Message:

VMM/CPUM,++: Made the HostFeatures match the host when targeting x86 guests on arm64 hosts. Merged and deduplicated code targeting x86 & amd64. jiraref:VBP-1470

Location:
trunk/src/VBox/VMM/VMMR3
Files:
9 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR3/CPUM-armv8.cpp

    r107032 r107650  
    706706
    707707    /* Load CPUID and explode guest features. */
    708     return cpumR3LoadCpuId(pVM, pSSM, uVersion);
     708    return cpumR3LoadCpuIdArmV8(pVM, pSSM, uVersion);
    709709}
    710710
     
    11001100}
    11011101
    1102 
     1102#if 0 /* nobody is using these atm, they are for AMD64/darwin only */
    11031103/**
    11041104 * Marks the guest debug state as active.
     
    11311131    ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
    11321132}
     1133#endif
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r107220 r107650  
    220220static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    221221static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
     222#ifdef RT_ARCH_AMD64
    222223static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
     224#endif
    223225
    224226
     
    226228*   Global Variables                                                                                                             *
    227229*********************************************************************************************************************************/
    228 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    229230/** Host CPU features. */
    230231DECL_HIDDEN_DATA(CPUHOSTFEATURES) g_CpumHostFeatures;
    231 #endif
    232232
    233233/** Saved state field descriptors for CPUMCTX. */
     
    12111211{
    12121212    RT_NOREF(pszArgs);
    1213     PCCPUMFEATURES pHostFeatures  = &pVM->cpum.s.HostFeatures;
     1213#ifdef RT_ARCH_AMD64
     1214    PCCPUMFEATURES pHostFeatures  = &pVM->cpum.s.HostFeatures.s;
     1215#else
     1216    PCCPUMFEATURES pHostFeatures  = &pVM->cpum.s.GuestFeatures;
     1217#endif
    12141218    PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
    12151219    if (   pHostFeatures->enmCpuVendor == CPUMCPUVENDOR_INTEL
     
    12171221        || pHostFeatures->enmCpuVendor == CPUMCPUVENDOR_SHANGHAI)
    12181222    {
    1219 #define VMXFEATDUMP(a_szDesc, a_Var) \
     1223#ifdef RT_ARCH_AMD64
     1224# define VMXFEATDUMP(a_szDesc, a_Var) \
    12201225        pHlp->pfnPrintf(pHlp, "  %s = %u (%u)\n", a_szDesc, pGuestFeatures->a_Var, pHostFeatures->a_Var)
     1226#else
     1227# define VMXFEATDUMP(a_szDesc, a_Var) \
     1228        pHlp->pfnPrintf(pHlp, "  %s = %u\n", a_szDesc, pGuestFeatures->a_Var)
     1229#endif
    12211230
    12221231        pHlp->pfnPrintf(pHlp, "Nested hardware virtualization - VMX features\n");
     1232#ifdef RT_ARCH_AMD64
    12231233        pHlp->pfnPrintf(pHlp, "  Mnemonic - Description                                  = guest (host)\n");
     1234#else
     1235        pHlp->pfnPrintf(pHlp, "  Mnemonic - Description                                  = guest\n");
     1236#endif
    12241237        VMXFEATDUMP("VMX - Virtual-Machine Extensions                       ", fVmx);
    12251238        /* Basic. */
     
    18451858        if (!VM_IS_HM_ENABLED(pVM) && !VM_IS_EXEC_ENGINE_IEM(pVM))
    18461859            pszWhy = "execution engine is neither HM nor IEM";
     1860#ifdef RT_ARCH_AMD64
    18471861        else if (VM_IS_HM_ENABLED(pVM) && !HMIsNestedPagingActive(pVM))
    18481862            pszWhy = "nested paging is not enabled for the VM or it is not supported by the host";
    1849         else if (VM_IS_HM_ENABLED(pVM) && !pVM->cpum.s.HostFeatures.fNoExecute)
     1863        else if (VM_IS_HM_ENABLED(pVM) && !pVM->cpum.s.HostFeatures.s.fNoExecute)
    18501864            pszWhy = "NX is not available on the host";
     1865#endif
    18511866        if (pszWhy)
    18521867        {
     
    19581973     * by the hardware, hence we merge our emulated features with the host features below.
    19591974     */
    1960     PCCPUMFEATURES pBaseFeat  = cpumR3IsHwAssistNstGstExecAllowed(pVM) ? &pVM->cpum.s.HostFeatures : &EmuFeat;
    1961     PCPUMFEATURES  pGuestFeat = &pVM->cpum.s.GuestFeatures;
     1975#ifdef RT_ARCH_AMD64
     1976    PCCPUMFEATURES const pBaseFeat  = cpumR3IsHwAssistNstGstExecAllowed(pVM) ? &pVM->cpum.s.HostFeatures.s : &EmuFeat;
     1977#else
     1978    PCCPUMFEATURES const pBaseFeat  = &EmuFeat;
     1979#endif
     1980    PCPUMFEATURES const  pGuestFeat = &pVM->cpum.s.GuestFeatures;
    19621981    Assert(pBaseFeat->fVmx);
    19631982#define CPUMVMX_SET_GST_FEAT(a_Feat) \
     
    21862205    AssertCompileSizeAlignment(CPUMCTX, 64);
    21872206    AssertCompileSizeAlignment(CPUMCTXMSRS, 64);
     2207#ifdef RT_ARCH_AMD64
    21882208    AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
     2209#endif
    21892210    AssertCompileMemberAlignment(VM, cpum, 64);
    21902211    AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
     
    22122233    AssertLogRelRCReturn(rc, rc);
    22132234
     2235    /* Use the host features detected by CPUMR0ModuleInit if available. */
     2236    if (pVM->cpum.s.HostFeatures.Common.enmCpuVendor != CPUMCPUVENDOR_INVALID)
     2237        g_CpumHostFeatures.s = pVM->cpum.s.HostFeatures.s;
     2238    else
     2239    {
    22142240#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    2215     /* Use the host features detected by CPUMR0ModuleInit if available. */
    2216     if (pVM->cpum.s.HostFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID)
    2217         g_CpumHostFeatures.s = pVM->cpum.s.HostFeatures;
    2218     else
    2219     {
    22202241        PCPUMCPUIDLEAF  paLeaves;
    22212242        uint32_t        cLeaves;
    2222         rc = CPUMCpuIdCollectLeavesX86(&paLeaves, &cLeaves);
     2243        rc = CPUMCpuIdCollectLeavesFromX86Host(&paLeaves, &cLeaves);
    22232244        AssertLogRelRCReturn(rc, rc);
    22242245
     
    22262247        RTMemFree(paLeaves);
    22272248        AssertLogRelRCReturn(rc, rc);
    2228     }
    2229     pVM->cpum.s.HostFeatures               = g_CpumHostFeatures.s;
    2230     pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;
    22312249
    22322250#elif defined(RT_ARCH_ARM64)
     2251        CPUMARMV8IDREGS IdRegs = {0};
     2252        rc = CPUMCpuIdCollectIdRegistersFromArmV8Host(&IdRegs);
     2253        AssertLogRelRCReturn(rc, rc);
     2254
     2255        rc = cpumCpuIdExplodeFeaturesArmV8(&IdRegs, &g_CpumHostFeatures.s);
     2256        AssertLogRelRCReturn(rc, rc);
     2257
     2258#else
     2259# error port me
     2260#endif
     2261        AssertLogRelRCReturn(rc, rc);
     2262        pVM->cpum.s.HostFeatures.s = g_CpumHostFeatures.s;
     2263    }
     2264    pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.Common.enmCpuVendor; /* a bit bogus for mismatching host/guest */
     2265
     2266#if 0 /** @todo fix */
    22332267    /** @todo we shouldn't be using the x86/AMD64 CPUMFEATURES for HostFeatures,
    22342268     *        but it's too much work to fix that now.  So, instead we just set
     
    23002334     */
    23012335#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    2302     if (!pVM->cpum.s.HostFeatures.fFxSaveRstor)
     2336    if (!pVM->cpum.s.HostFeatures.s.fFxSaveRstor)
    23032337        return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support the FXSAVE/FXRSTOR instruction.");
    2304     if (!pVM->cpum.s.HostFeatures.fMmx)
     2338    if (!pVM->cpum.s.HostFeatures.s.fMmx)
    23052339        return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support MMX.");
    2306     if (!pVM->cpum.s.HostFeatures.fTsc)
     2340    if (!pVM->cpum.s.HostFeatures.s.fTsc)
    23072341        return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support RDTSC.");
    23082342#endif
     
    23142348    uint64_t fXStateHostMask = 0;
    23152349#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    2316     if (   pVM->cpum.s.HostFeatures.fXSaveRstor
    2317         && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor)
     2350    if (   pVM->cpum.s.HostFeatures.s.fXSaveRstor
     2351        && pVM->cpum.s.HostFeatures.s.fOpSysXSaveRstor)
    23182352    {
    23192353        fXStateHostMask  = fXcr0Host = ASMGetXcr0();
     
    23332367     * Initialize the host XSAVE/XRSTOR mask.
    23342368     */
    2335     uint32_t cbMaxXState = pVM->cpum.s.HostFeatures.cbMaxExtendedState;
     2369#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
     2370    uint32_t cbMaxXState = pVM->cpum.s.HostFeatures.s.cbMaxExtendedState;
    23362371    cbMaxXState = RT_ALIGN(cbMaxXState, 128);
    2337     AssertLogRelReturn(   pVM->cpum.s.HostFeatures.cbMaxExtendedState >= sizeof(X86FXSTATE)
    2338                        && pVM->cpum.s.HostFeatures.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Host.abXState)
    2339                        && pVM->cpum.s.HostFeatures.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Guest.abXState)
     2372    AssertLogRelReturn(   pVM->cpum.s.HostFeatures.s.cbMaxExtendedState >= sizeof(X86FXSTATE)
     2373                       && pVM->cpum.s.HostFeatures.s.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Host.abXState)
     2374                       && pVM->cpum.s.HostFeatures.s.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Guest.abXState)
    23402375                       , VERR_CPUM_IPE_2);
     2376#endif
    23412377
    23422378    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    23432379    {
    23442380        PVMCPU pVCpu = pVM->apCpusR3[i];
    2345 
     2381        RT_NOREF(pVCpu);
     2382
     2383#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    23462384        pVCpu->cpum.s.Host.fXStateMask       = fXStateHostMask;
     2385#endif
     2386#ifdef VBOX_VMM_TARGET_X86
    23472387        pVCpu->cpum.s.hNestedVmxPreemptTimer = NIL_TMTIMERHANDLE;
     2388#endif
    23482389    }
    23492390
     
    23692410    DBGFR3InfoRegisterInternalEx(pVM, "cpumhyper",        "Displays the hypervisor cpu state.",
    23702411                                 &cpumR3InfoHyper, DBGFINFO_FLAGS_ALL_EMTS);
     2412#ifdef RT_ARCH_AMD64
    23712413    DBGFR3InfoRegisterInternalEx(pVM, "cpumhost",         "Displays the host cpu state.",
    23722414                                 &cpumR3InfoHost, DBGFINFO_FLAGS_ALL_EMTS);
     2415#endif
    23732416    DBGFR3InfoRegisterInternalEx(pVM, "cpumguestinstr",   "Displays the current guest instruction.",
    23742417                                 &cpumR3InfoGuestInstr, DBGFINFO_FLAGS_ALL_EMTS);
     
    25732616
    25742617    pCtx->aXcr[0]                   = XSAVE_C_X87;
    2575     if (pVM->cpum.s.HostFeatures.cbMaxExtendedState >= RT_UOFFSETOF(X86XSAVEAREA, Hdr))
     2618#ifdef RT_ARCH_AMD64 /** @todo x86-on-ARM64: recheck this! */
     2619    if (pVM->cpum.s.HostFeatures.s.cbMaxExtendedState >= RT_UOFFSETOF(X86XSAVEAREA, Hdr))
     2620#endif
    25762621    {
    25772622        /* The entire FXSAVE state needs loading when we switch to XSAVE/XRSTOR
     
    32363281
    32373282        /* Load CPUID and explode guest features. */
    3238         rc = cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs);
     3283        rc = cpumR3LoadCpuIdX86(pVM, pSSM, uVersion, &GuestMsrs);
    32393284        if (fVmxGstFeat)
    32403285        {
     
    43804425    cpumR3InfoGuestHwvirt(pVM, pHlp, pszArgs);
    43814426    cpumR3InfoHyper(pVM, pHlp, pszArgs);
     4427#ifdef RT_ARCH_AMD64
    43824428    cpumR3InfoHost(pVM, pHlp, pszArgs);
     4429#endif
    43834430}
    43844431
     
    50485095
    50495096
     5097#ifdef RT_ARCH_AMD64
    50505098/**
    50515099 * Display the host cpu state.
     
    51105158        pCtx->FSbase, pCtx->GSbase, pCtx->efer);
    51115159}
     5160#endif /* RT_ARCH_AMD64 */
     5161
    51125162
    51135163/**
     
    53975447    LogRel(("******************** End of CPUID dump **********************\n"));
    53985448
     5449#ifdef RT_ARCH_AMD64
    53995450    /*
    54005451     * Log VT-x extended features.
     
    54035454     * to do here for SVM.
    54045455     */
    5405     if (pVM->cpum.s.HostFeatures.fVmx)
     5456    if (pVM->cpum.s.HostFeatures.s.fVmx)
    54065457    {
    54075458        LogRel(("*********************** VT-x features ***********************\n"));
     
    54105461        LogRel(("******************* End of VT-x features ********************\n"));
    54115462    }
     5463#endif
    54125464
    54135465    /*
  • trunk/src/VBox/VMM/VMMR3/CPUMDbg.cpp

    r107113 r107650  
    12761276 * @param   pVM                 The cross context VM structure.
    12771277 */
    1278 int cpumR3DbgInit(PVM pVM)
     1278DECLHIDDEN(int) cpumR3DbgInit(PVM pVM)
    12791279{
    12801280    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId-armv8.cpp

    r106061 r107650  
    9393/** Pointer to CPUID config (from CFGM). */
    9494typedef CPUMCPUIDCONFIG *PCPUMCPUIDCONFIG;
    95 
    96 
    97 /**
    98  * Explode the CPU features from the given ID registers.
    99  *
    100  * @returns VBox status code.
    101  * @param   pIdRegs             The ID registers to explode the features from.
    102  * @param   pFeatures           Where to store the features to.
    103  */
    104 static int cpumCpuIdExplodeFeatures(PCCPUMIDREGS pIdRegs, PCPUMFEATURES pFeatures)
    105 {
    106     uint64_t u64IdReg = pIdRegs->u64RegIdAa64Mmfr0El1;
    107 
    108     static uint8_t s_aPaRange[] = { 32, 36, 40, 42, 44, 48, 52 };
    109     AssertLogRelMsgReturn(RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE) < RT_ELEMENTS(s_aPaRange),
    110                           ("CPUM: Invalid/Unsupported PARange value in ID_AA64MMFR0_EL1 register: %u\n",
    111                           RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE)),
    112                           VERR_CPUM_IPE_1);
    113 
    114     pFeatures->cMaxPhysAddrWidth = s_aPaRange[RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE)];
    115     pFeatures->fTGran4K          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN4)  != ARMV8_ID_AA64MMFR0_EL1_TGRAN4_NOT_IMPL;
    116     pFeatures->fTGran16K         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN16) != ARMV8_ID_AA64MMFR0_EL1_TGRAN16_NOT_IMPL;
    117     pFeatures->fTGran64K         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN64) != ARMV8_ID_AA64MMFR0_EL1_TGRAN64_NOT_IMPL;
    118 
    119     /* ID_AA64ISAR0_EL1 features. */
    120     u64IdReg = pIdRegs->u64RegIdAa64Isar0El1;
    121     pFeatures->fAes              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_AES)     >= ARMV8_ID_AA64ISAR0_EL1_AES_SUPPORTED;
    122     pFeatures->fPmull            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_AES)     >= ARMV8_ID_AA64ISAR0_EL1_AES_SUPPORTED_PMULL;
    123     pFeatures->fSha1             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA1)    >= ARMV8_ID_AA64ISAR0_EL1_SHA1_SUPPORTED;
    124     pFeatures->fSha256           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA2)    >= ARMV8_ID_AA64ISAR0_EL1_SHA2_SUPPORTED_SHA256;
    125     pFeatures->fSha512           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA2)    >= ARMV8_ID_AA64ISAR0_EL1_SHA2_SUPPORTED_SHA256_SHA512;
    126     pFeatures->fCrc32            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_CRC32)   >= ARMV8_ID_AA64ISAR0_EL1_CRC32_SUPPORTED;
    127     pFeatures->fLse              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_ATOMIC)  >= ARMV8_ID_AA64ISAR0_EL1_ATOMIC_SUPPORTED;
    128     pFeatures->fTme              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TME)     >= ARMV8_ID_AA64ISAR0_EL1_TME_SUPPORTED;
    129     pFeatures->fRdm              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_RDM)     >= ARMV8_ID_AA64ISAR0_EL1_RDM_SUPPORTED;
    130     pFeatures->fSha3             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA3)    >= ARMV8_ID_AA64ISAR0_EL1_SHA3_SUPPORTED;
    131     pFeatures->fSm3              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SM3)     >= ARMV8_ID_AA64ISAR0_EL1_SM3_SUPPORTED;
    132     pFeatures->fSm4              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SM4)     >= ARMV8_ID_AA64ISAR0_EL1_SM4_SUPPORTED;
    133     pFeatures->fDotProd          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_DP)      >= ARMV8_ID_AA64ISAR0_EL1_DP_SUPPORTED;
    134     pFeatures->fFhm              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_FHM)     >= ARMV8_ID_AA64ISAR0_EL1_FHM_SUPPORTED;
    135     pFeatures->fFlagM            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TS)      >= ARMV8_ID_AA64ISAR0_EL1_TS_SUPPORTED;
    136     pFeatures->fFlagM2           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TS)      >= ARMV8_ID_AA64ISAR0_EL1_TS_SUPPORTED_2;
    137     pFeatures->fTlbios           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TLB)     >= ARMV8_ID_AA64ISAR0_EL1_TLB_SUPPORTED;
    138     pFeatures->fTlbirange        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TLB)     >= ARMV8_ID_AA64ISAR0_EL1_TLB_SUPPORTED_RANGE;
    139     pFeatures->fRng              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_RNDR)    >= ARMV8_ID_AA64ISAR0_EL1_RNDR_SUPPORTED;
    140 
    141     /* ID_AA64ISAR1_EL1 features. */
    142     u64IdReg = pIdRegs->u64RegIdAa64Isar1El1;
    143     pFeatures->fDpb              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DPB)     >= ARMV8_ID_AA64ISAR1_EL1_DPB_SUPPORTED;
    144     pFeatures->fDpb2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DPB)     >= ARMV8_ID_AA64ISAR1_EL1_DPB_SUPPORTED_2;
    145 
    146     /* PAuth using QARMA5. */
    147     pFeatures->fPacQarma5        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     != ARMV8_ID_AA64ISAR1_EL1_APA_NOT_IMPL;
    148     if (pFeatures->fPacQarma5)
    149     {
    150         pFeatures->fPAuth        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_PAUTH;
    151         pFeatures->fEpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_EPAC;
    152         pFeatures->fPAuth2       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_PAUTH2;
    153         pFeatures->fFpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_FPAC;
    154         pFeatures->fFpacCombine  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA)     >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_FPACCOMBINE;
    155     }
    156 
    157     /* PAuth using implementation defined algorithm. */
    158     pFeatures->fPacImp           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     != ARMV8_ID_AA64ISAR1_EL1_API_NOT_IMPL;
    159     if (pFeatures->fPacQarma5)
    160     {
    161         pFeatures->fPAuth        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_PAUTH;
    162         pFeatures->fEpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_EPAC;
    163         pFeatures->fPAuth2       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_PAUTH2;
    164         pFeatures->fFpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_FPAC;
    165         pFeatures->fFpacCombine  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API)     >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_FPACCOMBINE;
    166     }
    167 
    168     pFeatures->fJscvt            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FJCVTZS) >= ARMV8_ID_AA64ISAR1_EL1_FJCVTZS_SUPPORTED;
    169     pFeatures->fFcma             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FCMA)    >= ARMV8_ID_AA64ISAR1_EL1_FCMA_SUPPORTED;
    170     pFeatures->fLrcpc            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LRCPC)   >= ARMV8_ID_AA64ISAR1_EL1_LRCPC_SUPPORTED;
    171     pFeatures->fLrcpc2           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LRCPC)   >= ARMV8_ID_AA64ISAR1_EL1_LRCPC_SUPPORTED_2;
    172     pFeatures->fFrintts          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FRINTTS) >= ARMV8_ID_AA64ISAR1_EL1_FRINTTS_SUPPORTED;
    173     pFeatures->fSb               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_SB)      >= ARMV8_ID_AA64ISAR1_EL1_SB_SUPPORTED;
    174     pFeatures->fSpecres          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_SPECRES) >= ARMV8_ID_AA64ISAR1_EL1_SPECRES_SUPPORTED;
    175     pFeatures->fBf16             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_BF16)    >= ARMV8_ID_AA64ISAR1_EL1_BF16_SUPPORTED_BF16;
    176     pFeatures->fEbf16            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_BF16)    >= ARMV8_ID_AA64ISAR1_EL1_BF16_SUPPORTED_EBF16;
    177     pFeatures->fDgh              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DGH)     >= ARMV8_ID_AA64ISAR1_EL1_DGH_SUPPORTED;
    178     pFeatures->fI8mm             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_I8MM)    >= ARMV8_ID_AA64ISAR1_EL1_I8MM_SUPPORTED;
    179     pFeatures->fXs               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_XS)      >= ARMV8_ID_AA64ISAR1_EL1_XS_SUPPORTED;
    180     pFeatures->fLs64             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED;
    181     pFeatures->fLs64V            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED_V;
    182     pFeatures->fLs64Accdata      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED_ACCDATA;
    183 
    184     /* ID_AA64ISAR2_EL1 features. */
    185     u64IdReg = pIdRegs->u64RegIdAa64Isar2El1;
    186     pFeatures->fWfxt             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_WFXT)    >= ARMV8_ID_AA64ISAR2_EL1_WFXT_SUPPORTED;
    187     pFeatures->fRpres            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_RPRES)   >= ARMV8_ID_AA64ISAR2_EL1_RPRES_SUPPORTED;
    188 
    189     /* PAuth using QARMA3. */
    190     pFeatures->fPacQarma3        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_GPA3)    >= ARMV8_ID_AA64ISAR2_EL1_GPA3_SUPPORTED;
    191     pFeatures->fPacQarma3        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    != ARMV8_ID_AA64ISAR2_EL1_APA3_NOT_IMPL;
    192     if (pFeatures->fPacQarma5)
    193     {
    194         pFeatures->fPAuth        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_PAUTH;
    195         pFeatures->fEpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_EPAC;
    196         pFeatures->fPAuth2       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_PAUTH2;
    197         pFeatures->fFpac         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_FPAC;
    198         pFeatures->fFpacCombine  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3)    >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_FPACCOMBINE;
    199     }
    200 
    201     pFeatures->fMops             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_MOPS)    >= ARMV8_ID_AA64ISAR2_EL1_MOPS_SUPPORTED;
    202     pFeatures->fHbc              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_BC)      >= ARMV8_ID_AA64ISAR2_EL1_BC_SUPPORTED;
    203     pFeatures->fConstPacField    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_PACFRAC) >= ARMV8_ID_AA64ISAR2_EL1_PACFRAC_TRUE;
    204 
    205     /* ID_AA64PFR0_EL1 */
    206     u64IdReg = pIdRegs->u64RegIdAa64Pfr0El1;
    207     /* The FP and AdvSIMD field must have the same value. */
    208     Assert(RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP) == RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD));
    209     pFeatures->fFp               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP)       != ARMV8_ID_AA64PFR0_EL1_FP_NOT_IMPL;
    210     pFeatures->fFp16             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP)       == ARMV8_ID_AA64PFR0_EL1_FP_IMPL_SP_DP_HP;
    211     pFeatures->fAdvSimd          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD)  != ARMV8_ID_AA64PFR0_EL1_ADVSIMD_NOT_IMPL;
    212     pFeatures->fFp16             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD)  == ARMV8_ID_AA64PFR0_EL1_ADVSIMD_IMPL_SP_DP_HP;
    213     pFeatures->fRas              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RAS)      >= ARMV8_ID_AA64PFR0_EL1_RAS_SUPPORTED;
    214     pFeatures->fRasV1p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RAS)      >= ARMV8_ID_AA64PFR0_EL1_RAS_V1P1;
    215     pFeatures->fSve              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_SVE)      >= ARMV8_ID_AA64PFR0_EL1_SVE_SUPPORTED;
    216     pFeatures->fSecEl2           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_SEL2)     >= ARMV8_ID_AA64PFR0_EL1_SEL2_SUPPORTED;
    217     pFeatures->fAmuV1            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_AMU)      >= ARMV8_ID_AA64PFR0_EL1_AMU_V1;
    218     pFeatures->fAmuV1p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_AMU)      >= ARMV8_ID_AA64PFR0_EL1_AMU_V1P1;
    219     pFeatures->fDit              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_DIT)      >= ARMV8_ID_AA64PFR0_EL1_DIT_SUPPORTED;
    220     pFeatures->fRme              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RME)      >= ARMV8_ID_AA64PFR0_EL1_RME_SUPPORTED;
    221     pFeatures->fCsv2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_CSV2)     >= ARMV8_ID_AA64PFR0_EL1_CSV2_SUPPORTED;
    222     pFeatures->fCsv2v3           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_CSV2)     >= ARMV8_ID_AA64PFR0_EL1_CSV2_3_SUPPORTED;
    223 
    224     /* ID_AA64PFR1_EL1 */
    225     u64IdReg = pIdRegs->u64RegIdAa64Pfr1El1;
    226     pFeatures->fBti              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_BT)       >= ARMV8_ID_AA64PFR1_EL1_BT_SUPPORTED;
    227     pFeatures->fSsbs             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SSBS)     >= ARMV8_ID_AA64PFR1_EL1_SSBS_SUPPORTED;
    228     pFeatures->fSsbs2            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SSBS)     >= ARMV8_ID_AA64PFR1_EL1_SSBS_SUPPORTED_MSR_MRS;
    229     pFeatures->fMte              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_INSN_ONLY;
    230     pFeatures->fMte2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_FULL;
    231     pFeatures->fMte3             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_FULL_ASYM_TAG_FAULT_CHK;
    232     /** @todo RAS_frac, MPAM_frac, CSV2_frac. */
    233     pFeatures->fSme              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SME)      >= ARMV8_ID_AA64PFR1_EL1_SME_SUPPORTED;
    234     pFeatures->fSme2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SME)      >= ARMV8_ID_AA64PFR1_EL1_SME_SME2;
    235     pFeatures->fRngTrap          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_RNDRTRAP) >= ARMV8_ID_AA64PFR1_EL1_RNDRTRAP_SUPPORTED;
    236     pFeatures->fNmi              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_NMI)      >= ARMV8_ID_AA64PFR1_EL1_NMI_SUPPORTED;
    237 
    238     /* ID_AA64MMFR0_EL1 */
    239     u64IdReg = pIdRegs->u64RegIdAa64Mmfr0El1;
    240     pFeatures->fExs              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_EXS)     >= ARMV8_ID_AA64MMFR0_EL1_EXS_SUPPORTED;
    241     pFeatures->fFgt              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_FGT)     >= ARMV8_ID_AA64MMFR0_EL1_FGT_SUPPORTED;
    242     pFeatures->fEcv              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_ECV)     >= ARMV8_ID_AA64MMFR0_EL1_ECV_SUPPORTED;
    243 
    244     /* ID_AA64MMFR1_EL1 */
    245     u64IdReg = pIdRegs->u64RegIdAa64Mmfr1El1;
    246     pFeatures->fHafdbs           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HAFDBS)  >= ARMV8_ID_AA64MMFR1_EL1_HAFDBS_SUPPORTED;
    247     pFeatures->fVmid16           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_VMIDBITS) >= ARMV8_ID_AA64MMFR1_EL1_VMIDBITS_16;
    248     pFeatures->fVhe              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_VHE)     >= ARMV8_ID_AA64MMFR1_EL1_VHE_SUPPORTED;
    249     pFeatures->fHpds             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HPDS)    >= ARMV8_ID_AA64MMFR1_EL1_HPDS_SUPPORTED;
    250     pFeatures->fHpds2            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HPDS)    >= ARMV8_ID_AA64MMFR1_EL1_HPDS_SUPPORTED_2;
    251     pFeatures->fLor              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_LO)      >= ARMV8_ID_AA64MMFR1_EL1_LO_SUPPORTED;
    252     pFeatures->fPan              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)     >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED;
    253     pFeatures->fPan2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)     >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED_2;
    254     pFeatures->fPan3             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)     >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED_3;
    255     pFeatures->fXnx              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_XNX)     >= ARMV8_ID_AA64MMFR1_EL1_XNX_SUPPORTED;
    256     pFeatures->fTwed             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_TWED)    >= ARMV8_ID_AA64MMFR1_EL1_TWED_SUPPORTED;
    257     pFeatures->fEts2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_ETS)     >= ARMV8_ID_AA64MMFR1_EL1_ETS_SUPPORTED;
    258     pFeatures->fHcx              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HCX)     >= ARMV8_ID_AA64MMFR1_EL1_HCX_SUPPORTED;
    259     pFeatures->fAfp              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_AFP)     >= ARMV8_ID_AA64MMFR1_EL1_AFP_SUPPORTED;
    260     pFeatures->fNTlbpa           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_NTLBPA)  >= ARMV8_ID_AA64MMFR1_EL1_NTLBPA_INCLUDE_COHERENT_ONLY;
    261     pFeatures->fTidcp1           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_TIDCP1)  >= ARMV8_ID_AA64MMFR1_EL1_TIDCP1_SUPPORTED;
    262     pFeatures->fCmow             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_CMOW)    >= ARMV8_ID_AA64MMFR1_EL1_CMOW_SUPPORTED;
    263 
    264     /* ID_AA64MMFR2_EL1 */
    265     u64IdReg = pIdRegs->u64RegIdAa64Mmfr2El1;
    266     pFeatures->fTtcnp            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_CNP)     >= ARMV8_ID_AA64MMFR2_EL1_CNP_SUPPORTED;
    267     pFeatures->fUao              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_UAO)     >= ARMV8_ID_AA64MMFR2_EL1_UAO_SUPPORTED;
    268     pFeatures->fLsmaoc           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_LSM)     >= ARMV8_ID_AA64MMFR2_EL1_LSM_SUPPORTED;
    269     pFeatures->fIesb             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_IESB)    >= ARMV8_ID_AA64MMFR2_EL1_IESB_SUPPORTED;
    270     pFeatures->fLva              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_VARANGE) >= ARMV8_ID_AA64MMFR2_EL1_VARANGE_52BITS_64KB_GRAN;
    271     pFeatures->fCcidx            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_CCIDX)   >= ARMV8_ID_AA64MMFR2_EL1_CCIDX_64BIT;
    272     pFeatures->fNv               = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_NV)      >= ARMV8_ID_AA64MMFR2_EL1_NV_SUPPORTED;
    273     pFeatures->fNv2              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_NV)      >= ARMV8_ID_AA64MMFR2_EL1_NV_SUPPORTED_2;
    274     pFeatures->fTtst             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_ST)      >= ARMV8_ID_AA64MMFR2_EL1_ST_SUPPORTED;
    275     pFeatures->fLse2             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_AT)      >= ARMV8_ID_AA64MMFR2_EL1_AT_SUPPORTED;
    276     pFeatures->fIdst             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_IDS)     >= ARMV8_ID_AA64MMFR2_EL1_IDS_EC_18H;
    277     pFeatures->fS2Fwb            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_FWB)     >= ARMV8_ID_AA64MMFR2_EL1_FWB_SUPPORTED;
    278     pFeatures->fTtl              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_TTL)     >= ARMV8_ID_AA64MMFR2_EL1_TTL_SUPPORTED;
    279     pFeatures->fEvt              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_EVT)     >= ARMV8_ID_AA64MMFR2_EL1_EVT_SUPPORTED;
    280     pFeatures->fE0Pd             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_E0PD)    >= ARMV8_ID_AA64MMFR2_EL1_E0PD_SUPPORTED;
    281 
    282     /* ID_AA64DFR0_EL1 */
    283     u64IdReg = pIdRegs->u64RegIdAa64Dfr0El1;
    284     pFeatures->fDebugV8p1        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8_VHE;
    285     pFeatures->fDebugV8p2        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p2;
    286     pFeatures->fDebugV8p4        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p4;
    287     pFeatures->fDebugV8p8        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p8;
    288     pFeatures->fPmuV3            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3;
    289     pFeatures->fPmuV3p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P1;
    290     pFeatures->fPmuV3p4          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P4;
    291     pFeatures->fPmuV3p5          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P5;
    292     pFeatures->fPmuV3p7          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P7;
    293     pFeatures->fPmuV3p8          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)   >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P8;
    294     pFeatures->fSpe              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED;
    295     pFeatures->fSpeV1p1          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P1;
    296     pFeatures->fSpeV1p2          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P2;
    297     pFeatures->fSpeV1p3          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)   >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P3;
    298     pFeatures->fDoubleLock       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DOUBLELOCK)  == ARMV8_ID_AA64DFR0_EL1_DOUBLELOCK_SUPPORTED;
    299     pFeatures->fTrf              = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_TRACEFILT)   >= ARMV8_ID_AA64DFR0_EL1_TRACEFILT_SUPPORTED;
    300     pFeatures->fTrbe             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_TRACEBUFFER) >= ARMV8_ID_AA64DFR0_EL1_TRACEBUFFER_SUPPORTED;
    301     pFeatures->fMtPmu            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_MTPMU)    == ARMV8_ID_AA64DFR0_EL1_MTPMU_SUPPORTED;
    302     pFeatures->fBrbe             = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_BRBE)     >= ARMV8_ID_AA64DFR0_EL1_BRBE_SUPPORTED;
    303     pFeatures->fBrbeV1p1         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_BRBE)     >= ARMV8_ID_AA64DFR0_EL1_BRBE_SUPPORTED_V1P1;
    304     pFeatures->fHpmn0            = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_HPMN0)    >= ARMV8_ID_AA64DFR0_EL1_HPMN0_SUPPORTED;
    305 
    306     return VINF_SUCCESS;
    307 }
    30895
    30996
     
    348135    /* The CPUID entries we start with here isn't necessarily the ones of the host, so we
    349136       must consult HostFeatures when processing CPUMISAEXTCFG variables. */
    350     PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures;
     137    PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures.s;
    351138#define PASSTHRU_FEATURE(a_IdReg, enmConfig, fHostFeature, a_IdRegNm, a_IdRegValSup, a_IdRegValNotSup) \
    352139    (a_IdReg) =   ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) \
     
    588375 *       on the VM config.
    589376 */
    590 VMMR3DECL(int) CPUMR3PopulateFeaturesByIdRegisters(PVM pVM, PCCPUMIDREGS pIdRegs)
     377VMMR3DECL(int) CPUMR3PopulateFeaturesByIdRegisters(PVM pVM, PCCPUMARMV8IDREGS pIdRegs)
    591378{
    592379    /* Set the host features from the given ID registers. */
    593     int rc = cpumCpuIdExplodeFeatures(pIdRegs, &g_CpumHostFeatures.s);
     380    int rc = cpumCpuIdExplodeFeaturesArmV8(pIdRegs, &g_CpumHostFeatures.s);
    594381    AssertRCReturn(rc, rc);
    595382
    596     pVM->cpum.s.HostFeatures               = g_CpumHostFeatures.s;
    597     pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;
     383    pVM->cpum.s.HostFeatures.s             = g_CpumHostFeatures.s;
     384    pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.Common.enmCpuVendor;
    598385    pVM->cpum.s.HostIdRegs                 = *pIdRegs;
    599386    pVM->cpum.s.GuestIdRegs                = *pIdRegs;
     
    631418     */
    632419    if (RT_SUCCESS(rc))
    633         rc = cpumCpuIdExplodeFeatures(pIdRegs, &pCpum->GuestFeatures);
     420        rc = cpumCpuIdExplodeFeaturesArmV8(pIdRegs, &pCpum->GuestFeatures);
    634421
    635422    /*
     
    650437 * @param   ppIdRegs            Where to store the pointer to the guest ID register struct.
    651438 */
    652 VMMR3_INT_DECL(int) CPUMR3QueryGuestIdRegs(PVM pVM, PCCPUMIDREGS *ppIdRegs)
     439VMMR3_INT_DECL(int) CPUMR3QueryGuestIdRegs(PVM pVM, PCCPUMARMV8IDREGS *ppIdRegs)
    653440{
    654441    AssertPtrReturn(ppIdRegs, VERR_INVALID_POINTER);
     
    668455 *
    669456 */
    670 /** Saved state field descriptors for CPUMIDREGS. */
    671 static const SSMFIELD g_aCpumIdRegsFields[] =
    672 {
    673     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Pfr0El1),
    674     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Pfr1El1),
    675     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Dfr0El1),
    676     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Dfr1El1),
    677     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Afr0El1),
    678     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Afr1El1),
    679     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Isar0El1),
    680     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Isar1El1),
    681     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Isar2El1),
    682     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Mmfr0El1),
    683     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Mmfr1El1),
    684     SSMFIELD_ENTRY(CPUMIDREGS, u64RegIdAa64Mmfr2El1),
    685     SSMFIELD_ENTRY(CPUMIDREGS, u64RegClidrEl1),
    686     SSMFIELD_ENTRY(CPUMIDREGS, u64RegCtrEl0),
    687     SSMFIELD_ENTRY(CPUMIDREGS, u64RegDczidEl0),
     457/** Saved state field descriptors for CPUMARMV8IDREGS. */
     458static const SSMFIELD g_aCpumArmV8IdRegsFields[] =
     459{
     460    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Pfr0El1),
     461    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Pfr1El1),
     462    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Dfr0El1),
     463    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Dfr1El1),
     464    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Afr0El1),
     465    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Afr1El1),
     466    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Isar0El1),
     467    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Isar1El1),
     468    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Isar2El1),
     469    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Mmfr0El1),
     470    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Mmfr1El1),
     471    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Mmfr2El1),
     472    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegClidrEl1),
     473    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegCtrEl0),
     474    SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegDczidEl0),
    688475    SSMFIELD_ENTRY_TERM()
    689476};
     
    701488     * Save all the CPU ID leaves.
    702489     */
    703     SSMR3PutStructEx(pSSM, &pVM->cpum.s.GuestIdRegs, sizeof(pVM->cpum.s.GuestIdRegs), 0, g_aCpumIdRegsFields, NULL);
     490    SSMR3PutStructEx(pSSM, &pVM->cpum.s.GuestIdRegs, sizeof(pVM->cpum.s.GuestIdRegs), 0, g_aCpumArmV8IdRegsFields, NULL);
    704491}
    705492
     
    714501 * @param   pGuestIdRegs        The guest ID register as loaded from the saved state.
    715502 */
    716 static int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMIDREGS pGuestIdRegs)
     503static int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMARMV8IDREGS pGuestIdRegs)
    717504{
    718505    /*
     
    926713
    927714/**
    928  * Loads the CPU ID leaves saved by pass 0.
     715 * Loads the CPU ID leaves saved by pass 0, ARMv8 targets.
    929716 *
    930717 * @returns VBox status code.
     
    933720 * @param   uVersion            The format version.
    934721 */
    935 int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
    936 {
    937     CPUMIDREGS GuestIdRegs;
    938     int rc = SSMR3GetStructEx(pSSM, &GuestIdRegs, sizeof(GuestIdRegs), 0, g_aCpumIdRegsFields, NULL);
     722int cpumR3LoadCpuIdArmV8(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
     723{
     724    CPUMARMV8IDREGS GuestIdRegs;
     725    int rc = SSMR3GetStructEx(pSSM, &GuestIdRegs, sizeof(GuestIdRegs), 0, g_aCpumArmV8IdRegsFields, NULL);
    939726    AssertRCReturn(rc, rc);
    940727
     
    14341221    do { \
    14351222        if (fVerbose) \
    1436             pHlp->pfnPrintf(pHlp, "  %*s = %u (%u)\n", 41, #a_FeatNm, pVM->cpum.s.GuestFeatures.a_Flag, pVM->cpum.s.HostFeatures.a_Flag); \
     1223            pHlp->pfnPrintf(pHlp, "  %*s = %u (%u)\n", 41, #a_FeatNm, pVM->cpum.s.GuestFeatures.a_Flag, pVM->cpum.s.HostFeatures.s.a_Flag); \
    14371224        else \
    14381225            pHlp->pfnPrintf(pHlp, "  %*s = %u\n", 41, #a_FeatNm, pVM->cpum.s.GuestFeatures.a_Flag); \
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r107570 r107650  
    13351335    /* The CPUID entries we start with here isn't necessarily the ones of the host, so we
    13361336       must consult HostFeatures when processing CPUMISAEXTCFG variables. */
    1337     PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures;
     1337#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
     1338    PCCPUMFEATURES const pHstFeat = &pCpum->HostFeatures.s;
     1339#else
     1340    PCCPUMFEATURES const pHstFeat = &pCpum->GuestFeatures;
     1341#endif
    13381342#define PASSTHRU_FEATURE(enmConfig, fHostFeature, fConst) \
    13391343    ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) ? (fConst) : 0)
    13401344#define PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, fAndExpr, fConst) \
    13411345    ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) && (fAndExpr) ? (fConst) : 0)
     1346#define PASSTHRU_FEATURE_NOT_IEM(enmConfig, fHostFeature, fConst) \
     1347    PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, !VM_IS_EXEC_ENGINE_IEM(pVM), fConst)
    13421348#define PASSTHRU_FEATURE_TODO(enmConfig, fConst) ((enmConfig) ? (fConst) : 0)
    13431349
     
    14061412                           //| X86_CPUID_FEATURE_ECX_TPRUPDATE
    14071413                           //| X86_CPUID_FEATURE_ECX_PDCM  - not implemented yet.
    1408                            | PASSTHRU_FEATURE(pConfig->enmPcid, pHstFeat->fPcid, X86_CPUID_FEATURE_ECX_PCID)
     1414                           | PASSTHRU_FEATURE_NOT_IEM(pConfig->enmPcid, pHstFeat->fPcid, X86_CPUID_FEATURE_ECX_PCID)
    14091415                           //| X86_CPUID_FEATURE_ECX_DCA   - not implemented yet.
    14101416                           | PASSTHRU_FEATURE(pConfig->enmSse41, pHstFeat->fSse41, X86_CPUID_FEATURE_ECX_SSE4_1)
     
    18661872                               | X86_CPUID_STEXT_FEATURE_EBX_BMI2
    18671873                               //| X86_CPUID_STEXT_FEATURE_EBX_ERMS              RT_BIT(9)
    1868                                | PASSTHRU_FEATURE(pConfig->enmInvpcid, pHstFeat->fInvpcid, X86_CPUID_STEXT_FEATURE_EBX_INVPCID)
     1874                               | PASSTHRU_FEATURE_NOT_IEM(pConfig->enmInvpcid, pHstFeat->fInvpcid, X86_CPUID_STEXT_FEATURE_EBX_INVPCID)
    18691875                               //| X86_CPUID_STEXT_FEATURE_EBX_RTM               RT_BIT(11)
    18701876                               //| X86_CPUID_STEXT_FEATURE_EBX_PQM               RT_BIT(12)
     
    27912797    AssertLogRelRCReturn(rc, rc);
    27922798
     2799#ifdef RT_ARCH_AMD64 /** @todo next VT-x/AMD-V on non-AMD64 hosts */
    27932800    bool fQueryNestedHwvirt = false
    27942801#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    2795                            || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD
    2796                            || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON
     2802                           || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_AMD
     2803                           || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_HYGON
    27972804#endif
    27982805#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2799                            || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL
    2800                            || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_VIA
     2806                           || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
     2807                           || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_VIA
    28012808#endif
    28022809                           ;
     
    28232830        }
    28242831    }
     2832#endif /** @todo */
    28252833
    28262834    /*
     
    28982906    AssertLogRelRCReturn(rc, rc);
    28992907
    2900     bool const fMayHaveXSave = pVM->cpum.s.HostFeatures.fXSaveRstor
    2901                             && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor
     2908#ifdef RT_ARCH_AMD64
     2909    bool const fMayHaveXSave = pVM->cpum.s.HostFeatures.s.fXSaveRstor
     2910                            && pVM->cpum.s.HostFeatures.s.fOpSysXSaveRstor
    29022911                            && (  VM_IS_NEM_ENABLED(pVM)
    29032912                                ? NEMHCGetFeatures(pVM) & NEM_FEAT_F_XSAVE_XRSTOR
     
    29062915                                : fNestedPagingAndFullGuestExec);
    29072916    uint64_t const fXStateHostMask = pVM->cpum.s.fXStateHostMask;
     2917#else
     2918    bool const     fMayHaveXSave   = true;
     2919    uint64_t const fXStateHostMask = XSAVE_C_YMM | XSAVE_C_SSE | XSAVE_C_X87;
     2920#endif
    29082921
    29092922    /** @cfgm{/CPUM/IsaExts/XSAVE, boolean, depends}
     
    32893302{
    32903303#ifdef RT_ARCH_AMD64
    3291     Assert(pVM->cpum.s.HostFeatures.fMtrr);
     3304    Assert(pVM->cpum.s.HostFeatures.s.fMtrr);
    32923305#endif
    32933306
     
    36953708#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    36963709# define CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) \
    3697     if (!pVM->cpum.s.HostFeatures. a_fFeature) \
     3710    if (!pVM->cpum.s.HostFeatures.s. a_fFeature) \
    36983711    { \
    36993712        LogRel(("CPUM: WARNING! Can't turn on " a_szFeature " when the host doesn't support it!\n")); \
     
    38753888            /* Valid for both Intel and AMD. */
    38763889            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
    3877             pVM->cpum.s.HostFeatures.fRdTscP = 1;
     3890            pVM->cpum.s.GuestFeatures.fRdTscP = 1;
    38783891            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
    38793892            break;
     
    39003913#ifdef RT_ARCH_AMD64
    39013914                if (   !pLeaf
    3902                     || !(pVM->cpum.s.HostFeatures.fIbpb || pVM->cpum.s.HostFeatures.fIbrs))
     3915                    || !(pVM->cpum.s.HostFeatures.s.fIbpb || pVM->cpum.s.HostFeatures.s.fIbrs))
    39033916                {
    39043917                    LogRel(("CPUM: WARNING! Can't turn on Speculation Control when the host doesn't support it!\n"));
     
    39183931#ifdef RT_ARCH_AMD64
    39193932                /* We will only expose STIBP if IBRS is present to keep things simpler (simple is not an option). */
    3920                 if (pVM->cpum.s.HostFeatures.fIbrs)
     3933                if (pVM->cpum.s.HostFeatures.s.fIbrs)
    39213934#endif
    39223935                {
     
    39243937                    pVM->cpum.s.GuestFeatures.fIbrs = 1;
    39253938#ifdef RT_ARCH_AMD64
    3926                     if (pVM->cpum.s.HostFeatures.fStibp)
     3939                    if (pVM->cpum.s.HostFeatures.s.fStibp)
    39273940#endif
    39283941                    {
     
    39653978
    39663979#ifdef RT_ARCH_AMD64
    3967                 if (pVM->cpum.s.HostFeatures.fArchCap)
     3980                if (pVM->cpum.s.HostFeatures.s.fArchCap)
    39683981#endif
    39693982                {
     
    39843997
    39853998                    /* Advertise IBRS_ALL if present at this point... */
    3986                     if (pVM->cpum.s.HostFeatures.fArchCap & MSR_IA32_ARCH_CAP_F_IBRS_ALL)
     3999#ifdef RT_ARCH_AMD64
     4000                    if (pVM->cpum.s.HostFeatures.s.fArchCap & MSR_IA32_ARCH_CAP_F_IBRS_ALL)
     4001#endif
    39874002                        VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps |= MSR_IA32_ARCH_CAP_F_IBRS_ALL);
    39884003                }
     
    49564971
    49574972/**
    4958  * Loads the CPU ID leaves saved by pass 0.
     4973 * Loads the CPU ID leaves saved by pass 0, x86 targets.
    49594974 *
    49604975 * @returns VBox status code.
     
    49644979 * @param   pMsrs               The guest MSRs.
    49654980 */
    4966 int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs)
     4981int cpumR3LoadCpuIdX86(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs)
    49674982{
    49684983    AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);
  • trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp

    r106630 r107650  
    901901        if (RT_FAILURE(rc))
    902902            return rc;
    903         rc = CPUMCpuIdCollectLeavesX86(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
     903        rc = CPUMCpuIdCollectLeavesFromX86Host(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
    904904        if (RT_FAILURE(rc))
    905905            return rc;
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp

    r107316 r107650  
    572572} s_aIdRegs[] =
    573573{
    574     { HV_FEATURE_REG_ID_AA64DFR0_EL1,       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1)  },
    575     { HV_FEATURE_REG_ID_AA64DFR1_EL1,       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1)  },
    576     { HV_FEATURE_REG_ID_AA64ISAR0_EL1,      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },
    577     { HV_FEATURE_REG_ID_AA64ISAR1_EL1,      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },
    578     { HV_FEATURE_REG_ID_AA64MMFR0_EL1,      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },
    579     { HV_FEATURE_REG_ID_AA64MMFR1_EL1,      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },
    580     { HV_FEATURE_REG_ID_AA64MMFR2_EL1,      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },
    581     { HV_FEATURE_REG_ID_AA64PFR0_EL1,       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1)  },
    582     { HV_FEATURE_REG_ID_AA64PFR1_EL1,       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1)  },
    583     { HV_FEATURE_REG_CLIDR_EL1,             RT_UOFFSETOF(CPUMIDREGS, u64RegClidrEl1)       },
    584     { HV_FEATURE_REG_CTR_EL0,               RT_UOFFSETOF(CPUMIDREGS, u64RegCtrEl0)         },
    585     { HV_FEATURE_REG_DCZID_EL0,             RT_UOFFSETOF(CPUMIDREGS, u64RegDczidEl0)       }
     574    { HV_FEATURE_REG_ID_AA64DFR0_EL1,       RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr0El1)  },
     575    { HV_FEATURE_REG_ID_AA64DFR1_EL1,       RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr1El1)  },
     576    { HV_FEATURE_REG_ID_AA64ISAR0_EL1,      RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar0El1) },
     577    { HV_FEATURE_REG_ID_AA64ISAR1_EL1,      RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar1El1) },
     578    { HV_FEATURE_REG_ID_AA64MMFR0_EL1,      RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr0El1) },
     579    { HV_FEATURE_REG_ID_AA64MMFR1_EL1,      RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr1El1) },
     580    { HV_FEATURE_REG_ID_AA64MMFR2_EL1,      RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr2El1) },
     581    { HV_FEATURE_REG_ID_AA64PFR0_EL1,       RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr0El1)  },
     582    { HV_FEATURE_REG_ID_AA64PFR1_EL1,       RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr1El1)  },
     583    { HV_FEATURE_REG_CLIDR_EL1,             RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegClidrEl1)       },
     584    { HV_FEATURE_REG_CTR_EL0,               RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegCtrEl0)         },
     585    { HV_FEATURE_REG_DCZID_EL0,             RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegDczidEl0)       }
    586586};
    587587
     
    14501450
    14511451        /* Query ID registers and hand them to CPUM. */
    1452         CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
     1452        CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
    14531453        for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
    14541454        {
     
    25402540        } s_aSysIdRegs[] =
    25412541        {
    2542 #define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg,     RT_UOFFSETOF(CPUMIDREGS, a_CpumIdReg) }
     2542#define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg, RT_UOFFSETOF(CPUMARMV8IDREGS, a_CpumIdReg) }
    25432543            ID_SYS_REG_CREATE(ID_AA64DFR0_EL1,  u64RegIdAa64Dfr0El1),
    25442544            ID_SYS_REG_CREATE(ID_AA64DFR1_EL1,  u64RegIdAa64Dfr1El1),
     
    25532553        };
    25542554
    2555         PCCPUMIDREGS pIdRegsGst = NULL;
     2555        PCCPUMARMV8IDREGS pIdRegsGst = NULL;
    25562556        int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
    25572557        AssertRCReturn(rc, rc);
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux-armv8.cpp

    r107308 r107650  
    292292} s_aIdRegs[] =
    293293{
    294     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR0_EL1),       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1)  },
    295     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR1_EL1),       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1)  },
    296     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR0_EL1),      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },
    297     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR1_EL1),      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },
    298     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR0_EL1),      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },
    299     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR1_EL1),      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },
    300     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR2_EL1),      RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },
    301     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR0_EL1),       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1)  },
    302     { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR1_EL1),       RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1)  }
     294    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR0_EL1),   RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr0El1)  },
     295    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR1_EL1),   RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr1El1)  },
     296    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR0_EL1),  RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar0El1) },
     297    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR1_EL1),  RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar1El1) },
     298    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR0_EL1),  RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr0El1) },
     299    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR1_EL1),  RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr1El1) },
     300    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR2_EL1),  RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr2El1) },
     301    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR0_EL1),   RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr0El1)  },
     302    { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR1_EL1),   RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr1El1)  }
    303303};
    304304
     
    480480
    481481            /* Need to query the ID registers and populate CPUM. */
    482             CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
     482            CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
    483483            for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
    484484            {
  • trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp

    r107194 r107650  
    831831             * these are partition wide registers and need to be queried/set with WHV_ANY_VP.
    832832             */
    833             CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
     833            CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs);
    834834
    835835            WHV_REGISTER_NAME  aenmNames[10];
     
    870870
    871871            /* Apply any overrides to the partition. */
    872             PCCPUMIDREGS pIdRegsGst = NULL;
     872            PCCPUMARMV8IDREGS pIdRegsGst = NULL;
    873873            rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
    874874            AssertRCReturn(rc, rc);
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette