VirtualBox

Changeset 54714 in vbox


Timestamp: Mar 11, 2015 2:00:23 PM (10 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 98878
Message: PATM,CPUM: CPUID patch update.

Location: trunk
Files: 16 edited

  • trunk/include/VBox/vmm/cpum.h

    r54673 r54714  
    272272/**
    273273 * CPUID leaf.
     274 *
     275 * @remarks This structure is used by the patch manager and is therefore
     276 *          more or less set in stone.
    274277 */
    275278typedef struct CPUMCPUIDLEAF
     
    294297    uint32_t    fFlags;
    295298} CPUMCPUIDLEAF;
     299AssertCompileSize(CPUMCPUIDLEAF, 32);
    296300/** Pointer to a CPUID leaf. */
    297301typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF;
     
    309313/**
    310314 * Method used to deal with unknown CPUID leafs.
     315 * @remarks Used in patch code.
    311316 */
    312317typedef enum CPUMUKNOWNCPUID
     
    12711276
    12721277# if defined(VBOX_WITH_RAW_MODE) || defined(DOXYGEN_RUNNING)
    1273 /** @name APIs for Patch Manager CPUID legacy tables
     1278/** @name APIs for the CPUID raw-mode patch.
    12741279 * @{ */
    1275 VMMR3_INT_DECL(uint32_t)                CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM);
    1276 VMMR3_INT_DECL(uint32_t)                CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM);
    1277 VMMR3_INT_DECL(uint32_t)                CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM);
    1278 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))  CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM);
    1279 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))  CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM);
    1280 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))  CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM);
    1281 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))  CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM);
     1280VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))     CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM);
     1281VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayRCPtr(PVM pVM);
     1282VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(PVM pVM);
     1283VMMR3_INT_DECL(CPUMUKNOWNCPUID)            CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(PVM pVM);
     1284/* Legacy: */
     1285VMMR3_INT_DECL(uint32_t)                   CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM);
     1286VMMR3_INT_DECL(uint32_t)                   CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM);
     1287VMMR3_INT_DECL(uint32_t)                   CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM);
     1288VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))     CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM);
     1289VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))     CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM);
     1290VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID))     CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM);
    12821291/** @} */
    12831292# endif
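
Note: the new accessors above hand out raw-mode context (RC) addresses of the CPUID leaf array, its end, the default leaf and the unknown-leaf method, so the generated CPUID patch can resolve leaves itself. A minimal sketch of such a lookup over the 32-byte leaf records is below; the struct layout comes from the cpum.h hunk, while the matching rule and the helper name are illustrative assumptions, not VirtualBox code.

    #include <stdint.h>
    #include <stddef.h>

    /* Layout of one CPUID leaf as fixed by the hunk above (AssertCompileSize == 32). */
    typedef struct CPUMCPUIDLEAF
    {
        uint32_t uLeaf;
        uint32_t uSubLeaf;
        uint32_t fSubLeafMask;
        uint32_t uEax;
        uint32_t uEbx;
        uint32_t uEcx;
        uint32_t uEdx;
        uint32_t fFlags;
    } CPUMCPUIDLEAF;

    /* Linear lookup over the leaf array, mirroring what the generated patch code
     * could do with the addresses returned by CPUMR3GetGuestCpuIdPatmArrayRCPtr()
     * and CPUMR3GetGuestCpuIdPatmArrayEndRCPtr().  The matching rule is an
     * assumption for illustration only. */
    static const CPUMCPUIDLEAF *cpumSketchLookupLeaf(const CPUMCPUIDLEAF *paLeaves,
                                                     const CPUMCPUIDLEAF *pEnd,
                                                     uint32_t uLeaf, uint32_t uSubLeaf)
    {
        for (const CPUMCPUIDLEAF *p = paLeaves; p < pEnd; p++)
            if (   p->uLeaf == uLeaf
                && p->uSubLeaf == (uSubLeaf & p->fSubLeafMask))
                return p;
        return NULL; /* caller falls back to the default leaf / unknown-leaf method */
    }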
  • trunk/include/VBox/vmm/cpum.mac

    r43657 r54714  
    44
    55;
    6 ; Copyright (C) 2006-2012 Oracle Corporation
     6; Copyright (C) 2006-2015 Oracle Corporation
    77;
    88; This file is part of VirtualBox Open Source Edition (OSE), as
     
    2626%ifndef ___VBox_vmm_cpum_mac__
    2727%define ___VBox_vmm_cpum_mac__
     28
     29%include "iprt/asmdefs.mac"
     30
     31;;
     32; CPUID leaf.
     33; @remarks This structure is used by the patch manager and can only be extended
     34;          by adding to the end of it.
     35struc CPUMCPUIDLEAF
     36    .uLeaf              resd    1
     37    .uSubLeaf           resd    1
     38    .fSubLeafMask       resd    1
     39    .uEax               resd    1
     40    .uEbx               resd    1
     41    .uEcx               resd    1
     42    .uEdx               resd    1
     43    .fFlags             resd    1
     44endstruc
     45%define CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED RT_BIT_32(0)
     46
     47;;
     48; For the default CPUID leaf value.
     49; @remarks This is used by the patch manager and cannot be modified in any way.
     50struc CPUMCPUID
     51    .uEax               resd    1
     52    .uEbx               resd    1
     53    .uEcx               resd    1
     54    .uEdx               resd    1
     55endstruc
     56
     57
     58;; @name Method used to deal with unknown CPUID leafs.
     59;; @{
     60%define CPUMUKNOWNCPUID_DEFAULTS                1
     61%define CPUMUKNOWNCPUID_LAST_STD_LEAF           2
     62%define CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX  3
     63%define CPUMUKNOWNCPUID_PASSTHRU                4
     64;; @}
     65
    2866
    2967;;
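
Note: the NASM struc added here must stay byte-for-byte in sync with the C CPUMCPUIDLEAF in cpum.h, because the patch code addresses the fields by fixed offsets. A hedged sketch of compile-time checks that would catch a drift is below; it uses C11 _Static_assert rather than VirtualBox's AssertCompile* macros, and the only check shown in the real header is the 32-byte size assertion.

    #include <stdint.h>
    #include <stddef.h>

    /* Same layout as the CPUMCPUIDLEAF sketch after the cpum.h hunks. */
    typedef struct CPUMCPUIDLEAF
    {
        uint32_t uLeaf, uSubLeaf, fSubLeafMask, uEax, uEbx, uEcx, uEdx, fFlags;
    } CPUMCPUIDLEAF;

    _Static_assert(sizeof(CPUMCPUIDLEAF) == 32,           "patch code assumes 32-byte leaves");
    _Static_assert(offsetof(CPUMCPUIDLEAF, uEax)   == 12, "matches .uEax   resd offset in cpum.mac");
    _Static_assert(offsetof(CPUMCPUIDLEAF, fFlags) == 28, "matches .fFlags resd offset in cpum.mac");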
  • trunk/include/VBox/vmm/cpumctx.h

    r48371 r54714  
    44
    55/*
    6  * Copyright (C) 2006-2012 Oracle Corporation
     6 * Copyright (C) 2006-2015 Oracle Corporation
    77 *
    88 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    468468typedef struct CPUMCPUID
    469469{
    470     uint32_t eax;
    471     uint32_t ebx;
    472     uint32_t ecx;
    473     uint32_t edx;
     470    uint32_t uEax;
     471    uint32_t uEbx;
     472    uint32_t uEcx;
     473    uint32_t uEdx;
    474474} CPUMCPUID;
    475475/** Pointer to a CPUID leaf. */
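
Note: this hunk renames the CPUMCPUID register fields to the uEax/uEbx/uEcx/uEdx convention used elsewhere. For reference, the structure as it reads after the change, reconstructed from the hunk above:

    /* CPUMCPUID after this changeset (reconstructed from the hunk above). */
    typedef struct CPUMCPUID
    {
        uint32_t uEax;
        uint32_t uEbx;
        uint32_t uEcx;
        uint32_t uEdx;
    } CPUMCPUID;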
  • trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp

    r54674 r54714  
    13281328    PVM             pVM          = pVCpu->CTX_SUFF(pVM);
    13291329    uint64_t const  uOldEfer     = pVCpu->cpum.s.Guest.msrEFER;
    1330     uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0].eax >= 0x80000001
    1331                                  ? pVM->cpum.s.aGuestCpuIdPatmExt[1].edx
     1330    uint32_t const  fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax >= 0x80000001
     1331                                 ? pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx
    13321332                                 : 0;
    13331333    uint64_t        fMask        = 0;
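
Note: the hunk above derives fExtFeatures from the patched extended leaf 0x80000001 EDX (only when leaf 0x80000000 reports it exists), and the surrounding EFER write handler uses it to build the set of EFER bits the guest may touch. A hedged, self-contained sketch of that gating pattern follows; only the fExtFeatures computation is shown in the diff, so which EFER bits are gated on which CPUID bits is an assumption here. The bit values are taken from the CPUID/EFER specifications.

    #include <stdint.h>

    #define X86_CPUID_EXT_FEATURE_EDX_NX         (UINT32_C(1) << 20)
    #define X86_CPUID_EXT_FEATURE_EDX_LONG_MODE  (UINT32_C(1) << 29)
    #define MSR_K6_EFER_LME                      (UINT64_C(1) << 8)
    #define MSR_K6_EFER_NXE                      (UINT64_C(1) << 11)

    /* Sketch: derive a writable-EFER-bits mask from the patched extended CPUID
     * leaves, as the handler around the hunk above does. */
    static uint64_t cpumSketchEferWritableMask(uint32_t uExtLeaf0Eax, uint32_t uExtLeaf1Edx)
    {
        uint32_t const fExtFeatures = uExtLeaf0Eax >= 0x80000001 ? uExtLeaf1Edx : 0;
        uint64_t fMask = 0;
        if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX)
            fMask |= MSR_K6_EFER_NXE;
        if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
            fMask |= MSR_K6_EFER_LME;
        return fMask;
    }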
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r54674 r54714  
    11791179                    if (uSubLeaf < paLeaves[i].uSubLeaf)
    11801180                        while (   i > 0
    1181                                && uLeaf    == paLeaves[i].uLeaf
    1182                                && uSubLeaf  < paLeaves[i].uSubLeaf)
     1181                               && uLeaf    == paLeaves[i - 1].uLeaf
     1182                               && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
    11831183                            i--;
    11841184                    else
     
    12171217        pCpuId = &pVM->cpum.s.aGuestCpuIdPatmExt[iLeaf - UINT32_C(0x80000000)];
    12181218    else if (   iLeaf - UINT32_C(0x40000000) < 0x100   /** @todo Fix this later: Hyper-V says 0x400000FF is the last valid leaf. */
    1219              && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdPatmStd[1].ecx & X86_CPUID_FEATURE_ECX_HVP)) /* Only report if HVP bit set. */
     1219             && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdPatmStd[1].uEcx & X86_CPUID_FEATURE_ECX_HVP)) /* Only report if HVP bit set. */
    12201220    {
    12211221        PCPUMCPUIDLEAF pHyperLeaf = cpumCpuIdGetLeaf(pVM, iLeaf, 0 /* uSubLeaf */);
     
    12401240    uint32_t cCurrentCacheIndex = *pEcx;
    12411241
    1242     *pEax = pCpuId->eax;
    1243     *pEbx = pCpuId->ebx;
    1244     *pEcx = pCpuId->ecx;
    1245     *pEdx = pCpuId->edx;
     1242    *pEax = pCpuId->uEax;
     1243    *pEbx = pCpuId->uEbx;
     1244    *pEcx = pCpuId->uEcx;
     1245    *pEdx = pCpuId->uEdx;
    12461246
    12471247    if (    iLeaf == 1)
     
    13281328            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
    13291329            if (pLeaf)
    1330                 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
     1330                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
    13311331
    13321332            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
    13331333            if (   pLeaf
    13341334                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
    1335                 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
     1335                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
    13361336
    13371337            pVM->cpum.s.GuestFeatures.fApic = 1;
     
    13451345            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
    13461346            if (pLeaf)
    1347                 pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
     1347                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
    13481348            pVM->cpum.s.GuestFeatures.fX2Apic = 1;
    13491349            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
     
    13631363            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
    13641364            if (pLeaf)
    1365                 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
     1365                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
    13661366            pVM->cpum.s.GuestFeatures.fSysEnter = 1;
    13671367            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
     
    13901390
    13911391            /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
    1392             pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
     1392            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
    13931393            pVM->cpum.s.GuestFeatures.fSysCall = 1;
    13941394            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
     
    14081408            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
    14091409            if (pLeaf)
    1410                 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
     1410                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
    14111411
    14121412            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
    14131413            if (    pLeaf
    14141414                &&  pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
    1415                 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
     1415                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
    14161416
    14171417            pVM->cpum.s.GuestFeatures.fPae = 1;
     
    14331433
    14341434            /* Valid for both Intel and AMD. */
    1435             pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
     1435            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
    14361436            pVM->cpum.s.GuestFeatures.fLongMode = 1;
    14371437            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
     
    14521452
    14531453            /* Valid for both Intel and AMD. */
    1454             pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
     1454            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
    14551455            pVM->cpum.s.GuestFeatures.fNoExecute = 1;
    14561456            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
     
    14721472
    14731473            /* Valid for both Intel and AMD. */
    1474             pVM->cpum.s.aGuestCpuIdPatmExt[1].ecx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
     1474            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
    14751475            pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
    14761476            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
     
    14851485            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
    14861486            if (pLeaf)
    1487                 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
     1487                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
    14881488
    14891489            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
    14901490            if (   pLeaf
    14911491                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
    1492                 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
     1492                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
    14931493
    14941494            pVM->cpum.s.GuestFeatures.fPat = 1;
     
    15121512
    15131513            /* Valid for both Intel and AMD. */
    1514             pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
     1514            pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
    15151515            pVM->cpum.s.HostFeatures.fRdTscP = 1;
    15161516            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
     
    15231523            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
    15241524            if (pLeaf)
    1525                 pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
     1525                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;
    15261526            pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
    15271527            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
     
    15421542
    15431543            /* Valid for both Intel and AMD. */
    1544             pVM->cpum.s.aGuestCpuIdPatmStd[5].ecx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
     1544            pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
    15451545            pVM->cpum.s.GuestFeatures.fMWaitExtensions = 1;
    15461546            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled MWAIT Extensions.\n"));
     
    16071607            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
    16081608            if (pLeaf)
    1609                 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
     1609                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
    16101610
    16111611            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
    16121612            if (   pLeaf
    16131613                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
    1614                 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
     1614                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
    16151615
    16161616            pVM->cpum.s.GuestFeatures.fApic = 0;
     
    16211621            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
    16221622            if (pLeaf)
    1623                 pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
     1623                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
    16241624            pVM->cpum.s.GuestFeatures.fX2Apic = 0;
    16251625            Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
     
    16291629            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
    16301630            if (pLeaf)
    1631                 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
     1631                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
    16321632
    16331633            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
    16341634            if (   pLeaf
    16351635                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
    1636                 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
     1636                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
    16371637
    16381638            pVM->cpum.s.GuestFeatures.fPae = 0;
     
    16431643            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
    16441644            if (pLeaf)
    1645                 pVM->cpum.s.aGuestCpuIdPatmStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
     1645                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
    16461646
    16471647            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
    16481648            if (   pLeaf
    16491649                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
    1650                 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
     1650                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
    16511651
    16521652            pVM->cpum.s.GuestFeatures.fPat = 0;
     
    16571657            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
    16581658            if (pLeaf)
    1659                 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
     1659                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
    16601660            pVM->cpum.s.GuestFeatures.fLongMode = 0;
    16611661            break;
     
    16641664            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
    16651665            if (pLeaf)
    1666                 pVM->cpum.s.aGuestCpuIdPatmExt[1].ecx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
     1666                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
    16671667            pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
    16681668            break;
     
    16711671            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
    16721672            if (pLeaf)
    1673                 pVM->cpum.s.aGuestCpuIdPatmExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
     1673                pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
    16741674            pVM->cpum.s.GuestFeatures.fRdTscP = 0;
    16751675            Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
     
    16791679            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
    16801680            if (pLeaf)
    1681                 pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
     1681                pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
    16821682            pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
    16831683            break;
     
    16861686            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005), 0);
    16871687            if (pLeaf)
    1688                 pVM->cpum.s.aGuestCpuIdPatmStd[5].ecx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
     1688                pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);
    16891689            pVM->cpum.s.GuestFeatures.fMWaitExtensions = 0;
    16901690            Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n"));
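
Note: besides the field renames, the first hunk in this file fixes the sub-leaf walk-back in cpumCpuIdGetLeaf. The loop now tests paLeaves[i - 1], the entry it is about to step back to, and uses <= so it does not walk past an exact sub-leaf match; the old test against paLeaves[i] compared the entry it was already standing on. A self-contained sketch of the corrected walk-back, keeping only the key fields:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct { uint32_t uLeaf, uSubLeaf; } LEAFKEY;  /* just the keys, for the sketch */

    /* Corrected walk-back from the first hunk above: step towards the start of
     * the sorted array while the previous entry still belongs to the same leaf
     * and its sub-leaf is not below the one requested. */
    static size_t cpumSketchWalkBack(const LEAFKEY *paLeaves, size_t i,
                                     uint32_t uLeaf, uint32_t uSubLeaf)
    {
        if (uSubLeaf < paLeaves[i].uSubLeaf)
            while (   i > 0
                   && uLeaf    == paLeaves[i - 1].uLeaf
                   && uSubLeaf <= paLeaves[i - 1].uSubLeaf)
                i--;
        return i;
    }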
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r54674 r54714  
    8080{
    8181    uint32_t uLeaf;  /**< Leaf to check. */
    82     uint32_t ecx;    /**< which bits in ecx to unify between CPUs. */
    83     uint32_t edx;    /**< which bits in edx to unify between CPUs. */
     82    uint32_t uEcx;   /**< which bits in ecx to unify between CPUs. */
     83    uint32_t uEdx;   /**< which bits in edx to unify between CPUs. */
    8484}
    8585const g_aCpuidUnifyBits[] =
     
    170170        ASMCpuIdExSlow(uLeaf, 0, 0, 0, &eax, &ebx, &ecx, &edx);
    171171
    172         ASMAtomicAndU32(&pLegacyLeaf->ecx, ecx | ~g_aCpuidUnifyBits[i].ecx);
    173         ASMAtomicAndU32(&pLegacyLeaf->edx, edx | ~g_aCpuidUnifyBits[i].edx);
     172        ASMAtomicAndU32(&pLegacyLeaf->uEcx, ecx | ~g_aCpuidUnifyBits[i].uEcx);
     173        ASMAtomicAndU32(&pLegacyLeaf->uEdx, edx | ~g_aCpuidUnifyBits[i].uEdx);
    174174    }
    175175}
     
    300300                    continue;
    301301
    302                 pLeaf->uEcx = pLegacyLeaf->ecx;
    303                 pLeaf->uEdx = pLegacyLeaf->edx;
     302                pLeaf->uEcx = pLegacyLeaf->uEcx;
     303                pLeaf->uEdx = pLegacyLeaf->uEdx;
    304304            }
    305305        }
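
Note: the CPUMR0.cpp hunks keep the cross-CPU unification logic and only rename the legacy-leaf fields: for each g_aCpuidUnifyBits entry, the CPUID of the current CPU is read and ASMAtomicAndU32 clears, in the shared legacy leaf, every unify bit this CPU does not report, so the guest ends up seeing the intersection of features across all host CPUs. A hedged sketch of that step is below; it uses C11 atomics in place of ASMAtomicAndU32, and the helper name is an assumption, but the masking expression is the one from the hunk.

    #include <stdint.h>
    #include <stdatomic.h>

    /* Per-CPU unification step: AND into the shared leaf the bits this CPU has,
     * plus everything outside the unify mask, so only commonly supported unify
     * bits survive. */
    static void cpumSketchUnifyLeaf(_Atomic uint32_t *puSharedEcx, _Atomic uint32_t *puSharedEdx,
                                    uint32_t uThisCpuEcx, uint32_t uThisCpuEdx,
                                    uint32_t fUnifyEcx, uint32_t fUnifyEdx)
    {
        atomic_fetch_and(puSharedEcx, uThisCpuEcx | ~fUnifyEcx);
        atomic_fetch_and(puSharedEdx, uThisCpuEdx | ~fUnifyEdx);
    }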
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r54674 r54714  
    727727    if (!fHWVirtExEnabled)
    728728    {
    729         Assert(   pVM->cpum.s.aGuestCpuIdPatmStd[4].eax == 0
    730                || pVM->cpum.s.aGuestCpuIdPatmStd[0].eax < 0x4);
    731         pVM->cpum.s.aGuestCpuIdPatmStd[4].eax = 0;
     729        Assert(   pVM->cpum.s.aGuestCpuIdPatmStd[4].uEax == 0
     730               || pVM->cpum.s.aGuestCpuIdPatmStd[0].uEax < 0x4);
     731        pVM->cpum.s.aGuestCpuIdPatmStd[4].uEax = 0;
    732732    }
    733733}
     
    12321232         * features in the future.
    12331233         */
    1234         AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].ecx &
     1234        AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx &
    12351235                              (   X86_CPUID_FEATURE_ECX_DTES64
    12361236                               |  X86_CPUID_FEATURE_ECX_VMX
     
    18751875    CPUMCPUID   Host;
    18761876    CPUMCPUID   Guest;
    1877     unsigned    cStdMax = pVM->cpum.s.aGuestCpuIdPatmStd[0].eax;
     1877    unsigned    cStdMax = pVM->cpum.s.aGuestCpuIdPatmStd[0].uEax;
    18781878
    18791879    uint32_t    cStdHstMax;
     
    18911891        {
    18921892            Guest = pVM->cpum.s.aGuestCpuIdPatmStd[i];
    1893             ASMCpuIdExSlow(i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
     1893            ASMCpuIdExSlow(i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
    18941894
    18951895            pHlp->pfnPrintf(pHlp,
    18961896                            "Gst: %08x  %08x %08x %08x %08x%s\n"
    18971897                            "Hst:           %08x %08x %08x %08x\n",
    1898                             i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
     1898                            i, Guest.uEax, Guest.uEbx, Guest.uEcx, Guest.uEdx,
    18991899                            i <= cStdMax ? "" : "*",
    1900                             Host.eax, Host.ebx, Host.ecx, Host.edx);
     1900                            Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
    19011901        }
    19021902        else
    19031903        {
    1904             ASMCpuIdExSlow(i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
     1904            ASMCpuIdExSlow(i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
    19051905
    19061906            pHlp->pfnPrintf(pHlp,
    19071907                            "Hst: %08x  %08x %08x %08x %08x\n",
    1908                             i, Host.eax, Host.ebx, Host.ecx, Host.edx);
     1908                            i, Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
    19091909        }
    19101910    }
     
    19191919                        "Name:                            %.04s%.04s%.04s\n"
    19201920                        "Supports:                        0-%x\n",
    1921                         &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
     1921                        &Guest.uEbx, &Guest.uEdx, &Guest.uEcx, Guest.uEax);
    19221922    }
    19231923
     
    19251925     * Get Features.
    19261926     */
    1927     bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdPatmStd[0].ebx,
    1928                                         pVM->cpum.s.aGuestCpuIdPatmStd[0].ecx,
    1929                                         pVM->cpum.s.aGuestCpuIdPatmStd[0].edx);
     1927    bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdPatmStd[0].uEbx,
     1928                                        pVM->cpum.s.aGuestCpuIdPatmStd[0].uEcx,
     1929                                        pVM->cpum.s.aGuestCpuIdPatmStd[0].uEdx);
    19301930    if (cStdMax >= 1 && iVerbosity)
    19311931    {
     
    19331933
    19341934        Guest = pVM->cpum.s.aGuestCpuIdPatmStd[1];
    1935         uint32_t uEAX = Guest.eax;
     1935        uint32_t uEAX = Guest.uEax;
    19361936
    19371937        pHlp->pfnPrintf(pHlp,
     
    19481948                        ASMGetCpuStepping(uEAX),
    19491949                        (uEAX >> 12) & 3, s_apszTypes[(uEAX >> 12) & 3],
    1950                         (Guest.ebx >> 24) & 0xff,
    1951                         (Guest.ebx >> 16) & 0xff,
    1952                         (Guest.ebx >>  8) & 0xff,
    1953                         (Guest.ebx >>  0) & 0xff);
     1950                        (Guest.uEbx >> 24) & 0xff,
     1951                        (Guest.uEbx >> 16) & 0xff,
     1952                        (Guest.uEbx >>  8) & 0xff,
     1953                        (Guest.uEbx >>  0) & 0xff);
    19541954        if (iVerbosity == 1)
    19551955        {
    1956             uint32_t uEDX = Guest.edx;
     1956            uint32_t uEDX = Guest.uEdx;
    19571957            pHlp->pfnPrintf(pHlp, "Features EDX:                   ");
    19581958            if (uEDX & RT_BIT(0))   pHlp->pfnPrintf(pHlp, " FPU");
     
    19901990            pHlp->pfnPrintf(pHlp, "\n");
    19911991
    1992             uint32_t uECX = Guest.ecx;
     1992            uint32_t uECX = Guest.uEcx;
    19931993            pHlp->pfnPrintf(pHlp, "Features ECX:                   ");
    19941994            if (uECX & RT_BIT(0))   pHlp->pfnPrintf(pHlp, " SSE3");
     
    20282028        else
    20292029        {
    2030             ASMCpuIdExSlow(1, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
    2031 
    2032             X86CPUIDFEATEDX EdxHost  = *(PX86CPUIDFEATEDX)&Host.edx;
    2033             X86CPUIDFEATECX EcxHost  = *(PX86CPUIDFEATECX)&Host.ecx;
    2034             X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.edx;
    2035             X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.ecx;
     2030            ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
     2031
     2032            X86CPUIDFEATEDX EdxHost  = *(PX86CPUIDFEATEDX)&Host.uEdx;
     2033            X86CPUIDFEATECX EcxHost  = *(PX86CPUIDFEATECX)&Host.uEcx;
     2034            X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.uEdx;
     2035            X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.uEcx;
    20362036
    20372037            pHlp->pfnPrintf(pHlp, "Mnemonic - Description                 = guest (host)\n");
     
    21122112     * Implemented after AMD specs.
    21132113     */
    2114     unsigned    cExtMax = pVM->cpum.s.aGuestCpuIdPatmExt[0].eax & 0xffff;
     2114    unsigned    cExtMax = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax & 0xffff;
    21152115
    21162116    pHlp->pfnPrintf(pHlp,
     
    21222122    {
    21232123        Guest = pVM->cpum.s.aGuestCpuIdPatmExt[i];
    2124         ASMCpuIdExSlow(0x80000000 | i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
     2124        ASMCpuIdExSlow(0x80000000 | i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
    21252125
    21262126        if (   i == 7
    2127             && (Host.edx & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR))
     2127            && (Host.uEdx & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR))
    21282128        {
    21292129            fSupportsInvariantTsc = true;
     
    21322132                        "Gst: %08x  %08x %08x %08x %08x%s\n"
    21332133                        "Hst:           %08x %08x %08x %08x\n",
    2134                         0x80000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
     2134                        0x80000000 | i, Guest.uEax, Guest.uEbx, Guest.uEcx, Guest.uEdx,
    21352135                        i <= cExtMax ? "" : "*",
    2136                         Host.eax, Host.ebx, Host.ecx, Host.edx);
     2136                        Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
    21372137    }
    21382138
     
    21462146                        "Ext Name:                        %.4s%.4s%.4s\n"
    21472147                        "Ext Supports:                    0x80000000-%#010x\n",
    2148                         &Guest.ebx, &Guest.edx, &Guest.ecx, Guest.eax);
     2148                        &Guest.uEbx, &Guest.uEdx, &Guest.uEcx, Guest.uEax);
    21492149    }
    21502150
     
    21522152    {
    21532153        Guest = pVM->cpum.s.aGuestCpuIdPatmExt[1];
    2154         uint32_t uEAX = Guest.eax;
     2154        uint32_t uEAX = Guest.uEax;
    21552155        pHlp->pfnPrintf(pHlp,
    21562156                        "Family:                          %d  \tExtended: %d \tEffective: %d\n"
     
    21612161                        (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel),
    21622162                        ASMGetCpuStepping(uEAX),
    2163                         Guest.ebx & 0xfff);
     2163                        Guest.uEbx & 0xfff);
    21642164
    21652165        if (iVerbosity == 1)
    21662166        {
    2167             uint32_t uEDX = Guest.edx;
     2167            uint32_t uEDX = Guest.uEdx;
    21682168            pHlp->pfnPrintf(pHlp, "Features EDX:                   ");
    21692169            if (uEDX & RT_BIT(0))   pHlp->pfnPrintf(pHlp, " FPU");
     
    22012201            pHlp->pfnPrintf(pHlp, "\n");
    22022202
    2203             uint32_t uECX = Guest.ecx;
     2203            uint32_t uECX = Guest.uEcx;
    22042204            pHlp->pfnPrintf(pHlp, "Features ECX:                   ");
    22052205            if (uECX & RT_BIT(0))   pHlp->pfnPrintf(pHlp, " LAHF/SAHF");
     
    22242224        else
    22252225        {
    2226             ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
    2227 
    2228             uint32_t uEdxGst = Guest.edx;
    2229             uint32_t uEdxHst = Host.edx;
     2226            ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
     2227
     2228            uint32_t uEdxGst = Guest.uEdx;
     2229            uint32_t uEdxHst = Host.uEdx;
    22302230            pHlp->pfnPrintf(pHlp, "Mnemonic - Description                 = guest (host)\n");
    22312231            pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip                  = %d (%d)\n",  !!(uEdxGst & RT_BIT( 0)),  !!(uEdxHst & RT_BIT( 0)));
     
    22622262            pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow!                        = %d (%d)\n",  !!(uEdxGst & RT_BIT(31)),  !!(uEdxHst & RT_BIT(31)));
    22632263
    2264             uint32_t uEcxGst = Guest.ecx;
    2265             uint32_t uEcxHst = Host.ecx;
     2264            uint32_t uEcxGst = Guest.uEcx;
     2265            uint32_t uEcxHst = Host.uEcx;
    22662266            pHlp->pfnPrintf(pHlp, "LahfSahf - LAHF/SAHF in 64-bit mode    = %d (%d)\n",  !!(uEcxGst & RT_BIT( 0)),  !!(uEcxHst & RT_BIT( 0)));
    22672267            pHlp->pfnPrintf(pHlp, "CmpLegacy - Core MP legacy mode (depr) = %d (%d)\n",  !!(uEcxGst & RT_BIT( 1)),  !!(uEcxHst & RT_BIT( 1)));
     
    22862286        char szString[4*4*3+1] = {0};
    22872287        uint32_t *pu32 = (uint32_t *)szString;
    2288         *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].eax;
    2289         *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].ebx;
    2290         *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].ecx;
    2291         *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].edx;
     2288        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEax;
     2289        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEbx;
     2290        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEcx;
     2291        *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEdx;
    22922292        if (cExtMax >= 3)
    22932293        {
    2294             *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].eax;
    2295             *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].ebx;
    2296             *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].ecx;
    2297             *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].edx;
     2294            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEax;
     2295            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEbx;
     2296            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEcx;
     2297            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEdx;
    22982298        }
    22992299        if (cExtMax >= 4)
    23002300        {
    2301             *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].eax;
    2302             *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].ebx;
    2303             *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].ecx;
    2304             *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].edx;
     2301            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEax;
     2302            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEbx;
     2303            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEcx;
     2304            *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEdx;
    23052305        }
    23062306        pHlp->pfnPrintf(pHlp, "Full Name:                       %s\n", szString);
     
    23092309    if (iVerbosity && cExtMax >= 5)
    23102310    {
    2311         uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[5].eax;
    2312         uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[5].ebx;
    2313         uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[5].ecx;
    2314         uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[5].edx;
     2311        uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEax;
     2312        uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEbx;
     2313        uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEcx;
     2314        uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEdx;
    23152315        char sz1[32];
    23162316        char sz2[32];
     
    23472347    if (iVerbosity && cExtMax >= 6)
    23482348    {
    2349         uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[6].eax;
    2350         uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[6].ebx;
    2351         uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[6].edx;
     2349        uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[6].uEax;
     2350        uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[6].uEbx;
     2351        uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[6].uEdx;
    23522352
    23532353        pHlp->pfnPrintf(pHlp,
     
    23742374    if (iVerbosity && cExtMax >= 7)
    23752375    {
    2376         uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[7].edx;
     2376        uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[7].uEdx;
    23772377
    23782378        pHlp->pfnPrintf(pHlp, "Host Invariant-TSC support:      %RTbool\n", fSupportsInvariantTsc);
     
    23962396    if (iVerbosity && cExtMax >= 8)
    23972397    {
    2398         uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[8].eax;
    2399         uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[8].ecx;
     2398        uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[8].uEax;
     2399        uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[8].uEcx;
    24002400
    24012401        pHlp->pfnPrintf(pHlp,
     
    24202420    RT_ZERO(Host);
    24212421    if (cStdHstMax >= 1)
    2422         ASMCpuIdExSlow(1, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
    2423     bool fHostHvp  = RT_BOOL(Host.ecx & X86_CPUID_FEATURE_ECX_HVP);
     2422        ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
     2423    bool fHostHvp  = RT_BOOL(Host.uEcx & X86_CPUID_FEATURE_ECX_HVP);
    24242424    bool fGuestHvp = false;
    24252425    if (cStdMax >= 1)
    24262426    {
    24272427        Guest     = pVM->cpum.s.aGuestCpuIdPatmStd[1];
    2428         fGuestHvp = RT_BOOL(Guest.ecx & X86_CPUID_FEATURE_ECX_HVP);
     2428        fGuestHvp = RT_BOOL(Guest.uEcx & X86_CPUID_FEATURE_ECX_HVP);
    24292429    }
    24302430
     
    24472447        RT_ZERO(Host);
    24482448        if (fHostHvp)
    2449             ASMCpuIdExSlow(uHyperLeaf, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
     2449            ASMCpuIdExSlow(uHyperLeaf, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
    24502450
    24512451        CPUMCPUIDLEAF  GuestLeaf;
    24522452        uint32_t const cHyperGstMax = pHyperLeafGst ? pHyperLeafGst->uEax : 0;
    2453         uint32_t const cHyperHstMax = Host.eax;
     2453        uint32_t const cHyperHstMax = Host.uEax;
    24542454        uint32_t const cHyperMax    = RT_MAX(cHyperHstMax, cHyperGstMax);
    24552455        for (uint32_t i = uHyperLeaf; i <= cHyperMax; i++)
     
    24582458            RT_ZERO(GuestLeaf);
    24592459            if (i <= cHyperHstMax)
    2460                 ASMCpuIdExSlow(i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
     2460                ASMCpuIdExSlow(i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
    24612461            CPUMR3CpuIdGetLeaf(pVM, &GuestLeaf, i, 0 /* uSubLeaf */);
    24622462            if (!fHostHvp)
     
    24732473                                i, GuestLeaf.uEax, GuestLeaf.uEbx, GuestLeaf.uEcx, GuestLeaf.uEdx,
    24742474                                i <= cHyperGstMax ? "" : "*",
    2475                                 Host.eax, Host.ebx, Host.ecx, Host.edx, i <= cHyperHstMax ? "" : "*");
     2475                                Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx, i <= cHyperHstMax ? "" : "*");
    24762476            }
    24772477        }
     
    24812481     * Centaur.
    24822482     */
    2483     unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdPatmCentaur[0].eax & 0xffff;
     2483    unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdPatmCentaur[0].uEax & 0xffff;
    24842484
    24852485    pHlp->pfnPrintf(pHlp,
     
    24902490    {
    24912491        Guest = pVM->cpum.s.aGuestCpuIdPatmCentaur[i];
    2492         ASMCpuIdExSlow(0xc0000000 | i, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
     2492        ASMCpuIdExSlow(0xc0000000 | i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
    24932493
    24942494        pHlp->pfnPrintf(pHlp,
    24952495                        "Gst: %08x  %08x %08x %08x %08x%s\n"
    24962496                        "Hst:           %08x %08x %08x %08x\n",
    2497                         0xc0000000 | i, Guest.eax, Guest.ebx, Guest.ecx, Guest.edx,
     2497                        0xc0000000 | i, Guest.uEax, Guest.uEbx, Guest.uEcx, Guest.uEdx,
    24982498                        i <= cCentaurMax ? "" : "*",
    2499                         Host.eax, Host.ebx, Host.ecx, Host.edx);
     2499                        Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx);
    25002500    }
    25012501
     
    25082508        pHlp->pfnPrintf(pHlp,
    25092509                        "Centaur Supports:                0xc0000000-%#010x\n",
    2510                         Guest.eax);
     2510                        Guest.uEax);
    25112511    }
    25122512
    25132513    if (iVerbosity && cCentaurMax >= 1)
    25142514    {
    2515         ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host.eax, &Host.ebx, &Host.ecx, &Host.edx);
    2516         uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdPatmCentaur[1].edx;
    2517         uint32_t uEdxHst = Host.edx;
     2515        ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx);
     2516        uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdPatmCentaur[1].uEdx;
     2517        uint32_t uEdxHst = Host.uEdx;
    25182518
    25192519        if (iVerbosity == 1)
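
Note: the CPUM.cpp changes are confined to the CPUID info handler, which dumps guest versus host leaves and now reads the renamed uEax/uEbx/uEcx/uEdx fields. One detail the dump relies on is that CPUID leaf 0 returns the 12-character vendor string in EBX, EDX, ECX (in that order), which is why "%.04s%.04s%.04s" is fed &Guest.uEbx, &Guest.uEdx, &Guest.uEcx. A small standalone sketch of that reassembly, with sample register values a GenuineIntel CPU would return:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t uEbx = 0x756e6547;   /* "Genu" */
        uint32_t uEdx = 0x49656e69;   /* "ineI" */
        uint32_t uEcx = 0x6c65746e;   /* "ntel" */

        char szVendor[13];
        memcpy(&szVendor[0], &uEbx, 4);
        memcpy(&szVendor[4], &uEdx, 4);
        memcpy(&szVendor[8], &uEcx, 4);
        szVendor[12] = '\0';
        printf("Vendor: %s\n", szVendor);  /* on little-endian hosts: "GenuineIntel" */
        return 0;
    }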
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r54674 r54714  
    541541    if (pLeaf)
    542542    {
    543         pLegacy->eax = pLeaf->uEax;
    544         pLegacy->ebx = pLeaf->uEbx;
    545         pLegacy->ecx = pLeaf->uEcx;
    546         pLegacy->edx = pLeaf->uEdx;
     543        pLegacy->uEax = pLeaf->uEax;
     544        pLegacy->uEbx = pLeaf->uEbx;
     545        pLegacy->uEcx = pLeaf->uEcx;
     546        pLegacy->uEdx = pLeaf->uEdx;
    547547        return true;
    548548    }
     
    11951195     */
    11961196    *penmUnknownMethod = CPUMUKNOWNCPUID_DEFAULTS;
    1197     pDefUnknown->eax = 0;
    1198     pDefUnknown->ebx = 0;
    1199     pDefUnknown->ecx = 0;
    1200     pDefUnknown->edx = 0;
     1197    pDefUnknown->uEax = 0;
     1198    pDefUnknown->uEbx = 0;
     1199    pDefUnknown->uEcx = 0;
     1200    pDefUnknown->uEdx = 0;
    12011201
    12021202    /*
     
    12581258        else
    12591259            *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF;
    1260         pDefUnknown->eax = auLast[0];
    1261         pDefUnknown->ebx = auLast[1];
    1262         pDefUnknown->ecx = auLast[2];
    1263         pDefUnknown->edx = auLast[3];
     1260        pDefUnknown->uEax = auLast[0];
     1261        pDefUnknown->uEbx = auLast[1];
     1262        pDefUnknown->uEcx = auLast[2];
     1263        pDefUnknown->uEdx = auLast[3];
    12641264        return VINF_SUCCESS;
    12651265    }
     
    16911691        int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32);
    16921692        if (RT_SUCCESS(rc))
    1693             pLeaf->eax = u32;
     1693            pLeaf->uEax = u32;
    16941694        else
    16951695            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
     
    16971697        rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32);
    16981698        if (RT_SUCCESS(rc))
    1699             pLeaf->ebx = u32;
     1699            pLeaf->uEbx = u32;
    17001700        else
    17011701            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
     
    17031703        rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32);
    17041704        if (RT_SUCCESS(rc))
    1705             pLeaf->ecx = u32;
     1705            pLeaf->uEcx = u32;
    17061706        else
    17071707            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
     
    17091709        rc = CFGMR3QueryU32(pLeafNode, "edx", &u32);
    17101710        if (RT_SUCCESS(rc))
    1711             pLeaf->edx = u32;
     1711            pLeaf->uEdx = u32;
    17121712        else
    17131713            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
     
    17531753    /* Using the ECX variant for all of them can't hurt... */
    17541754    for (uint32_t i = 0; i < cLeaves; i++)
    1755         ASMCpuIdExSlow(uStart + i, 0, 0, 0, &paLeaves[i].eax, &paLeaves[i].ebx, &paLeaves[i].ecx, &paLeaves[i].edx);
     1755        ASMCpuIdExSlow(uStart + i, 0, 0, 0, &paLeaves[i].uEax, &paLeaves[i].uEbx, &paLeaves[i].uEcx, &paLeaves[i].uEdx);
    17561756
    17571757    /* Load CPUID leaf override; we currently don't care if the user
     
    18161816            if (pLeaf)
    18171817            {
    1818                 pLegacyLeaf->eax = pLeaf->uEax;
    1819                 pLegacyLeaf->ebx = pLeaf->uEbx;
    1820                 pLegacyLeaf->ecx = pLeaf->uEcx;
    1821                 pLegacyLeaf->edx = pLeaf->uEdx;
     1818                pLegacyLeaf->uEax = pLeaf->uEax;
     1819                pLegacyLeaf->uEbx = pLeaf->uEbx;
     1820                pLegacyLeaf->uEcx = pLeaf->uEcx;
     1821                pLegacyLeaf->uEdx = pLeaf->uEdx;
    18221822            }
    18231823            else
     
    26032603    CPUMCPUID   aRawStd[16];
    26042604    for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
    2605         ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
     2605        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
    26062606    SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
    26072607    SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));
     
    26092609    CPUMCPUID   aRawExt[32];
    26102610    for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
    2611         ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
     2611        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
    26122612    SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
    26132613    SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
     
    26342634                NewLeaf.uSubLeaf        = 0;
    26352635                NewLeaf.fSubLeafMask    = 0;
    2636                 NewLeaf.uEax            = CpuId.eax;
    2637                 NewLeaf.uEbx            = CpuId.ebx;
    2638                 NewLeaf.uEcx            = CpuId.ecx;
    2639                 NewLeaf.uEdx            = CpuId.edx;
     2636                NewLeaf.uEax            = CpuId.uEax;
     2637                NewLeaf.uEbx            = CpuId.uEbx;
     2638                NewLeaf.uEcx            = CpuId.uEcx;
     2639                NewLeaf.uEdx            = CpuId.uEdx;
    26402640                NewLeaf.fFlags          = 0;
    26412641                rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &NewLeaf);
     
    28732873    AssertRCReturn(rc, rc);
    28742874    for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
    2875         ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
     2875        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx);
    28762876
    28772877    CPUMCPUID   aRawExt[32];
     
    28832883    AssertRCReturn(rc, rc);
    28842884    for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
    2885         ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
     2885        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx);
    28862886
    28872887    /*
     
    28902890    CPUMCPUID   aHostRawStd[16];
    28912891    for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
    2892         ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx);
     2892        ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].uEax, &aHostRawStd[i].uEbx, &aHostRawStd[i].uEcx, &aHostRawStd[i].uEdx);
    28932893
    28942894    CPUMCPUID   aHostRawExt[32];
    28952895    for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
    28962896        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0,
    2897                        &aHostRawExt[i].eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx);
     2897                       &aHostRawExt[i].uEax, &aHostRawExt[i].uEbx, &aHostRawExt[i].uEcx, &aHostRawExt[i].uEdx);
    28982898
    28992899    /*
     
    29262926    {
    29272927        /* CPUID(0) */
    2928         CPUID_CHECK_RET(   aHostRawStd[0].ebx == aRawStd[0].ebx
    2929                         && aHostRawStd[0].ecx == aRawStd[0].ecx
    2930                         && aHostRawStd[0].edx == aRawStd[0].edx,
     2928        CPUID_CHECK_RET(   aHostRawStd[0].uEbx == aRawStd[0].uEbx
     2929                        && aHostRawStd[0].uEcx == aRawStd[0].uEcx
     2930                        && aHostRawStd[0].uEdx == aRawStd[0].uEdx,
    29312931                        (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
    2932                          &aHostRawStd[0].ebx, &aHostRawStd[0].edx, &aHostRawStd[0].ecx,
    2933                          &aRawStd[0].ebx, &aRawStd[0].edx, &aRawStd[0].ecx));
    2934         CPUID_CHECK2_WRN("Std CPUID max leaf",   aHostRawStd[0].eax, aRawStd[0].eax);
    2935         CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3);
    2936         CPUID_CHECK2_WRN("Reserved bits 31:28",  aHostRawExt[1].eax >> 28,       aRawExt[1].eax >> 28);
    2937 
    2938         bool const fIntel = ASMIsIntelCpuEx(aRawStd[0].ebx, aRawStd[0].ecx, aRawStd[0].edx);
     2932                         &aHostRawStd[0].uEbx, &aHostRawStd[0].uEdx, &aHostRawStd[0].uEcx,
     2933                         &aRawStd[0].uEbx, &aRawStd[0].uEdx, &aRawStd[0].uEcx));
     2934        CPUID_CHECK2_WRN("Std CPUID max leaf",   aHostRawStd[0].uEax, aRawStd[0].uEax);
     2935        CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].uEax >> 14) & 3, (aRawExt[1].uEax >> 14) & 3);
     2936        CPUID_CHECK2_WRN("Reserved bits 31:28",  aHostRawExt[1].uEax >> 28,       aRawExt[1].uEax >> 28);
     2937
     2938        bool const fIntel = ASMIsIntelCpuEx(aRawStd[0].uEbx, aRawStd[0].uEcx, aRawStd[0].uEdx);
    29392939
    29402940        /* CPUID(1).eax */
    2941         CPUID_CHECK2_RET("CPU family",          ASMGetCpuFamily(aHostRawStd[1].eax),        ASMGetCpuFamily(aRawStd[1].eax));
    2942         CPUID_CHECK2_RET("CPU model",           ASMGetCpuModel(aHostRawStd[1].eax, fIntel), ASMGetCpuModel(aRawStd[1].eax, fIntel));
    2943         CPUID_CHECK2_WRN("CPU type",            (aHostRawStd[1].eax >> 12) & 3,             (aRawStd[1].eax >> 12) & 3 );
     2941        CPUID_CHECK2_RET("CPU family",          ASMGetCpuFamily(aHostRawStd[1].uEax),        ASMGetCpuFamily(aRawStd[1].uEax));
     2942        CPUID_CHECK2_RET("CPU model",           ASMGetCpuModel(aHostRawStd[1].uEax, fIntel), ASMGetCpuModel(aRawStd[1].uEax, fIntel));
     2943        CPUID_CHECK2_WRN("CPU type",            (aHostRawStd[1].uEax >> 12) & 3,             (aRawStd[1].uEax >> 12) & 3 );
    29442944
    29452945        /* CPUID(1).ebx - completely ignore CPU count and APIC ID. */
    2946         CPUID_CHECK2_RET("CPU brand ID",         aHostRawStd[1].ebx & 0xff,                 aRawStd[1].ebx & 0xff);
    2947         CPUID_CHECK2_WRN("CLFLUSH chunk count", (aHostRawStd[1].ebx >> 8) & 0xff,           (aRawStd[1].ebx >> 8) & 0xff);
     2946        CPUID_CHECK2_RET("CPU brand ID",         aHostRawStd[1].uEbx & 0xff,                 aRawStd[1].uEbx & 0xff);
     2947        CPUID_CHECK2_WRN("CLFLUSH chunk count", (aHostRawStd[1].uEbx >> 8) & 0xff,           (aRawStd[1].uEbx >> 8) & 0xff);
    29482948
    29492949        /* CPUID(1).ecx */
    2950         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);
    2951         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL);
    2952         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64);
    2953         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
    2954         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS);
    2955         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_VMX);
    2956         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_SMX);
    2957         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_EST);
    2958         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TM2);
    2959         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3);
    2960         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID);
    2961         CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
    2962         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA);
    2963         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16);
    2964         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);
    2965         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM);
    2966         CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
    2967         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_PCID);
    2968         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DCA);
    2969         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1);
    2970         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2);
    2971         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
    2972         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE);
    2973         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT);
    2974         CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TSCDEADL);
    2975         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES);
    2976         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE);
    2977         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE);
    2978         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX);
    2979         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_F16C);
    2980         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_RDRAND);
    2981         CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_HVP);
     2950        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE3);
     2951        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCLMUL);
     2952        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_DTES64);
     2953        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MONITOR);
     2954        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CPLDS);
     2955        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_VMX);
     2956        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_SMX);
     2957        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_EST);
     2958        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_TM2);
     2959        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSSE3);
     2960        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_CNTXID);
     2961        CPUID_RAW_FEATURE_RET(Std, uEcx, RT_BIT_32(11) /*reserved*/ );
     2962        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_FMA);
     2963        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CX16);
     2964        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_TPRUPDATE);
     2965        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_PDCM);
     2966        CPUID_RAW_FEATURE_RET(Std, uEcx, RT_BIT_32(16) /*reserved*/);
     2967        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_PCID);
     2968        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_DCA);
     2969        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_1);
     2970        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_2);
     2971        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_X2APIC);
     2972        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MOVBE);
     2973        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_POPCNT);
     2974        CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_TSCDEADL);
     2975        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES);
     2976        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE);
     2977        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE);
     2978        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX);
     2979        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C);
     2980        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_RDRAND);
     2981        CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_HVP);
    29822982
    29832983        /* CPUID(1).edx */
    2984         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
    2985         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
    2986         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE);
    2987         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
    2988         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC);
    2989         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR);
    2990         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
    2991         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
    2992         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8);
    2993         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
    2994         CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
    2995         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
    2996         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
    2997         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
    2998         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
    2999         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV);
    3000         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
    3001         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
    3002         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
    3003         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH);
    3004         CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
    3005         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_DS);
    3006         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_ACPI);
    3007         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX);
    3008         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR);
    3009         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE);
    3010         CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2);
    3011         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SS);
    3012         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_HTT);
    3013         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_TM);
    3014         CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/);
    3015         CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PBE);
     2984        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FPU);
     2985        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_VME);
     2986        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DE);
     2987        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE);
     2988        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TSC);
     2989        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MSR);
     2990        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAE);
     2991        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCE);
     2992        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CX8);
     2993        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_APIC);
     2994        CPUID_RAW_FEATURE_RET(Std, uEdx, RT_BIT_32(10) /*reserved*/);
     2995        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SEP);
     2996        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MTRR);
     2997        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PGE);
     2998        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCA);
     2999        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CMOV);
     3000        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAT);
     3001        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE36);
     3002        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSN);
     3003        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CLFSH);
     3004        CPUID_RAW_FEATURE_RET(Std, uEdx, RT_BIT_32(20) /*reserved*/);
     3005        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_DS);
     3006        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_ACPI);
     3007        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MMX);
     3008        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FXSR);
     3009        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE);
     3010        CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE2);
     3011        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SS);
     3012        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_HTT);
     3013        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_TM);
     3014        CPUID_RAW_FEATURE_RET(Std, uEdx, RT_BIT_32(30) /*JMPE/IA64*/);
     3015        CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PBE);
    30163016
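The CPUID_RAW_FEATURE_RET/WRN/IGN checks above compare one feature bit at a time between the raw CPUID leaves recorded in the saved state (aRawStd/aRawExt) and the raw leaves of the current host (aHostRawStd/aHostRawExt): _RET refuses to load the state on a mismatch, _WRN only logs it, and _IGN accepts it silently. As a rough sketch only (the real macros are defined earlier in this file and also log which feature differs; the helper name and the exact failure status are assumptions):

    static int cpumCheckRawFeatureBit(uint32_t fHost, uint32_t fSaved, uint32_t fBit, bool fFatal)
    {
        if ((fHost & fBit) == (fSaved & fBit))
            return VINF_SUCCESS;                     /* identical on both sides, nothing to report */
        LogRel(("CPUM: raw CPUID feature bit %#x differs: host=%d saved=%d\n",
                fBit, RT_BOOL(fHost & fBit), RT_BOOL(fSaved & fBit)));
        return fFatal ? VERR_SSM_LOAD_CPUID_MISMATCH /* the _RET variant fails the load */
                      : VINF_SUCCESS;                /* _WRN/_IGN carry on regardless */
    }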
    30173017        /* CPUID(2) - config, mostly about caches. ignore. */
     
    30273027
    30283028        /* CPUID(d) - XCR0 stuff - takes ECX as input. We only warn about the main level (ECX=0) for now. */
    3029         CPUID_CHECK_WRN(   aRawStd[0].eax     <  UINT32_C(0x0000000d)
    3030                         || aHostRawStd[0].eax >= UINT32_C(0x0000000d),
     3029        CPUID_CHECK_WRN(   aRawStd[0].uEax     <  UINT32_C(0x0000000d)
     3030                        || aHostRawStd[0].uEax >= UINT32_C(0x0000000d),
    30313031                        ("CPUM: Standard leaf D was present on saved state host, not present on current.\n"));
    3032         if (   aRawStd[0].eax     >= UINT32_C(0x0000000d)
    3033             && aHostRawStd[0].eax >= UINT32_C(0x0000000d))
     3032        if (   aRawStd[0].uEax     >= UINT32_C(0x0000000d)
     3033            && aHostRawStd[0].uEax >= UINT32_C(0x0000000d))
    30343034        {
    3035             CPUID_CHECK2_WRN("Valid low XCR0 bits",             aHostRawStd[0xd].eax, aRawStd[0xd].eax);
    3036             CPUID_CHECK2_WRN("Valid high XCR0 bits",            aHostRawStd[0xd].edx, aRawStd[0xd].edx);
    3037             CPUID_CHECK2_WRN("Current XSAVE/XRSTOR area size",  aHostRawStd[0xd].ebx, aRawStd[0xd].ebx);
    3038             CPUID_CHECK2_WRN("Max XSAVE/XRSTOR area size",      aHostRawStd[0xd].ecx, aRawStd[0xd].ecx);
     3035            CPUID_CHECK2_WRN("Valid low XCR0 bits",             aHostRawStd[0xd].uEax, aRawStd[0xd].uEax);
     3036            CPUID_CHECK2_WRN("Valid high XCR0 bits",            aHostRawStd[0xd].uEdx, aRawStd[0xd].uEdx);
     3037            CPUID_CHECK2_WRN("Current XSAVE/XRSTOR area size",  aHostRawStd[0xd].uEbx, aRawStd[0xd].uEbx);
     3038            CPUID_CHECK2_WRN("Max XSAVE/XRSTOR area size",      aHostRawStd[0xd].uEcx, aRawStd[0xd].uEcx);
    30393039        }
    30403040
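For orientation, leaf 0xD sub-leaf 0 reports which XCR0 bits the CPU accepts in EDX:EAX, the XSAVE area size for the currently enabled state components in EBX, and the size needed with every supported component enabled in ECX, which is exactly what the four warnings above compare. A minimal decode sketch, assuming only the CPUMCPUID layout from cpum.h (the struct and helper names are made up):

    typedef struct XSAVEINFOSKETCH
    {
        uint64_t fValidXcr0;    /* EDX:EAX - XCR0 bits the CPU supports.              */
        uint32_t cbCurrent;     /* EBX     - XSAVE area size for enabled components.  */
        uint32_t cbMax;         /* ECX     - XSAVE area size with everything enabled. */
    } XSAVEINFOSKETCH;

    static XSAVEINFOSKETCH cpumDecodeLeafDSketch(CPUMCPUID const *pLeafD)
    {
        XSAVEINFOSKETCH Info;
        Info.fValidXcr0 = RT_MAKE_U64(pLeafD->uEax, pLeafD->uEdx); /* low dword, high dword */
        Info.cbCurrent  = pLeafD->uEbx;
        Info.cbMax      = pLeafD->uEcx;
        return Info;
    }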
     
     30423042           Note! Intel has been marking many of the fields here as reserved. We
     30433043                 will verify them as if it's an AMD CPU.
    3044         CPUID_CHECK_RET(   (aHostRawExt[0].eax >= UINT32_C(0x80000001) && aHostRawExt[0].eax <= UINT32_C(0x8000007f))
    3045                         || !(aRawExt[0].eax    >= UINT32_C(0x80000001) && aRawExt[0].eax     <= UINT32_C(0x8000007f)),
     3044        CPUID_CHECK_RET(   (aHostRawExt[0].uEax >= UINT32_C(0x80000001) && aHostRawExt[0].uEax <= UINT32_C(0x8000007f))
     3045                        || !(aRawExt[0].uEax    >= UINT32_C(0x80000001) && aRawExt[0].uEax     <= UINT32_C(0x8000007f)),
     30463046                        (N_("Extended leaves were present on the saved state host, but are missing on the current\n")));
    3047         if (aRawExt[0].eax >= UINT32_C(0x80000001) && aRawExt[0].eax     <= UINT32_C(0x8000007f))
     3047        if (aRawExt[0].uEax >= UINT32_C(0x80000001) && aRawExt[0].uEax     <= UINT32_C(0x8000007f))
    30483048        {
    3049             CPUID_CHECK_RET(   aHostRawExt[0].ebx == aRawExt[0].ebx
    3050                             && aHostRawExt[0].ecx == aRawExt[0].ecx
    3051                             && aHostRawExt[0].edx == aRawExt[0].edx,
     3049            CPUID_CHECK_RET(   aHostRawExt[0].uEbx == aRawExt[0].uEbx
     3050                            && aHostRawExt[0].uEcx == aRawExt[0].uEcx
     3051                            && aHostRawExt[0].uEdx == aRawExt[0].uEdx,
    30523052                            (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
    3053                              &aHostRawExt[0].ebx, &aHostRawExt[0].edx, &aHostRawExt[0].ecx,
    3054                              &aRawExt[0].ebx,     &aRawExt[0].edx,     &aRawExt[0].ecx));
    3055             CPUID_CHECK2_WRN("Ext CPUID max leaf",   aHostRawExt[0].eax, aRawExt[0].eax);
     3053                             &aHostRawExt[0].uEbx, &aHostRawExt[0].uEdx, &aHostRawExt[0].uEcx,
     3054                             &aRawExt[0].uEbx,     &aRawExt[0].uEdx,     &aRawExt[0].uEcx));
     3055            CPUID_CHECK2_WRN("Ext CPUID max leaf",   aHostRawExt[0].uEax, aRawExt[0].uEax);
    30563056
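The vendor mismatch message above prints the 12-character CPUID vendor string: on AMD CPUs leaf 0x80000000 returns the same vendor string as leaf 0, spread over EBX, EDX and ECX in that order (Intel leaves these registers zero, so the equality check still holds). A purely illustrative sketch of how such a string is assembled (helper name and 13-byte buffer are assumptions):

    #include <string.h>

    static void cpumFormatVendorSketch(CPUMCPUID const *pLeaf0, char szVendor[13])
    {
        memcpy(&szVendor[0], &pLeaf0->uEbx, 4);     /* e.g. "Auth" */
        memcpy(&szVendor[4], &pLeaf0->uEdx, 4);     /* e.g. "enti" */
        memcpy(&szVendor[8], &pLeaf0->uEcx, 4);     /* e.g. "cAMD" */
        szVendor[12] = '\0';
    }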
    30573057            /* CPUID(0x80000001).eax - same as CPUID(0).eax. */
    3058             CPUID_CHECK2_RET("CPU family",          ASMGetCpuFamily(aHostRawExt[1].eax),        ASMGetCpuFamily(aRawExt[1].eax));
    3059             CPUID_CHECK2_RET("CPU model",           ASMGetCpuModel(aHostRawExt[1].eax, fIntel), ASMGetCpuModel(aRawExt[1].eax, fIntel));
    3060             CPUID_CHECK2_WRN("CPU type",            (aHostRawExt[1].eax >> 12) & 3, (aRawExt[1].eax >> 12) & 3 );
    3061             CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3 );
    3062             CPUID_CHECK2_WRN("Reserved bits 31:28",  aHostRawExt[1].eax >> 28, aRawExt[1].eax >> 28);
     3058            CPUID_CHECK2_RET("CPU family",          ASMGetCpuFamily(aHostRawExt[1].uEax),        ASMGetCpuFamily(aRawExt[1].uEax));
     3059            CPUID_CHECK2_RET("CPU model",           ASMGetCpuModel(aHostRawExt[1].uEax, fIntel), ASMGetCpuModel(aRawExt[1].uEax, fIntel));
     3060            CPUID_CHECK2_WRN("CPU type",            (aHostRawExt[1].uEax >> 12) & 3, (aRawExt[1].uEax >> 12) & 3 );
     3061            CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].uEax >> 14) & 3, (aRawExt[1].uEax >> 14) & 3 );
     3062            CPUID_CHECK2_WRN("Reserved bits 31:28",  aHostRawExt[1].uEax >> 28, aRawExt[1].uEax >> 28);
    30633063
    30643064            /* CPUID(0x80000001).ebx - Brand ID (maybe), just warn if things differs. */
    3065             CPUID_CHECK2_WRN("CPU BrandID",          aHostRawExt[1].ebx & 0xffff, aRawExt[1].ebx & 0xffff);
    3066             CPUID_CHECK2_WRN("Reserved bits 16:27", (aHostRawExt[1].ebx >> 16) & 0xfff, (aRawExt[1].ebx >> 16) & 0xfff);
    3067             CPUID_CHECK2_WRN("PkgType",             (aHostRawExt[1].ebx >> 28) &   0xf, (aRawExt[1].ebx >> 28) &   0xf);
     3065            CPUID_CHECK2_WRN("CPU BrandID",          aHostRawExt[1].uEbx & 0xffff, aRawExt[1].uEbx & 0xffff);
     3066            CPUID_CHECK2_WRN("Reserved bits 16:27", (aHostRawExt[1].uEbx >> 16) & 0xfff, (aRawExt[1].uEbx >> 16) & 0xfff);
     3067            CPUID_CHECK2_WRN("PkgType",             (aHostRawExt[1].uEbx >> 28) &   0xf, (aRawExt[1].uEbx >> 28) &   0xf);
    30683068
    30693069            /* CPUID(0x80000001).ecx */
    3070             CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
    3071             CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);
    3072             CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);
    3073             CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);
    3074             CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L);
    3075             CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM);
    3076             CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);
    3077             CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);
    3078             CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);
    3079             CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW);
    3080             CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS);
    3081             CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5);
    3082             CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);
    3083             CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT);
    3084             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
    3085             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
    3086             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
    3087             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
    3088             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
    3089             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
    3090             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
    3091             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
    3092             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
    3093             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
    3094             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
    3095             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
    3096             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
    3097             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
    3098             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
    3099             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
    3100             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
    3101             CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));
     3070            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
     3071            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CMPL);
     3072            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SVM);
     3073            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);
     3074            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CR8L);
     3075            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_ABM);
     3076            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);
     3077            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);
     3078            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);
     3079            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_OSVW);
     3080            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_IBS);
     3081            CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE5);
     3082            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);
     3083            CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_WDT);
     3084            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(14));
     3085            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(15));
     3086            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(16));
     3087            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(17));
     3088            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(18));
     3089            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(19));
     3090            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(20));
     3091            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(21));
     3092            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(22));
     3093            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(23));
     3094            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(24));
     3095            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(25));
     3096            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(26));
     3097            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(27));
     3098            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(28));
     3099            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(29));
     3100            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(30));
     3101            CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(31));
    31023102
    31033103            /* CPUID(0x80000001).edx */
    3104             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FPU);
    3105             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_VME);
    3106             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_DE);
    3107             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE);
    3108             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_TSC);
    3109             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MSR);
    3110             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAE);
    3111             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCE);
    3112             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CX8);
    3113             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_APIC);
    3114             CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(10) /*reserved*/);
    3115             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SEP);
    3116             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MTRR);
    3117             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PGE);
    3118             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCA);
    3119             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CMOV);
    3120             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAT);
    3121             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE36);
    3122             CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(18) /*reserved*/);
    3123             CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(19) /*reserved*/);
    3124             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
    3125             CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(21) /*reserved*/);
    3126             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
    3127             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MMX);
    3128             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FXSR);
    3129             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
    3130             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
    3131             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
    3132             CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(28) /*reserved*/);
    3133             CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
    3134             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
    3135             CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
     3104            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FPU);
     3105            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_VME);
     3106            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_DE);
     3107            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE);
     3108            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_TSC);
     3109            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MSR);
     3110            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PAE);
     3111            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MCE);
     3112            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_CX8);
     3113            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_APIC);
     3114            CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(10) /*reserved*/);
     3115            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_SEP);
     3116            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MTRR);
     3117            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PGE);
     3118            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MCA);
     3119            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_CMOV);
     3120            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PAT);
     3121            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE36);
     3122            CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(18) /*reserved*/);
     3123            CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(19) /*reserved*/);
     3124            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_NX);
     3125            CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(21) /*reserved*/);
     3126            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
     3127            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MMX);
     3128            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FXSR);
     3129            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
     3130            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
     3131            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
     3132            CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(28) /*reserved*/);
     3133            CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
     3134            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
     3135            CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
    31363136
    31373137            /** @todo verify the rest as well. */
     
    31583158
    31593159    /* CPUID(1).ecx */
    3160     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);    // -> EMU
    3161     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL);  // -> EMU?
    3162     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64);  // -> EMU?
    3163     CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
    3164     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS);   // -> EMU?
    3165     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_VMX);     // -> EMU
    3166     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SMX);     // -> EMU
    3167     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_EST);     // -> EMU
    3168     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TM2);     // -> EMU?
    3169     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3);   // -> EMU
    3170     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID);  // -> EMU
    3171     CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
    3172     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA);     // -> EMU? what's this?
    3173     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16);    // -> EMU?
    3174     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
    3175     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM);    // -> EMU
    3176     CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
    3177     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCID);
    3178     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DCA);     // -> EMU?
    3179     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1);  // -> EMU
    3180     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2);  // -> EMU
    3181     CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
    3182     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE);   // -> EMU
    3183     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT);  // -> EMU
    3184     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TSCDEADL);
    3185     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES);     // -> EMU
    3186     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE);   // -> EMU
    3187     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU
    3188     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX);     // -> EMU?
    3189     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_F16C);
    3190     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_RDRAND);
    3191     CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_HVP);     // Normally not set by host
     3160    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE3);    // -> EMU
     3161    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCLMUL);  // -> EMU?
     3162    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DTES64);  // -> EMU?
     3163    CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_MONITOR);
     3164    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CPLDS);   // -> EMU?
     3165    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_VMX);     // -> EMU
     3166    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SMX);     // -> EMU
     3167    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_EST);     // -> EMU
     3168    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TM2);     // -> EMU?
     3169    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSSE3);   // -> EMU
     3170    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CNTXID);  // -> EMU
     3171    CPUID_GST_FEATURE_RET(Std, uEcx, RT_BIT_32(11) /*reserved*/ );
     3172    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_FMA);     // -> EMU? what's this?
     3173    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CX16);    // -> EMU?
     3174    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
     3175    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PDCM);    // -> EMU
     3176    CPUID_GST_FEATURE_RET(Std, uEcx, RT_BIT_32(16) /*reserved*/);
     3177    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCID);
     3178    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DCA);     // -> EMU?
     3179    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_1);  // -> EMU
     3180    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_2);  // -> EMU
     3181    CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_X2APIC);
     3182    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MOVBE);   // -> EMU
     3183    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_POPCNT);  // -> EMU
     3184    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TSCDEADL);
     3185    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES);     // -> EMU
     3186    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE);   // -> EMU
     3187    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU
     3188    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX);     // -> EMU?
     3189    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C);
     3190    CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_RDRAND);
     3191    CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_HVP);     // Normally not set by host
    31923192
    31933193    /* CPUID(1).edx */
    3194     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
    3195     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
    3196     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE);      // -> EMU?
    3197     CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
    3198     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC);     // -> EMU
    3199     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR);     // -> EMU
    3200     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
    3201     CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
    3202     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8);     // -> EMU?
    3203     CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
    3204     CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
    3205     CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
    3206     CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
    3207     CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
    3208     CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
    3209     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV);    // -> EMU
    3210     CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
    3211     CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
    3212     CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
    3213     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH);   // -> EMU
    3214     CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
    3215     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DS);      // -> EMU?
    3216     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_ACPI);    // -> EMU?
    3217     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX);     // -> EMU
    3218     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR);    // -> EMU
    3219     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE);     // -> EMU
    3220     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2);    // -> EMU
    3221     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SS);      // -> EMU?
    3222     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_HTT);     // -> EMU?
    3223     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TM);      // -> EMU?
    3224     CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/);   // -> EMU
    3225     CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PBE);     // -> EMU?
     3194    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FPU);
     3195    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_VME);
     3196    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DE);      // -> EMU?
     3197    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE);
     3198    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TSC);     // -> EMU
     3199    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MSR);     // -> EMU
     3200    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PAE);
     3201    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCE);
     3202    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CX8);     // -> EMU?
     3203    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_APIC);
     3204    CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(10) /*reserved*/);
     3205    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SEP);
     3206    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MTRR);
     3207    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PGE);
     3208    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCA);
     3209    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CMOV);    // -> EMU
     3210    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAT);
     3211    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE36);
     3212    CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSN);
     3213    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CLFSH);   // -> EMU
     3214    CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(20) /*reserved*/);
     3215    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DS);      // -> EMU?
     3216    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_ACPI);    // -> EMU?
     3217    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MMX);     // -> EMU
     3218    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FXSR);    // -> EMU
     3219    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE);     // -> EMU
     3220    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE2);    // -> EMU
     3221    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SS);      // -> EMU?
     3222    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_HTT);     // -> EMU?
     3223    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TM);      // -> EMU?
     3224    CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(30) /*JMPE/IA64*/);   // -> EMU
     3225    CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PBE);     // -> EMU?
    32263226
    32273227    /* CPUID(0x80000000). */
     
    32313231    {
    32323232        /** @todo deal with no 0x80000001 on the host. */
    3233         bool const fHostAmd  = ASMIsAmdCpuEx(aHostRawStd[0].ebx, aHostRawStd[0].ecx, aHostRawStd[0].edx);
    3234         bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].ebx, aGuestCpuIdExt[0].ecx, aGuestCpuIdExt[0].edx);
     3233        bool const fHostAmd  = ASMIsAmdCpuEx(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx);
     3234        bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx);
    32353235
    32363236        /* CPUID(0x80000001).ecx */
    3237         CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);   // -> EMU
    3238         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);    // -> EMU
    3239         CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);     // -> EMU
    3240         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???
    3241         CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L);    // -> EMU
    3242         CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM);     // -> EMU
    3243         CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);   // -> EMU
    3244         CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
    3245         CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU
    3246         CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW);    // -> EMU?
    3247         CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS);     // -> EMU
    3248         CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5);    // -> EMU
    3249         CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);  // -> EMU
    3250         CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT);     // -> EMU
    3251         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
    3252         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
    3253         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
    3254         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
    3255         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
    3256         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
    3257         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
    3258         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
    3259         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
    3260         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
    3261         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
    3262         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
    3263         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
    3264         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
    3265         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
    3266         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
    3267         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
    3268         CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));
     3237        CPUID_GST_FEATURE_WRN(Ext, uEcx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);   // -> EMU
     3238        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CMPL);    // -> EMU
     3239        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SVM);     // -> EMU
     3240        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???
     3241        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CR8L);    // -> EMU
     3242        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_ABM);     // -> EMU
     3243        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);   // -> EMU
     3244        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
     3245        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU
     3246        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_OSVW);    // -> EMU?
     3247        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_IBS);     // -> EMU
     3248        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE5);    // -> EMU
     3249        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);  // -> EMU
     3250        CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_WDT);     // -> EMU
     3251        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(14));
     3252        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(15));
     3253        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(16));
     3254        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(17));
     3255        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(18));
     3256        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(19));
     3257        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(20));
     3258        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(21));
     3259        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(22));
     3260        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(23));
     3261        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(24));
     3262        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(25));
     3263        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(26));
     3264        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(27));
     3265        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(28));
     3266        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(29));
     3267        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(30));
     3268        CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(31));
    32693269
    32703270        /* CPUID(0x80000001).edx */
    3271         CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_FPU,   X86_CPUID_FEATURE_EDX_FPU);     // -> EMU
    3272         CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_VME,   X86_CPUID_FEATURE_EDX_VME);     // -> EMU
    3273         CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_DE,    X86_CPUID_FEATURE_EDX_DE);      // -> EMU
    3274         CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_PSE,   X86_CPUID_FEATURE_EDX_PSE);
    3275         CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_TSC,   X86_CPUID_FEATURE_EDX_TSC);     // -> EMU
    3276         CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_MSR,   X86_CPUID_FEATURE_EDX_MSR);     // -> EMU
    3277         CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_PAE,   X86_CPUID_FEATURE_EDX_PAE);
    3278         CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_MCE,   X86_CPUID_FEATURE_EDX_MCE);
    3279         CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_CX8,   X86_CPUID_FEATURE_EDX_CX8);     // -> EMU?
    3280         CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_APIC,  X86_CPUID_FEATURE_EDX_APIC);
    3281         CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(10) /*reserved*/);
    3282         CPUID_GST_FEATURE_IGN(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL);                              // On Intel: long mode only.
    3283         CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_MTRR,  X86_CPUID_FEATURE_EDX_MTRR);
    3284         CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_PGE,   X86_CPUID_FEATURE_EDX_PGE);
    3285         CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_MCA,   X86_CPUID_FEATURE_EDX_MCA);
    3286         CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_CMOV,  X86_CPUID_FEATURE_EDX_CMOV);    // -> EMU
    3287         CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_PAT,   X86_CPUID_FEATURE_EDX_PAT);
    3288         CPUID_GST_FEATURE2_IGN(        edx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
    3289         CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(18) /*reserved*/);
    3290         CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(19) /*reserved*/);
    3291         CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
    3292         CPUID_GST_FEATURE_WRN(    Ext, edx, RT_BIT_32(21) /*reserved*/);
    3293         CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
    3294         CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_MMX,   X86_CPUID_FEATURE_EDX_MMX);     // -> EMU
    3295         CPUID_GST_FEATURE2_RET(        edx, X86_CPUID_AMD_FEATURE_EDX_FXSR,  X86_CPUID_FEATURE_EDX_FXSR);    // -> EMU
    3296         CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
    3297         CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
    3298         CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
    3299         CPUID_GST_FEATURE_IGN(    Ext, edx, RT_BIT_32(28) /*reserved*/);
    3300         CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
    3301         CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
    3302         CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
     3271        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_FPU,   X86_CPUID_FEATURE_EDX_FPU);     // -> EMU
     3272        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_VME,   X86_CPUID_FEATURE_EDX_VME);     // -> EMU
     3273        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_DE,    X86_CPUID_FEATURE_EDX_DE);      // -> EMU
     3274        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE,   X86_CPUID_FEATURE_EDX_PSE);
     3275        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_TSC,   X86_CPUID_FEATURE_EDX_TSC);     // -> EMU
     3276        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_MSR,   X86_CPUID_FEATURE_EDX_MSR);     // -> EMU
     3277        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_PAE,   X86_CPUID_FEATURE_EDX_PAE);
     3278        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_MCE,   X86_CPUID_FEATURE_EDX_MCE);
     3279        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_CX8,   X86_CPUID_FEATURE_EDX_CX8);     // -> EMU?
     3280        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_APIC,  X86_CPUID_FEATURE_EDX_APIC);
     3281        CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(10) /*reserved*/);
     3282        CPUID_GST_FEATURE_IGN(    Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL);                              // On Intel: long mode only.
     3283        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_MTRR,  X86_CPUID_FEATURE_EDX_MTRR);
     3284        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_PGE,   X86_CPUID_FEATURE_EDX_PGE);
     3285        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_MCA,   X86_CPUID_FEATURE_EDX_MCA);
     3286        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_CMOV,  X86_CPUID_FEATURE_EDX_CMOV);    // -> EMU
     3287        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_PAT,   X86_CPUID_FEATURE_EDX_PAT);
     3288        CPUID_GST_FEATURE2_IGN(        uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
     3289        CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(18) /*reserved*/);
     3290        CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(19) /*reserved*/);
     3291        CPUID_GST_FEATURE_RET(    Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_NX);
     3292        CPUID_GST_FEATURE_WRN(    Ext, uEdx, RT_BIT_32(21) /*reserved*/);
     3293        CPUID_GST_FEATURE_RET(    Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
     3294        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_MMX,   X86_CPUID_FEATURE_EDX_MMX);     // -> EMU
     3295        CPUID_GST_FEATURE2_RET(        uEdx, X86_CPUID_AMD_FEATURE_EDX_FXSR,  X86_CPUID_FEATURE_EDX_FXSR);    // -> EMU
     3296        CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
     3297        CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
     3298        CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
     3299        CPUID_GST_FEATURE_IGN(    Ext, uEdx, RT_BIT_32(28) /*reserved*/);
     3300        CPUID_GST_FEATURE_RET(    Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
     3301        CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
     3302        CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
    33033303    }
    33043304
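Unlike the raw checks earlier, the CPUID_GST_FEATURE_* and CPUID_GST_AMD_FEATURE_* checks above look at what was exposed to the guest when the state was saved: a feature the guest has already seen must still be available (or emulated, hence the "-> EMU" notes) on the current setup, while features the new host merely has to spare are harmless. The FEATURE2 variants accept the bit from either the AMD or the Intel position, since many CPUID(0x80000001).edx bits mirror leaf 1. A rough sketch of the _RET rule, with a hypothetical helper and an assumed failure status:

    static int cpumCheckGuestFeatureBit(uint32_t fSavedGuest, uint32_t fNowAvailable,
                                        uint32_t fBit, const char *pszName)
    {
        /* The guest already saw this feature but we can no longer provide it: refuse the load. */
        if ((fSavedGuest & fBit) && !(fNowAvailable & fBit))
        {
            LogRel(("CPUM: guest feature %s was exposed in the saved state but is unavailable now\n", pszName));
            return VERR_SSM_LOAD_CPUID_MISMATCH;
        }
        return VINF_SUCCESS;    /* the _WRN/_IGN variants only log or ignore the difference */
    }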
     
    33463346
    33473347/**
     3348 * Gets a pointer to the default CPUID leaf.
     3349 *
     3350 * @returns Raw-mode pointer to the default CPUID leaf (read-only).
     3351 * @param   pVM         Pointer to the VM.
     3352 * @remark  Intended for PATM only.
     3353 */
     3354VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM)
     3355{
     3356    return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdPatmDef);
     3357}
     3358
     3359
     3360/**
     3361 * Gets a pointer to the CPUID leaf array.
     3362 *
     3363 * @returns Raw-mode pointer to the CPUID leaf array.
     3364 * @param   pVM         Pointer to the VM.
     3365 * @remark  Intended for PATM only.
     3366 */
     3367VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayRCPtr(PVM pVM)
     3368{
     3369    Assert(MMHyperRCToR3(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesRC) == pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
     3370    return pVM->cpum.s.GuestInfo.paCpuIdLeavesRC;
     3371}
     3372
     3373
     3374/**
     3375 * Gets a pointer to the CPUID leaf array.
     3376 *
     3377 * @returns Raw-mode pointer to the end of CPUID leaf array (exclusive).
     3378 * @param   pVM         Pointer to the VM.
     3379 * @remark  Intended for PATM only.
     3380 */
     3381VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(PVM pVM)
     3382{
     3383    Assert(MMHyperRCToR3(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesRC) == pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
     3384    return pVM->cpum.s.GuestInfo.paCpuIdLeavesRC
     3385         + pVM->cpum.s.GuestInfo.cCpuIdLeaves * sizeof(CPUMCPUIDLEAF);
     3386}
     3387
     3388
     3389/**
     3390 * Gets the unknown CPUID leaf method.
     3391 *
     3392 * @returns Unknown CPUID leaf method.
     3393 * @param   pVM         Pointer to the VM.
     3394 * @remark  Intended for PATM only.
     3395 */
     3396VMMR3_INT_DECL(CPUMUKNOWNCPUID) CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(PVM pVM)
     3397{
     3398    return pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod;
     3399}
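Together these four getters hand PATM everything the reworked CPUID patch template needs: the bounds of the guest CPUID leaf array, the default leaf for unknown input, and the unknown-leaf method. A hedged usage sketch (the helper is hypothetical; the real consumers are the PATM_CPUID_* fixups resolved in PATM.cpp and used by PATMA.asm below):

    static void patmLogCpuidPatchDataSketch(PVM pVM)
    {
        RTRCPTR         RCPtrLeaves    = CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM);    /* first CPUMCPUIDLEAF          */
        RTRCPTR         RCPtrLeavesEnd = CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM); /* end of the array (exclusive) */
        RTRCPTR         RCPtrDefLeaf   = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);      /* CPUMCPUID for unknown leaves */
        CPUMUKNOWNCPUID enmUnknown     = CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM);
        Log(("PATM: CPUID leaves %RRv..%RRv, default leaf %RRv, unknown-leaf method %d\n",
             RCPtrLeaves, RCPtrLeavesEnd, RCPtrDefLeaf, enmUnknown));
    }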
     3400
     3401
     3402
     3403/**
    33483404 * Gets a number of standard CPUID leafs (PATM only).
    33493405 *
    33503406 * @returns Number of leafs.
    33513407 * @param   pVM         Pointer to the VM.
    3352  * @remark  Intended for PATM.
     3408 * @remark  Intended for PATM - legacy, don't use in new code.
    33533409 */
    33543410VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM)
     
    33633419 * @returns Number of leafs.
    33643420 * @param   pVM         Pointer to the VM.
    3365  * @remark  Intended for PATM.
     3421 * @remark  Intended for PATM - legacy, don't use in new code.
    33663422 */
    33673423VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM)
     
    33763432 * @returns Number of leafs.
    33773433 * @param   pVM         Pointer to the VM.
    3378  * @remark  Intended for PATM.
     3434 * @remark  Intended for PATM - legacy, don't use in new code.
    33793435 */
    33803436VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM)
     
    33893445 * CPUMR3GetGuestCpuIdStdMax() give the size of the array.
    33903446 *
    3391  * @returns Pointer to the standard CPUID leaves (read-only).
     3447 * @returns Raw-mode pointer to the standard CPUID leaves (read-only).
    33923448 * @param   pVM         Pointer to the VM.
    3393  * @remark  Intended for PATM.
     3449 * @remark  Intended for PATM - legacy, don't use in new code.
    33943450 */
    33953451VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM)
     
    34043460 * CPUMGetGuestCpuIdExtMax() give the size of the array.
    34053461 *
    3406  * @returns Pointer to the extended CPUID leaves (read-only).
     3462 * @returns Raw-mode pointer to the extended CPUID leaves (read-only).
    34073463 * @param   pVM         Pointer to the VM.
    3408  * @remark  Intended for PATM.
     3464 * @remark  Intended for PATM - legacy, don't use in new code.
    34093465 */
    34103466VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM)
     
    34193475 * CPUMGetGuestCpuIdCentaurMax() give the size of the array.
    34203476 *
    3421  * @returns Pointer to the centaur CPUID leaves (read-only).
     3477 * @returns Raw-mode pointer to the centaur CPUID leaves (read-only).
    34223478 * @param   pVM         Pointer to the VM.
    3423  * @remark  Intended for PATM.
     3479 * @remark  Intended for PATM - legacy, don't use in new code.
    34243480 */
    34253481VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM)
    34263482{
    34273483    return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdPatmCentaur[0]);
    3428 }
    3429 
    3430 
    3431 /**
    3432  * Gets a pointer to the default CPUID leaf.
    3433  *
    3434  * @returns Pointer to the default CPUID leaf (read-only).
    3435  * @param   pVM         Pointer to the VM.
    3436  * @remark  Intended for PATM.
    3437  */
    3438 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM)
    3439 {
    3440     return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdPatmDef);
    34413484}
    34423485
  • trunk/src/VBox/VMM/VMMR3/PATM.cpp

    r54687 r54714  
    748748
    749749    /*
    750      * Apply fixups
     750     * Apply fixups.
    751751     */
    752     PRELOCREC pRec = 0;
    753     AVLPVKEY  key  = 0;
    754 
    755     while (true)
    756     {
    757         /* Get the record that's closest from above */
    758         pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
    759         if (pRec == 0)
     752    AVLPVKEY key = NULL;
     753    for (;;)
     754    {
     755        /* Get the record that's closest from above (after or equal to key). */
     756        PRELOCREC pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
     757        if (!pRec)
    760758            break;
    761759
    762         key = (AVLPVKEY)(pRec->pRelocPos + 1);   /* search for the next record during the next round. */
     760        key = (uint8_t *)pRec->Core.Key + 1;   /* search for the next record during the next round. */
    763761
    764762        switch (pRec->uType)
    765763        {
     764        case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
     765            Assert(pRec->pDest == pRec->pSource);
     766            Log(("Absolute patch template fixup type %#x at %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
     767            *(RTRCUINTPTR *)pRec->pRelocPos += delta;
     768            break;
     769
    766770        case FIXUP_ABSOLUTE:
    767771            Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
     
    26482652            if (fAddFixup)
    26492653            {
    2650                 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
     2654                if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
     2655                                        pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
    26512656                {
    26522657                    Log(("Relocation failed for the jump in the guest code!!\n"));
     
    26642669            if (fAddFixup)
    26652670            {
    2666                 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
     2671                if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump,
     2672                                        pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
    26672673                {
    26682674                    Log(("Relocation failed for the jump in the guest code!!\n"));
     
    26892695        if (fAddFixup)
    26902696        {
    2691             if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
     2697            if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32,
     2698                                    PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
    26922699            {
    26932700                Log(("Relocation failed for the jump in the guest code!!\n"));
     
    27832790    if (fAddFixup)
    27842791    {
    2785         if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
     2792        if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH,
     2793                                pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
    27862794        {
    27872795            Log(("Relocation failed for the jump in the guest code!!\n"));
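One note on the fixup loop rewritten near the top of this file's changes: RTAvlPVGetBestFit with fAbove=true returns the record with the smallest key at or above the given key, so starting from a NULL key and bumping it just past each returned record walks every fixup in ascending key order without a separate enumeration callback. A minimal sketch of that pattern, assuming only the RELOCREC/FixupTree definitions from PATMInternal.h:

    AVLPVKEY key = NULL;
    for (;;)
    {
        /* Fetch the record with the smallest key >= 'key'; NULL once the tree is exhausted. */
        PRELOCREC pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true /*fAbove*/);
        if (!pRec)
            break;
        key = (uint8_t *)pRec->Core.Key + 1;        /* continue strictly after this record */
        /* ... apply the fixup described by pRec->uType ... */
    }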
  • trunk/src/VBox/VMM/VMMR3/PATMA.asm

    r54692 r54714  
    3232%include "VBox/err.mac"
    3333%include "iprt/x86.mac"
     34%include "VBox/vmm/cpum.mac"
    3435%include "VBox/vmm/vm.mac"
    3536%include "PATMA.mac"
     
    17231724;
    17241725BEGIN_PATCH g_patmCpuidRecord, PATMCpuidReplacement
     1726    not     dword [esp-16]              ; probe stack before starting, just in case.
     1727    not     dword [esp-16]
    17251728    mov     dword [ss:PATM_INTERRUPTFLAG], 0
    17261729PATCH_FIXUP PATM_INTERRUPTFLAG
    17271730    pushf
    17281731
    1729     cmp     eax, PATM_CPUID_STD_MAX
    1730 PATCH_FIXUP PATM_CPUID_STD_MAX
    1731     jb      cpuid_std
    1732     cmp     eax, 0x80000000
    1733     jb      cpuid_def
    1734     cmp     eax, PATM_CPUID_EXT_MAX
    1735 PATCH_FIXUP PATM_CPUID_EXT_MAX
    1736     jb      cpuid_ext
    1737     cmp     eax, 0xc0000000
    1738     jb      cpuid_def
    1739     cmp     eax, PATM_CPUID_CENTAUR_MAX
    1740 PATCH_FIXUP PATM_CPUID_CENTAUR_MAX
    1741     jb      cpuid_centaur
    1742 
    1743 cpuid_def:
    1744     mov     eax, PATM_CPUID_DEF_PTR
      1732;; @todo We could put all this stuff in a CPUM assembly function and simply call it.
     1733
     1734    ; Save the registers we use for passthru and sub-leaf matching (eax is not used).
     1735    push    edx
     1736    push    ecx
     1737    push    ebx
     1738
     1739    ;
     1740    ; Perform a linear search of the strictly sorted CPUID leaf array.
     1741    ;
     1742    ; (Was going to do a binary search, but that ended up being complicated if
     1743    ; we want a flexible leaf size. Linear search is probably good enough.)
     1744    ;
     1745    mov     ebx, PATM_CPUID_ARRAY_PTR
     1746PATCH_FIXUP PATM_CPUID_ARRAY_PTR
     1747    mov     edx, PATM_CPUID_ARRAY_END_PTR
     1748PATCH_FIXUP PATM_CPUID_ARRAY_END_PTR
     1749    cmp     ebx, edx
     1750    jae     cpuid_unknown
     1751
     1752cpuid_lookup_leaf:
     1753    cmp     eax, [ss:ebx + CPUMCPUIDLEAF.uLeaf]
     1754    jbe     cpuid_maybe_match_eax
     1755    add     ebx, PATM_CPUID_ARRAY_ENTRY_SIZE
     1756PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE
     1757    cmp     ebx, edx
     1758    jb      cpuid_lookup_leaf
     1759    jmp     cpuid_unknown
     1760
     1761cpuid_maybe_match_eax:   
     1762    jne     cpuid_unknown
     1763
     1764    ; Sub-leaf match too?
     1765    mov     ecx, [esp + 4]
     1766    and     ecx, [ss:ebx + CPUMCPUIDLEAF.fSubLeafMask]
     1767    cmp     ecx, [ss:ebx + CPUMCPUIDLEAF.uSubLeaf]
     1768    je      cpuid_fetch
     1769
     1770    ; Search forward until we've got a matching sub-leaf (or not).
     1771cpuid_subleaf_lookup:
     1772    add     ebx, PATM_CPUID_ARRAY_ENTRY_SIZE
     1773PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE
     1774    cmp     ebx, edx
     1775    jae     cpuid_subleaf_not_found_sub_ebx
     1776    cmp     eax, [ss:ebx + CPUMCPUIDLEAF.uLeaf]   
     1777    jne     cpuid_subleaf_not_found_sub_ebx
     1778    cmp     ecx, [ss:ebx + CPUMCPUIDLEAF.uSubLeaf]   
     1779    ja      cpuid_subleaf_lookup
     1780    je      cpuid_fetch
     1781cpuid_subleaf_not_found_sub_ebx:
     1782    sub     ebx, PATM_CPUID_ARRAY_ENTRY_SIZE
     1783PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE
     1784   
     1785    ;
      1786    ; Out-of-range sub-leaves aren't emulated entirely faithfully here, but we
      1787    ; do an adequate job.
     1788    ;   
     1789cpuid_subleaf_not_found:
     1790    mov     ecx, [esp + 4]
     1791    test    dword [ss:ebx + CPUMCPUIDLEAF.fFlags], CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED
     1792    jnz     cpuid_load_zeros_except_ecx
     1793cpuid_load_zeros:
     1794    xor     ecx, ecx
     1795cpuid_load_zeros_except_ecx:
     1796    xor     edx, edx
     1797    xor     eax, eax
     1798    xor     ebx, ebx
     1799    jmp     cpuid_done
     1800
     1801    ;
     1802    ; Different CPUs have different ways of dealing with unknown CPUID leaves.
     1803    ;
     1804cpuid_unknown:
     1805    mov     edx, PATM_CPUID_UNKNOWN_METHOD
     1806PATCH_FIXUP PATM_CPUID_UNKNOWN_METHOD
     1807    cmp     edx, CPUMUKNOWNCPUID_PASSTHRU
     1808    je      cpuid_unknown_passthru
     1809    ; Load the default cpuid leaf.
     1810cpuid_unknown_def_leaf:
     1811    mov     ebx, PATM_CPUID_DEF_PTR
    17451812PATCH_FIXUP PATM_CPUID_DEF_PTR
    1746     jmp     cpuid_fetch
    1747 
    1748 cpuid_std:
    1749     mov     edx, PATM_CPUID_STD_PTR
    1750 PATCH_FIXUP PATM_CPUID_STD_PTR
    1751     jmp     cpuid_calc
    1752 
    1753 cpuid_ext:
    1754     and     eax, 0ffh                   
    1755     mov     edx, PATM_CPUID_EXT_PTR
    1756 PATCH_FIXUP PATM_CPUID_EXT_PTR
    1757     jmp     cpuid_calc
    1758 
    1759 cpuid_centaur:
    1760     and     eax, 0ffh                   
    1761     mov     edx, PATM_CPUID_CENTAUR_PTR
    1762 PATCH_FIXUP PATM_CPUID_CENTAUR_PTR
    1763 
    1764 cpuid_calc:
    1765     lea     eax, [ss:eax * 4]              ; 4 entries...
    1766     lea     eax, [ss:eax * 4]              ; 4 bytes each
    1767     add     eax, edx
    1768 
     1813    mov     edx, [ss:ebx + CPUMCPUID.uEdx]
     1814    mov     ecx, [ss:ebx + CPUMCPUID.uEcx]
     1815    mov     eax, [ss:ebx + CPUMCPUID.uEax]
     1816    mov     ebx, [ss:ebx + CPUMCPUID.uEbx]
     1817    jmp     cpuid_done
     1818    ; Pass thru the input values unmodified (eax is still virgin).
     1819cpuid_unknown_passthru:
     1820    mov     edx, [esp + 8]
     1821    mov     ecx, [esp + 4]
     1822    mov     ebx, [esp]
     1823    jmp     cpuid_done
     1824
     1825    ;
     1826    ; Normal return.
     1827    ;
    17691828cpuid_fetch:
    1770     mov     edx, [ss:eax + 12]             ; CPUMCPUID layout assumptions!
    1771     mov     ecx, [ss:eax + 8]
    1772     mov     ebx, [ss:eax + 4]
    1773     mov     eax, [ss:eax]
    1774 
     1829    mov     edx, [ss:ebx + CPUMCPUIDLEAF.uEdx]
     1830    mov     ecx, [ss:ebx + CPUMCPUIDLEAF.uEcx]
     1831    mov     eax, [ss:ebx + CPUMCPUIDLEAF.uEax]
     1832    mov     ebx, [ss:ebx + CPUMCPUIDLEAF.uEbx]
     1833             
     1834cpuid_done:
     1835    add     esp, 12
    17751836    popf
    17761837    mov     dword [ss:PATM_INTERRUPTFLAG], 1
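
For readers following the new PATMCpuidReplacement template above: the patch now walks a strictly sorted array of CPUMCPUIDLEAF entries instead of indexing three fixed per-range tables. Below is a minimal C sketch of that lookup, for illustration only; the structure mirrors CPUMCPUIDLEAF, but the helper name, the example struct name and the caller-supplied array are assumptions, not the CPUM API.

    /* Minimal sketch (not VirtualBox code) of the leaf lookup the patch template
     * performs: a linear scan of a strictly sorted leaf array, then a forward
     * scan for a matching sub-leaf.  Field names mirror CPUMCPUIDLEAF; the
     * helper name and parameters are hypothetical. */
    #include <stddef.h>
    #include <stdint.h>

    typedef struct EXAMPLECPUIDLEAF
    {
        uint32_t uLeaf, uSubLeaf, fSubLeafMask;
        uint32_t uEax, uEbx, uEcx, uEdx;
        uint32_t fFlags;
    } EXAMPLECPUIDLEAF;

    static const EXAMPLECPUIDLEAF *exampleCpuIdLookup(const EXAMPLECPUIDLEAF *paLeaves, size_t cLeaves,
                                                      uint32_t uLeaf, uint32_t uSubLeaf)
    {
        for (size_t i = 0; i < cLeaves; i++)
        {
            if (paLeaves[i].uLeaf > uLeaf)
                break;                              /* sorted array: the leaf isn't there */
            if (paLeaves[i].uLeaf == uLeaf)
            {
                /* Mask the sub-leaf like the patch code does, then scan forward for it. */
                uint32_t uMasked = uSubLeaf & paLeaves[i].fSubLeafMask;
                for (size_t j = i; j < cLeaves && paLeaves[j].uLeaf == uLeaf; j++)
                    if (paLeaves[j].uSubLeaf == uMasked)
                        return &paLeaves[j];
                break;                              /* leaf present, sub-leaf out of range */
            }
        }
        return NULL;                                /* unknown leaf */
    }

On a failed lookup the template consults PATM_CPUID_UNKNOWN_METHOD and either loads the default leaf or passes the input registers through unchanged, as shown in the cpuid_unknown paths above.
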
  • trunk/src/VBox/VMM/VMMR3/PATMA.mac

    r54686 r54714  
    2020
    2121;; @name Patch Fixup Types
      22; @remarks These fixup types are part of the saved state.
    2223; @{
    2324%define PATM_VMFLAGS                            0xF1ABCD00
     
    5253%define PATM_CALL_RETURN_ADDR                   0xF1ABCD19
    5354%define PATM_CPUID_CENTAUR_PTR                  0xF1ABCD1a
     55%define PATM_CPUID_ARRAY_PTR                    0xF1ABCD1b
     56%define PATM_CPUID_ARRAY_END_PTR                0xF1ABCD1c
     57%define PATM_CPUID_ARRAY_ENTRY_SIZE             0xF1ABCD1d
     58%define PATM_CPUID_UNKNOWN_METHOD               0xF1ABCD1e
     59
    5460
    5561;/* Anything larger doesn't require a fixup */
  • trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp

    r54688 r54714  
    9494    PRELOCREC pRec;
    9595
    96     Assert(uType == FIXUP_ABSOLUTE || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));
     96    Assert(   uType == FIXUP_ABSOLUTE
     97           || (uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL && pSource == pDest && PATM_IS_FIXUP_TYPE(pSource))
     98           || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));
    9799
    98100    LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%RRv source=%RRv dest=%RRv\n", uType, pRelocHC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemGC , pSource, pDest));
     
    162164#endif
    163165
    164                 /**
    165                  * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING A SAVED STATE WITH
    166                  * A DIFFERENT HYPERVISOR LAYOUT.
     166                /*
     167                 * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING
     168                 * A SAVED STATE WITH A DIFFERENT HYPERVISOR LAYOUT.
    167169                 */
    168170                switch (pAsmRecord->aRelocs[i].uType)
    169171                {
    170                 case PATM_VMFLAGS:
    171                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
    172                     break;
    173 
    174                 case PATM_PENDINGACTION:
    175                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
    176                     break;
    177 
    178                 case PATM_FIXUP:
    179                     /* Offset in aRelocs[i].uInfo is from the base of the function. */
    180                     dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->aRelocs[i].uInfo
    181                          + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC);
    182                     break;
     172                    /*
     173                     * PATMGCSTATE member fixups.
     174                     */
     175                    case PATM_VMFLAGS:
     176                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
     177                        break;
     178                    case PATM_PENDINGACTION:
     179                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
     180                        break;
     181                    case PATM_STACKPTR:
     182                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
     183                        break;
     184                    case PATM_INTERRUPTFLAG:
     185                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
     186                        break;
     187                    case PATM_INHIBITIRQADDR:
     188                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
     189                        break;
     190                    case PATM_TEMP_EAX:
     191                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
     192                        break;
     193                    case PATM_TEMP_ECX:
     194                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
     195                        break;
     196                    case PATM_TEMP_EDI:
     197                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
     198                        break;
     199                    case PATM_TEMP_EFLAGS:
     200                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
     201                        break;
     202                    case PATM_TEMP_RESTORE_FLAGS:
     203                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
     204                        break;
     205                    case PATM_CALL_PATCH_TARGET_ADDR:
     206                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr);
     207                        break;
     208                    case PATM_CALL_RETURN_ADDR:
     209                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr);
     210                        break;
    183211#ifdef VBOX_WITH_STATISTICS
    184                 case PATM_ALLPATCHCALLS:
    185                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
    186                     break;
    187 
    188                 case PATM_IRETEFLAGS:
    189                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
    190                     break;
    191 
    192                 case PATM_IRETCS:
    193                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
    194                     break;
    195 
    196                 case PATM_IRETEIP:
    197                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
    198                     break;
    199 
    200                 case PATM_PERPATCHCALLS:
    201                     dest = patmPatchQueryStatAddress(pVM, pPatch);
    202                     break;
     212                    case PATM_ALLPATCHCALLS:
     213                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
     214                        break;
     215                    case PATM_IRETEFLAGS:
     216                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
     217                        break;
     218                    case PATM_IRETCS:
     219                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
     220                        break;
     221                    case PATM_IRETEIP:
     222                        dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
     223                        break;
    203224#endif
    204                 case PATM_STACKPTR:
    205                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
    206                     break;
    207 
    208                 /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
    209                  * part to store the original return addresses.
    210                  */
    211                 case PATM_STACKBASE:
    212                     dest = pVM->patm.s.pGCStackGC;
    213                     break;
    214 
    215                 case PATM_STACKBASE_GUEST:
    216                     dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
    217                     break;
    218 
    219                 case PATM_RETURNADDR:   /* absolute guest address; no fixup required */
    220                     Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
    221                     dest = pCallInfo->pReturnGC;
    222                     break;
    223 
    224                 case PATM_PATCHNEXTBLOCK:  /* relative address of instruction following this block */
    225                     Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
    226 
    227                     /** @note hardcoded assumption that we must return to the instruction following this block */
    228                     dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->cbFunction;
    229                     break;
    230 
    231                 case PATM_CALLTARGET:   /* relative to patch address; no fixup required */
    232                     Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
    233 
    234                     /* Address must be filled in later. (see patmr3SetBranchTargets)  */
    235                     patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
    236                     dest = PATM_ILLEGAL_DESTINATION;
    237                     break;
    238 
    239                 case PATM_PATCHBASE:    /* Patch GC base address */
    240                     dest = pVM->patm.s.pPatchMemGC;
    241                     break;
    242 
    243                 case PATM_CPUID_STD_PTR:
    244                     dest = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
    245                     break;
    246 
    247                 case PATM_CPUID_EXT_PTR:
    248                     dest = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
    249                     break;
    250 
    251                 case PATM_CPUID_CENTAUR_PTR:
    252                     dest = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
    253                     break;
    254 
    255                 case PATM_CPUID_DEF_PTR:
    256                     dest = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
    257                     break;
    258 
    259                 case PATM_CPUID_STD_MAX:
    260                     dest = CPUMR3GetGuestCpuIdPatmStdMax(pVM);
    261                     break;
    262 
    263                 case PATM_CPUID_EXT_MAX:
    264                     dest = CPUMR3GetGuestCpuIdPatmExtMax(pVM);
    265                     break;
    266 
    267                 case PATM_CPUID_CENTAUR_MAX:
    268                     dest = CPUMR3GetGuestCpuIdPatmCentaurMax(pVM);
    269                     break;
    270 
    271                 case PATM_INTERRUPTFLAG:
    272                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
    273                     break;
    274 
    275                 case PATM_INHIBITIRQADDR:
    276                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
    277                     break;
    278 
    279                 case PATM_NEXTINSTRADDR:
    280                     Assert(pCallInfo);
    281                     /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */
    282                     dest = pCallInfo->pNextInstrGC;
    283                     break;
    284 
    285                 case PATM_CURINSTRADDR:
    286                     Assert(pCallInfo);
    287                     dest = pCallInfo->pCurInstrGC;
    288                     break;
    289 
    290                 case PATM_VM_FORCEDACTIONS:
    291                     /* @todo dirty assumptions when correcting this fixup during saved state loading. */
    292                     dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
    293                     break;
    294 
    295                 case PATM_TEMP_EAX:
    296                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
    297                     break;
    298                 case PATM_TEMP_ECX:
    299                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
    300                     break;
    301                 case PATM_TEMP_EDI:
    302                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
    303                     break;
    304                 case PATM_TEMP_EFLAGS:
    305                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
    306                     break;
    307                 case PATM_TEMP_RESTORE_FLAGS:
    308                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
    309                     break;
    310                 case PATM_CALL_PATCH_TARGET_ADDR:
    311                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr);
    312                     break;
    313                 case PATM_CALL_RETURN_ADDR:
    314                     dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr);
    315                     break;
    316 
    317                 /* Relative address of global patm lookup and call function. */
    318                 case PATM_LOOKUP_AND_CALL_FUNCTION:
    319                 {
    320                     RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
    321                     Assert(pVM->patm.s.pfnHelperCallGC);
    322                     Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
    323 
    324                     /* Relative value is target minus address of instruction after the actual call instruction. */
    325                     dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
    326                     break;
    327                 }
    328 
    329                 case PATM_RETURN_FUNCTION:
    330                 {
    331                     RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
    332                     Assert(pVM->patm.s.pfnHelperRetGC);
    333                     Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
    334 
    335                     /* Relative value is target minus address of instruction after the actual call instruction. */
    336                     dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
    337                     break;
    338                 }
    339 
    340                 case PATM_IRET_FUNCTION:
    341                 {
    342                     RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
    343                     Assert(pVM->patm.s.pfnHelperIretGC);
    344                     Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
    345 
    346                     /* Relative value is target minus address of instruction after the actual call instruction. */
    347                     dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
    348                     break;
    349                 }
    350 
    351                 case PATM_LOOKUP_AND_JUMP_FUNCTION:
    352                 {
    353                     RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
    354                     Assert(pVM->patm.s.pfnHelperJumpGC);
    355                     Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
    356 
    357                     /* Relative value is target minus address of instruction after the actual call instruction. */
    358                     dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
    359                     break;
    360                 }
    361 
    362                 default:
    363                     dest = PATM_ILLEGAL_DESTINATION;
    364                     AssertRelease(0);
    365                     break;
     225
     226
     227                    case PATM_FIXUP:
     228                        /* Offset in aRelocs[i].uInfo is from the base of the function. */
     229                        dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->aRelocs[i].uInfo
     230                             + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC);
     231                        break;
     232
     233#ifdef VBOX_WITH_STATISTICS
     234                    case PATM_PERPATCHCALLS:
     235                        dest = patmPatchQueryStatAddress(pVM, pPatch);
     236                        break;
     237#endif
     238
     239                    /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
     240                     * part to store the original return addresses.
     241                     */
     242                    case PATM_STACKBASE:
     243                        dest = pVM->patm.s.pGCStackGC;
     244                        break;
     245
     246                    case PATM_STACKBASE_GUEST:
     247                        dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
     248                        break;
     249
     250                    case PATM_RETURNADDR:   /* absolute guest address; no fixup required */
     251                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
     252                        dest = pCallInfo->pReturnGC;
     253                        break;
     254
     255                    case PATM_PATCHNEXTBLOCK:  /* relative address of instruction following this block */
     256                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
     257
     258                        /** @note hardcoded assumption that we must return to the instruction following this block */
     259                        dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->cbFunction;
     260                        break;
     261
     262                    case PATM_CALLTARGET:   /* relative to patch address; no fixup required */
     263                        Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
     264
     265                        /* Address must be filled in later. (see patmr3SetBranchTargets)  */
     266                        patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
     267                        dest = PATM_ILLEGAL_DESTINATION;
     268                        break;
     269
     270                    case PATM_PATCHBASE:    /* Patch GC base address */
     271                        dest = pVM->patm.s.pPatchMemGC;
     272                        break;
     273
     274                    case PATM_NEXTINSTRADDR:
     275                        Assert(pCallInfo);
     276                        /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */
     277                        dest = pCallInfo->pNextInstrGC;
     278                        break;
     279
     280                    case PATM_CURINSTRADDR:
     281                        Assert(pCallInfo);
     282                        dest = pCallInfo->pCurInstrGC;
     283                        break;
     284
     285                    /* Relative address of global patm lookup and call function. */
     286                    case PATM_LOOKUP_AND_CALL_FUNCTION:
     287                    {
     288                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
     289                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
     290                        Assert(pVM->patm.s.pfnHelperCallGC);
     291                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
     292
     293                        /* Relative value is target minus address of instruction after the actual call instruction. */
     294                        dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
     295                        break;
     296                    }
     297
     298                    case PATM_RETURN_FUNCTION:
     299                    {
     300                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
     301                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
     302                        Assert(pVM->patm.s.pfnHelperRetGC);
     303                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
     304
     305                        /* Relative value is target minus address of instruction after the actual call instruction. */
     306                        dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
     307                        break;
     308                    }
     309
     310                    case PATM_IRET_FUNCTION:
     311                    {
     312                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
     313                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
     314                        Assert(pVM->patm.s.pfnHelperIretGC);
     315                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
     316
     317                        /* Relative value is target minus address of instruction after the actual call instruction. */
     318                        dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
     319                        break;
     320                    }
     321
     322                    case PATM_LOOKUP_AND_JUMP_FUNCTION:
     323                    {
     324                        RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
     325                                                + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
     326                        Assert(pVM->patm.s.pfnHelperJumpGC);
     327                        Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
     328
     329                        /* Relative value is target minus address of instruction after the actual call instruction. */
     330                        dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
     331                        break;
     332                    }
     333
     334                    case PATM_CPUID_STD_MAX: /* saved state only */
     335                        dest = CPUMR3GetGuestCpuIdPatmStdMax(pVM);
     336                        break;
     337                    case PATM_CPUID_EXT_MAX: /* saved state only */
     338                        dest = CPUMR3GetGuestCpuIdPatmExtMax(pVM);
     339                        break;
     340                    case PATM_CPUID_CENTAUR_MAX: /* saved state only */
     341                        dest = CPUMR3GetGuestCpuIdPatmCentaurMax(pVM);
     342                        break;
     343
     344                    /*
      345                     * The following fixups need to be recalculated when loading a saved state.
     346                     * Note! Earlier saved state versions had different hacks for detecting these.
     347                     */
     348                    case PATM_VM_FORCEDACTIONS:
     349                        dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
     350                        break;
     351                    case PATM_CPUID_DEF_PTR:
     352                        dest = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
     353                        break;
     354                    case PATM_CPUID_ARRAY_PTR:
     355                        dest = CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM);
     356                        break;
     357                    case PATM_CPUID_ARRAY_END_PTR:
     358                        dest = CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM);
     359                        break;
     360                    case PATM_CPUID_ARRAY_ENTRY_SIZE:
     361                        dest = sizeof(CPUMCPUIDLEAF);
     362                        break;
     363                    case PATM_CPUID_UNKNOWN_METHOD:
     364                        dest = CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM);
     365                        break;
     366
     367                    case PATM_CPUID_STD_PTR: /* saved state only */
     368                        dest = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
     369                        break;
     370                    case PATM_CPUID_EXT_PTR: /* saved state only */
     371                        dest = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
     372                        break;
     373                    case PATM_CPUID_CENTAUR_PTR: /* saved state only */
     374                        dest = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
     375                        break;
     376
     377                    default:
     378                        dest = PATM_ILLEGAL_DESTINATION;
     379                        AssertReleaseFailed();
     380                        break;
    366381                }
    367382
     
    369384                if (pAsmRecord->aRelocs[i].uType < PATM_NO_FIXUP)
    370385                {
    371                     patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE);
     386                    patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL,
     387                                        pAsmRecord->aRelocs[i].uType /*pSources*/, pAsmRecord->aRelocs[i].uType /*pDest*/);
    372388                }
    373389                break;
  • trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp

    r54688 r54714  
    561561    rec.Core.Key = 0;
    562562
    563     if (rec.uType == FIXUP_ABSOLUTE)
    564     {
    565         /* Core.Key abused to store the fixup type. */
    566         if (*pFixup == pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))
    567             rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPU_FF_ACTION;
    568         else if (*pFixup == CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM))
    569             rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_DEFAULT;
    570         else if (*pFixup == CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM))
    571             rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_STANDARD;
    572         else if (*pFixup == CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM))
    573             rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_EXTENDED;
    574         else if (*pFixup == CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM))
    575             rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_CENTAUR;
    576     }
    577563
    578564    /* Save the lookup record. */
     
    11171103    {
    11181104    case FIXUP_ABSOLUTE:
     1105    case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
    11191106    {
    1120         if (pRec->pSource && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource))
     1107        Assert(   pRec->uType != PATM_SAVED_STATE_VERSION_NO_RAW_MEM
     1108               || (pRec->pSource == pRec->pDest && PATM_IS_FIXUP_TYPE(pRec->pSource)) );
     1109
     1110        /* bird: What is this for exactly?  Only the MMIO fixups used to have pSource set. */
     1111        if (    pRec->pSource
     1112            && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource)
     1113            && pRec->uType != FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL)
    11211114            break;
    11221115
     
    12551248            *pFixup = (uFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
    12561249        }
    1257         /* Boldly ASSUMES:
     1250        /*
     1251         * For PATM_SAVED_STATE_VERSION_FIXUP_HACK and earlier boldly ASSUME:
    12581252         * 1. That pCPUMCtxGC is in the VM structure and that its location is
    12591253         *    at the first page of the same 4 MB chunk.
    12601254         * 2. That the forced actions were in the first 32 bytes of the VM
    12611255         *    structure.
    1262          * 3. That the CPUM leafs are less than 8KB into the structure. */
     1256         * 3. That the CPUM leafs are less than 8KB into the structure.
     1257         */
    12631258        else if (   uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK
    12641259                 && uFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(32))
     
    12661261            LogFlow(("Changing fLocalForcedActions fixup from %RRv to %RRv\n", uFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
    12671262            *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
     1263            pRec->pSource = pRec->pDest = PATM_VM_FORCEDACTIONS;
     1264            pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
    12681265        }
    12691266        else if (   uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK
     
    12721269            static int cCpuidFixup = 0;
    12731270
    1274             /* very dirty assumptions about the cpuid patch and cpuid ordering. */
     1271            /* Very dirty assumptions about the cpuid patch and cpuid ordering. */
    12751272            switch (cCpuidFixup & 3)
    12761273            {
    12771274            case 0:
    12781275                *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
     1276                pRec->pSource = pRec->pDest = PATM_CPUID_DEF_PTR;
     1277                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
    12791278                break;
    12801279            case 1:
    12811280                *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
     1281                pRec->pSource = pRec->pDest = PATM_CPUID_STD_PTR;
     1282                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
    12821283                break;
    12831284            case 2:
    12841285                *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
     1286                pRec->pSource = pRec->pDest = PATM_CPUID_EXT_PTR;
     1287                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
    12851288                break;
    12861289            case 3:
    12871290                *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
     1291                pRec->pSource = pRec->pDest = PATM_CPUID_CENTAUR_PTR;
     1292                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
    12881293                break;
    12891294            }
     
    12911296            cCpuidFixup++;
    12921297        }
    1293         else if (uVersion >= PATM_SAVED_STATE_VERSION_MEM)
     1298        /*
      1299         * For PATM_SAVED_STATE_VERSION_MEM through PATM_SAVED_STATE_VERSION_NO_RAW_MEM
      1300         * we abused Core.Key to store the type of fixups that need correcting on load.
     1301         */
     1302        else if (   uVersion >= PATM_SAVED_STATE_VERSION_MEM
     1303                 && uVersion <= PATM_SAVED_STATE_VERSION_NO_RAW_MEM)
    12941304        {
    12951305            /* Core.Key abused to store the type of fixup. */
     
    12981308            case PATM_FIXUP_CPU_FF_ACTION:
    12991309                *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
     1310                pRec->pSource = pRec->pDest = PATM_VM_FORCEDACTIONS;
     1311                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
    13001312                LogFlow(("Changing cpu ff action fixup from %x to %x\n", uFixup, *pFixup));
    13011313                break;
    13021314            case PATM_FIXUP_CPUID_DEFAULT:
    13031315                *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
     1316                pRec->pSource = pRec->pDest = PATM_CPUID_DEF_PTR;
     1317                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
    13041318                LogFlow(("Changing cpuid def fixup from %x to %x\n", uFixup, *pFixup));
    13051319                break;
    13061320            case PATM_FIXUP_CPUID_STANDARD:
    13071321                *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
     1322                pRec->pSource = pRec->pDest = PATM_CPUID_STD_PTR;
     1323                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
    13081324                LogFlow(("Changing cpuid std fixup from %x to %x\n", uFixup, *pFixup));
    13091325                break;
    13101326            case PATM_FIXUP_CPUID_EXTENDED:
    13111327                *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
     1328                pRec->pSource = pRec->pDest = PATM_CPUID_EXT_PTR;
     1329                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
    13121330                LogFlow(("Changing cpuid ext fixup from %x to %x\n", uFixup, *pFixup));
    13131331                break;
    13141332            case PATM_FIXUP_CPUID_CENTAUR:
    13151333                *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
     1334                pRec->pSource = pRec->pDest = PATM_CPUID_CENTAUR_PTR;
     1335                pRec->uType   = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
    13161336                LogFlow(("Changing cpuid centaur fixup from %x to %x\n", uFixup, *pFixup));
    13171337                break;
     
    13191339                AssertMsgFailed(("Unexpected fixup value %p\n", (uintptr_t)pRec->Core.Key));
    13201340                break;
     1341            }
     1342        }
     1343        /*
     1344         * After PATM_SAVED_STATE_VERSION_NO_RAW_MEM we changed the fixup type
     1345         * and instead put the patch fixup code in the source and target addresses.
     1346         */
     1347        else if (   uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM
     1348                 && pRec->uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL)
     1349        {
     1350            Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_FIXUP_TYPE(pRec->pSource));
     1351            switch (pRec->pSource)
     1352            {
     1353                case PATM_VM_FORCEDACTIONS:
     1354                    *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
     1355                    break;
     1356                case PATM_CPUID_DEF_PTR:
     1357                    *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
     1358                    break;
     1359                case PATM_CPUID_ARRAY_PTR:
     1360                    *pFixup = CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM);
     1361                    break;
     1362                case PATM_CPUID_ARRAY_END_PTR:
     1363                    *pFixup = CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM);
     1364                    break;
     1365                case PATM_CPUID_ARRAY_ENTRY_SIZE:
     1366                    *pFixup = sizeof(CPUMCPUIDLEAF);
     1367                    break;
     1368                case PATM_CPUID_UNKNOWN_METHOD:
     1369                    *pFixup = CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM);
     1370                    break;
      1371                case PATM_CPUID_STD_PTR: /* Re-saved patches only. */
     1372                    *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
     1373                    break;
      1374                case PATM_CPUID_EXT_PTR: /* Re-saved patches only. */
     1375                    *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
     1376                    break;
      1377                case PATM_CPUID_CENTAUR_PTR: /* Re-saved patches only. */
     1378                    *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
     1379                    break;
    13211380            }
    13221381        }
  • trunk/src/VBox/VMM/include/PATMA.h

    r54687 r54714  
    2020
    2121/** @name Patch Fixup Types
      22 * @remarks These fixup types are part of the saved state.
    2223 * @{ */
    2324#define PATM_VMFLAGS                            0xF1ABCD00
     
    3435#define PATM_FIXUP                              0xF1ABCD07
    3536#define PATM_PENDINGACTION                      0xF1ABCD08
    36 #define PATM_CPUID_STD_PTR                      0xF1ABCD09
    37 #define PATM_CPUID_EXT_PTR                      0xF1ABCD0a
     37#define PATM_CPUID_STD_PTR                      0xF1ABCD09  /**< Legacy, saved state only. */
     38#define PATM_CPUID_EXT_PTR                      0xF1ABCD0a  /**< Legacy, saved state only. */
    3839#define PATM_CPUID_DEF_PTR                      0xF1ABCD0b
    3940#define PATM_STACKBASE                          0xF1ABCD0c  /**< Stack to store our private patch return addresses */
     
    5152#define PATM_CALL_PATCH_TARGET_ADDR             0xF1ABCD18
    5253#define PATM_CALL_RETURN_ADDR                   0xF1ABCD19
    53 #define PATM_CPUID_CENTAUR_PTR                  0xF1ABCD1a
     54#define PATM_CPUID_CENTAUR_PTR                  0xF1ABCD1a  /**< Legacy, saved state only. */
     55#define PATM_CPUID_ARRAY_PTR                    0xF1ABCD1b
     56#define PATM_CPUID_ARRAY_END_PTR                0xF1ABCD1c
     57#define PATM_CPUID_ARRAY_ENTRY_SIZE             0xF1ABCD1d
     58#define PATM_CPUID_UNKNOWN_METHOD               0xF1ABCD1e
    5459
    5560/* Anything larger doesn't require a fixup */
     
    6772#define PATM_IRET_FUNCTION                      0xF1ABCE0A  /**< Relative address of global PATM iret function. */
    6873#define PATM_CPUID_CENTAUR_MAX                  0xF1ABCE0B
     74
      75/** Identifies a patch fixup type value (with reasonable accuracy). */
     76#define PATM_IS_FIXUP_TYPE(a_uValue) \
     77    ( ((a_uValue) & UINT32_C(0xfffffC00)) == UINT32_C(0xF1ABCC00) && ((a_uValue) & UINT32_C(0xff)) < UINT32_C(0x30) )
    6978/** @} */
    7079
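
A quick illustration (not part of the changeset) of what the PATM_IS_FIXUP_TYPE mask test accepts; the sample values are constants defined above, and the small main() exists purely for demonstration.

    #include <stdint.h>
    #include <stdio.h>

    #define PATM_IS_FIXUP_TYPE(a_uValue) \
        ( ((a_uValue) & UINT32_C(0xfffffC00)) == UINT32_C(0xF1ABCC00) && ((a_uValue) & UINT32_C(0xff)) < UINT32_C(0x30) )

    int main(void)
    {
        /* 0xF1ABCDxx and 0xF1ABCExx values with a low byte below 0x30 pass the test. */
        printf("%d\n", PATM_IS_FIXUP_TYPE(UINT32_C(0xF1ABCD1b))); /* PATM_CPUID_ARRAY_PTR   -> 1 */
        printf("%d\n", PATM_IS_FIXUP_TYPE(UINT32_C(0xF1ABCE0B))); /* PATM_CPUID_CENTAUR_MAX -> 1 */
        printf("%d\n", PATM_IS_FIXUP_TYPE(UINT32_C(0xDEADBEEF))); /* ordinary value         -> 0 */
        return 0;
    }
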
  • trunk/src/VBox/VMM/include/PATMInternal.h

    r54688 r54714  
    3232/** @name Saved state version numbers.
    3333 * @{ */
     34/** New fixup type FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL. */
     35#define PATM_SAVED_STATE_VERSION                    57
    3436/** Uses normal structure serialization with markers and everything. */
    35 #define PATM_SAVED_STATE_VERSION                    56
     37#define PATM_SAVED_STATE_VERSION_NO_RAW_MEM         56
    3638/** Last version which saves structures as raw memory. */
    3739#define PATM_SAVED_STATE_VERSION_MEM                55
     
    98100#define PATM_MAX_INVALID_WRITES            16384
    99101
     102/** @name FIXUP_XXX - RELOCREC::uType values.
     103 * @{ */
     104/** Absolute fixup.  With one exception (MMIO cache), this does not take any
      105 * source or destination.  @sa FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL.  */
    100106#define FIXUP_ABSOLUTE                     0
    101107#define FIXUP_REL_JMPTOPATCH               1
    102108#define FIXUP_REL_JMPTOGUEST               2
     109/** Absolute fixup in patch assembly code template.
     110 *
      111 * The source and destination addresses are both set to the patch fixup type
      112 * (see PATM_IS_FIXUP_TYPE and friends in PATMA.h).  This is a recent addition
      113 * (CPUID sub-leaf code), so when loading older saved states it is usually
      114 * represented as FIXUP_ABSOLUTE. */
     115#define FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL   3
     116/** @} */
     117
    103118
    104119#define PATM_ILLEGAL_DESTINATION           0xDEADBEEF
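
To make the FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL flow concrete: because such records carry the PATM_* constant in both the source and destination fields, the loader can recompute the target address directly from that constant. The sketch below is illustrative only; the helper name and signature are assumptions, and only the cases shown mirror the switch added to PATMSSM.cpp above.

    /* Hypothetical helper (not in the changeset) showing how a fixup-type constant
     * is turned back into a 32-bit RC value when a new-format saved state is loaded.
     * The CPUMR3GetGuestCpuIdPatm* calls are the new APIs declared in cpum.h above. */
    static uint32_t exampleResolveAsmTmplFixup(PVM pVM, uint32_t uFixupType)
    {
        switch (uFixupType)
        {
            case PATM_CPUID_DEF_PTR:          return (uint32_t)CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
            case PATM_CPUID_ARRAY_PTR:        return (uint32_t)CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM);
            case PATM_CPUID_ARRAY_END_PTR:    return (uint32_t)CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM);
            case PATM_CPUID_ARRAY_ENTRY_SIZE: return (uint32_t)sizeof(CPUMCPUIDLEAF);
            case PATM_CPUID_UNKNOWN_METHOD:   return (uint32_t)CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM);
            default:                          return PATM_ILLEGAL_DESTINATION;
        }
    }
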
  • trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp

    r51288 r54714  
    46734673                   szNameC,
    46744674                   CPUMR3CpuIdUnknownLeafMethodName(enmUnknownMethod),
    4675                    DefUnknown.eax,
    4676                    DefUnknown.ebx,
    4677                    DefUnknown.ecx,
    4678                    DefUnknown.edx,
     4675                   DefUnknown.uEax,
     4676                   DefUnknown.uEbx,
     4677                   DefUnknown.uEcx,
     4678                   DefUnknown.uEdx,
    46794679                   szMsrMask,
    46804680                   szNameC,