VirtualBox

Changeset 49893 in vbox for trunk/src


Ignore:
Timestamp:
Dec 13, 2013 12:40:20 AM (11 years ago)
Author:
vboxsync
Message:

MSR rewrite: initial hacking - half disabled.

Location:
trunk/src/VBox/VMM
Files:
4 added
18 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/Makefile.kmk

    r49282 r49893  
    7777endif
    7878
     79ifdef VBOX_WITH_NEW_MSR_CODE
     80 VMM_COMMON_DEFS += VBOX_WITH_NEW_MSR_CODE
     81endif
     82
    7983
    8084#
     
    118122        VMMR3/CFGM.cpp \
    119123        VMMR3/CPUM.cpp \
     124        VMMR3/CPUMR3CpuId.cpp \
     125        VMMR3/CPUMR3Db.cpp \
    120126        VMMR3/CPUMDbg.cpp \
    121127        VMMR3/DBGF.cpp \
     
    187193       ,) \
    188194        VMMAll/CPUMAllRegs.cpp \
     195        VMMAll/CPUMAllMsrs.cpp \
    189196        VMMAll/CPUMStack.cpp \
    190197        VMMAll/DBGFAll.cpp \
     
    438445        VMMRZ/VMMRZ.cpp \
    439446        VMMAll/CPUMAllRegs.cpp \
     447        VMMAll/CPUMAllMsrs.cpp \
    440448        VMMAll/DBGFAll.cpp \
    441449        VMMAll/IEMAll.cpp \
     
    540548        VMMRZ/VMMRZ.cpp \
    541549        VMMAll/CPUMAllRegs.cpp \
     550        VMMAll/CPUMAllMsrs.cpp \
    542551        VMMAll/CPUMStack.cpp \
    543552        VMMAll/DBGFAll.cpp \
     
    610619 LIBRARIES += SSMStandalone
    611620 SSMStandalone_TEMPLATE = VBOXR3EXE
    612  SSMStandalone_DEFS     = IN_VMM_R3 IN_VMM_STATIC SSM_STANDALONE
     621 SSMStandalone_DEFS     = IN_VMM_R3 IN_VMM_STATIC SSM_STANDALONE CPUM_DB_STANDALONE
    613622 SSMStandalone_INCS     = include
    614  SSMStandalone_SOURCES  = VMMR3/SSM.cpp
     623 SSMStandalone_SOURCES  = \
     624        VMMR3/SSM.cpp \
     625        VMMR3/CPUMR3Db.cpp
    615626endif # !VBOX_ONLY_EXTPACKS
    616627
     
    704715endif # bird wants good stacks
    705716
     717
     718# Alias the CPU database entries.
     719$(foreach base,$(notdir $(basename $(wildcard $(PATH_SUB_CURRENT)/VMMR3/cpus/*.h))), $(eval $(base).o $(base).obj: CPUMR3Db.o))
     720
     721
    706722include $(FILE_KBUILD_SUB_FOOTER)
    707723
     
    732748LegacyandAMD64.o LegacyandAMD64.obj:           32BitToAMD64.o PAEToAMD64.o
    733749AMD64andLegacy.o AMD64andLegacy.obj:           AMD64To32Bit.o AMD64ToPAE.o
     750
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    r49849 r49893  
    870870}
    871871
     872#ifndef VBOX_WITH_NEW_MSR_CODE
    872873
    873874/**
     
    15851586}
    15861587
     1588#endif /* !VBOX_WITH_NEW_MSR_CODE */
     1589
    15871590
    15881591VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit)
     
    18491852{
    18501853    return pVCpu->cpum.s.Guest.msrEFER;
     1854}
     1855
     1856
     1857/**
     1858 * Looks up a CPUID leaf in the CPUID leaf array.
     1859 *
     1860 * @returns Pointer to the leaf if found, NULL if not.
     1861 *
     1862 * @param   pVM                 Pointer to the cross context VM structure.
     1863 * @param   uLeaf               The leaf to get.
     1864 * @param   uSubLeaf            The subleaf, if applicable.  Just pass 0 if it
     1865 *                              isn't.
     1866 */
     1867PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf)
     1868{
     1869    unsigned            iEnd     = pVM->cpum.s.GuestInfo.cCpuIdLeaves;
     1870    if (iEnd)
     1871    {
     1872        unsigned        iStart   = 0;
     1873        PCPUMCPUIDLEAF  paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves);
     1874        for (;;)
     1875        {
     1876            unsigned i = iStart + (iEnd - iStart) / 2U;
     1877            if (uLeaf < paLeaves[i].uLeaf)
     1878            {
     1879                if (i <= iStart)
     1880                    return NULL;
     1881                iEnd = i;
     1882            }
     1883            else if (uLeaf > paLeaves[i].uLeaf)
     1884            {
     1885                i += 1;
     1886                if (i >= iEnd)
     1887                    return NULL;
     1888                iStart = i;
     1889            }
     1890            else
     1891            {
     1892                uSubLeaf &= paLeaves[i].fSubLeafMask;
     1893                if (uSubLeaf != paLeaves[i].uSubLeaf)
     1894                {
     1895                    /* Find the right subleaf.  We return the last one before
     1896                       uSubLeaf if we don't find an exact match. */
     1897                    if (uSubLeaf < paLeaves[i].uSubLeaf)
     1898                        while (   i > 0
     1899                               && uLeaf    == paLeaves[i].uLeaf
     1900                               && uSubLeaf  < paLeaves[i].uSubLeaf)
     1901                            i--;
     1902                    else
     1903                        while (   i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves
     1904                               && uLeaf    == paLeaves[i + 1].uLeaf
     1905                               && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
     1906                            i++;
     1907                }
     1908                return &paLeaves[i];
     1909            }
     1910        }
     1911    }
     1912
     1913    return NULL;
    18511914}
    18521915
     
    18951958    if (    iLeaf == 4
    18961959        &&  cCurrentCacheIndex < 3
    1897         &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
     1960        &&  pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    18981961    {
    18991962        uint32_t type, level, sharing, linesize,
     
    19972060VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
    19982061{
     2062    PCPUMCPUIDLEAF pLeaf;
     2063
    19992064    switch (enmFeature)
    20002065    {
     
    20032068         */
    20042069        case CPUMCPUIDFEATURE_APIC:
    2005             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
    2006                 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
    2007             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
    2008                 &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
    2009                 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
     2070            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
     2071            if (pLeaf)
     2072                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;
     2073
     2074            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2075            if (   pLeaf
     2076                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
     2077                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
     2078
     2079            pVM->cpum.s.GuestFeatures.fApic = 1;
    20102080            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n"));
    20112081            break;
     
    20152085        */
    20162086        case CPUMCPUIDFEATURE_X2APIC:
    2017             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
    2018                 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC;
     2087            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
     2088            if (pLeaf)
     2089                pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;
     2090            pVM->cpum.s.GuestFeatures.fX2Apic = 1;
    20192091            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n"));
    20202092            break;
     
    20252097         */
    20262098        case CPUMCPUIDFEATURE_SEP:
    2027         {
    2028             if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP))
     2099            if (!pVM->cpum.s.HostFeatures.fSysEnter)
    20292100            {
    20302101                AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n"));
     
    20322103            }
    20332104
    2034             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
    2035                 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
     2105            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
     2106            if (pLeaf)
     2107                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;
     2108            pVM->cpum.s.GuestFeatures.fSysEnter = 1;
    20362109            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n"));
    20372110            break;
    2038         }
    20392111
    20402112        /*
     
    20432115         */
    20442116        case CPUMCPUIDFEATURE_SYSCALL:
    2045         {
    2046             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    2047                 ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
     2117            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2118            if (   !pLeaf
     2119                || !pVM->cpum.s.HostFeatures.fSysCall)
    20482120            {
    20492121#if HC_ARCH_BITS == 32
    2050                 /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL not set it seems in 32 bits mode.
    2051                  * Even when the cpu is capable of doing so in 64 bits mode.
    2052                  */
    2053                 if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    2054                     ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
    2055                     ||  !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))
     2122                /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL not set it seems in 32-bit
     2123                   mode by Intel, even when the cpu is capable of doing so in
     2124                   64-bit mode.  Long mode requires syscall support. */
     2125                if (!pVM->cpum.s.HostFeatures.fLongMode)
    20562126#endif
    20572127                {
     
    20602130                }
    20612131            }
     2132
    20622133            /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
    2063             pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
     2134            pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;
     2135            pVM->cpum.s.GuestFeatures.fSysCall = 1;
    20642136            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n"));
    20652137            break;
    2066         }
    20672138
    20682139        /*
     
    20712142         */
    20722143        case CPUMCPUIDFEATURE_PAE:
    2073         {
    2074             if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE))
     2144            if (!pVM->cpum.s.HostFeatures.fPae)
    20752145            {
    20762146                LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n"));
     
    20782148            }
    20792149
    2080             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
    2081                 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
    2082             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
    2083                 &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
    2084                 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
     2150            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
     2151            if (pLeaf)
     2152                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;
     2153
     2154            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2155            if (    pLeaf
     2156                &&  pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
     2157                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
     2158
     2159            pVM->cpum.s.GuestFeatures.fPae = 1;
    20852160            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n"));
    20862161            break;
    2087         }
    20882162
    20892163        /*
     
    20922166         */
    20932167        case CPUMCPUIDFEATURE_LONG_MODE:
    2094         {
    2095             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    2096                 ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
     2168            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2169            if (   !pLeaf
     2170                || !pVM->cpum.s.HostFeatures.fLongMode)
    20972171            {
    20982172                LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n"));
     
    21012175
    21022176            /* Valid for both Intel and AMD. */
    2103             pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
     2177            pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
     2178            pVM->cpum.s.GuestFeatures.fLongMode = 1;
    21042179            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n"));
    21052180            break;
    2106         }
    21072181
    21082182        /*
     
    21112185         */
    21122186        case CPUMCPUIDFEATURE_NX:
    2113         {
    2114             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    2115                 ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))
     2187            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2188            if (   !pLeaf
     2189                || !pVM->cpum.s.HostFeatures.fNoExecute)
    21162190            {
    21172191                LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n"));
     
    21202194
    21212195            /* Valid for both Intel and AMD. */
    2122             pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX;
     2196            pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;
     2197            pVM->cpum.s.GuestFeatures.fNoExecute = 1;
    21232198            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n"));
    21242199            break;
    2125         }
     2200
    21262201
    21272202        /*
     
    21302205         */
    21312206        case CPUMCPUIDFEATURE_LAHF:
    2132         {
    2133             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    2134                 ||  !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))
     2207            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2208            if (   !pLeaf
     2209                || !pVM->cpum.s.HostFeatures.fLahfSahf)
    21352210            {
    21362211                LogRel(("CPUM: WARNING! Can't turn on LAHF/SAHF when the host doesn't support it!\n"));
     
    21392214
    21402215            /* Valid for both Intel and AMD. */
    2141             pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
     2216            pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
     2217            pVM->cpum.s.GuestFeatures.fLahfSahf = 1;
    21422218            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
    21432219            break;
    2144         }
    2145 
     2220
     2221        /*
     2222         * Set the page attribute table bit.  This is alternative page level
     2223         * cache control that doesn't much matter when everything is
     2224         * virtualized, though it may when passing thru device memory.
     2225         */
    21462226        case CPUMCPUIDFEATURE_PAT:
    2147         {
    2148             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
    2149                 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT;
    2150             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
    2151                 &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
    2152                 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
     2227            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
     2228            if (pLeaf)
     2229                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;
     2230
     2231            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2232            if (   pLeaf
     2233                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
     2234                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;
     2235
     2236            pVM->cpum.s.GuestFeatures.fPat = 1;
    21532237            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n"));
    21542238            break;
    2155         }
    21562239
    21572240        /*
     
    21602243         */
    21612244        case CPUMCPUIDFEATURE_RDTSCP:
    2162         {
    2163             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
    2164                 ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
    2165                 ||  pVM->cpum.s.u8PortableCpuIdLevel > 0)
     2245            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2246            if (   !pLeaf
     2247                || !pVM->cpum.s.HostFeatures.fRdTscP
     2248                || pVM->cpum.s.u8PortableCpuIdLevel > 0)
    21662249            {
    21672250                if (!pVM->cpum.s.u8PortableCpuIdLevel)
     
    21712254
    21722255            /* Valid for both Intel and AMD. */
    2173             pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
     2256            pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
     2257            pVM->cpum.s.HostFeatures.fRdTscP = 1;
    21742258            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n"));
    21752259            break;
    2176         }
    21772260
    21782261       /*
     
    21802263        */
    21812264        case CPUMCPUIDFEATURE_HVP:
    2182             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
     2265            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
     2266            if (pLeaf)
    21832267                pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP;
     2268            pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1;
    21842269            LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n"));
    21852270            break;
     
    21892274            break;
    21902275    }
     2276
    21912277    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    21922278    {
     
    22082294    switch (enmFeature)
    22092295    {
     2296        case CPUMCPUIDFEATURE_APIC:         return pVM->cpum.s.GuestFeatures.fApic;
     2297        case CPUMCPUIDFEATURE_X2APIC:       return pVM->cpum.s.GuestFeatures.fX2Apic;
     2298        case CPUMCPUIDFEATURE_SYSCALL:      return pVM->cpum.s.GuestFeatures.fSysCall;
     2299        case CPUMCPUIDFEATURE_SEP:          return pVM->cpum.s.GuestFeatures.fSysEnter;
     2300        case CPUMCPUIDFEATURE_PAE:          return pVM->cpum.s.GuestFeatures.fPae;
     2301        case CPUMCPUIDFEATURE_NX:           return pVM->cpum.s.GuestFeatures.fNoExecute;
     2302        case CPUMCPUIDFEATURE_LAHF:         return pVM->cpum.s.GuestFeatures.fLahfSahf;
     2303        case CPUMCPUIDFEATURE_LONG_MODE:    return pVM->cpum.s.GuestFeatures.fLongMode;
     2304        case CPUMCPUIDFEATURE_PAT:          return pVM->cpum.s.GuestFeatures.fPat;
     2305        case CPUMCPUIDFEATURE_RDTSCP:       return pVM->cpum.s.GuestFeatures.fRdTscP;
     2306        case CPUMCPUIDFEATURE_HVP:          return pVM->cpum.s.GuestFeatures.fHypervisorPresent;
     2307
     2308        case CPUMCPUIDFEATURE_INVALID:
     2309        case CPUMCPUIDFEATURE_32BIT_HACK:
     2310            break;
     2311    }
     2312    AssertFailed();
     2313    return false;
     2314}
     2315
     2316
     2317/**
     2318 * Clears a CPUID feature bit.
     2319 *
     2320 * @param   pVM             Pointer to the VM.
     2321 * @param   enmFeature      The feature to clear.
     2322 */
     2323VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
     2324{
     2325    PCPUMCPUIDLEAF pLeaf;
     2326    switch (enmFeature)
     2327    {
     2328        case CPUMCPUIDFEATURE_APIC:
     2329            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
     2330            if (pLeaf)
     2331                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;
     2332
     2333            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2334            if (   pLeaf
     2335                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
     2336                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
     2337
     2338            pVM->cpum.s.GuestFeatures.fApic = 0;
     2339            Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
     2340            break;
     2341
     2342        case CPUMCPUIDFEATURE_X2APIC:
     2343            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
     2344            if (pLeaf)
     2345                pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
     2346            pVM->cpum.s.GuestFeatures.fX2Apic = 0;
     2347            Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
     2348            break;
     2349
    22102350        case CPUMCPUIDFEATURE_PAE:
    2211         {
    2212             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
    2213                 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE);
    2214             break;
    2215         }
    2216 
    2217         case CPUMCPUIDFEATURE_NX:
    2218         {
    2219             if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    2220                 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX);
    2221         }
    2222 
    2223         case CPUMCPUIDFEATURE_SYSCALL:
    2224         {
    2225             if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    2226                 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL);
    2227         }
     2351            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
     2352            if (pLeaf)
     2353                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;
     2354
     2355            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2356            if (   pLeaf
     2357                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
     2358                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
     2359
     2360            pVM->cpum.s.GuestFeatures.fPae = 0;
     2361            Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
     2362            break;
     2363
     2364        case CPUMCPUIDFEATURE_PAT:
     2365            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
     2366            if (pLeaf)
     2367                pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;
     2368
     2369            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2370            if (   pLeaf
     2371                && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
     2372                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
     2373
     2374            pVM->cpum.s.GuestFeatures.fPat = 0;
     2375            Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
     2376            break;
     2377
     2378        case CPUMCPUIDFEATURE_LONG_MODE:
     2379            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2380            if (pLeaf)
     2381                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
     2382            pVM->cpum.s.GuestFeatures.fLongMode = 0;
     2383            break;
     2384
     2385        case CPUMCPUIDFEATURE_LAHF:
     2386            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2387            if (pLeaf)
     2388                pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
     2389            pVM->cpum.s.GuestFeatures.fLahfSahf = 0;
     2390            break;
    22282391
    22292392        case CPUMCPUIDFEATURE_RDTSCP:
    2230         {
    2231             if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    2232                 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
    2233             break;
    2234         }
    2235 
    2236         case CPUMCPUIDFEATURE_LONG_MODE:
    2237         {
    2238             if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    2239                 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
    2240             break;
    2241         }
     2393            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0);
     2394            if (pLeaf)
     2395                pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
     2396            pVM->cpum.s.GuestFeatures.fRdTscP = 0;
     2397            Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
     2398            break;
     2399
     2400        case CPUMCPUIDFEATURE_HVP:
     2401            pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0);
     2402            if (pLeaf)
     2403                pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;
     2404            pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0;
     2405            break;
    22422406
    22432407        default:
     
    22452409            break;
    22462410    }
    2247     return false;
    2248 }
    2249 
    2250 
    2251 /**
    2252  * Clears a CPUID feature bit.
    2253  *
    2254  * @param   pVM             Pointer to the VM.
    2255  * @param   enmFeature      The feature to clear.
    2256  */
    2257 VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature)
    2258 {
    2259     switch (enmFeature)
    2260     {
    2261         /*
    2262          * Set the APIC bit in both feature masks.
    2263          */
    2264         case CPUMCPUIDFEATURE_APIC:
    2265             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
    2266                 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
    2267             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
    2268                 &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
    2269                 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
    2270             Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n"));
    2271             break;
    2272 
    2273         /*
    2274          * Clear the x2APIC bit in the standard feature mask.
    2275          */
    2276         case CPUMCPUIDFEATURE_X2APIC:
    2277             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
    2278                 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC;
    2279             Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n"));
    2280             break;
    2281 
    2282         case CPUMCPUIDFEATURE_PAE:
    2283         {
    2284             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
    2285                 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
    2286             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
    2287                 &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
    2288                 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
    2289             Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n"));
    2290             break;
    2291         }
    2292 
    2293         case CPUMCPUIDFEATURE_PAT:
    2294         {
    2295             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
    2296                 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT;
    2297             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
    2298                 &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
    2299                 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;
    2300             Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n"));
    2301             break;
    2302         }
    2303 
    2304         case CPUMCPUIDFEATURE_LONG_MODE:
    2305         {
    2306             if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    2307                 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;
    2308             break;
    2309         }
    2310 
    2311         case CPUMCPUIDFEATURE_LAHF:
    2312         {
    2313             if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    2314                 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;
    2315             break;
    2316         }
    2317 
    2318         case CPUMCPUIDFEATURE_RDTSCP:
    2319         {
    2320             if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
    2321                 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;
    2322             Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n"));
    2323             break;
    2324         }
    2325 
    2326         case CPUMCPUIDFEATURE_HVP:
    2327             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
    2328                 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP;
    2329             break;
    2330 
    2331         default:
    2332             AssertMsgFailed(("enmFeature=%d\n", enmFeature));
    2333             break;
    2334     }
     2411
    23352412    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    23362413    {
     
    23492426VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
    23502427{
    2351     return pVM->cpum.s.enmHostCpuVendor;
     2428    return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
    23522429}
    23532430
     
    23612438VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM)
    23622439{
    2363     return pVM->cpum.s.enmGuestCpuVendor;
     2440    return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor;
    23642441}
    23652442
  • trunk/src/VBox/VMM/VMMAll/MMAll.cpp

    r45640 r49893  
    568568        TAG2STR(CFGM_USER);
    569569
     570        TAG2STR(CPUM_CTX);
     571        TAG2STR(CPUM_CPUID);
     572        TAG2STR(CPUM_MSRS);
     573
    570574        TAG2STR(CSAM);
    571575        TAG2STR(CSAM_PATCH);
  • trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp

    r44528 r49893  
    322322}
    323323
     324
    324325/**
    325326 * Wrapper for mmHyperAllocInternal
     
    327328VMMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
    328329{
    329     int rc;
    330 
    331     rc = mmHyperLock(pVM);
     330    int rc = mmHyperLock(pVM);
    332331    AssertRCReturn(rc, rc);
    333332
     
    339338    return rc;
    340339}
     340
     341
     342/**
     343 * Duplicates a block of memory.
     344 */
     345VMMDECL(int) MMHyperDupMem(PVM pVM, const void *pvSrc, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
     346{
     347    int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
     348    if (RT_SUCCESS(rc))
     349        memcpy(*ppv, pvSrc, cb);
     350    return rc;
     351}
     352
    341353
    342354/**
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    r49623 r49893  
    7676#endif
    7777
     78/**
     79 * CPUID bits to unify among all cores.
     80 */
     81static struct
     82{
     83    uint32_t uLeaf;  /**< Leaf to check. */
     84    uint32_t ecx;    /**< which bits in ecx to unify between CPUs. */
     85    uint32_t edx;    /**< which bits in edx to unify between CPUs. */
     86}
     87const g_aCpuidUnifyBits[] =
     88{
     89    {
     90        0x00000001,
     91        X86_CPUID_FEATURE_ECX_CX16 | X86_CPUID_FEATURE_ECX_MONITOR,
     92        X86_CPUID_FEATURE_EDX_CX8
     93    }
     94};
     95
     96
    7897
    7998/*******************************************************************************
     
    114133
    115134/**
     135 *
     136 *
    116137 * Check the CPUID features of this particular CPU and disable relevant features
    117138 * for the guest which do not exist on this CPU. We have seen systems where the
     
    127148static DECLCALLBACK(void) cpumR0CheckCpuid(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    128149{
     150    PVM     pVM   = (PVM)pvUser1;
     151    PCPUM   pCPUM = &pVM->cpum.s;
     152
    129153    NOREF(idCpu); NOREF(pvUser2);
    130 
    131     struct
    132     {
    133         uint32_t uLeave; /* leave to check */
    134         uint32_t ecx;    /* which bits in ecx to unify between CPUs */
    135         uint32_t edx;    /* which bits in edx to unify between CPUs */
    136     } aCpuidUnify[]
    137     =
    138     {
    139         { 0x00000001, X86_CPUID_FEATURE_ECX_CX16
    140                     | X86_CPUID_FEATURE_ECX_MONITOR,
    141                       X86_CPUID_FEATURE_EDX_CX8 }
    142     };
    143     PVM pVM = (PVM)pvUser1;
    144     PCPUM pCPUM = &pVM->cpum.s;
    145     for (uint32_t i = 0; i < RT_ELEMENTS(aCpuidUnify); i++)
    146     {
    147         uint32_t uLeave = aCpuidUnify[i].uLeave;
     154    for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
     155    {
     156        /* Note! Cannot use cpumCpuIdGetLeaf from here because we're not
     157                 necessarily in the VM process context.  So, we're using the
     158                 legacy arrays as temporary storage. */
     159
     160        uint32_t   uLeaf = g_aCpuidUnifyBits[i].uLeaf;
     161        PCPUMCPUID pLegacyLeaf;
     162        if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
     163            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf];
     164        else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
     165            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
     166        else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
     167            pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)];
     168        else
     169            continue;
     170
    148171        uint32_t eax, ebx, ecx, edx;
    149 
    150         ASMCpuId_Idx_ECX(uLeave, 0, &eax, &ebx, &ecx, &edx);
    151         PCPUMCPUID paLeaves;
    152         if (uLeave < 0x80000000)
    153             paLeaves = &pCPUM->aGuestCpuIdStd[uLeave - 0x00000000];
    154         else if (uLeave < 0xc0000000)
    155             paLeaves = &pCPUM->aGuestCpuIdExt[uLeave - 0x80000000];
    156         else
    157             paLeaves = &pCPUM->aGuestCpuIdCentaur[uLeave - 0xc0000000];
    158         /* unify important bits */
    159         ASMAtomicAndU32(&paLeaves->ecx, ecx | ~aCpuidUnify[i].ecx);
    160         ASMAtomicAndU32(&paLeaves->edx, edx | ~aCpuidUnify[i].edx);
     172        ASMCpuIdExSlow(uLeaf, 0, 0, 0, &eax, &ebx, &ecx, &edx);
     173
     174        ASMAtomicAndU32(&pLegacyLeaf->ecx, ecx | ~g_aCpuidUnifyBits[i].ecx);
     175        ASMAtomicAndU32(&pLegacyLeaf->edx, edx | ~g_aCpuidUnifyBits[i].edx);
    161176    }
    162177}
     
    260275        }
    261276
     277        /*
     278         * Unify/cross check some CPUID feature bits on all available CPU cores
     279         * and threads.  We've seen CPUs where the monitor support differed.
     280         *
     281         * Because the hyper heap isn't always mapped into ring-0, we cannot
     282         * access it from a RTMpOnAll callback.  We use the legacy CPUID arrays
     283         * as temp ring-0 accessible memory instead, ASSUMING that they're all
     284         * up to date when we get here.
     285         */
    262286        RTMpOnAll(cpumR0CheckCpuid, pVM, NULL);
     287
     288        for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++)
     289        {
     290            uint32_t        uLeaf = g_aCpuidUnifyBits[i].uLeaf;
     291            PCPUMCPUIDLEAF  pLeaf = cpumCpuIdGetLeaf(pVM, uLeaf, 0);
     292            if (pLeaf)
     293            {
     294                PCPUMCPUID pLegacyLeaf;
     295                if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd))
     296                    pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf];
     297                else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt))
     298                    pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)];
     299                else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur))
     300                    pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)];
     301                else
     302                    continue;
     303
     304                pLeaf->uEcx = pLegacyLeaf->ecx;
     305                pLeaf->uEdx = pLegacyLeaf->edx;
     306            }
     307        }
     308
    263309    }
    264310
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r49890 r49893  
    58995899        switch (pMsr->u32Msr)
    59005900        {
    5901             case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);  break;
     5901            case MSR_K8_TSC_AUX:        CPUMR0SetGuestTscAux(pVCpu, pMsr->u64Value);             break;
    59025902            case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR        = pMsr->u64Value;             break;
    59035903            case MSR_K6_STAR:           pMixedCtx->msrSTAR         = pMsr->u64Value;             break;
     
    81478147            AssertRC(rc2);
    81488148            Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
    8149             uint64_t u64GuestTscAuxMsr;
    8150             rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAuxMsr);
    8151             AssertRC(rc2);
    8152             hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, u64GuestTscAuxMsr, true /* fUpdateHostMsr */);
     8149            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu), true /* fUpdateHostMsr */);
    81538150        }
    81548151        else
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r49538 r49893  
    5555#include <VBox/err.h>
    5656#include <VBox/log.h>
     57#include <iprt/asm-amd64-x86.h>
    5758#include <iprt/assert.h>
    58 #include <iprt/asm-amd64-x86.h>
     59#include <iprt/cpuset.h>
     60#include <iprt/mem.h>
     61#include <iprt/mp.h>
    5962#include <iprt/string.h>
    60 #include <iprt/mp.h>
    61 #include <iprt/cpuset.h>
    6263#include "internal/pgm.h"
    6364
     
    115116*   Internal Functions                                                         *
    116117*******************************************************************************/
    117 static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
    118118static int cpumR3CpuIdInit(PVM pVM);
    119119static DECLCALLBACK(int)  cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
     
    581581
    582582    /*
    583      * Assert alignment and sizes.
     583     * Assert alignment, sizes and tables.
    584584     */
    585585    AssertCompileMemberAlignment(VM, cpum.s, 32);
     
    592592    AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
    593593    AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64);
     594#ifdef VBOX_STRICT
     595    int rc2 = cpumR3MsrStrictInitChecks();
     596    AssertRCReturn(rc2, rc2);
     597#endif
    594598
    595599    /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */
    596600    pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum);
    597601    Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.offCPUMCPU0 == (uintptr_t)&pVM->aCpus[0].cpum);
     602
    598603
    599604    /* Calculate the offset from CPUMCPU to CPUM. */
     
    647652
    648653    /*
    649      * Detect the host CPU vendor.
    650      * (The guest CPU vendor is re-detected later on.)
    651      */
    652     uint32_t uEAX, uEBX, uECX, uEDX;
    653     ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    654     pVM->cpum.s.enmHostCpuVendor = cpumR3DetectVendor(uEAX, uEBX, uECX, uEDX);
    655     pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor;
     654     * Gather info about the host CPU.
     655     */
     656    PCPUMCPUIDLEAF  paLeaves;
     657    uint32_t        cLeaves;
     658    int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
     659    AssertLogRelRCReturn(rc, rc);
     660
     661    rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &pVM->cpum.s.HostFeatures);
     662    RTMemFree(paLeaves);
     663    AssertLogRelRCReturn(rc, rc);
     664    pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;
    656665
    657666    /*
     
    662671     * Register saved state data item.
    663672     */
    664     int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
    665                                    NULL, cpumR3LiveExec, NULL,
    666                                    NULL, cpumR3SaveExec, NULL,
    667                                    cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
     673    rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
     674                               NULL, cpumR3LiveExec, NULL,
     675                               NULL, cpumR3SaveExec, NULL,
     676                               cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
    668677    if (RT_FAILURE(rc))
    669678        return rc;
     
    700709
    701710/**
    702  * Detect the CPU vendor give n the
    703  *
    704  * @returns The vendor.
    705  * @param   uEAX                EAX from CPUID(0).
    706  * @param   uEBX                EBX from CPUID(0).
    707  * @param   uECX                ECX from CPUID(0).
    708  * @param   uEDX                EDX from CPUID(0).
    709  */
    710 static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
    711 {
    712     if (ASMIsValidStdRange(uEAX))
    713     {
    714         if (ASMIsAmdCpuEx(uEBX, uECX, uEDX))
    715             return CPUMCPUVENDOR_AMD;
    716 
    717         if (ASMIsIntelCpuEx(uEBX, uECX, uEDX))
    718             return CPUMCPUVENDOR_INTEL;
    719 
    720         if (ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX))
    721             return CPUMCPUVENDOR_VIA;
    722 
    723         /** @todo detect the other buggers... */
    724     }
    725 
    726     return CPUMCPUVENDOR_UNKNOWN;
     711 * Loads MSR range overrides.
     712 *
     713 * This must be called before the MSR ranges are moved from the normal heap to
     714 * the hyper heap!
     715 *
     716 * @returns VBox status code (VMSetError called).
     717 * @param   pVM                 Pointer to the cross context VM structure
     718 * @param   pMsrNode            The CFGM node with the MSR overrides.
     719 */
     720static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode)
     721{
     722    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode))
     723    {
     724        /*
     725         * Assemble a valid MSR range.
     726         */
     727        CPUMMSRRANGE MsrRange;
     728        MsrRange.offCpumCpu = 0;
     729        MsrRange.fReserved  = 0;
     730
     731        int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName));
     732        if (RT_FAILURE(rc))
     733            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc);
     734
     735        rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst);
     736        if (RT_FAILURE(rc))
     737            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n",
     738                              MsrRange.szName, rc);
     739
     740        rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst);
     741        if (RT_FAILURE(rc))
     742            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n",
     743                              MsrRange.szName, rc);
     744
     745        char szType[32];
     746        rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue");
     747        if (RT_FAILURE(rc))
     748            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n",
     749                              MsrRange.szName, rc);
     750        if (!RTStrICmp(szType, "FixedValue"))
     751        {
     752            MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue;
     753            MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite;
     754
     755            rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uInitOrReadValue, 0);
     756            if (RT_FAILURE(rc))
     757                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n",
     758                                  MsrRange.szName, rc);
     759
     760            rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0);
     761            if (RT_FAILURE(rc))
     762                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n",
     763                                  MsrRange.szName, rc);
     764
     765            rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0);
     766            if (RT_FAILURE(rc))
     767                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n",
     768                                  MsrRange.szName, rc);
     769        }
     770        else
     771            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
     772                              "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType);
     773
     774        /*
     775         * Insert the range into the table (replaces/splits/shrinks existing
     776         * MSR ranges).
     777         */
     778        rc = cpumR3MsrRangesInsert(&pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges, &MsrRange);
     779        if (RT_FAILURE(rc))
     780            return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc);
     781    }
     782
     783    return VINF_SUCCESS;
    727784}
     785
     786
     787/**
     788 * Loads CPUID leaf overrides.
     789 *
     790 * This must be called before the CPUID leaves are moved from the normal
     791 * heap to the hyper heap!
     792 *
     793 * @returns VBox status code (VMSetError called).
     794 * @param   pVM             Pointer to the cross context VM structure
     795 * @param   pParentNode     The CFGM node with the CPUID leaves.
     796 * @param   pszLabel        How to label the overrides we're loading.
     797 */
     798static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel)
     799{
     800    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode))
     801    {
     802        /*
     803         * Get the leaf and subleaf numbers.
     804         */
     805        char szName[128];
     806        int rc = CFGMR3GetName(pNode, szName, sizeof(szName));
     807        if (RT_FAILURE(rc))
     808            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc);
     809
     810        /* The leaf number is either specified directly or thru the node name. */
     811        uint32_t uLeaf;
     812        rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf);
     813        if (rc == VERR_CFGM_VALUE_NOT_FOUND)
     814        {
     815            rc = RTStrToUInt32Full(szName, 16, &uLeaf);
     816            if (rc != VINF_SUCCESS)
     817                return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS,
     818                                  "Invalid %s entry: Invalid leaf number: '%s' \n", pszLabel, szName);
     819        }
     820        else if (RT_FAILURE(rc))
     821            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n",
     822                              pszLabel, szName, rc);
     823
     824        uint32_t uSubLeaf;
     825        rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0);
     826        if (RT_FAILURE(rc))
     827            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n",
     828                              pszLabel, szName, rc);
     829
     830        uint32_t fSubLeafMask;
     831        rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0);
     832        if (RT_FAILURE(rc))
     833            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n",
     834                              pszLabel, szName, rc);
     835
     836        /*
     837         * Look up the specified leaf, since the output register values
     838         * defaults to any existing values.  This allows overriding a single
     839         * register, without needing to know the other values.
     840         */
     841        PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
     842                                                   uLeaf, uSubLeaf);
     843        CPUMCPUIDLEAF   Leaf;
     844        if (pLeaf)
     845            Leaf = *pLeaf;
     846        else
     847            RT_ZERO(Leaf);
     848        Leaf.uLeaf          = uLeaf;
     849        Leaf.uSubLeaf       = uSubLeaf;
     850        Leaf.fSubLeafMask   = fSubLeafMask;
     851
     852        rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax);
     853        if (RT_FAILURE(rc))
     854            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n",
     855                              pszLabel, szName, rc);
     856        rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx);
     857        if (RT_FAILURE(rc))
     858            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n",
     859                              pszLabel, szName, rc);
     860        rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx);
     861        if (RT_FAILURE(rc))
     862            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n",
     863                              pszLabel, szName, rc);
     864        rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx);
     865        if (RT_FAILURE(rc))
     866            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n",
     867                              pszLabel, szName, rc);
     868
     869        /*
     870         * Insert the leaf into the table (replaces existing ones).
     871         */
     872        rc = cpumR3CpuIdInsert(&pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves, &Leaf);
     873        if (RT_FAILURE(rc))
     874            return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc);
     875    }
     876
     877    return VINF_SUCCESS;
     878}
     879
    728880
    729881
     
    815967
    816968
     969static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCPUM, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves)
     970{
     971    /*
     972     * Install the CPUID information.
     973     */
     974    int rc = MMHyperDupMem(pVM, paLeaves, sizeof(paLeaves[0]) * cLeaves, 32,
     975                           MM_TAG_CPUM_CPUID, (void **)&pCPUM->GuestInfo.paCpuIdLeavesR3);
     976
     977    AssertLogRelRCReturn(rc, rc);
     978
     979    pCPUM->GuestInfo.paCpuIdLeavesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
     980    pCPUM->GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
     981    Assert(MMHyperR0ToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesR0) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
     982    Assert(MMHyperRCToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesRC) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
     983
     984    /*
     985     * Explode the guest CPU features.
     986     */
     987    rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
     988    AssertLogRelRCReturn(rc, rc);
     989
     990
     991    /*
     992     * Populate the legacy arrays.  Currently used for everything, later only
     993     * for patch manager.
     994     */
     995    struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] =
     996    {
     997        { pCPUM->aGuestCpuIdStd,        RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     0x00000000 },
     998        { pCPUM->aGuestCpuIdExt,        RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     0x80000000 },
     999        { pCPUM->aGuestCpuIdCentaur,    RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), 0xc0000000 },
     1000        { pCPUM->aGuestCpuIdHyper,      RT_ELEMENTS(pCPUM->aGuestCpuIdHyper),   0x40000000 },
     1001    };
     1002    for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++)
     1003    {
     1004        uint32_t    cLeft       = aOldRanges[i].cCpuIds;
     1005        uint32_t    uLeaf       = aOldRanges[i].uBase + cLeft;
     1006        PCPUMCPUID  pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft];
     1007        while (cLeft-- > 0)
     1008        {
     1009            uLeaf--;
     1010            pLegacyLeaf--;
     1011
     1012            PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, uLeaf, 0);
     1013            if (pLeaf)
     1014            {
     1015                pLegacyLeaf->eax = pLeaf->uEax;
     1016                pLegacyLeaf->ebx = pLeaf->uEbx;
     1017                pLegacyLeaf->ecx = pLeaf->uEcx;
     1018                pLegacyLeaf->edx = pLeaf->uEdx;
     1019            }
     1020            else
     1021                *pLegacyLeaf = pCPUM->GuestInfo.DefCpuId;
     1022        }
     1023    }
     1024
     1025    pCPUM->GuestCpuIdDef = pCPUM->GuestInfo.DefCpuId;
     1026
     1027    return VINF_SUCCESS;
     1028}
     1029
     1030
    8171031/**
    8181032 * Initializes the emulated CPU's cpuid information.
     
    8251039    PCPUM       pCPUM    = &pVM->cpum.s;
    8261040    PCFGMNODE   pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
    827     uint32_t    i;
    8281041    int         rc;
    8291042
    830 #define PORTABLE_CLEAR_BITS_WHEN(Lvl, LeafSuffReg, FeatNm, fMask, uValue) \
    831     if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fMask)) == (uValue) ) \
     1043#define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \
     1044    if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \
    8321045    { \
    833         LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: %#x -> 0\n", pCPUM->aGuestCpuId##LeafSuffReg & (fMask))); \
    834         pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fMask); \
    835     }
    836 #define PORTABLE_DISABLE_FEATURE_BIT(Lvl, LeafSuffReg, FeatNm, fBitMask) \
    837     if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fBitMask)) ) \
     1046        LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \
     1047        (a_pLeafReg) &= ~(uint32_t)(fMask); \
     1048    }
     1049#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \
     1050    if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \
    8381051    { \
    839         LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: 1 -> 0\n")); \
    840         pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fBitMask); \
     1052        LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
     1053        (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
    8411054    }
    8421055
     
    8471060     * Enables the Synthetic CPU.  The Vendor ID and Processor Name are
    8481061     * completely overridden by VirtualBox custom strings.  Some
    849      * CPUID information is withheld, like the cache info. */
    850     rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu",  &pCPUM->fSyntheticCpu, false);
     1062     * CPUID information is withheld, like the cache info.
     1063     *
     1064     * This is obsoleted by PortableCpuIdLevel. */
     1065    bool fSyntheticCpu;
     1066    rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu",  &fSyntheticCpu, false);
    8511067    AssertRCReturn(rc, rc);
    8521068
     
    8561072     * values should only be used when older CPUs are involved since it may
    8571073     * harm performance and maybe also cause problems with specific guests. */
    858     rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, 0);
    859     AssertRCReturn(rc, rc);
    860 
    861     AssertLogRelReturn(!pCPUM->fSyntheticCpu || !pCPUM->u8PortableCpuIdLevel, VERR_CPUM_INCOMPATIBLE_CONFIG);
    862 
    863     /*
    864      * Get the host CPUID leaves and redetect the guest CPU vendor (could've
    865      * been overridden).
    866      */
    867     /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
    868      * Overrides the host CPUID leaf values used for calculating the guest CPUID
    869      * leaves.  This can be used to preserve the CPUID values when moving a VM to a
    870      * different machine.  Another use is restricting (or extending) the feature set
    871      * exposed to the guest. */
    872     PCFGMNODE pHostOverrideCfg = CFGMR3GetChild(pCpumCfg, "HostCPUID");
    873     rc = cpumR3CpuIdInitHostSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     pHostOverrideCfg);
    874     AssertRCReturn(rc, rc);
    875     rc = cpumR3CpuIdInitHostSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     pHostOverrideCfg);
    876     AssertRCReturn(rc, rc);
    877     rc = cpumR3CpuIdInitHostSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pHostOverrideCfg);
    878     AssertRCReturn(rc, rc);
    879 
    880     pCPUM->enmGuestCpuVendor = cpumR3DetectVendor(pCPUM->aGuestCpuIdStd[0].eax, pCPUM->aGuestCpuIdStd[0].ebx,
    881                                                   pCPUM->aGuestCpuIdStd[0].ecx, pCPUM->aGuestCpuIdStd[0].edx);
    882 
    883     /*
    884      * Determine the default leaf.
    885      *
    886      * Intel returns values of the highest standard function, while AMD
    887      * returns zeros. VIA on the other hand seems to returning nothing or
    888      * perhaps some random garbage, we don't try to duplicate this behavior.
    889      */
    890     ASMCpuIdExSlow(pCPUM->aGuestCpuIdStd[0].eax + 10, 0, 0, 0, /** @todo r=bird: Use the host value here in case of overrides and more than 10 leaves being stripped already. */
    891                    &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx,
    892                    &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx);
     1074    rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, fSyntheticCpu ? 1 : 0);
     1075    AssertLogRelRCReturn(rc, rc);
     1076
     1077    /** @cfgm{CPUM/GuestCpuName, string}
      1078     * The name of the CPU we're to emulate.  The default is the host CPU.
      1079     * Note! CPUs other than the "host" one are currently unsupported. */
     1080    char szCpuName[128];
     1081    rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", szCpuName, sizeof(szCpuName), "host");
     1082    AssertLogRelRCReturn(rc, rc);
    8931083
    8941084    /** @cfgm{/CPUM/CMPXCHG16B, boolean, false}
     
    8961086     */
    8971087    bool fCmpXchg16b;
    898     rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false); AssertRCReturn(rc, rc);
     1088    rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false);
     1089    AssertLogRelRCReturn(rc, rc);
    8991090
    9001091    /** @cfgm{/CPUM/MONITOR, boolean, true}
     
    9021093     */
    9031094    bool fMonitor;
    904     rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true); AssertRCReturn(rc, rc);
    905 
    906     /* Cpuid 1 & 0x80000001:
     1095    rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true);
     1096    AssertLogRelRCReturn(rc, rc);
     1097
     1098    /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
     1099     * Expose MWAIT extended features to the guest.  For now we expose just MWAIT
     1100     * break on interrupt feature (bit 1).
     1101     */
     1102    bool fMWaitExtensions;
     1103    rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false);
     1104    AssertLogRelRCReturn(rc, rc);
     1105
     1106    /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
     1107     * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
     1108     * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
     1109     * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
     1110     */
     1111    bool fNt4LeafLimit;
     1112    rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false);
     1113    AssertLogRelRCReturn(rc, rc);
     1114
     1115    /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
     1116     * Restrict the reported CPU family+model+stepping of intel CPUs.  This is
     1117     * probably going to be a temporary hack, so don't depend on this.
     1118     * The 1st byte of the value is the stepping, the 2nd byte value is the model
     1119     * number and the 3rd byte value is the family, and the 4th value must be zero.
     1120     */
     1121    uint32_t uMaxIntelFamilyModelStep;
     1122    rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX);
     1123    AssertLogRelRCReturn(rc, rc);
     1124
     1125    /*
     1126     * Get the guest CPU data from the database and/or the host.
     1127     */
     1128    rc = cpumR3DbGetCpuInfo(szCpuName, &pCPUM->GuestInfo);
     1129    if (RT_FAILURE(rc))
     1130        return rc == VERR_CPUM_DB_CPU_NOT_FOUND
     1131             ? VMSetError(pVM, rc, RT_SRC_POS,
     1132                          "Info on guest CPU '%s' could not be found. Please, select a different CPU.", szCpuName)
     1133             : rc;
     1134
     1135    /** @cfgm{CPUM/MSRs/[Name]/[First|Last|Type|Value|...],}
     1136     * Overrides the guest MSRs.
     1137     */
     1138    rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs"));
     1139
     1140    /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
     1141     * Overrides the CPUID leaf values (from the host CPU usually) used for
     1142     * calculating the guest CPUID leaves.  This can be used to preserve the CPUID
     1143     * values when moving a VM to a different machine.  Another use is restricting
     1144     * (or extending) the feature set exposed to the guest. */
     1145    if (RT_SUCCESS(rc))
     1146        rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID");
     1147
     1148    if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */
     1149        rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
     1150                        "Found unsupported configuration node '/CPUM/CPUID/'. "
     1151                        "Please use IMachine::setCPUIDLeaf() instead.");
     1152
     1153    /*
     1154     * Pre-exploded the CPUID info.
     1155     */
     1156    if (RT_SUCCESS(rc))
     1157        rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
     1158    if (RT_FAILURE(rc))
     1159    {
     1160        RTMemFree(pCPUM->GuestInfo.paCpuIdLeavesR3);
     1161        pCPUM->GuestInfo.paCpuIdLeavesR3 = NULL;
     1162        RTMemFree(pCPUM->GuestInfo.paMsrRangesR3);
     1163        pCPUM->GuestInfo.paMsrRangesR3 = NULL;
     1164        return rc;
     1165    }
     1166
     1167
     1168    /* ... split this function about here ... */
     1169
     1170
     1171    PCPUMCPUIDLEAF pStdLeaf0 = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0);
     1172    AssertLogRelReturn(pStdLeaf0, VERR_CPUM_IPE_2);
     1173
     1174
     1175    /* Cpuid 1:
    9071176     * Only report features we can support.
    9081177     *
     
    9101179     *       options may require adjusting (i.e. stripping what was enabled).
    9111180     */
    912     pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU
     1181    PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
     1182    AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2);
     1183    pStdFeatureLeaf->uEdx        &= X86_CPUID_FEATURE_EDX_FPU
    9131184                                  | X86_CPUID_FEATURE_EDX_VME
    9141185                                  | X86_CPUID_FEATURE_EDX_DE
     
    9411212                                  //| X86_CPUID_FEATURE_EDX_PBE   - no pending break enabled.
    9421213                                  | 0;
    943     pCPUM->aGuestCpuIdStd[1].ecx &= 0
     1214    pStdFeatureLeaf->uEcx        &= 0
    9441215                                  | X86_CPUID_FEATURE_ECX_SSE3
    9451216                                  /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
     
    9611232    if (pCPUM->u8PortableCpuIdLevel > 0)
    9621233    {
    963         PORTABLE_CLEAR_BITS_WHEN(1, Std[1].eax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
    964         PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
    965         PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSE3,  X86_CPUID_FEATURE_ECX_SSE3);
    966         PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, CX16,  X86_CPUID_FEATURE_ECX_CX16);
    967         PORTABLE_DISABLE_FEATURE_BIT(2, Std[1].edx, SSE2,  X86_CPUID_FEATURE_EDX_SSE2);
    968         PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, SSE,   X86_CPUID_FEATURE_EDX_SSE);
    969         PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
    970         PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CMOV,  X86_CPUID_FEATURE_EDX_CMOV);
    971 
    972         Assert(!(pCPUM->aGuestCpuIdStd[1].edx & (  X86_CPUID_FEATURE_EDX_SEP
     1234        PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
     1235        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
     1236        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE3,  X86_CPUID_FEATURE_ECX_SSE3);
     1237        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, CX16,  X86_CPUID_FEATURE_ECX_CX16);
     1238        PORTABLE_DISABLE_FEATURE_BIT(2, pStdFeatureLeaf->uEdx, SSE2,  X86_CPUID_FEATURE_EDX_SSE2);
     1239        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, SSE,   X86_CPUID_FEATURE_EDX_SSE);
     1240        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
     1241        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CMOV,  X86_CPUID_FEATURE_EDX_CMOV);
     1242
     1243        Assert(!(pStdFeatureLeaf->uEdx        & (  X86_CPUID_FEATURE_EDX_SEP
    9731244                                                 | X86_CPUID_FEATURE_EDX_PSN
    9741245                                                 | X86_CPUID_FEATURE_EDX_DS
     
    9781249                                                 | X86_CPUID_FEATURE_EDX_PBE
    9791250                                                 )));
    980         Assert(!(pCPUM->aGuestCpuIdStd[1].ecx & (  X86_CPUID_FEATURE_ECX_PCLMUL
     1251        Assert(!(pStdFeatureLeaf->uEcx        & (  X86_CPUID_FEATURE_ECX_PCLMUL
    9811252                                                 | X86_CPUID_FEATURE_ECX_DTES64
    9821253                                                 | X86_CPUID_FEATURE_ECX_CPLDS
     
    10081279     * ASSUMES that this is ALWAYS the AMD defined feature set if present.
    10091280     */
    1010     pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU
     1281    PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
     1282                                                        UINT32_C(0x80000001), 0);
     1283    if (pExtFeatureLeaf)
     1284    {
     1285        pExtFeatureLeaf->uEdx    &= X86_CPUID_AMD_FEATURE_EDX_FPU
    10111286                                  | X86_CPUID_AMD_FEATURE_EDX_VME
    10121287                                  | X86_CPUID_AMD_FEATURE_EDX_DE
     
    10371312                                  | X86_CPUID_AMD_FEATURE_EDX_3DNOW
    10381313                                  | 0;
    1039     pCPUM->aGuestCpuIdExt[1].ecx &= 0
     1314        pExtFeatureLeaf->uEcx    &= 0
    10401315                                  //| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
    10411316                                  //| X86_CPUID_AMD_FEATURE_ECX_CMPL
     
    10541329                                  //| X86_CPUID_AMD_FEATURE_ECX_WDT
    10551330                                  | 0;
    1056     if (pCPUM->u8PortableCpuIdLevel > 0)
    1057     {
    1058         PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].ecx, CR8L,       X86_CPUID_AMD_FEATURE_ECX_CR8L);
    1059         PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW,      X86_CPUID_AMD_FEATURE_EDX_3DNOW);
    1060         PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW_EX,   X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
    1061         PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, FFXSR,      X86_CPUID_AMD_FEATURE_EDX_FFXSR);
    1062         PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP,     X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
    1063         PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF,  X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
    1064         PORTABLE_DISABLE_FEATURE_BIT(3, Ext[1].ecx, CMOV,       X86_CPUID_AMD_FEATURE_EDX_CMOV);
    1065 
    1066         Assert(!(pCPUM->aGuestCpuIdExt[1].ecx & (  X86_CPUID_AMD_FEATURE_ECX_CMPL
    1067                                                  | X86_CPUID_AMD_FEATURE_ECX_SVM
    1068                                                  | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
    1069                                                  | X86_CPUID_AMD_FEATURE_ECX_CR8L
    1070                                                  | X86_CPUID_AMD_FEATURE_ECX_ABM
    1071                                                  | X86_CPUID_AMD_FEATURE_ECX_SSE4A
    1072                                                  | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
    1073                                                  | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
    1074                                                  | X86_CPUID_AMD_FEATURE_ECX_OSVW
    1075                                                  | X86_CPUID_AMD_FEATURE_ECX_IBS
    1076                                                  | X86_CPUID_AMD_FEATURE_ECX_SSE5
    1077                                                  | X86_CPUID_AMD_FEATURE_ECX_SKINIT
    1078                                                  | X86_CPUID_AMD_FEATURE_ECX_WDT
    1079                                                  | UINT32_C(0xffffc000)
    1080                                                  )));
    1081         Assert(!(pCPUM->aGuestCpuIdExt[1].edx & (  RT_BIT(10)
    1082                                                  | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
    1083                                                  | RT_BIT(18)
    1084                                                  | RT_BIT(19)
    1085                                                  | RT_BIT(21)
    1086                                                  | X86_CPUID_AMD_FEATURE_EDX_AXMMX
    1087                                                  | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
    1088                                                  | RT_BIT(28)
    1089                                                  )));
    1090     }
    1091 
    1092     /*
    1093      * Apply the Synthetic CPU modifications. (TODO: move this up)
    1094      */
    1095     if (pCPUM->fSyntheticCpu)
    1096     {
    1097         static const char s_szVendor[13]    = "VirtualBox  ";
    1098         static const char s_szProcessor[48] = "VirtualBox SPARCx86 Processor v1000            "; /* includes null terminator */
    1099 
    1100         pCPUM->enmGuestCpuVendor = CPUMCPUVENDOR_SYNTHETIC;
    1101 
    1102         /* Limit the nr of standard leaves; 5 for monitor/mwait */
    1103         pCPUM->aGuestCpuIdStd[0].eax = RT_MIN(pCPUM->aGuestCpuIdStd[0].eax, 5);
    1104 
    1105         /* 0: Vendor */
    1106         pCPUM->aGuestCpuIdStd[0].ebx = pCPUM->aGuestCpuIdExt[0].ebx = ((uint32_t *)s_szVendor)[0];
    1107         pCPUM->aGuestCpuIdStd[0].ecx = pCPUM->aGuestCpuIdExt[0].ecx = ((uint32_t *)s_szVendor)[2];
    1108         pCPUM->aGuestCpuIdStd[0].edx = pCPUM->aGuestCpuIdExt[0].edx = ((uint32_t *)s_szVendor)[1];
    1109 
    1110         /* 1.eax: Version information.  family : model : stepping */
    1111         pCPUM->aGuestCpuIdStd[1].eax = (0xf << 8) + (0x1 << 4) + 1;
    1112 
    1113         /* Leaves 2 - 4 are Intel only - zero them out */
    1114         memset(&pCPUM->aGuestCpuIdStd[2], 0, sizeof(pCPUM->aGuestCpuIdStd[2]));
    1115         memset(&pCPUM->aGuestCpuIdStd[3], 0, sizeof(pCPUM->aGuestCpuIdStd[3]));
    1116         memset(&pCPUM->aGuestCpuIdStd[4], 0, sizeof(pCPUM->aGuestCpuIdStd[4]));
    1117 
    1118         /* Leaf 5 = monitor/mwait */
    1119 
    1120         /* Limit the nr of extended leaves: 0x80000008 to include the max virtual and physical address size (64 bits guests). */
    1121         pCPUM->aGuestCpuIdExt[0].eax = RT_MIN(pCPUM->aGuestCpuIdExt[0].eax, 0x80000008);
    1122         /* AMD only - set to zero. */
    1123         pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0;
    1124 
    1125         /* 0x800000001: shared feature bits are set dynamically. */
    1126         memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1]));
    1127 
    1128         /* 0x800000002-4: Processor Name String Identifier. */
    1129         pCPUM->aGuestCpuIdExt[2].eax = ((uint32_t *)s_szProcessor)[0];
    1130         pCPUM->aGuestCpuIdExt[2].ebx = ((uint32_t *)s_szProcessor)[1];
    1131         pCPUM->aGuestCpuIdExt[2].ecx = ((uint32_t *)s_szProcessor)[2];
    1132         pCPUM->aGuestCpuIdExt[2].edx = ((uint32_t *)s_szProcessor)[3];
    1133         pCPUM->aGuestCpuIdExt[3].eax = ((uint32_t *)s_szProcessor)[4];
    1134         pCPUM->aGuestCpuIdExt[3].ebx = ((uint32_t *)s_szProcessor)[5];
    1135         pCPUM->aGuestCpuIdExt[3].ecx = ((uint32_t *)s_szProcessor)[6];
    1136         pCPUM->aGuestCpuIdExt[3].edx = ((uint32_t *)s_szProcessor)[7];
    1137         pCPUM->aGuestCpuIdExt[4].eax = ((uint32_t *)s_szProcessor)[8];
    1138         pCPUM->aGuestCpuIdExt[4].ebx = ((uint32_t *)s_szProcessor)[9];
    1139         pCPUM->aGuestCpuIdExt[4].ecx = ((uint32_t *)s_szProcessor)[10];
    1140         pCPUM->aGuestCpuIdExt[4].edx = ((uint32_t *)s_szProcessor)[11];
    1141 
    1142         /* 0x800000005-7 - reserved -> zero */
    1143         memset(&pCPUM->aGuestCpuIdExt[5], 0, sizeof(pCPUM->aGuestCpuIdExt[5]));
    1144         memset(&pCPUM->aGuestCpuIdExt[6], 0, sizeof(pCPUM->aGuestCpuIdExt[6]));
    1145         memset(&pCPUM->aGuestCpuIdExt[7], 0, sizeof(pCPUM->aGuestCpuIdExt[7]));
    1146 
    1147         /* 0x800000008: only the max virtual and physical address size. */
    1148         pCPUM->aGuestCpuIdExt[8].ecx = pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0;  /* reserved */
     1331        if (pCPUM->u8PortableCpuIdLevel > 0)
     1332        {
     1333            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEcx, CR8L,       X86_CPUID_AMD_FEATURE_ECX_CR8L);
     1334            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW,      X86_CPUID_AMD_FEATURE_EDX_3DNOW);
     1335            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW_EX,   X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
     1336            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, FFXSR,      X86_CPUID_AMD_FEATURE_EDX_FFXSR);
     1337            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, RDTSCP,     X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
     1338            PORTABLE_DISABLE_FEATURE_BIT(2, pExtFeatureLeaf->uEcx, LAHF_SAHF,  X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
     1339            PORTABLE_DISABLE_FEATURE_BIT(3, pExtFeatureLeaf->uEcx, CMOV,       X86_CPUID_AMD_FEATURE_EDX_CMOV);
     1340
     1341            Assert(!(pExtFeatureLeaf->uEcx & (  X86_CPUID_AMD_FEATURE_ECX_CMPL
     1342                                              | X86_CPUID_AMD_FEATURE_ECX_SVM
     1343                                              | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
     1344                                              | X86_CPUID_AMD_FEATURE_ECX_CR8L
     1345                                              | X86_CPUID_AMD_FEATURE_ECX_ABM
     1346                                              | X86_CPUID_AMD_FEATURE_ECX_SSE4A
     1347                                              | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
     1348                                              | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
     1349                                              | X86_CPUID_AMD_FEATURE_ECX_OSVW
     1350                                              | X86_CPUID_AMD_FEATURE_ECX_IBS
     1351                                              | X86_CPUID_AMD_FEATURE_ECX_SSE5
     1352                                              | X86_CPUID_AMD_FEATURE_ECX_SKINIT
     1353                                              | X86_CPUID_AMD_FEATURE_ECX_WDT
     1354                                              | UINT32_C(0xffffc000)
     1355                                              )));
     1356            Assert(!(pExtFeatureLeaf->uEdx & (  RT_BIT(10)
     1357                                              | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
     1358                                              | RT_BIT(18)
     1359                                              | RT_BIT(19)
     1360                                              | RT_BIT(21)
     1361                                              | X86_CPUID_AMD_FEATURE_EDX_AXMMX
     1362                                              | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
     1363                                              | RT_BIT(28)
     1364                                              )));
     1365        }
    11491366    }
    11501367
     
    11531370     * (APIC-ID := 0 and #LogCpus := 0)
    11541371     */
    1155     pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
     1372    pStdFeatureLeaf->uEbx &= 0x0000ffff;
    11561373#ifdef VBOX_WITH_MULTI_CORE
    1157     if (    pCPUM->enmGuestCpuVendor != CPUMCPUVENDOR_SYNTHETIC
    1158         &&  pVM->cCpus > 1)
     1374    if (pVM->cCpus > 1)
    11591375    {
    11601376        /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */
    1161         pCPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCpus << 16);
    1162         pCPUM->aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_HTT;  /* necessary for hyper-threading *or* multi-core CPUs */
     1377        pStdFeatureLeaf->uEbx |= (pVM->cCpus << 16);
     1378        pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT;  /* necessary for hyper-threading *or* multi-core CPUs */
    11631379    }
    11641380#endif
     
    11701386     * Safe to expose; restrict the number of calls to 1 for the portable case.
    11711387     */
    1172     if (    pCPUM->u8PortableCpuIdLevel > 0
    1173         &&  pCPUM->aGuestCpuIdStd[0].eax >= 2
    1174         && (pCPUM->aGuestCpuIdStd[2].eax & 0xff) > 1)
    1175     {
    1176         LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCPUM->aGuestCpuIdStd[2].eax & 0xff));
    1177         pCPUM->aGuestCpuIdStd[2].eax &= UINT32_C(0xfffffffe);
     1388    PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 2, 0);
     1389    if (   pCPUM->u8PortableCpuIdLevel > 0
     1390        && pCurLeaf
     1391        && (pCurLeaf->uEax & 0xff) > 1)
     1392    {
     1393        LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff));
     1394        pCurLeaf->uEax &= UINT32_C(0xfffffffe);
    11781395    }
    11791396
     
    11851402     * Safe to expose
    11861403     */
    1187     if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN))
    1188     {
    1189         pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0;
     1404    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 3, 0);
     1405    if (   !(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN)
     1406        && pCurLeaf)
     1407    {
     1408        pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
    11901409        if (pCPUM->u8PortableCpuIdLevel > 0)
    1191             pCPUM->aGuestCpuIdStd[3].eax = pCPUM->aGuestCpuIdStd[3].ebx = 0;
     1410            pCurLeaf->uEax = pCurLeaf->uEbx = 0;
    11921411    }
    11931412
     
    12021421     * Note: These SMP values are constant regardless of ECX
    12031422     */
    1204     pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0;
    1205     pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0;
     1423    CPUMCPUIDLEAF NewLeaf;
     1424    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0);
     1425    if (pCurLeaf)
     1426    {
     1427        NewLeaf.uLeaf        = 4;
     1428        NewLeaf.uSubLeaf     = 0;
     1429        NewLeaf.fSubLeafMask = 0;
     1430        NewLeaf.uEax         = 0;
     1431        NewLeaf.uEbx         = 0;
     1432        NewLeaf.uEcx         = 0;
     1433        NewLeaf.uEdx         = 0;
     1434        NewLeaf.fFlags       = 0;
    12061435#ifdef VBOX_WITH_MULTI_CORE
    1207     if (   pVM->cCpus > 1
    1208         && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
    1209     {
    1210         AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
    1211         /* One logical processor with possibly multiple cores. */
    1212         /* See  http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
    1213         pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCpus - 1) << 26);   /* 6 bits only -> 64 cores! */
    1214     }
     1436        if (   pVM->cCpus > 1
     1437            && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
     1438        {
     1439            AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
     1440            /* One logical processor with possibly multiple cores. */
     1441            /* See  http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
     1442            NewLeaf.uEax |= ((pVM->cCpus - 1) << 26);   /* 6 bits only -> 64 cores! */
     1443        }
    12151444#endif
     1445        rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
     1446        AssertLogRelRCReturn(rc, rc);
     1447    }
    12161448
    12171449    /* Cpuid 5:     Monitor/mwait Leaf
     
    12241456     * Safe to expose
    12251457     */
    1226     if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR))
    1227         pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0;
    1228 
    1229     pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
    1230     /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
    1231      * Expose MWAIT extended features to the guest.  For now we expose
    1232      * just MWAIT break on interrupt feature (bit 1).
    1233      */
    1234     bool fMWaitExtensions;
    1235     rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false); AssertRCReturn(rc, rc);
    1236     if (fMWaitExtensions)
    1237     {
    1238         pCPUM->aGuestCpuIdStd[5].ecx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
    1239         /** @todo: for now we just expose host's MWAIT C-states, although conceptually
    1240            it shall be part of our power management virtualization model */
     1458    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0);
     1459    if (pCurLeaf)
     1460    {
     1461        if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR))
     1462            pCurLeaf->uEax = pCurLeaf->uEbx = 0;
     1463
     1464        pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
     1465        if (fMWaitExtensions)
     1466        {
     1467            pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
     1468            /** @todo: for now we just expose host's MWAIT C-states, although conceptually
     1469               it shall be part of our power management virtualization model */
    12411470#if 0
    1242         /* MWAIT sub C-states */
    1243         pCPUM->aGuestCpuIdStd[5].edx =
    1244                 (0 << 0)  /* 0 in C0 */ |
    1245                 (2 << 4)  /* 2 in C1 */ |
    1246                 (2 << 8)  /* 2 in C2 */ |
    1247                 (2 << 12) /* 2 in C3 */ |
    1248                 (0 << 16) /* 0 in C4 */
    1249                 ;
     1471            /* MWAIT sub C-states */
     1472            pCurLeaf->uEdx =
     1473                    (0 << 0)  /* 0 in C0 */ |
     1474                    (2 << 4)  /* 2 in C1 */ |
     1475                    (2 << 8)  /* 2 in C2 */ |
     1476                    (2 << 12) /* 2 in C3 */ |
     1477                    (0 << 16) /* 0 in C4 */
     1478                    ;
    12501479#endif
    1251     }
    1252     else
    1253         pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
     1480        }
     1481        else
     1482            pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
     1483    }
    12541484
    12551485    /* Cpuid 0x800000005 & 0x800000006 contain information about L1, L2 & L3 cache and TLB identifiers.
     
    12701500     * VIA:               Reserved
    12711501     */
    1272     if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007))
    1273     {
    1274         Assert(pVM->cpum.s.enmGuestCpuVendor != CPUMCPUVENDOR_INVALID);
    1275 
    1276         pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0;
    1277 
    1278         if (pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
     1502    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000007), 0);
     1503    if (pCurLeaf)
     1504    {
     1505        Assert(pCPUM->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID);
     1506
     1507        pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0;
     1508
     1509        if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
    12791510        {
    12801511            /* Only expose the TSC invariant capability bit to the guest. */
    1281             pCPUM->aGuestCpuIdExt[7].edx    &= 0
     1512            pCurLeaf->uEdx                  &= 0
    12821513                                            //| X86_CPUID_AMD_ADVPOWER_EDX_TS
    12831514                                            //| X86_CPUID_AMD_ADVPOWER_EDX_FID
     
    13001531        }
    13011532        else
    1302             pCPUM->aGuestCpuIdExt[7].edx    = 0;
     1533            pCurLeaf->uEdx = 0;
    13031534    }
    13041535
     
    13121543     *                    EBX, ECX, EDX - reserved
    13131544     */
    1314     if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
     1545    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000008), 0);
     1546    if (pCurLeaf)
    13151547    {
    13161548        /* Only expose the virtual and physical address sizes to the guest. */
    1317         pCPUM->aGuestCpuIdExt[8].eax &= UINT32_C(0x0000ffff);
    1318         pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0;  /* reserved */
     1549        pCurLeaf->uEax &= UINT32_C(0x0000ffff);
     1550        pCurLeaf->uEbx = pCurLeaf->uEdx = 0;  /* reserved */
    13191551        /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu)
    13201552         * NC (0-7) Number of cores; 0 equals 1 core */
    1321         pCPUM->aGuestCpuIdExt[8].ecx = 0;
     1553        pCurLeaf->uEcx = 0;
    13221554#ifdef VBOX_WITH_MULTI_CORE
    13231555        if (    pVM->cCpus > 1
    1324             &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
     1556            &&  pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
    13251557        {
    13261558            /* Legacy method to determine the number of cores. */
    1327             pCPUM->aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
    1328             pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
     1559            pCurLeaf->uEcx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
     1560            if (pExtFeatureLeaf)
     1561                pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
    13291562        }
    13301563#endif
    13311564    }
    13321565
    1333     /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
    1334      * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
    1335      * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
    1336      * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
    1337      */
    1338     bool fNt4LeafLimit;
    1339     rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false); AssertRCReturn(rc, rc);
    1340     if (fNt4LeafLimit && pCPUM->aGuestCpuIdStd[0].eax > 3)
    1341         pCPUM->aGuestCpuIdStd[0].eax = 3;
    1342 
    1343     /*
    1344      * Limit it the number of entries and fill the remaining with the defaults.
     1566
     1567    /*
     1568     * Limit it the number of entries, zapping the remainder.
    13451569     *
    13461570     * The limits are masking off stuff about power saving and similar, this
     
    13481572     * info too in these leaves (like words about having a constant TSC).
    13491573     */
    1350     if (pCPUM->aGuestCpuIdStd[0].eax > 5)
    1351         pCPUM->aGuestCpuIdStd[0].eax = 5;
    1352     for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++)
    1353         pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef;
    1354 
    1355     if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008))
    1356         pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008);
    1357     for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000)
    1358            ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1
    1359            : 0;
    1360          i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt);
    1361          i++)
    1362         pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef;
     1574    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0);
     1575    if (pCurLeaf)
     1576    {
     1577        if (pCurLeaf->uEax > 5)
     1578        {
     1579            pCurLeaf->uEax = 5;
     1580            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
     1581                                   UINT32_C(0x00000006), UINT32_C(0x000fffff));
     1582        }
     1583
     1584        /* NT4 hack, no zapping of extra leaves here. */
     1585        if (fNt4LeafLimit && pCurLeaf->uEax > 3)
     1586            pCurLeaf->uEax = 3;
     1587    }
     1588
     1589    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000000), 0);
     1590    if (pCurLeaf)
     1591    {
     1592        if (pCurLeaf->uEax > UINT32_C(0x80000008))
     1593        {
     1594            pCurLeaf->uEax = UINT32_C(0x80000008);
     1595            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
     1596                                   UINT32_C(0x80000008), UINT32_C(0x800fffff));
     1597        }
     1598    }
    13631599
    13641600    /*
     
    13701606     * temperature/hz/++ stuff, include it as well (static).
    13711607     */
    1372     if (    pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000)
    1373         &&  pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004))
    1374     {
    1375         pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002));
    1376         pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */
    1377         for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000);
    1378              i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
    1379              i++)
    1380             pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
    1381     }
    1382     else
    1383         for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
    1384             pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
     1608    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0xc0000000), 0);
     1609    if (pCurLeaf)
     1610    {
     1611        if (   pCurLeaf->uEax >= UINT32_C(0xc0000000)
     1612            && pCurLeaf->uEax <= UINT32_C(0xc0000004))
     1613        {
     1614            pCurLeaf->uEax = RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000002));
     1615            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
     1616                                   UINT32_C(0xc0000002), UINT32_C(0xc00fffff));
     1617
     1618            pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
     1619                                          UINT32_C(0xc0000001), 0);
     1620            if (pCurLeaf)
     1621                pCurLeaf->uEdx = 0; /* all features hidden */
     1622        }
     1623        else
     1624            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
     1625                                   UINT32_C(0xc0000000), UINT32_C(0xc00fffff));
     1626    }
    13851627
    13861628    /*
     
    13911633     * Currently we do not support any hypervisor-specific interface.
    13921634     */
    1393     pCPUM->aGuestCpuIdHyper[0].eax = UINT32_C(0x40000001);
    1394     pCPUM->aGuestCpuIdHyper[0].ebx = pCPUM->aGuestCpuIdHyper[0].ecx
    1395                                    = pCPUM->aGuestCpuIdHyper[0].edx = 0x786f4256;   /* 'VBox' */
    1396     pCPUM->aGuestCpuIdHyper[1].eax = 0x656e6f6e;                            /* 'none' */
    1397     pCPUM->aGuestCpuIdHyper[1].ebx = pCPUM->aGuestCpuIdHyper[1].ecx
    1398                                    = pCPUM->aGuestCpuIdHyper[1].edx = 0;    /* Reserved */
     1635    NewLeaf.uLeaf        = UINT32_C(0x40000000);
     1636    NewLeaf.uSubLeaf     = 0;
     1637    NewLeaf.fSubLeafMask = 0;
     1638    NewLeaf.uEax         = UINT32_C(0x40000001);
     1639    NewLeaf.uEbx         = 0x786f4256 /* 'VBox' */;
     1640    NewLeaf.uEcx         = 0x786f4256 /* 'VBox' */;
     1641    NewLeaf.uEdx         = 0x786f4256 /* 'VBox' */;
     1642    NewLeaf.fFlags       = 0;
     1643    rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
     1644    AssertLogRelRCReturn(rc, rc);
     1645
     1646    NewLeaf.uLeaf        = UINT32_C(0x40000001);
     1647    NewLeaf.uEax         = 0x656e6f6e;                            /* 'none' */
     1648    NewLeaf.uEbx         = 0;
     1649    NewLeaf.uEcx         = 0;
     1650    NewLeaf.uEdx         = 0;
     1651    NewLeaf.fFlags       = 0;
     1652    rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
     1653    AssertLogRelRCReturn(rc, rc);
    13991654
    14001655    /*
    14011656     * Mini CPU selection support for making Mac OS X happy.
    14021657     */
    1403     if (pCPUM->enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
    1404     {
    1405         /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
    1406          * Restrict the reported CPU family+model+stepping of intel CPUs.  This is
    1407          * probably going to be a temporary hack, so don't depend on this.
    1408          * The 1st byte of the value is the stepping, the 2nd byte value is the model
    1409          * number and the 3rd byte value is the family, and the 4th value must be zero.
    1410          */
    1411         uint32_t uMaxIntelFamilyModelStep;
    1412         rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX);
    1413         AssertRCReturn(rc, rc);
    1414         uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pCPUM->aGuestCpuIdStd[1].eax),
    1415                                                                 ASMGetCpuModelIntel(pCPUM->aGuestCpuIdStd[1].eax),
    1416                                                                 ASMGetCpuFamily(pCPUM->aGuestCpuIdStd[1].eax),
     1658    if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
     1659    {
     1660        uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pStdFeatureLeaf->uEax),
     1661                                                                ASMGetCpuModelIntel(pStdFeatureLeaf->uEax),
     1662                                                                ASMGetCpuFamily(pStdFeatureLeaf->uEax),
    14171663                                                                0);
    14181664        if (uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep)
    14191665        {
    1420             uint32_t uNew = pCPUM->aGuestCpuIdStd[1].eax & UINT32_C(0xf0003000);
     1666            uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000);
    14211667            uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf; /* stepping */
    14221668            uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4; /* 4 low model bits */
     
    14261672                uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20;
    14271673            LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x\n",
    1428                     pCPUM->aGuestCpuIdStd[1].eax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
    1429             pCPUM->aGuestCpuIdStd[1].eax = uNew;
     1674                    pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
     1675            pStdFeatureLeaf->uEax = uNew;
    14301676        }
    14311677    }
    14321678
    1433     /*
    1434      * Load CPUID overrides from configuration.
    1435      * Note: Kind of redundant now, but allows unchanged overrides
    1436      */
    1437     /** @cfgm{CPUM/CPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
    1438      * Overrides the CPUID leaf values. */
    1439     PCFGMNODE pOverrideCfg = CFGMR3GetChild(pCpumCfg, "CPUID");
    1440     rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     pOverrideCfg);
    1441     AssertRCReturn(rc, rc);
    1442     rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     pOverrideCfg);
    1443     AssertRCReturn(rc, rc);
    1444     rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pOverrideCfg);
    1445     AssertRCReturn(rc, rc);
    1446 
    1447     /*
    1448      * Check if PAE was explicitely enabled by the user.
    1449      */
     1679
     1680    /*
     1681     * Move the MSR and CPUID arrays over on the hypervisor heap, and explode
     1682     * guest CPU features again.
     1683     */
     1684    void *pvFree = pCPUM->GuestInfo.paCpuIdLeavesR3;
     1685    int rc1 = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCPUM, pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves);
     1686    RTMemFree(pvFree);
     1687
     1688    pvFree = pCPUM->GuestInfo.paMsrRangesR3;
     1689    int rc2 = MMHyperDupMem(pVM, pvFree,
     1690                            sizeof(pCPUM->GuestInfo.paMsrRangesR3[0]) * pCPUM->GuestInfo.cMsrRanges, 32,
     1691                            MM_TAG_CPUM_MSRS, (void **)&pCPUM->GuestInfo.paMsrRangesR3);
     1692    RTMemFree(pvFree);
     1693    AssertLogRelRCReturn(rc1, rc1);
     1694    AssertLogRelRCReturn(rc2, rc2);
     1695
     1696    pCPUM->GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paMsrRangesR3);
     1697    pCPUM->GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paMsrRangesR3);
     1698    cpumR3MsrRegStats(pVM);
     1699
     1700    /*
     1701     * Some more configuration that we're applying at the end of everything
     1702     * via the CPUMSetGuestCpuIdFeature API.
     1703     */
     1704
      1705    /* Check if PAE was explicitly enabled by the user. */
    14501706    bool fEnable;
    14511707    rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false);      AssertRCReturn(rc, rc);
     
    14531709        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    14541710
    1455     /*
    1456      * We don't normally enable NX for raw-mode, so give the user a chance to
    1457      * force it on.
    1458      */
     1711    /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
    14591712    rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false);                 AssertRCReturn(rc, rc);
    14601713    if (fEnable)
    14611714        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    14621715
    1463     /*
    1464      * We don't enable the Hypervisor Present bit by default, but it may
    1465      * be needed by some guests.
    1466      */
     1716    /* We don't enable the Hypervisor Present bit by default, but it may be needed by some guests. */
    14671717    rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableHVP", &fEnable, false);                AssertRCReturn(rc, rc);
    14681718    if (fEnable)
     
    14881738{
    14891739    LogFlow(("CPUMR3Relocate\n"));
     1740
     1741    pVM->cpum.s.GuestInfo.paMsrRangesRC   = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paMsrRangesR3);
     1742    pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
    14901743
    14911744    /* Recheck the guest DRx values in raw-mode. */
     
    15521805 * Used by CPUMR3Reset and CPU hot plugging.
    15531806 *
    1554  * @param   pVCpu               Pointer to the VMCPU.
    1555  */
    1556 VMMR3DECL(void) CPUMR3ResetCpu(PVMCPU pVCpu)
     1807 * @param   pVM         Pointer to the cross context VM structure.
     1808 * @param   pVCpu       Pointer to the cross context virtual CPU structure of
     1809 *                      the CPU that is being reset.  This may differ from the
     1810 *                      current EMT.
     1811 */
     1812VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
    15571813{
    15581814    /** @todo anything different for VCPU > 0? */
     
    16351891                                                        supports all bits, since a zero value here should be read as 0xffbf. */
    16361892
     1893    /*
     1894     * MSRs.
     1895     */
    16371896    /* Init PAT MSR */
    16381897    pCtx->msrPAT                    = UINT64_C(0x0007040600070406); /** @todo correct? */
     
    16421901    Assert(!pCtx->msrEFER);
    16431902
     1903    /* IA32_MISC_ENABLE - not entirely sure what the init/reset state really
      1904       is supposed to be here, just trying to provide useful/sensible values. */
     1905    PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, MSR_IA32_MISC_ENABLE);
     1906    if (pRange)
     1907    {
     1908        pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
     1909                                               | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL
     1910                                               | (pVM->cpum.s.GuestFeatures.fMonitorMWait ? MSR_IA32_MISC_ENABLE_MONITOR : 0)
     1911                                               | MSR_IA32_MISC_ENABLE_FAST_STRINGS;
     1912        pRange->fWrIgnMask |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
     1913                            | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
     1914        pRange->fWrGpMask  &= ~pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
     1915    }
     1916
     1917    /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */
     1918
    16441919    /** @todo r=ramshankar: Currently broken for SMP as TMCpuTickSet() expects to be
    16451920     *        called from each EMT while we're getting called by CPUMR3Reset()
    16461921     *        iteratively on the same thread. Fix later.  */
    1647 #if 0
     1922#if 0 /** @todo r=bird: This we will do in TM, not here. */
    16481923    /* TSC must be 0. Intel spec. Table 9-1. "IA-32 Processor States Following Power-up, Reset, or INIT." */
    16491924    CPUMSetGuestMsr(pVCpu, MSR_IA32_TSC, 0);
     
    16731948    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    16741949    {
    1675         CPUMR3ResetCpu(&pVM->aCpus[i]);
     1950        CPUMR3ResetCpu(pVM, &pVM->aCpus[i]);
    16761951
    16771952#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    17252000    SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
    17262001    SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
     2002}
     2003
     2004
     2005static int cpumR3LoadCpuIdOneGuestArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
     2006{
     2007    uint32_t cCpuIds;
     2008    int rc = SSMR3GetU32(pSSM, &cCpuIds);
     2009    if (RT_SUCCESS(rc))
     2010    {
     2011        if (cCpuIds < 64)
     2012        {
     2013            for (uint32_t i = 0; i < cCpuIds; i++)
     2014            {
     2015                CPUMCPUID CpuId;
     2016                rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId));
     2017                if (RT_FAILURE(rc))
     2018                    break;
     2019
     2020                CPUMCPUIDLEAF NewLeaf;
     2021                NewLeaf.uLeaf           = uBase + i;
     2022                NewLeaf.uSubLeaf        = 0;
     2023                NewLeaf.fSubLeafMask    = 0;
     2024                NewLeaf.uEax            = CpuId.eax;
     2025                NewLeaf.uEbx            = CpuId.ebx;
     2026                NewLeaf.uEcx            = CpuId.ecx;
     2027                NewLeaf.uEdx            = CpuId.edx;
     2028                NewLeaf.fFlags          = 0;
     2029                rc = cpumR3CpuIdInsert(ppaLeaves, pcLeaves, &NewLeaf);
     2030            }
     2031        }
     2032        else
     2033            rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
     2034    }
     2035    if (RT_FAILURE(rc))
     2036    {
     2037        RTMemFree(*ppaLeaves);
     2038        *ppaLeaves = NULL;
     2039        *pcLeaves = 0;
     2040    }
     2041    return rc;
     2042}
     2043
     2044
     2045static int cpumR3LoadCpuIdGuestArrays(PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
     2046{
     2047    *ppaLeaves = NULL;
     2048    *pcLeaves = 0;
     2049
     2050    int rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves);
     2051    if (RT_SUCCESS(rc))
     2052        rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves);
     2053    if (RT_SUCCESS(rc))
     2054        rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves);
     2055
     2056    return rc;
    17272057}
    17282058
     
    18092139            && !(aHostRaw##set [1].reg & bit) \
    18102140            && !(aHostOverride##set [1].reg & bit) \
    1811             && !(aGuestOverride##set [1].reg & bit) \
    18122141           ) \
    18132142        { \
     
    18232152            && !(aHostRaw##set [1].reg & bit) \
    18242153            && !(aHostOverride##set [1].reg & bit) \
    1825             && !(aGuestOverride##set [1].reg & bit) \
    18262154           ) \
    18272155            LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
     
    18322160            && !(aHostRaw##set [1].reg & bit) \
    18332161            && !(aHostOverride##set [1].reg & bit) \
    1834             && !(aGuestOverride##set [1].reg & bit) \
    18352162           ) \
    18362163            LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
     
    18452172            && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
    18462173            && !(aHostOverride##set [1].reg & bit) \
    1847             && !(aGuestOverride##set [1].reg & bit) \
    18482174           ) \
    18492175        { \
     
    18602186            && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
    18612187            && !(aHostOverride##set [1].reg & bit) \
    1862             && !(aGuestOverride##set [1].reg & bit) \
    18632188           ) \
    18642189            LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
     
    18702195            && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
    18712196            && !(aHostOverride##set [1].reg & bit) \
    1872             && !(aGuestOverride##set [1].reg & bit) \
    18732197           ) \
    18742198            LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
     
    18852209                 : aHostRawStd[1].reg      & (StdBit)) \
    18862210            && !(aHostOverrideExt[1].reg   & (ExtBit)) \
    1887             && !(aGuestOverrideExt[1].reg  & (ExtBit)) \
    18882211           ) \
    18892212        { \
     
    19012224                 : aHostRawStd[1].reg      & (StdBit)) \
    19022225            && !(aHostOverrideExt[1].reg   & (ExtBit)) \
    1903             && !(aGuestOverrideExt[1].reg  & (ExtBit)) \
    19042226           ) \
    19052227            LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \
     
    19122234                 : aHostRawStd[1].reg      & (StdBit)) \
    19132235            && !(aHostOverrideExt[1].reg   & (ExtBit)) \
    1914             && !(aGuestOverrideExt[1].reg  & (ExtBit)) \
    19152236           ) \
    19162237            LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
     
    19212242     * Load them into stack buffers first.
    19222243     */
    1923     CPUMCPUID   aGuestCpuIdStd[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)];
    1924     uint32_t    cGuestCpuIdStd;
    1925     int rc = SSMR3GetU32(pSSM, &cGuestCpuIdStd); AssertRCReturn(rc, rc);
    1926     if (cGuestCpuIdStd > RT_ELEMENTS(aGuestCpuIdStd))
    1927         return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    1928     SSMR3GetMem(pSSM, &aGuestCpuIdStd[0], cGuestCpuIdStd * sizeof(aGuestCpuIdStd[0]));
    1929 
    1930     CPUMCPUID   aGuestCpuIdExt[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)];
    1931     uint32_t    cGuestCpuIdExt;
    1932     rc = SSMR3GetU32(pSSM, &cGuestCpuIdExt); AssertRCReturn(rc, rc);
    1933     if (cGuestCpuIdExt > RT_ELEMENTS(aGuestCpuIdExt))
    1934         return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    1935     SSMR3GetMem(pSSM, &aGuestCpuIdExt[0], cGuestCpuIdExt * sizeof(aGuestCpuIdExt[0]));
    1936 
    1937     CPUMCPUID   aGuestCpuIdCentaur[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)];
    1938     uint32_t    cGuestCpuIdCentaur;
    1939     rc = SSMR3GetU32(pSSM, &cGuestCpuIdCentaur); AssertRCReturn(rc, rc);
    1940     if (cGuestCpuIdCentaur > RT_ELEMENTS(aGuestCpuIdCentaur))
    1941         return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    1942     SSMR3GetMem(pSSM, &aGuestCpuIdCentaur[0], cGuestCpuIdCentaur * sizeof(aGuestCpuIdCentaur[0]));
     2244    PCPUMCPUIDLEAF paLeaves;
     2245    uint32_t       cLeaves;
     2246    int rc = cpumR3LoadCpuIdGuestArrays(pSSM, uVersion, &paLeaves, &cLeaves);
     2247    AssertRCReturn(rc, rc);
     2248
     2249    /** @todo we'll be leaking paLeaves on error return... */
    19432250
    19442251    CPUMCPUID   GuestCpuIdDef;
     
    19512258    if (cRawStd > RT_ELEMENTS(aRawStd))
    19522259        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    1953     SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
     2260    rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
     2261    AssertRCReturn(rc, rc);
     2262    for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
     2263        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
    19542264
    19552265    CPUMCPUID   aRawExt[32];
     
    19602270    rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
    19612271    AssertRCReturn(rc, rc);
    1962 
    1963     /*
    1964      * Note that we support restoring less than the current amount of standard
    1965      * leaves because we've been allowed more is newer version of VBox.
    1966      *
    1967      * So, pad new entries with the default.
    1968      */
    1969     for (uint32_t i = cGuestCpuIdStd; i < RT_ELEMENTS(aGuestCpuIdStd); i++)
    1970         aGuestCpuIdStd[i] = GuestCpuIdDef;
    1971 
    1972     for (uint32_t i = cGuestCpuIdExt; i < RT_ELEMENTS(aGuestCpuIdExt); i++)
    1973         aGuestCpuIdExt[i] = GuestCpuIdDef;
    1974 
    1975     for (uint32_t i = cGuestCpuIdCentaur; i < RT_ELEMENTS(aGuestCpuIdCentaur); i++)
    1976         aGuestCpuIdCentaur[i] = GuestCpuIdDef;
    1977 
    1978     for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
    1979         ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
    1980 
    19812272    for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
    19822273        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
     
    19992290     * Note! We currently only need the feature leaves, so skip rest.
    20002291     */
    2001     PCFGMNODE   pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/CPUID");
    2002     CPUMCPUID   aGuestOverrideStd[2];
    2003     memcpy(&aGuestOverrideStd[0], &aHostRawStd[0], sizeof(aGuestOverrideStd));
    2004     cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aGuestOverrideStd[0], RT_ELEMENTS(aGuestOverrideStd), pOverrideCfg);
    2005 
    2006     CPUMCPUID   aGuestOverrideExt[2];
    2007     memcpy(&aGuestOverrideExt[0], &aHostRawExt[0], sizeof(aGuestOverrideExt));
    2008     cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aGuestOverrideExt[0], RT_ELEMENTS(aGuestOverrideExt), pOverrideCfg);
    2009 
    2010     pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
     2292    PCFGMNODE   pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
    20112293    CPUMCPUID   aHostOverrideStd[2];
    20122294    memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
     
    22592541     *      "EMU?" - Can this be emulated?
    22602542     */
     2543    CPUMCPUID aGuestCpuIdStd[2];
     2544    RT_ZERO(aGuestCpuIdStd);
     2545    cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]);
     2546
    22612547    /* CPUID(1).ecx */
    22622548    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);    // -> EMU
     
    23282614
    23292615    /* CPUID(0x80000000). */
    2330     if (    aGuestCpuIdExt[0].eax >= UINT32_C(0x80000001)
    2331         &&  aGuestCpuIdExt[0].eax <  UINT32_C(0x8000007f))
     2616    CPUMCPUID aGuestCpuIdExt[2];
     2617    RT_ZERO(aGuestCpuIdExt);
     2618    if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1]))
    23322619    {
    23332620        /** @todo deal with no 0x80000001 on the host. */
     
    24072694     * We're good, commit the CPU ID leaves.
    24082695     */
    2409     memcpy(&pVM->cpum.s.aGuestCpuIdStd[0],     &aGuestCpuIdStd[0],     sizeof(aGuestCpuIdStd));
    2410     memcpy(&pVM->cpum.s.aGuestCpuIdExt[0],     &aGuestCpuIdExt[0],     sizeof(aGuestCpuIdExt));
    2411     memcpy(&pVM->cpum.s.aGuestCpuIdCentaur[0], &aGuestCpuIdCentaur[0], sizeof(aGuestCpuIdCentaur));
    2412     pVM->cpum.s.GuestCpuIdDef = GuestCpuIdDef;
     2696    MMHyperFree(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
     2697    pVM->cpum.s.GuestInfo.paCpuIdLeavesR0 = NIL_RTR0PTR;
     2698    pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR;
     2699    pVM->cpum.s.GuestInfo.DefCpuId = GuestCpuIdDef;
     2700    rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves);
     2701    RTMemFree(paLeaves);
     2702    AssertLogRelRCReturn(rc, rc);
     2703
    24132704
    24142705#undef CPUID_CHECK_RET
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r49072 r49893  
    23982398                        PGMR3ResetCpu(pVM, pVCpu);
    23992399                        TRPMR3ResetCpu(pVCpu);
    2400                         CPUMR3ResetCpu(pVCpu);
     2400                        CPUMR3ResetCpu(pVM, pVCpu);
    24012401                        EMR3ResetCpu(pVCpu);
    24022402                        HMR3ResetCpu(pVCpu);
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r48629 r49893  
    21682168    }
    21692169
     2170    /** @todo query from CPUM. */
    21702171    pVM->pgm.s.GCPhysInvAddrMask = 0;
    21712172    for (uint32_t iBit = cMaxPhysAddrWidth; iBit < 64; iBit++)
  • trunk/src/VBox/VMM/VMMR3/VM.cpp

    r48528 r49893  
    43804380    PDMR3ResetCpu(pVCpu);
    43814381    TRPMR3ResetCpu(pVCpu);
    4382     CPUMR3ResetCpu(pVCpu);
     4382    CPUMR3ResetCpu(pVM, pVCpu);
    43834383    EMR3ResetCpu(pVCpu);
    43844384    HMR3ResetCpu(pVCpu);
  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

    r49147 r49893  
    14201420
    14211421    PGMR3ResetCpu(pVM, pVCpu);
    1422     CPUMR3ResetCpu(pVCpu);
     1422    CPUMR3ResetCpu(pVM, pVCpu);
    14231423
    14241424    return VINF_EM_WAIT_SIPI;
  • trunk/src/VBox/VMM/VMMR3/VMMTests.cpp

    r49383 r49893  
    872872     * Do the experiments.
    873873     */
    874     uint32_t uMsr   = 0xc0011011;
    875     uint64_t uValue = 0x10000;
     874    uint32_t uMsr   = 0x00000277;
     875    uint64_t uValue = UINT64_C(0x0007010600070106);
    876876#if 0
     877    uValue &= ~(RT_BIT_64(17) | RT_BIT_64(16) | RT_BIT_64(15) | RT_BIT_64(14) | RT_BIT_64(13));
     878    uValue |= RT_BIT_64(13);
    877879    rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
    878880                     RCPtrValues, RCPtrValues + sizeof(uint64_t));
    879881    RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
    880882             uMsr, pauValues[0], uValue, pauValues[1], rc);
    881 #endif
     883#elif 1
     884    const uint64_t uOrgValue = uValue;
     885    uint32_t       cChanges = 0;
     886    for (int iBit = 63; iBit >= 58; iBit--)
     887    {
     888        uValue = uOrgValue & ~RT_BIT_64(iBit);
     889        rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
     890                         RCPtrValues, RCPtrValues + sizeof(uint64_t));
     891        RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nclear bit=%u -> %s\n",
     892                 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
     893                 (pauValues[0] ^  pauValues[1]) & RT_BIT_64(iBit) ?  "changed" : "unchanged");
     894        cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
     895
     896        uValue = uOrgValue | RT_BIT_64(iBit);
     897        rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
     898                         RCPtrValues, RCPtrValues + sizeof(uint64_t));
     899        RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nset   bit=%u -> %s\n",
     900                 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
     901                 (pauValues[0] ^  pauValues[1]) & RT_BIT_64(iBit) ?  "changed" : "unchanged");
     902        cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
     903    }
     904    RTPrintf("%u change(s)\n", cChanges);
     905#else
     906    uint64_t fWriteable = 0;
    882907    for (uint32_t i = 0; i <= 63; i++)
    883908    {
    884909        uValue = RT_BIT_64(i);
     910# if 0
     911        if (uValue & (0x7))
     912            continue;
     913# endif
    885914        rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
    886915                         RCPtrValues, RCPtrValues + sizeof(uint64_t));
    887916        RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
    888917                 uMsr, pauValues[0], uValue, pauValues[1], rc);
     918        if (RT_SUCCESS(rc))
     919            fWriteable |= RT_BIT_64(i);
    889920    }
    890921
     
    900931    RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
    901932             uMsr, pauValues[0], uValue, pauValues[1], rc);
     933
     934    uValue = fWriteable;
     935    rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
     936                     RCPtrValues, RCPtrValues + sizeof(uint64_t));
     937    RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc [fWriteable]\n",
     938             uMsr, pauValues[0], uValue, pauValues[1], rc);
     939
     940#endif
    902941
    903942    /*
  • trunk/src/VBox/VMM/VMMRC/VMMRC.cpp

    r49147 r49893  
    55
    66/*
    7  * Copyright (C) 2006-2012 Oracle Corporation
     7 * Copyright (C) 2006-2013 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    4949static DECLCALLBACK(int) vmmGCTestTmpPFHandlerCorruptFS(PVM pVM, PCPUMCTXCORE pRegFrame);
    5050DECLASM(bool)   vmmRCSafeMsrRead(uint32_t uMsr, uint64_t *pu64Value);
     51DECLASM(bool)   vmmRCSafeMsrWrite(uint32_t uMsr, uint64_t u64Value);
    5152
    5253
     
    378379
    379380
     381/**
     382 * Tries to write the given value to an MSR, returns the effect and restors the
     383 * original value.
     384 *
     385 * This is called directly via VMMR3CallRC.
     386 *
     387 * @returns VBox status code.
     388 * @param   pVM             The VM handle.
     389 * @param   uMsr            The MSR to start at.
     390 * @param   u32ValueLow     The low part of the value to write.
     391 * @param   u32ValueHi      The high part of the value to write.
     392 * @param   puValueBefore   The value before writing.
     393 * @param   puValueAfter    The value read back after writing.
     394 */
     395extern "C" VMMRCDECL(int)
     396VMMRCTestTestWriteMsr(PVM pVM, uint32_t uMsr, uint32_t u32ValueLow, uint32_t u32ValueHi,
     397                      uint64_t *puValueBefore, uint64_t *puValueAfter)
     398{
     399    AssertPtrReturn(puValueBefore, VERR_INVALID_POINTER);
     400    AssertPtrReturn(puValueAfter, VERR_INVALID_POINTER);
     401    ASMIntDisable();
     402
     403    int      rc           = VINF_SUCCESS;
     404    uint64_t uValueBefore = UINT64_MAX;
     405    uint64_t uValueAfter  = UINT64_MAX;
     406    if (vmmRCSafeMsrRead(uMsr, &uValueBefore))
     407    {
     408        if (!vmmRCSafeMsrWrite(uMsr, RT_MAKE_U64(u32ValueLow, u32ValueHi)))
     409            rc = VERR_WRITE_PROTECT;
     410        if (!vmmRCSafeMsrRead(uMsr, &uValueAfter) && RT_SUCCESS(rc))
     411            rc = VERR_READ_ERROR;
     412        vmmRCSafeMsrWrite(uMsr, uValueBefore);
     413    }
     414    else
     415        rc = VERR_ACCESS_DENIED;
     416
     417    *puValueBefore = uValueBefore;
     418    *puValueAfter  = uValueAfter;
     419    return rc;
     420}
     421
     422
    380423
    381424/**
  • trunk/src/VBox/VMM/VMMRC/VMMRCA.asm

    r49362 r49893  
    235235    pushf
    236236    cli
     237    push    esi
     238    push    edi
     239    push    ebx
     240    push    ebp
    237241
    238242    mov     ecx, [ebp + 8]              ; The MSR to read.
     
    247251    mov     [ecx + 4], edx
    248252
     253    mov     eax, 1
     254.return:
     255    pop     ebp
     256    pop     ebx
     257    pop     edi
     258    pop     esi
    249259    popf
    250     mov     eax, 1
    251260    leave
    252261    ret
    253262
    254263.trapped:
    255     popf
    256264    mov     eax, 0
    257     leave
    258     ret
     265    jmp     .return
    259266ENDPROC vmmRCSafeMsrRead
    260267
     
    271278    pushf
    272279    cli
     280    push    esi
     281    push    edi
     282    push    ebx
     283    push    ebp
    273284
    274285    mov     ecx, [ebp + 8]              ; The MSR to write to.
     
    279290    wrmsr
    280291
     292    mov     eax, 1
     293.return:
     294    pop     ebp
     295    pop     ebx
     296    pop     edi
     297    pop     esi
    281298    popf
    282     mov     eax, 1
    283299    leave
    284300    ret
    285301
    286302.trapped:
    287     popf
    288303    mov     eax, 0
    289     leave
    290     ret
     304    jmp     .return
    291305ENDPROC vmmRCSafeMsrWrite
    292306
  • trunk/src/VBox/VMM/include/CPUMInternal.h

    r49019 r49893  
    2222# include <VBox/cdefs.h>
    2323# include <VBox/types.h>
     24# include <VBox/vmm/stam.h>
    2425# include <iprt/x86.h>
    2526#else
     
    108109#endif
    109110#endif
     111
     112
/**
 * MSR read functions.
 *
 * These enum values select the handler used when the guest executes RDMSR on
 * an MSR covered by a CPUMMSRRANGE (stored in CPUMMSRRANGE::enmRdFn as a
 * uint16_t).  Using an enum instead of a function pointer keeps the table
 * context-independent (ring-3, ring-0 and raw-mode all map the index to their
 * own function table).  The per-value notes describe what the handler takes
 * from the range entry (e.g. a register number or a reference value stored in
 * CPUMMSRRANGE::uInitOrReadValue).
 */
typedef enum CPUMMSRRDFN
{
    /** Invalid zero value. */
    kCpumMsrRdFn_Invalid = 0,
    /** Return the CPUMMSRRANGE::uInitOrReadValue. */
    kCpumMsrRdFn_FixedValue,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uInitOrReadValue.  Must be used in pair with
     * kCpumMsrWrFn_MsrAlias. */
    kCpumMsrRdFn_MsrAlias,
    /** Write only register, GP all read attempts. */
    kCpumMsrRdFn_WriteOnly,

    /* IA32 (Intel architectural) MSRs. */
    kCpumMsrRdFn_Ia32P5McAddr,
    kCpumMsrRdFn_Ia32P5McType,
    kCpumMsrRdFn_Ia32TimestampCounter,
    kCpumMsrRdFn_Ia32ApicBase,
    kCpumMsrRdFn_Ia32FeatureControl,
    kCpumMsrRdFn_Ia32SmmMonitorCtl,
    kCpumMsrRdFn_Ia32PmcN,
    kCpumMsrRdFn_Ia32MonitorFilterLineSize,
    kCpumMsrRdFn_Ia32MPerf,
    kCpumMsrRdFn_Ia32APerf,
    kCpumMsrRdFn_Ia32MtrrCap,               /**< Takes real CPU value for reference.  */
    kCpumMsrRdFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    kCpumMsrRdFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    kCpumMsrRdFn_Ia32MtrrDefType,
    kCpumMsrRdFn_Ia32Pat,
    kCpumMsrRdFn_Ia32SysEnterCs,
    kCpumMsrRdFn_Ia32SysEnterEsp,
    kCpumMsrRdFn_Ia32SysEnterEip,
    kCpumMsrRdFn_Ia32McgCap,
    kCpumMsrRdFn_Ia32McgStatus,
    kCpumMsrRdFn_Ia32McgCtl,
    kCpumMsrRdFn_Ia32DebugCtl,
    kCpumMsrRdFn_Ia32SmrrPhysBase,
    kCpumMsrRdFn_Ia32SmrrPhysMask,
    kCpumMsrRdFn_Ia32PlatformDcaCap,
    kCpumMsrRdFn_Ia32CpuDcaCap,
    kCpumMsrRdFn_Ia32Dca0Cap,
    kCpumMsrRdFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    kCpumMsrRdFn_Ia32PerfStatus,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32PerfCtl,               /**< Range value returned. */
    kCpumMsrRdFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32PerfCapabilities,      /**< Takes reference value. */
    kCpumMsrRdFn_Ia32FixedCtrCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalStatus,      /**< Takes reference value. */
    kCpumMsrRdFn_Ia32PerfGlobalCtrl,
    kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrRdFn_Ia32PebsEnable,
    kCpumMsrRdFn_Ia32ClockModulation,       /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermInterrupt,        /**< Range value returned. */
    kCpumMsrRdFn_Ia32ThermStatus,           /**< Range value returned. */
    kCpumMsrRdFn_Ia32Therm2Ctl,             /**< Range value returned. */
    kCpumMsrRdFn_Ia32MiscEnable,            /**< Range value returned. */
    kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    kCpumMsrRdFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    kCpumMsrRdFn_Ia32DsArea,
    kCpumMsrRdFn_Ia32TscDeadline,
    kCpumMsrRdFn_Ia32X2ApicN,
    kCpumMsrRdFn_Ia32VmxBase,               /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxPinbasedCtls,       /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcbasedCtls,      /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxExitCtls,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEntryCtls,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxMisc,               /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed0,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr0Fixed1,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed0,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxCr4Fixed1,          /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxVmcsEnum,           /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxProcBasedCtls2,     /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxEptVpidCap,         /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTruePinbasedCtls,   /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls,  /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueExitCtls,       /**< Takes real value as reference. */
    kCpumMsrRdFn_Ia32VmxTrueEntryCtls,      /**< Takes real value as reference. */

    /* AMD64 architectural MSRs. */
    kCpumMsrRdFn_Amd64Efer,
    kCpumMsrRdFn_Amd64SyscallTarget,
    kCpumMsrRdFn_Amd64LongSyscallTarget,
    kCpumMsrRdFn_Amd64CompSyscallTarget,
    kCpumMsrRdFn_Amd64SyscallFlagMask,
    kCpumMsrRdFn_Amd64FsBase,
    kCpumMsrRdFn_Amd64GsBase,
    kCpumMsrRdFn_Amd64KernelGsBase,
    kCpumMsrRdFn_Amd64TscAux,

    /* Intel specific MSRs. */
    kCpumMsrRdFn_IntelEblCrPowerOn,
    kCpumMsrRdFn_IntelPlatformInfo100MHz,
    kCpumMsrRdFn_IntelPlatformInfo133MHz,
    kCpumMsrRdFn_IntelPkgCStConfigControl,
    kCpumMsrRdFn_IntelPmgIoCaptureBase,
    kCpumMsrRdFn_IntelLastBranchFromToN,
    kCpumMsrRdFn_IntelLastBranchFromN,
    kCpumMsrRdFn_IntelLastBranchToN,
    kCpumMsrRdFn_IntelLastBranchTos,
    kCpumMsrRdFn_IntelBblCrCtl,
    kCpumMsrRdFn_IntelBblCrCtl3,
    kCpumMsrRdFn_IntelI7TemperatureTarget,  /**< Range value returned. */
    kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */
    kCpumMsrRdFn_IntelI7MiscPwrMgmt,
    kCpumMsrRdFn_IntelP6CrN,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrRdFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrRdFn_IntelI7SandyAesNiCtl,
    kCpumMsrRdFn_IntelI7TurboRatioLimit,    /**< Returns range value. */
    kCpumMsrRdFn_IntelI7LbrSelect,
    kCpumMsrRdFn_IntelI7SandyErrorControl,
    kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,/**< Returns range value. */
    kCpumMsrRdFn_IntelI7PowerCtl,
    kCpumMsrRdFn_IntelI7SandyPebsNumAlt,
    kCpumMsrRdFn_IntelI7PebsLdLat,
    kCpumMsrRdFn_IntelI7PkgCnResidencyN,     /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7CoreCnResidencyN,    /**< Takes C-state number. */
    kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyVrMiscConfig,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyRaplPowerUnit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN,     /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7SandyPkgC2Residency, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerLimit,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPerfStatus,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPkgPowerInfo,    /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerLimit,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,/**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPerfStatus,  /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplDramPowerInfo,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PowerLimit,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0Policy,       /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp0PerfStatus,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1PowerLimit,   /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus, /**< Takes real value as reference. */
    kCpumMsrRdFn_IntelI7RaplPp1Policy,       /**< Takes real value as reference. */

    /* P6 family last-branch/interrupt record MSRs. */
    kCpumMsrRdFn_P6LastBranchFromIp,
    kCpumMsrRdFn_P6LastBranchToIp,
    kCpumMsrRdFn_P6LastIntFromIp,
    kCpumMsrRdFn_P6LastIntToIp,

    /* AMD specific MSRs (K7/K8/Fam10h/Fam14h/Fam15h/Fam16h). */
    kCpumMsrRdFn_AmdFam15hTscRate,
    kCpumMsrRdFn_AmdFam15hLwpCfg,
    kCpumMsrRdFn_AmdFam15hLwpCbAddr,
    kCpumMsrRdFn_AmdFam10hMc4MiscN,
    kCpumMsrRdFn_AmdK8PerfCtlN,
    kCpumMsrRdFn_AmdK8PerfCtrN,
    kCpumMsrRdFn_AmdK8SysCfg,               /**< Range value returned. */
    kCpumMsrRdFn_AmdK8HwCr,
    kCpumMsrRdFn_AmdK8IorrBaseN,
    kCpumMsrRdFn_AmdK8IorrMaskN,
    kCpumMsrRdFn_AmdK8TopOfMemN,
    kCpumMsrRdFn_AmdK8NbCfg1,
    kCpumMsrRdFn_AmdK8McXcptRedir,
    kCpumMsrRdFn_AmdK8CpuNameN,
    kCpumMsrRdFn_AmdK8HwThermalCtrl,        /**< Range value returned. */
    kCpumMsrRdFn_AmdK8SwThermalCtrl,
    kCpumMsrRdFn_AmdK8McCtlMaskN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapN,
    kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrRdFn_AmdK8IntPendingMessage,
    kCpumMsrRdFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrRdFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrRdFn_AmdFam10hPStateCurLimit,   /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateControl,    /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateStatus,     /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hPStateN,          /**< Returns range value. This isn't a register index! */
    kCpumMsrRdFn_AmdFam10hCofVidControl,    /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCofVidStatus,     /**< Returns range value. */
    kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrRdFn_AmdK8SmmBase,
    kCpumMsrRdFn_AmdK8SmmAddr,
    kCpumMsrRdFn_AmdK8SmmMask,
    kCpumMsrRdFn_AmdK8VmCr,
    kCpumMsrRdFn_AmdK8IgnNe,
    kCpumMsrRdFn_AmdK8SmmCtl,
    kCpumMsrRdFn_AmdK8VmHSavePa,
    kCpumMsrRdFn_AmdFam10hVmLockKey,
    kCpumMsrRdFn_AmdFam10hSmmLockKey,
    kCpumMsrRdFn_AmdFam10hLocalSmiStatus,
    kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrRdFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrRdFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrRdFn_AmdK7MicrocodeCtl,         /**< Returns range value. */
    kCpumMsrRdFn_AmdK7ClusterIdMaybe,       /**< Returns range value. */
    kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrRdFn_AmdK7DebugStatusMaybe,
    kCpumMsrRdFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrRdFn_AmdK7BHTracePtrMaybe,
    kCpumMsrRdFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrRdFn_AmdK7FastFlushCountMaybe,
    kCpumMsrRdFn_AmdK7NodeId,
    kCpumMsrRdFn_AmdK7DrXAddrMaskN,      /**< Takes register index. */
    kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrRdFn_AmdK7LoadStoreCfg,
    kCpumMsrRdFn_AmdK7InstrCacheCfg,
    kCpumMsrRdFn_AmdK7DataCacheCfg,
    kCpumMsrRdFn_AmdK7BusUnitCfg,
    kCpumMsrRdFn_AmdK7DebugCtl2Maybe,
    kCpumMsrRdFn_AmdFam15hFpuCfg,
    kCpumMsrRdFn_AmdFam15hDecoderCfg,
    kCpumMsrRdFn_AmdFam10hBusUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg2,
    kCpumMsrRdFn_AmdFam15hCombUnitCfg3,
    kCpumMsrRdFn_AmdFam15hExecUnitCfg,
    kCpumMsrRdFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrRdFn_AmdFam10hIbsFetchCtl,
    kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrRdFn_AmdFam10hIbsOpRip,
    kCpumMsrRdFn_AmdFam10hIbsOpData,
    kCpumMsrRdFn_AmdFam10hIbsOpData2,
    kCpumMsrRdFn_AmdFam10hIbsOpData3,
    kCpumMsrRdFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrRdFn_AmdFam10hIbsCtl,
    kCpumMsrRdFn_AmdFam14hIbsBrTarget,

    /** End of valid MSR read function indexes. */
    kCpumMsrRdFn_End
} CPUMMSRRDFN;
     351
/**
 * MSR write functions.
 *
 * Counterpart to CPUMMSRRDFN: selects the handler used when the guest
 * executes WRMSR on an MSR covered by a CPUMMSRRANGE (stored in
 * CPUMMSRRANGE::enmWrFn as a uint16_t).  The write path also honours
 * CPUMMSRRANGE::fWrIgnMask and CPUMMSRRANGE::fWrGpMask, so many MSRs that
 * have read handlers need no dedicated write function here (e.g. read-only
 * registers use kCpumMsrWrFn_ReadOnly).
 */
typedef enum CPUMMSRWRFN
{
    /** Invalid zero value. */
    kCpumMsrWrFn_Invalid = 0,
    /** Writes are ignored, the fWrGpMask is observed though. */
    kCpumMsrWrFn_IgnoreWrite,
    /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */
    kCpumMsrWrFn_ReadOnly,
    /** Alias to the MSR range starting at the MSR given by
     * CPUMMSRRANGE::uInitOrReadValue.  Must be used in pair with
     * kCpumMsrRdFn_MsrAlias. */
    kCpumMsrWrFn_MsrAlias,

    /* IA32 (Intel architectural) MSRs. */
    kCpumMsrWrFn_Ia32P5McAddr,
    kCpumMsrWrFn_Ia32P5McType,
    kCpumMsrWrFn_Ia32TimestampCounter,
    kCpumMsrWrFn_Ia32ApicBase,
    kCpumMsrWrFn_Ia32FeatureControl,
    kCpumMsrWrFn_Ia32BiosUpdateTrigger,
    kCpumMsrWrFn_Ia32SmmMonitorCtl,
    kCpumMsrWrFn_Ia32PmcN,
    kCpumMsrWrFn_Ia32MonitorFilterLineSize,
    kCpumMsrWrFn_Ia32MPerf,
    kCpumMsrWrFn_Ia32APerf,
    kCpumMsrWrFn_Ia32MtrrPhysBaseN,         /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrPhysMaskN,         /**< Takes register number. */
    kCpumMsrWrFn_Ia32MtrrFixed,             /**< Takes CPUMCPU offset. */
    kCpumMsrWrFn_Ia32MtrrDefType,
    kCpumMsrWrFn_Ia32Pat,
    kCpumMsrWrFn_Ia32SysEnterCs,
    kCpumMsrWrFn_Ia32SysEnterEsp,
    kCpumMsrWrFn_Ia32SysEnterEip,
    kCpumMsrWrFn_Ia32McgStatus,
    kCpumMsrWrFn_Ia32McgCtl,
    kCpumMsrWrFn_Ia32DebugCtl,
    kCpumMsrWrFn_Ia32SmrrPhysBase,
    kCpumMsrWrFn_Ia32SmrrPhysMask,
    kCpumMsrWrFn_Ia32PlatformDcaCap,
    kCpumMsrWrFn_Ia32Dca0Cap,
    kCpumMsrWrFn_Ia32PerfEvtSelN,           /**< Range value indicates the register number. */
    kCpumMsrWrFn_Ia32PerfCtl,
    kCpumMsrWrFn_Ia32FixedCtrN,             /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32PerfCapabilities,
    kCpumMsrWrFn_Ia32FixedCtrCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalStatus,
    kCpumMsrWrFn_Ia32PerfGlobalCtrl,
    kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl,
    kCpumMsrWrFn_Ia32PebsEnable,
    kCpumMsrWrFn_Ia32ClockModulation,
    kCpumMsrWrFn_Ia32ThermInterrupt,
    kCpumMsrWrFn_Ia32ThermStatus,
    kCpumMsrWrFn_Ia32Therm2Ctl,
    kCpumMsrWrFn_Ia32MiscEnable,
    kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN,  /**< Takes bank number. */
    kCpumMsrWrFn_Ia32McNCtl2,               /**< Takes register number of start of range. */
    kCpumMsrWrFn_Ia32DsArea,
    kCpumMsrWrFn_Ia32TscDeadline,
    kCpumMsrWrFn_Ia32X2ApicN,

    /* AMD64 architectural MSRs. */
    kCpumMsrWrFn_Amd64Efer,
    kCpumMsrWrFn_Amd64SyscallTarget,
    kCpumMsrWrFn_Amd64LongSyscallTarget,
    kCpumMsrWrFn_Amd64CompSyscallTarget,
    kCpumMsrWrFn_Amd64SyscallFlagMask,
    kCpumMsrWrFn_Amd64FsBase,
    kCpumMsrWrFn_Amd64GsBase,
    kCpumMsrWrFn_Amd64KernelGsBase,
    kCpumMsrWrFn_Amd64TscAux,
    /* Intel specific MSRs. */
    kCpumMsrWrFn_IntelEblCrPowerOn,
    kCpumMsrWrFn_IntelPkgCStConfigControl,
    kCpumMsrWrFn_IntelPmgIoCaptureBase,
    kCpumMsrWrFn_IntelLastBranchFromToN,
    kCpumMsrWrFn_IntelLastBranchFromN,
    kCpumMsrWrFn_IntelLastBranchToN,
    kCpumMsrWrFn_IntelLastBranchTos,
    kCpumMsrWrFn_IntelBblCrCtl,
    kCpumMsrWrFn_IntelBblCrCtl3,
    kCpumMsrWrFn_IntelI7TemperatureTarget,
    kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
    kCpumMsrWrFn_IntelI7MiscPwrMgmt,
    kCpumMsrWrFn_IntelP6CrN,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
    kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
    kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
    kCpumMsrWrFn_IntelI7SandyAesNiCtl,
    kCpumMsrWrFn_IntelI7TurboRatioLimit,
    kCpumMsrWrFn_IntelI7LbrSelect,
    kCpumMsrWrFn_IntelI7SandyErrorControl,
    kCpumMsrWrFn_IntelI7PowerCtl,
    kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
    kCpumMsrWrFn_IntelI7PebsLdLat,
    kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
    kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
    kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
    kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
    kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp0Policy,
    kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
    kCpumMsrWrFn_IntelI7RaplPp1Policy,

    /* P6 family last-interrupt record MSRs. */
    kCpumMsrWrFn_P6LastIntFromIp,
    kCpumMsrWrFn_P6LastIntToIp,

    /* AMD specific MSRs (K7/K8/Fam10h/Fam14h/Fam15h/Fam16h). */
    kCpumMsrWrFn_AmdFam15hTscRate,
    kCpumMsrWrFn_AmdFam15hLwpCfg,
    kCpumMsrWrFn_AmdFam15hLwpCbAddr,
    kCpumMsrWrFn_AmdFam10hMc4MiscN,
    kCpumMsrWrFn_AmdK8PerfCtlN,
    kCpumMsrWrFn_AmdK8PerfCtrN,
    kCpumMsrWrFn_AmdK8SysCfg,
    kCpumMsrWrFn_AmdK8HwCr,
    kCpumMsrWrFn_AmdK8IorrBaseN,
    kCpumMsrWrFn_AmdK8IorrMaskN,
    kCpumMsrWrFn_AmdK8TopOfMemN,
    kCpumMsrWrFn_AmdK8NbCfg1,
    kCpumMsrWrFn_AmdK8McXcptRedir,
    kCpumMsrWrFn_AmdK8CpuNameN,
    kCpumMsrWrFn_AmdK8HwThermalCtrl,
    kCpumMsrWrFn_AmdK8SwThermalCtrl,
    kCpumMsrWrFn_AmdK8McCtlMaskN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
    kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
    kCpumMsrWrFn_AmdK8IntPendingMessage,
    kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
    kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
    kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
    kCpumMsrWrFn_AmdFam10hPStateControl,
    kCpumMsrWrFn_AmdFam10hPStateStatus,
    kCpumMsrWrFn_AmdFam10hPStateN,
    kCpumMsrWrFn_AmdFam10hCofVidControl,
    kCpumMsrWrFn_AmdFam10hCofVidStatus,
    kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
    kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
    kCpumMsrWrFn_AmdK8SmmBase,
    kCpumMsrWrFn_AmdK8SmmAddr,
    kCpumMsrWrFn_AmdK8SmmMask,
    kCpumMsrWrFn_AmdK8VmCr,
    kCpumMsrWrFn_AmdK8IgnNe,
    kCpumMsrWrFn_AmdK8SmmCtl,
    kCpumMsrWrFn_AmdK8VmHSavePa,
    kCpumMsrWrFn_AmdFam10hVmLockKey,
    kCpumMsrWrFn_AmdFam10hSmmLockKey,
    kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
    kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
    kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
    kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
    kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
    kCpumMsrWrFn_AmdK7MicrocodeCtl,
    kCpumMsrWrFn_AmdK7ClusterIdMaybe,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
    kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
    kCpumMsrWrFn_AmdK7DebugStatusMaybe,
    kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
    kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
    kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
    kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
    kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
    kCpumMsrWrFn_AmdK7NodeId,
    kCpumMsrWrFn_AmdK7DrXAddrMaskN,      /**< Takes register index. */
    kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
    kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
    kCpumMsrWrFn_AmdK7LoadStoreCfg,
    kCpumMsrWrFn_AmdK7InstrCacheCfg,
    kCpumMsrWrFn_AmdK7DataCacheCfg,
    kCpumMsrWrFn_AmdK7BusUnitCfg,
    kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
    kCpumMsrWrFn_AmdFam15hFpuCfg,
    kCpumMsrWrFn_AmdFam15hDecoderCfg,
    kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
    kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
    kCpumMsrWrFn_AmdFam15hExecUnitCfg,
    kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
    kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
    kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
    kCpumMsrWrFn_AmdFam10hIbsOpRip,
    kCpumMsrWrFn_AmdFam10hIbsOpData,
    kCpumMsrWrFn_AmdFam10hIbsOpData2,
    kCpumMsrWrFn_AmdFam10hIbsOpData3,
    kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
    kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
    kCpumMsrWrFn_AmdFam10hIbsCtl,
    kCpumMsrWrFn_AmdFam14hIbsBrTarget,

    /** End of valid MSR write function indexes. */
    kCpumMsrWrFn_End
} CPUMMSRWRFN;
     550
/**
 * MSR range.
 *
 * Describes how guest RDMSR/WRMSR accesses to the MSRs uFirst..uLast
 * (inclusive) are handled: which read/write handler to use (as CPUMMSRRDFN /
 * CPUMMSRWRFN indexes), an optional backing value, and write masks.  The
 * bracketed numbers in the member comments are the fixed byte offsets within
 * the structure, verified by the AssertCompileSize checks below.
 */
typedef struct CPUMMSRRANGE
{
    /** The first MSR. [0] */
    uint32_t    uFirst;
    /** The last MSR. [4] */
    uint32_t    uLast;
    /** The read function (CPUMMSRRDFN). [8] */
    uint16_t    enmRdFn;
    /** The write function (CPUMMSRWRFN). [10] */
    uint16_t    enmWrFn;
    /** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
     * UINT16_MAX if not used by the read and write functions.  [12] */
    uint16_t    offCpumCpu;
    /** Reserved for future hacks. [14] */
    uint16_t    fReserved;
    /** The init/read value. [16]
     * When enmRdFn is kCpumMsrRdFn_FixedValue, this is the value returned on
     * RDMSR.  offCpumCpu must be UINT16_MAX in that case, otherwise it must be
     * a valid offset into CPUMCPU. */
    uint64_t    uInitOrReadValue;
    /** The bits to ignore when writing. [24]   */
    uint64_t    fWrIgnMask;
    /** The bits that will cause a GP(0) when writing. [32]
     * This is always checked prior to calling the write function.  Using
     * UINT64_MAX effectively marks the MSR as read-only. */
    uint64_t    fWrGpMask;
    /** The register name, if applicable. [40] */
    char        szName[56];

#ifdef VBOX_WITH_STATISTICS
    /** The number of reads. */
    STAMCOUNTER cReads;
    /** The number of writes. */
    STAMCOUNTER cWrites;
    /** The number of times ignored bits were written. */
    STAMCOUNTER cIgnoredBits;
    /** The number of GPs generated. */
    STAMCOUNTER cGps;
#endif
} CPUMMSRRANGE;
/* 96 bytes of core data; statistics add four 8-byte counters (-> 128). */
#ifdef VBOX_WITH_STATISTICS
AssertCompileSize(CPUMMSRRANGE, 128);
#else
AssertCompileSize(CPUMMSRRANGE, 96);
#endif
/** Pointer to an MSR range. */
typedef CPUMMSRRANGE *PCPUMMSRRANGE;
/** Pointer to a const MSR range. */
typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
     603
     604
     605
     606
/**
 * CPU features and quirks.
 * This is mostly exploded CPUID info.
 *
 * One instance describes the guest CPU and another the host CPU (see the
 * GuestFeatures/HostFeatures members of struct CPUM).  Kept to a fixed
 * 32 bytes (checked below) since it is embedded in context-shared state.
 */
typedef struct CPUMFEATURES
{
    /** The CPU vendor (CPUMCPUVENDOR). */
    uint8_t         enmCpuVendor;
    /** The CPU family. */
    uint8_t         uFamily;
    /** The CPU model. */
    uint8_t         uModel;
    /** The CPU stepping. */
    uint8_t         uStepping;
    /** The microarchitecture. */
    CPUMMICROARCH   enmMicroarch;
    /** The maximum physical address width of the CPU. */
    uint8_t         cMaxPhysAddrWidth;
    /** Alignment padding.  */
    uint8_t         abPadding[3];

    /** Supports MSRs.  */
    uint32_t        fMsr : 1;
    /** Supports the page size extension (4/2 MB pages). */
    uint32_t        fPse : 1;
    /** Supports 36-bit page size extension (4 MB pages can map memory above
     *  4GB). */
    uint32_t        fPse36 : 1;
    /** Supports physical address extension (PAE).  */
    uint32_t        fPae : 1;
    /** Page attribute table (PAT) support (page level cache control). */
    uint32_t        fPat : 1;
    /** Supports the FXSAVE and FXRSTOR instructions. */
    uint32_t        fFxSaveRstor : 1;
    /** Intel SYSENTER/SYSEXIT support */
    uint32_t        fSysEnter : 1;
    /** First generation APIC. */
    uint32_t        fApic : 1;
    /** Second generation APIC. */
    uint32_t        fX2Apic : 1;
    /** Hypervisor present. */
    uint32_t        fHypervisorPresent : 1;
    /** MWAIT & MONITOR instructions supported. */
    uint32_t        fMonitorMWait : 1;

    /** AMD64: Supports long mode.  */
    uint32_t        fLongMode : 1;
    /** AMD64: SYSCALL/SYSRET support. */
    uint32_t        fSysCall : 1;
    /** AMD64: No-execute page table bit. */
    uint32_t        fNoExecute : 1;
    /** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
    uint32_t        fLahfSahf : 1;
    /** AMD64: Supports RDTSCP. */
    uint32_t        fRdTscP : 1;

    /** Indicates that FPU instruction and data pointers may leak.
     * This generally applies to recent AMD CPUs, where the FPU IP and DP pointer
     * is only saved and restored if an exception is pending.   */
    uint32_t        fLeakyFxSR : 1;

    /** Alignment padding.
     * NOTE(review): the declared bits sum to 26 of the 32-bit unit; the
     * compiler pads the remainder, so the size assertion below still holds. */
    uint32_t        fPadding : 9;

    uint64_t        auPadding[2];
} CPUMFEATURES;
AssertCompileSize(CPUMFEATURES, 32);
/** Pointer to a CPU feature structure. */
typedef CPUMFEATURES *PCPUMFEATURES;
/** Pointer to a const CPU feature structure. */
typedef CPUMFEATURES const *PCCPUMFEATURES;
     678
     679
/**
 * CPU info
 *
 * Aggregates the guest CPU profile: the MSR range table and the CPUID leaf
 * array, together with lookup configuration.  The arrays are reachable from
 * all three contexts, hence the parallel R0/R3/RC pointer members below
 * (all pointing at the same data).
 */
typedef struct CPUMINFO
{
    /** The number of MSR ranges (CPUMMSRRANGE) in the array pointed to below. */
    uint32_t                    cMsrRanges;
    /** Mask applied to ECX before looking up the MSR for a RDMSR/WRMSR
     * instruction.  Older hardware has been observed to ignore higher bits. */
    uint32_t                    fMsrMask;

    /** The number of CPUID leaves (CPUMCPUIDLEAF) in the array pointed to below. */
    uint32_t                    cCpuIdLeaves;
    /** The index of the first extended CPUID leaf in the array.
     *  Set to cCpuIdLeaves if none present. */
    uint32_t                    iFirstExtCpuIdLeaf;
    /** How to handle unknown CPUID leaves. */
    CPUMUKNOWNCPUID             enmUnknownCpuIdMethod;
    /** For use with CPUMUKNOWNCPUID_DEFAULTS. */
    CPUMCPUID                   DefCpuId;

    /** Alignment padding.  */
    uint32_t                    uPadding;

    /** Pointer to the MSR ranges (ring-0 pointer). */
    R0PTRTYPE(PCPUMMSRRANGE)    paMsrRangesR0;
    /** Pointer to the CPUID leaves (ring-0 pointer). */
    R0PTRTYPE(PCPUMCPUIDLEAF)   paCpuIdLeavesR0;

    /** Pointer to the MSR ranges (ring-3 pointer). */
    R3PTRTYPE(PCPUMMSRRANGE)    paMsrRangesR3;
    /** Pointer to the CPUID leaves (ring-3 pointer). */
    R3PTRTYPE(PCPUMCPUIDLEAF)   paCpuIdLeavesR3;

    /** Pointer to the MSR ranges (raw-mode context pointer). */
    RCPTRTYPE(PCPUMMSRRANGE)    paMsrRangesRC;
    /** Pointer to the CPUID leaves (raw-mode context pointer). */
    RCPTRTYPE(PCPUMCPUIDLEAF)   paCpuIdLeavesRC;
} CPUMINFO;
/** Pointer to a CPU info structure. */
typedef CPUMINFO *PCPUMINFO;
/** Pointer to a const CPU info structure. */
typedef CPUMINFO const *CPCPUMINFO;
    110723
    111724
     
    310923    } CPUFeaturesExt;
    311924
    312     /** Host CPU manufacturer. */
    313     CPUMCPUVENDOR           enmHostCpuVendor;
    314     /** Guest CPU manufacturer. */
    315     CPUMCPUVENDOR           enmGuestCpuVendor;
    316 
    317925    /** CR4 mask */
    318926    struct
     
    322930    } CR4;
    323931
    324     /** Synthetic CPU type? */
    325     bool                    fSyntheticCpu;
    326932    /** The (more) portable CPUID level.  */
    327933    uint8_t                 u8PortableCpuIdLevel;
     
    329935     * This is used to verify load order dependencies (PGM). */
    330936    bool                    fPendingRestore;
    331     uint8_t                 abPadding[HC_ARCH_BITS == 64 ? 5 : 1];
     937    uint8_t                 abPadding[HC_ARCH_BITS == 64 ? 6 : 2];
    332938
    333939    /** The standard set of CpuId leaves. */
     
    345951    uint8_t                 abPadding2[4];
    346952#endif
     953
     954    /** Guest CPU info. */
     955    CPUMINFO                GuestInfo;
     956    /** Guest CPU feature information. */
     957    CPUMFEATURES            GuestFeatures;
     958    /** Host CPU feature information. */
     959    CPUMFEATURES            HostFeatures;
     960
     961    /** @name MSR statistics.
     962     * @{ */
     963    STAMCOUNTER             cMsrWrites;
     964    STAMCOUNTER             cMsrWritesToIgnoredBits;
     965    STAMCOUNTER             cMsrWritesRaiseGp;
     966    STAMCOUNTER             cMsrWritesUnknown;
     967    STAMCOUNTER             cMsrReads;
     968    STAMCOUNTER             cMsrReadsRaiseGp;
     969    STAMCOUNTER             cMsrReadsUnknown;
     970    /** @} */
    347971} CPUM;
    348972/** Pointer to the CPUM instance data residing in the shared VM structure. */
     
    4301054RT_C_DECLS_BEGIN
    4311055
     1056PCPUMCPUIDLEAF      cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf);
     1057
    4321058#ifdef IN_RING3
    4331059int                 cpumR3DbgInit(PVM pVM);
     1060PCPUMCPUIDLEAF      cpumR3CpuIdGetLeaf(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf);
     1061bool                cpumR3CpuIdGetLeafLegacy(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf,
     1062                                             PCPUMCPUID pLeagcy);
     1063int                 cpumR3CpuIdInsert(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf);
     1064void                cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast);
     1065int                 cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures);
     1066int                 cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo);
     1067int                 cpumR3MsrRangesInsert(PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
     1068int                 cpumR3MsrStrictInitChecks(void);
     1069int                 cpumR3MsrRegStats(PVM pVM);
     1070PCPUMMSRRANGE       cpumLookupMsrRange(PVM pVM, uint32_t idMsr);
    4341071#endif
    4351072
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r49019 r49893  
    6767    .CPUFeaturesExt.ecx   resd    1
    6868
    69     .enmHostCpuVendor     resd    1
    70     .enmGuestCpuVendor    resd    1
    71 
    7269    ; CR4 masks
    7370    .CR4.AndMask          resd    1
    7471    .CR4.OrMask           resd    1
    7572    ; entered rawmode?
    76     .fSyntheticCpu        resb    1
    7773    .u8PortableCpuIdLevel resb    1
    7874    .fPendingRestore      resb    1
    7975%if RTHCPTR_CB == 8
    80     .abPadding            resb    5
     76    .abPadding            resb    6
    8177%else
    82     .abPadding            resb    1
     78    .abPadding            resb    2
    8379%endif
    8480
     
    9389    .abPadding2           resb    4
    9490%endif
     91
     92    .GuestInfo            resb    RTHCPTR_CB*4 + RTRCPTR_CB*2 + 4*10
     93    .GuestFeatures        resb    32
     94    .HostFeatures         resb    32
     95
     96    .cMsrWrites                 resq  1
     97    .cMsrWritesToIgnoredBits    resq  1
     98    .cMsrWritesRaiseGp          resq  1
     99    .cMsrWritesUnknown          resq  1
     100    .cMsrReads                  resq  1
     101    .cMsrReadsRaiseGp           resq  1
     102    .cMsrReadsUnknown           resq  1
    95103endstruc
    96104
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r47844 r49893  
    3232    GEN_CHECK_OFF(CPUM, CPUFeaturesExt);
    3333    GEN_CHECK_OFF(CPUM, CPUFeaturesExt);
    34     GEN_CHECK_OFF(CPUM, enmHostCpuVendor);
    35     GEN_CHECK_OFF(CPUM, enmGuestCpuVendor);
    3634    GEN_CHECK_OFF(CPUM, CR4);
    3735#ifndef VBOX_FOR_DTRACE_LIB
    38     GEN_CHECK_OFF(CPUM, fSyntheticCpu);
    3936    GEN_CHECK_OFF(CPUM, u8PortableCpuIdLevel);
    4037    GEN_CHECK_OFF(CPUM, fPendingRestore);
  • trunk/src/VBox/VMM/tools/Makefile.kmk

    r49282 r49893  
    5151
    5252
     53#
     54# CPU report program (CPUM DB).
     55#
     56PROGRAMS += VBoxCpuReport
     57VBoxCpuReport_TEMPLATE := VBoxR3Static
     58VBoxCpuReport_DEFS      = IN_VMM_R3
     59VBoxCpuReport_INCS      = ../include
     60VBoxCpuReport_SOURCES   = \
     61        VBoxCpuReport.cpp \
     62       ../VMMR3/CPUMR3CpuId.cpp
     63VBoxCpuReport_LIBS      = \
     64        $(PATH_STAGE_LIB)/SUPR3Static$(VBOX_SUFF_LIB) \
     65       $(VBOX_LIB_RUNTIME_STATIC)
     66VBoxCpuReport_LDFLAGS.darwin = \
     67        -framework IOKit -framework CoreFoundation -framework CoreServices
     68
     69
    5370include $(FILE_KBUILD_SUB_FOOTER)
    5471
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette