VirtualBox

Changeset 49893 in vbox for trunk/src/VBox/VMM/VMMR3


Ignore:
Timestamp:
Dec 13, 2013 12:40:20 AM (11 years ago)
Author:
vboxsync
Message:

MSR rewrite: initial hacking - half disabled.

Location:
trunk/src/VBox/VMM/VMMR3
Files:
2 added
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r49538 r49893  
    5555#include <VBox/err.h>
    5656#include <VBox/log.h>
     57#include <iprt/asm-amd64-x86.h>
    5758#include <iprt/assert.h>
    58 #include <iprt/asm-amd64-x86.h>
     59#include <iprt/cpuset.h>
     60#include <iprt/mem.h>
     61#include <iprt/mp.h>
    5962#include <iprt/string.h>
    60 #include <iprt/mp.h>
    61 #include <iprt/cpuset.h>
    6263#include "internal/pgm.h"
    6364
     
    115116*   Internal Functions                                                         *
    116117*******************************************************************************/
    117 static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
    118118static int cpumR3CpuIdInit(PVM pVM);
    119119static DECLCALLBACK(int)  cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
     
    581581
    582582    /*
    583      * Assert alignment and sizes.
     583     * Assert alignment, sizes and tables.
    584584     */
    585585    AssertCompileMemberAlignment(VM, cpum.s, 32);
     
    592592    AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
    593593    AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64);
     594#ifdef VBOX_STRICT
     595    int rc2 = cpumR3MsrStrictInitChecks();
     596    AssertRCReturn(rc2, rc2);
     597#endif
    594598
    595599    /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */
    596600    pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum);
    597601    Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.offCPUMCPU0 == (uintptr_t)&pVM->aCpus[0].cpum);
     602
    598603
    599604    /* Calculate the offset from CPUMCPU to CPUM. */
     
    647652
    648653    /*
    649      * Detect the host CPU vendor.
    650      * (The guest CPU vendor is re-detected later on.)
    651      */
    652     uint32_t uEAX, uEBX, uECX, uEDX;
    653     ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    654     pVM->cpum.s.enmHostCpuVendor = cpumR3DetectVendor(uEAX, uEBX, uECX, uEDX);
    655     pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor;
     654     * Gather info about the host CPU.
     655     */
     656    PCPUMCPUIDLEAF  paLeaves;
     657    uint32_t        cLeaves;
     658    int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
     659    AssertLogRelRCReturn(rc, rc);
     660
     661    rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &pVM->cpum.s.HostFeatures);
     662    RTMemFree(paLeaves);
     663    AssertLogRelRCReturn(rc, rc);
     664    pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;
    656665
    657666    /*
     
    662671     * Register saved state data item.
    663672     */
    664     int rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
    665                                    NULL, cpumR3LiveExec, NULL,
    666                                    NULL, cpumR3SaveExec, NULL,
    667                                    cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
     673    rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
     674                               NULL, cpumR3LiveExec, NULL,
     675                               NULL, cpumR3SaveExec, NULL,
     676                               cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
    668677    if (RT_FAILURE(rc))
    669678        return rc;
     
    700709
    701710/**
     702  * Detect the CPU vendor given the CPUID(0) output.
    703  *
    704  * @returns The vendor.
    705  * @param   uEAX                EAX from CPUID(0).
    706  * @param   uEBX                EBX from CPUID(0).
    707  * @param   uECX                ECX from CPUID(0).
    708  * @param   uEDX                EDX from CPUID(0).
    709  */
    710 static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
    711 {
    712     if (ASMIsValidStdRange(uEAX))
    713     {
    714         if (ASMIsAmdCpuEx(uEBX, uECX, uEDX))
    715             return CPUMCPUVENDOR_AMD;
    716 
    717         if (ASMIsIntelCpuEx(uEBX, uECX, uEDX))
    718             return CPUMCPUVENDOR_INTEL;
    719 
    720         if (ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX))
    721             return CPUMCPUVENDOR_VIA;
    722 
    723         /** @todo detect the other buggers... */
    724     }
    725 
    726     return CPUMCPUVENDOR_UNKNOWN;
     711 * Loads MSR range overrides.
     712 *
     713 * This must be called before the MSR ranges are moved from the normal heap to
     714 * the hyper heap!
     715 *
     716 * @returns VBox status code (VMSetError called).
     717 * @param   pVM                 Pointer to the cross context VM structure
     718 * @param   pMsrNode            The CFGM node with the MSR overrides.
     719 */
     720static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode)
     721{
     722    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode))
     723    {
     724        /*
     725         * Assemble a valid MSR range.
     726         */
     727        CPUMMSRRANGE MsrRange;
     728        MsrRange.offCpumCpu = 0;
     729        MsrRange.fReserved  = 0;
     730
     731        int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName));
     732        if (RT_FAILURE(rc))
     733            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc);
     734
     735        rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst);
     736        if (RT_FAILURE(rc))
     737            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n",
     738                              MsrRange.szName, rc);
     739
     740        rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst);
     741        if (RT_FAILURE(rc))
     742            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n",
     743                              MsrRange.szName, rc);
     744
     745        char szType[32];
     746        rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue");
     747        if (RT_FAILURE(rc))
     748            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n",
     749                              MsrRange.szName, rc);
     750        if (!RTStrICmp(szType, "FixedValue"))
     751        {
     752            MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue;
     753            MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite;
     754
     755            rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uInitOrReadValue, 0);
     756            if (RT_FAILURE(rc))
     757                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n",
     758                                  MsrRange.szName, rc);
     759
     760            rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0);
     761            if (RT_FAILURE(rc))
     762                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n",
     763                                  MsrRange.szName, rc);
     764
     765            rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0);
     766            if (RT_FAILURE(rc))
     767                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n",
     768                                  MsrRange.szName, rc);
     769        }
     770        else
     771            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
     772                              "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType);
     773
     774        /*
     775         * Insert the range into the table (replaces/splits/shrinks existing
     776         * MSR ranges).
     777         */
     778        rc = cpumR3MsrRangesInsert(&pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges, &MsrRange);
     779        if (RT_FAILURE(rc))
     780            return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc);
     781    }
     782
     783    return VINF_SUCCESS;
    727784}
     785
     786
     787/**
     788 * Loads CPUID leaf overrides.
     789 *
     790 * This must be called before the CPUID leaves are moved from the normal
     791 * heap to the hyper heap!
     792 *
     793 * @returns VBox status code (VMSetError called).
     794 * @param   pVM             Pointer to the cross context VM structure
     795 * @param   pParentNode     The CFGM node with the CPUID leaves.
     796 * @param   pszLabel        How to label the overrides we're loading.
     797 */
     798static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel)
     799{
     800    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode))
     801    {
     802        /*
     803         * Get the leaf and subleaf numbers.
     804         */
     805        char szName[128];
     806        int rc = CFGMR3GetName(pNode, szName, sizeof(szName));
     807        if (RT_FAILURE(rc))
     808            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc);
     809
     810        /* The leaf number is either specified directly or thru the node name. */
     811        uint32_t uLeaf;
     812        rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf);
     813        if (rc == VERR_CFGM_VALUE_NOT_FOUND)
     814        {
     815            rc = RTStrToUInt32Full(szName, 16, &uLeaf);
     816            if (rc != VINF_SUCCESS)
     817                return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS,
     818                                  "Invalid %s entry: Invalid leaf number: '%s' \n", pszLabel, szName);
     819        }
     820        else if (RT_FAILURE(rc))
     821            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n",
     822                              pszLabel, szName, rc);
     823
     824        uint32_t uSubLeaf;
     825        rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0);
     826        if (RT_FAILURE(rc))
     827            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n",
     828                              pszLabel, szName, rc);
     829
     830        uint32_t fSubLeafMask;
     831        rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0);
     832        if (RT_FAILURE(rc))
     833            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n",
     834                              pszLabel, szName, rc);
     835
     836        /*
     837         * Look up the specified leaf, since the output register values
     838         * defaults to any existing values.  This allows overriding a single
     839         * register, without needing to know the other values.
     840         */
     841        PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
     842                                                   uLeaf, uSubLeaf);
     843        CPUMCPUIDLEAF   Leaf;
     844        if (pLeaf)
     845            Leaf = *pLeaf;
     846        else
     847            RT_ZERO(Leaf);
     848        Leaf.uLeaf          = uLeaf;
     849        Leaf.uSubLeaf       = uSubLeaf;
     850        Leaf.fSubLeafMask   = fSubLeafMask;
     851
     852        rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax);
     853        if (RT_FAILURE(rc))
     854            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n",
     855                              pszLabel, szName, rc);
     856        rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx);
     857        if (RT_FAILURE(rc))
     858            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n",
     859                              pszLabel, szName, rc);
     860        rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx);
     861        if (RT_FAILURE(rc))
     862            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n",
     863                              pszLabel, szName, rc);
     864        rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx);
     865        if (RT_FAILURE(rc))
     866            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n",
     867                              pszLabel, szName, rc);
     868
     869        /*
     870         * Insert the leaf into the table (replaces existing ones).
     871         */
     872        rc = cpumR3CpuIdInsert(&pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves, &Leaf);
     873        if (RT_FAILURE(rc))
     874            return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc);
     875    }
     876
     877    return VINF_SUCCESS;
     878}
     879
    728880
    729881
     
    815967
    816968
     969static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCPUM, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves)
     970{
     971    /*
     972     * Install the CPUID information.
     973     */
     974    int rc = MMHyperDupMem(pVM, paLeaves, sizeof(paLeaves[0]) * cLeaves, 32,
     975                           MM_TAG_CPUM_CPUID, (void **)&pCPUM->GuestInfo.paCpuIdLeavesR3);
     976
     977    AssertLogRelRCReturn(rc, rc);
     978
     979    pCPUM->GuestInfo.paCpuIdLeavesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
     980    pCPUM->GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
     981    Assert(MMHyperR0ToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesR0) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
     982    Assert(MMHyperRCToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesRC) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
     983
     984    /*
     985     * Explode the guest CPU features.
     986     */
     987    rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
     988    AssertLogRelRCReturn(rc, rc);
     989
     990
     991    /*
     992     * Populate the legacy arrays.  Currently used for everything, later only
     993     * for patch manager.
     994     */
     995    struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] =
     996    {
     997        { pCPUM->aGuestCpuIdStd,        RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     0x00000000 },
     998        { pCPUM->aGuestCpuIdExt,        RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     0x80000000 },
     999        { pCPUM->aGuestCpuIdCentaur,    RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), 0xc0000000 },
     1000        { pCPUM->aGuestCpuIdHyper,      RT_ELEMENTS(pCPUM->aGuestCpuIdHyper),   0x40000000 },
     1001    };
     1002    for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++)
     1003    {
     1004        uint32_t    cLeft       = aOldRanges[i].cCpuIds;
     1005        uint32_t    uLeaf       = aOldRanges[i].uBase + cLeft;
     1006        PCPUMCPUID  pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft];
     1007        while (cLeft-- > 0)
     1008        {
     1009            uLeaf--;
     1010            pLegacyLeaf--;
     1011
     1012            PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, uLeaf, 0);
     1013            if (pLeaf)
     1014            {
     1015                pLegacyLeaf->eax = pLeaf->uEax;
     1016                pLegacyLeaf->ebx = pLeaf->uEbx;
     1017                pLegacyLeaf->ecx = pLeaf->uEcx;
     1018                pLegacyLeaf->edx = pLeaf->uEdx;
     1019            }
     1020            else
     1021                *pLegacyLeaf = pCPUM->GuestInfo.DefCpuId;
     1022        }
     1023    }
     1024
     1025    pCPUM->GuestCpuIdDef = pCPUM->GuestInfo.DefCpuId;
     1026
     1027    return VINF_SUCCESS;
     1028}
     1029
     1030
    8171031/**
    8181032 * Initializes the emulated CPU's cpuid information.
     
    8251039    PCPUM       pCPUM    = &pVM->cpum.s;
    8261040    PCFGMNODE   pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
    827     uint32_t    i;
    8281041    int         rc;
    8291042
    830 #define PORTABLE_CLEAR_BITS_WHEN(Lvl, LeafSuffReg, FeatNm, fMask, uValue) \
    831     if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fMask)) == (uValue) ) \
     1043#define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \
     1044    if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \
    8321045    { \
    833         LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: %#x -> 0\n", pCPUM->aGuestCpuId##LeafSuffReg & (fMask))); \
    834         pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fMask); \
    835     }
    836 #define PORTABLE_DISABLE_FEATURE_BIT(Lvl, LeafSuffReg, FeatNm, fBitMask) \
    837     if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg & (fBitMask)) ) \
     1046        LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \
     1047        (a_pLeafReg) &= ~(uint32_t)(fMask); \
     1048    }
     1049#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \
     1050    if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \
    8381051    { \
    839         LogRel(("PortableCpuId: " #LeafSuffReg "[" #FeatNm "]: 1 -> 0\n")); \
    840         pCPUM->aGuestCpuId##LeafSuffReg &= ~(uint32_t)(fBitMask); \
     1052        LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
     1053        (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
    8411054    }
    8421055
     
    8471060     * Enables the Synthetic CPU.  The Vendor ID and Processor Name are
    8481061     * completely overridden by VirtualBox custom strings.  Some
    849      * CPUID information is withheld, like the cache info. */
    850     rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu",  &pCPUM->fSyntheticCpu, false);
     1062     * CPUID information is withheld, like the cache info.
     1063     *
     1064     * This is obsoleted by PortableCpuIdLevel. */
     1065    bool fSyntheticCpu;
     1066    rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu",  &fSyntheticCpu, false);
    8511067    AssertRCReturn(rc, rc);
    8521068
     
    8561072     * values should only be used when older CPUs are involved since it may
    8571073     * harm performance and maybe also cause problems with specific guests. */
    858     rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, 0);
    859     AssertRCReturn(rc, rc);
    860 
    861     AssertLogRelReturn(!pCPUM->fSyntheticCpu || !pCPUM->u8PortableCpuIdLevel, VERR_CPUM_INCOMPATIBLE_CONFIG);
    862 
    863     /*
    864      * Get the host CPUID leaves and redetect the guest CPU vendor (could've
    865      * been overridden).
    866      */
    867     /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
    868      * Overrides the host CPUID leaf values used for calculating the guest CPUID
    869      * leaves.  This can be used to preserve the CPUID values when moving a VM to a
    870      * different machine.  Another use is restricting (or extending) the feature set
    871      * exposed to the guest. */
    872     PCFGMNODE pHostOverrideCfg = CFGMR3GetChild(pCpumCfg, "HostCPUID");
    873     rc = cpumR3CpuIdInitHostSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     pHostOverrideCfg);
    874     AssertRCReturn(rc, rc);
    875     rc = cpumR3CpuIdInitHostSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     pHostOverrideCfg);
    876     AssertRCReturn(rc, rc);
    877     rc = cpumR3CpuIdInitHostSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pHostOverrideCfg);
    878     AssertRCReturn(rc, rc);
    879 
    880     pCPUM->enmGuestCpuVendor = cpumR3DetectVendor(pCPUM->aGuestCpuIdStd[0].eax, pCPUM->aGuestCpuIdStd[0].ebx,
    881                                                   pCPUM->aGuestCpuIdStd[0].ecx, pCPUM->aGuestCpuIdStd[0].edx);
    882 
    883     /*
    884      * Determine the default leaf.
    885      *
    886      * Intel returns values of the highest standard function, while AMD
    887      * returns zeros. VIA on the other hand seems to returning nothing or
    888      * perhaps some random garbage, we don't try to duplicate this behavior.
    889      */
    890     ASMCpuIdExSlow(pCPUM->aGuestCpuIdStd[0].eax + 10, 0, 0, 0, /** @todo r=bird: Use the host value here in case of overrides and more than 10 leaves being stripped already. */
    891                    &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx,
    892                    &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx);
     1074    rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, fSyntheticCpu ? 1 : 0);
     1075    AssertLogRelRCReturn(rc, rc);
     1076
     1077    /** @cfgm{CPUM/GuestCpuName, string}
      1078     * The name of the CPU we're to emulate.  The default is the host CPU.
      1079     * Note! CPUs other than the "host" one are currently unsupported. */
     1080    char szCpuName[128];
     1081    rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", szCpuName, sizeof(szCpuName), "host");
     1082    AssertLogRelRCReturn(rc, rc);
    8931083
    8941084    /** @cfgm{/CPUM/CMPXCHG16B, boolean, false}
     
    8961086     */
    8971087    bool fCmpXchg16b;
    898     rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false); AssertRCReturn(rc, rc);
     1088    rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false);
     1089    AssertLogRelRCReturn(rc, rc);
    8991090
    9001091    /** @cfgm{/CPUM/MONITOR, boolean, true}
     
    9021093     */
    9031094    bool fMonitor;
    904     rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true); AssertRCReturn(rc, rc);
    905 
    906     /* Cpuid 1 & 0x80000001:
     1095    rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true);
     1096    AssertLogRelRCReturn(rc, rc);
     1097
     1098    /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
     1099     * Expose MWAIT extended features to the guest.  For now we expose just MWAIT
     1100     * break on interrupt feature (bit 1).
     1101     */
     1102    bool fMWaitExtensions;
     1103    rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false);
     1104    AssertLogRelRCReturn(rc, rc);
     1105
     1106    /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
     1107     * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
     1108     * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
     1109     * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
     1110     */
     1111    bool fNt4LeafLimit;
     1112    rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false);
     1113    AssertLogRelRCReturn(rc, rc);
     1114
     1115    /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
     1116     * Restrict the reported CPU family+model+stepping of intel CPUs.  This is
     1117     * probably going to be a temporary hack, so don't depend on this.
     1118     * The 1st byte of the value is the stepping, the 2nd byte value is the model
     1119     * number and the 3rd byte value is the family, and the 4th value must be zero.
     1120     */
     1121    uint32_t uMaxIntelFamilyModelStep;
     1122    rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX);
     1123    AssertLogRelRCReturn(rc, rc);
     1124
     1125    /*
     1126     * Get the guest CPU data from the database and/or the host.
     1127     */
     1128    rc = cpumR3DbGetCpuInfo(szCpuName, &pCPUM->GuestInfo);
     1129    if (RT_FAILURE(rc))
     1130        return rc == VERR_CPUM_DB_CPU_NOT_FOUND
     1131             ? VMSetError(pVM, rc, RT_SRC_POS,
     1132                          "Info on guest CPU '%s' could not be found. Please, select a different CPU.", szCpuName)
     1133             : rc;
     1134
     1135    /** @cfgm{CPUM/MSRs/[Name]/[First|Last|Type|Value|...],}
     1136     * Overrides the guest MSRs.
     1137     */
     1138    rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs"));
     1139
     1140    /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
     1141     * Overrides the CPUID leaf values (from the host CPU usually) used for
     1142     * calculating the guest CPUID leaves.  This can be used to preserve the CPUID
     1143     * values when moving a VM to a different machine.  Another use is restricting
     1144     * (or extending) the feature set exposed to the guest. */
     1145    if (RT_SUCCESS(rc))
     1146        rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID");
     1147
     1148    if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */
     1149        rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
     1150                        "Found unsupported configuration node '/CPUM/CPUID/'. "
     1151                        "Please use IMachine::setCPUIDLeaf() instead.");
     1152
     1153    /*
      1154     * Pre-explode the CPUID info.
     1155     */
     1156    if (RT_SUCCESS(rc))
     1157        rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
     1158    if (RT_FAILURE(rc))
     1159    {
     1160        RTMemFree(pCPUM->GuestInfo.paCpuIdLeavesR3);
     1161        pCPUM->GuestInfo.paCpuIdLeavesR3 = NULL;
     1162        RTMemFree(pCPUM->GuestInfo.paMsrRangesR3);
     1163        pCPUM->GuestInfo.paMsrRangesR3 = NULL;
     1164        return rc;
     1165    }
     1166
     1167
     1168    /* ... split this function about here ... */
     1169
     1170
     1171    PCPUMCPUIDLEAF pStdLeaf0 = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0);
     1172    AssertLogRelReturn(pStdLeaf0, VERR_CPUM_IPE_2);
     1173
     1174
     1175    /* Cpuid 1:
    9071176     * Only report features we can support.
    9081177     *
     
    9101179     *       options may require adjusting (i.e. stripping what was enabled).
    9111180     */
    912     pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU
     1181    PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
     1182    AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2);
     1183    pStdFeatureLeaf->uEdx        &= X86_CPUID_FEATURE_EDX_FPU
    9131184                                  | X86_CPUID_FEATURE_EDX_VME
    9141185                                  | X86_CPUID_FEATURE_EDX_DE
     
    9411212                                  //| X86_CPUID_FEATURE_EDX_PBE   - no pending break enabled.
    9421213                                  | 0;
    943     pCPUM->aGuestCpuIdStd[1].ecx &= 0
     1214    pStdFeatureLeaf->uEcx        &= 0
    9441215                                  | X86_CPUID_FEATURE_ECX_SSE3
    9451216                                  /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
     
    9611232    if (pCPUM->u8PortableCpuIdLevel > 0)
    9621233    {
    963         PORTABLE_CLEAR_BITS_WHEN(1, Std[1].eax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
    964         PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
    965         PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSE3,  X86_CPUID_FEATURE_ECX_SSE3);
    966         PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, CX16,  X86_CPUID_FEATURE_ECX_CX16);
    967         PORTABLE_DISABLE_FEATURE_BIT(2, Std[1].edx, SSE2,  X86_CPUID_FEATURE_EDX_SSE2);
    968         PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, SSE,   X86_CPUID_FEATURE_EDX_SSE);
    969         PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
    970         PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CMOV,  X86_CPUID_FEATURE_EDX_CMOV);
    971 
    972         Assert(!(pCPUM->aGuestCpuIdStd[1].edx & (  X86_CPUID_FEATURE_EDX_SEP
     1234        PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
     1235        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);
     1236        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE3,  X86_CPUID_FEATURE_ECX_SSE3);
     1237        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, CX16,  X86_CPUID_FEATURE_ECX_CX16);
     1238        PORTABLE_DISABLE_FEATURE_BIT(2, pStdFeatureLeaf->uEdx, SSE2,  X86_CPUID_FEATURE_EDX_SSE2);
     1239        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, SSE,   X86_CPUID_FEATURE_EDX_SSE);
     1240        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);
     1241        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CMOV,  X86_CPUID_FEATURE_EDX_CMOV);
     1242
     1243        Assert(!(pStdFeatureLeaf->uEdx        & (  X86_CPUID_FEATURE_EDX_SEP
    9731244                                                 | X86_CPUID_FEATURE_EDX_PSN
    9741245                                                 | X86_CPUID_FEATURE_EDX_DS
     
    9781249                                                 | X86_CPUID_FEATURE_EDX_PBE
    9791250                                                 )));
    980         Assert(!(pCPUM->aGuestCpuIdStd[1].ecx & (  X86_CPUID_FEATURE_ECX_PCLMUL
     1251        Assert(!(pStdFeatureLeaf->uEcx        & (  X86_CPUID_FEATURE_ECX_PCLMUL
    9811252                                                 | X86_CPUID_FEATURE_ECX_DTES64
    9821253                                                 | X86_CPUID_FEATURE_ECX_CPLDS
     
    10081279     * ASSUMES that this is ALWAYS the AMD defined feature set if present.
    10091280     */
    1010     pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU
     1281    PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
     1282                                                        UINT32_C(0x80000001), 0);
     1283    if (pExtFeatureLeaf)
     1284    {
     1285        pExtFeatureLeaf->uEdx    &= X86_CPUID_AMD_FEATURE_EDX_FPU
    10111286                                  | X86_CPUID_AMD_FEATURE_EDX_VME
    10121287                                  | X86_CPUID_AMD_FEATURE_EDX_DE
     
    10371312                                  | X86_CPUID_AMD_FEATURE_EDX_3DNOW
    10381313                                  | 0;
    1039     pCPUM->aGuestCpuIdExt[1].ecx &= 0
     1314        pExtFeatureLeaf->uEcx    &= 0
    10401315                                  //| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
    10411316                                  //| X86_CPUID_AMD_FEATURE_ECX_CMPL
     
    10541329                                  //| X86_CPUID_AMD_FEATURE_ECX_WDT
    10551330                                  | 0;
    1056     if (pCPUM->u8PortableCpuIdLevel > 0)
    1057     {
    1058         PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].ecx, CR8L,       X86_CPUID_AMD_FEATURE_ECX_CR8L);
    1059         PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW,      X86_CPUID_AMD_FEATURE_EDX_3DNOW);
    1060         PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW_EX,   X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
    1061         PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, FFXSR,      X86_CPUID_AMD_FEATURE_EDX_FFXSR);
    1062         PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP,     X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
    1063         PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF,  X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
    1064         PORTABLE_DISABLE_FEATURE_BIT(3, Ext[1].ecx, CMOV,       X86_CPUID_AMD_FEATURE_EDX_CMOV);
    1065 
    1066         Assert(!(pCPUM->aGuestCpuIdExt[1].ecx & (  X86_CPUID_AMD_FEATURE_ECX_CMPL
    1067                                                  | X86_CPUID_AMD_FEATURE_ECX_SVM
    1068                                                  | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
    1069                                                  | X86_CPUID_AMD_FEATURE_ECX_CR8L
    1070                                                  | X86_CPUID_AMD_FEATURE_ECX_ABM
    1071                                                  | X86_CPUID_AMD_FEATURE_ECX_SSE4A
    1072                                                  | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
    1073                                                  | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
    1074                                                  | X86_CPUID_AMD_FEATURE_ECX_OSVW
    1075                                                  | X86_CPUID_AMD_FEATURE_ECX_IBS
    1076                                                  | X86_CPUID_AMD_FEATURE_ECX_SSE5
    1077                                                  | X86_CPUID_AMD_FEATURE_ECX_SKINIT
    1078                                                  | X86_CPUID_AMD_FEATURE_ECX_WDT
    1079                                                  | UINT32_C(0xffffc000)
    1080                                                  )));
    1081         Assert(!(pCPUM->aGuestCpuIdExt[1].edx & (  RT_BIT(10)
    1082                                                  | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
    1083                                                  | RT_BIT(18)
    1084                                                  | RT_BIT(19)
    1085                                                  | RT_BIT(21)
    1086                                                  | X86_CPUID_AMD_FEATURE_EDX_AXMMX
    1087                                                  | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
    1088                                                  | RT_BIT(28)
    1089                                                  )));
    1090     }
    1091 
    1092     /*
    1093      * Apply the Synthetic CPU modifications. (TODO: move this up)
    1094      */
    1095     if (pCPUM->fSyntheticCpu)
    1096     {
    1097         static const char s_szVendor[13]    = "VirtualBox  ";
    1098         static const char s_szProcessor[48] = "VirtualBox SPARCx86 Processor v1000            "; /* includes null terminator */
    1099 
    1100         pCPUM->enmGuestCpuVendor = CPUMCPUVENDOR_SYNTHETIC;
    1101 
    1102         /* Limit the nr of standard leaves; 5 for monitor/mwait */
    1103         pCPUM->aGuestCpuIdStd[0].eax = RT_MIN(pCPUM->aGuestCpuIdStd[0].eax, 5);
    1104 
    1105         /* 0: Vendor */
    1106         pCPUM->aGuestCpuIdStd[0].ebx = pCPUM->aGuestCpuIdExt[0].ebx = ((uint32_t *)s_szVendor)[0];
    1107         pCPUM->aGuestCpuIdStd[0].ecx = pCPUM->aGuestCpuIdExt[0].ecx = ((uint32_t *)s_szVendor)[2];
    1108         pCPUM->aGuestCpuIdStd[0].edx = pCPUM->aGuestCpuIdExt[0].edx = ((uint32_t *)s_szVendor)[1];
    1109 
    1110         /* 1.eax: Version information.  family : model : stepping */
    1111         pCPUM->aGuestCpuIdStd[1].eax = (0xf << 8) + (0x1 << 4) + 1;
    1112 
    1113         /* Leaves 2 - 4 are Intel only - zero them out */
    1114         memset(&pCPUM->aGuestCpuIdStd[2], 0, sizeof(pCPUM->aGuestCpuIdStd[2]));
    1115         memset(&pCPUM->aGuestCpuIdStd[3], 0, sizeof(pCPUM->aGuestCpuIdStd[3]));
    1116         memset(&pCPUM->aGuestCpuIdStd[4], 0, sizeof(pCPUM->aGuestCpuIdStd[4]));
    1117 
    1118         /* Leaf 5 = monitor/mwait */
    1119 
    1120         /* Limit the nr of extended leaves: 0x80000008 to include the max virtual and physical address size (64 bits guests). */
    1121         pCPUM->aGuestCpuIdExt[0].eax = RT_MIN(pCPUM->aGuestCpuIdExt[0].eax, 0x80000008);
    1122         /* AMD only - set to zero. */
    1123         pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0;
    1124 
    1125         /* 0x800000001: shared feature bits are set dynamically. */
    1126         memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1]));
    1127 
    1128         /* 0x800000002-4: Processor Name String Identifier. */
    1129         pCPUM->aGuestCpuIdExt[2].eax = ((uint32_t *)s_szProcessor)[0];
    1130         pCPUM->aGuestCpuIdExt[2].ebx = ((uint32_t *)s_szProcessor)[1];
    1131         pCPUM->aGuestCpuIdExt[2].ecx = ((uint32_t *)s_szProcessor)[2];
    1132         pCPUM->aGuestCpuIdExt[2].edx = ((uint32_t *)s_szProcessor)[3];
    1133         pCPUM->aGuestCpuIdExt[3].eax = ((uint32_t *)s_szProcessor)[4];
    1134         pCPUM->aGuestCpuIdExt[3].ebx = ((uint32_t *)s_szProcessor)[5];
    1135         pCPUM->aGuestCpuIdExt[3].ecx = ((uint32_t *)s_szProcessor)[6];
    1136         pCPUM->aGuestCpuIdExt[3].edx = ((uint32_t *)s_szProcessor)[7];
    1137         pCPUM->aGuestCpuIdExt[4].eax = ((uint32_t *)s_szProcessor)[8];
    1138         pCPUM->aGuestCpuIdExt[4].ebx = ((uint32_t *)s_szProcessor)[9];
    1139         pCPUM->aGuestCpuIdExt[4].ecx = ((uint32_t *)s_szProcessor)[10];
    1140         pCPUM->aGuestCpuIdExt[4].edx = ((uint32_t *)s_szProcessor)[11];
    1141 
    1142         /* 0x800000005-7 - reserved -> zero */
    1143         memset(&pCPUM->aGuestCpuIdExt[5], 0, sizeof(pCPUM->aGuestCpuIdExt[5]));
    1144         memset(&pCPUM->aGuestCpuIdExt[6], 0, sizeof(pCPUM->aGuestCpuIdExt[6]));
    1145         memset(&pCPUM->aGuestCpuIdExt[7], 0, sizeof(pCPUM->aGuestCpuIdExt[7]));
    1146 
    1147         /* 0x800000008: only the max virtual and physical address size. */
    1148         pCPUM->aGuestCpuIdExt[8].ecx = pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0;  /* reserved */
     1331        if (pCPUM->u8PortableCpuIdLevel > 0)
     1332        {
     1333            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEcx, CR8L,       X86_CPUID_AMD_FEATURE_ECX_CR8L);
     1334            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW,      X86_CPUID_AMD_FEATURE_EDX_3DNOW);
     1335            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW_EX,   X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
     1336            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, FFXSR,      X86_CPUID_AMD_FEATURE_EDX_FFXSR);
     1337            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, RDTSCP,     X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
     1338            PORTABLE_DISABLE_FEATURE_BIT(2, pExtFeatureLeaf->uEcx, LAHF_SAHF,  X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
     1339            PORTABLE_DISABLE_FEATURE_BIT(3, pExtFeatureLeaf->uEcx, CMOV,       X86_CPUID_AMD_FEATURE_EDX_CMOV);
     1340
     1341            Assert(!(pExtFeatureLeaf->uEcx & (  X86_CPUID_AMD_FEATURE_ECX_CMPL
     1342                                              | X86_CPUID_AMD_FEATURE_ECX_SVM
     1343                                              | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
     1344                                              | X86_CPUID_AMD_FEATURE_ECX_CR8L
     1345                                              | X86_CPUID_AMD_FEATURE_ECX_ABM
     1346                                              | X86_CPUID_AMD_FEATURE_ECX_SSE4A
     1347                                              | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
     1348                                              | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
     1349                                              | X86_CPUID_AMD_FEATURE_ECX_OSVW
     1350                                              | X86_CPUID_AMD_FEATURE_ECX_IBS
     1351                                              | X86_CPUID_AMD_FEATURE_ECX_SSE5
     1352                                              | X86_CPUID_AMD_FEATURE_ECX_SKINIT
     1353                                              | X86_CPUID_AMD_FEATURE_ECX_WDT
     1354                                              | UINT32_C(0xffffc000)
     1355                                              )));
     1356            Assert(!(pExtFeatureLeaf->uEdx & (  RT_BIT(10)
     1357                                              | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
     1358                                              | RT_BIT(18)
     1359                                              | RT_BIT(19)
     1360                                              | RT_BIT(21)
     1361                                              | X86_CPUID_AMD_FEATURE_EDX_AXMMX
     1362                                              | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
     1363                                              | RT_BIT(28)
     1364                                              )));
     1365        }
    11491366    }
    11501367
     
    11531370     * (APIC-ID := 0 and #LogCpus := 0)
    11541371     */
    1155     pCPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;
     1372    pStdFeatureLeaf->uEbx &= 0x0000ffff;
    11561373#ifdef VBOX_WITH_MULTI_CORE
    1157     if (    pCPUM->enmGuestCpuVendor != CPUMCPUVENDOR_SYNTHETIC
    1158         &&  pVM->cCpus > 1)
     1374    if (pVM->cCpus > 1)
    11591375    {
    11601376        /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */
    1161         pCPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCpus << 16);
    1162         pCPUM->aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_HTT;  /* necessary for hyper-threading *or* multi-core CPUs */
     1377        pStdFeatureLeaf->uEbx |= (pVM->cCpus << 16);
     1378        pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT;  /* necessary for hyper-threading *or* multi-core CPUs */
    11631379    }
    11641380#endif
     
    11701386     * Safe to expose; restrict the number of calls to 1 for the portable case.
    11711387     */
    1172     if (    pCPUM->u8PortableCpuIdLevel > 0
    1173         &&  pCPUM->aGuestCpuIdStd[0].eax >= 2
    1174         && (pCPUM->aGuestCpuIdStd[2].eax & 0xff) > 1)
    1175     {
    1176         LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCPUM->aGuestCpuIdStd[2].eax & 0xff));
    1177         pCPUM->aGuestCpuIdStd[2].eax &= UINT32_C(0xfffffffe);
     1388    PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 2, 0);
     1389    if (   pCPUM->u8PortableCpuIdLevel > 0
     1390        && pCurLeaf
     1391        && (pCurLeaf->uEax & 0xff) > 1)
     1392    {
     1393        LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff));
     1394        pCurLeaf->uEax &= UINT32_C(0xfffffffe);
    11781395    }
    11791396
     
    11851402     * Safe to expose
    11861403     */
    1187     if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN))
    1188     {
    1189         pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0;
     1404    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 3, 0);
     1405    if (   !(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN)
     1406        && pCurLeaf)
     1407    {
     1408        pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
    11901409        if (pCPUM->u8PortableCpuIdLevel > 0)
    1191             pCPUM->aGuestCpuIdStd[3].eax = pCPUM->aGuestCpuIdStd[3].ebx = 0;
     1410            pCurLeaf->uEax = pCurLeaf->uEbx = 0;
    11921411    }
    11931412
     
    12021421     * Note: These SMP values are constant regardless of ECX
    12031422     */
    1204     pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0;
    1205     pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0;
     1423    CPUMCPUIDLEAF NewLeaf;
     1424    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0);
     1425    if (pCurLeaf)
     1426    {
     1427        NewLeaf.uLeaf        = 4;
     1428        NewLeaf.uSubLeaf     = 0;
     1429        NewLeaf.fSubLeafMask = 0;
     1430        NewLeaf.uEax         = 0;
     1431        NewLeaf.uEbx         = 0;
     1432        NewLeaf.uEcx         = 0;
     1433        NewLeaf.uEdx         = 0;
     1434        NewLeaf.fFlags       = 0;
    12061435#ifdef VBOX_WITH_MULTI_CORE
    1207     if (   pVM->cCpus > 1
    1208         && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
    1209     {
    1210         AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
    1211         /* One logical processor with possibly multiple cores. */
    1212         /* See  http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
    1213         pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCpus - 1) << 26);   /* 6 bits only -> 64 cores! */
    1214     }
     1436        if (   pVM->cCpus > 1
     1437            && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
     1438        {
     1439            AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
     1440            /* One logical processor with possibly multiple cores. */
     1441            /* See  http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
     1442            NewLeaf.uEax |= ((pVM->cCpus - 1) << 26);   /* 6 bits only -> 64 cores! */
     1443        }
    12151444#endif
     1445        rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
     1446        AssertLogRelRCReturn(rc, rc);
     1447    }
    12161448
    12171449    /* Cpuid 5:     Monitor/mwait Leaf
     
    12241456     * Safe to expose
    12251457     */
    1226     if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR))
    1227         pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0;
    1228 
    1229     pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
    1230     /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
    1231      * Expose MWAIT extended features to the guest.  For now we expose
    1232      * just MWAIT break on interrupt feature (bit 1).
    1233      */
    1234     bool fMWaitExtensions;
    1235     rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false); AssertRCReturn(rc, rc);
    1236     if (fMWaitExtensions)
    1237     {
    1238         pCPUM->aGuestCpuIdStd[5].ecx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
    1239         /** @todo: for now we just expose host's MWAIT C-states, although conceptually
    1240            it shall be part of our power management virtualization model */
     1458    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0);
     1459    if (pCurLeaf)
     1460    {
     1461        if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR))
     1462            pCurLeaf->uEax = pCurLeaf->uEbx = 0;
     1463
     1464        pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
     1465        if (fMWaitExtensions)
     1466        {
     1467            pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
     1468            /** @todo: for now we just expose host's MWAIT C-states, although conceptually
     1469               it shall be part of our power management virtualization model */
    12411470#if 0
    1242         /* MWAIT sub C-states */
    1243         pCPUM->aGuestCpuIdStd[5].edx =
    1244                 (0 << 0)  /* 0 in C0 */ |
    1245                 (2 << 4)  /* 2 in C1 */ |
    1246                 (2 << 8)  /* 2 in C2 */ |
    1247                 (2 << 12) /* 2 in C3 */ |
    1248                 (0 << 16) /* 0 in C4 */
    1249                 ;
     1471            /* MWAIT sub C-states */
     1472            pCurLeaf->uEdx =
     1473                    (0 << 0)  /* 0 in C0 */ |
     1474                    (2 << 4)  /* 2 in C1 */ |
     1475                    (2 << 8)  /* 2 in C2 */ |
     1476                    (2 << 12) /* 2 in C3 */ |
     1477                    (0 << 16) /* 0 in C4 */
     1478                    ;
    12501479#endif
    1251     }
    1252     else
    1253         pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0;
     1480        }
     1481        else
     1482            pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
     1483    }
    12541484
    12551485    /* Cpuid 0x800000005 & 0x800000006 contain information about L1, L2 & L3 cache and TLB identifiers.
     
    12701500     * VIA:               Reserved
    12711501     */
    1272     if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007))
    1273     {
    1274         Assert(pVM->cpum.s.enmGuestCpuVendor != CPUMCPUVENDOR_INVALID);
    1275 
    1276         pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0;
    1277 
    1278         if (pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
     1502    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000007), 0);
     1503    if (pCurLeaf)
     1504    {
     1505        Assert(pCPUM->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID);
     1506
     1507        pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0;
     1508
     1509        if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
    12791510        {
    12801511            /* Only expose the TSC invariant capability bit to the guest. */
    1281             pCPUM->aGuestCpuIdExt[7].edx    &= 0
     1512            pCurLeaf->uEdx                  &= 0
    12821513                                            //| X86_CPUID_AMD_ADVPOWER_EDX_TS
    12831514                                            //| X86_CPUID_AMD_ADVPOWER_EDX_FID
     
    13001531        }
    13011532        else
    1302             pCPUM->aGuestCpuIdExt[7].edx    = 0;
     1533            pCurLeaf->uEdx = 0;
    13031534    }
    13041535
     
    13121543     *                    EBX, ECX, EDX - reserved
    13131544     */
    1314     if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008))
     1545    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000008), 0);
     1546    if (pCurLeaf)
    13151547    {
    13161548        /* Only expose the virtual and physical address sizes to the guest. */
    1317         pCPUM->aGuestCpuIdExt[8].eax &= UINT32_C(0x0000ffff);
    1318         pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0;  /* reserved */
     1549        pCurLeaf->uEax &= UINT32_C(0x0000ffff);
     1550        pCurLeaf->uEbx = pCurLeaf->uEdx = 0;  /* reserved */
    13191551        /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu)
    13201552         * NC (0-7) Number of cores; 0 equals 1 core */
    1321         pCPUM->aGuestCpuIdExt[8].ecx = 0;
     1553        pCurLeaf->uEcx = 0;
    13221554#ifdef VBOX_WITH_MULTI_CORE
    13231555        if (    pVM->cCpus > 1
    1324             &&  pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)
     1556            &&  pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
    13251557        {
    13261558            /* Legacy method to determine the number of cores. */
    1327             pCPUM->aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
    1328             pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
     1559            pCurLeaf->uEcx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */
     1560            if (pExtFeatureLeaf)
     1561                pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
    13291562        }
    13301563#endif
    13311564    }
    13321565
    1333     /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
    1334      * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
    1335      * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
    1336      * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
    1337      */
    1338     bool fNt4LeafLimit;
    1339     rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false); AssertRCReturn(rc, rc);
    1340     if (fNt4LeafLimit && pCPUM->aGuestCpuIdStd[0].eax > 3)
    1341         pCPUM->aGuestCpuIdStd[0].eax = 3;
    1342 
    1343     /*
    1344      * Limit it the number of entries and fill the remaining with the defaults.
     1566
     1567    /*
     1568     * Limit it the number of entries, zapping the remainder.
    13451569     *
    13461570     * The limits are masking off stuff about power saving and similar, this
     
    13481572     * info too in these leaves (like words about having a constant TSC).
    13491573     */
    1350     if (pCPUM->aGuestCpuIdStd[0].eax > 5)
    1351         pCPUM->aGuestCpuIdStd[0].eax = 5;
    1352     for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++)
    1353         pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef;
    1354 
    1355     if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008))
    1356         pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008);
    1357     for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000)
    1358            ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1
    1359            : 0;
    1360          i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt);
    1361          i++)
    1362         pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef;
     1574    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0);
     1575    if (pCurLeaf)
     1576    {
     1577        if (pCurLeaf->uEax > 5)
     1578        {
     1579            pCurLeaf->uEax = 5;
     1580            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
     1581                                   UINT32_C(0x00000006), UINT32_C(0x000fffff));
     1582        }
     1583
     1584        /* NT4 hack, no zapping of extra leaves here. */
     1585        if (fNt4LeafLimit && pCurLeaf->uEax > 3)
     1586            pCurLeaf->uEax = 3;
     1587    }
     1588
     1589    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000000), 0);
     1590    if (pCurLeaf)
     1591    {
     1592        if (pCurLeaf->uEax > UINT32_C(0x80000008))
     1593        {
     1594            pCurLeaf->uEax = UINT32_C(0x80000008);
     1595            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
     1596                                   UINT32_C(0x80000008), UINT32_C(0x800fffff));
     1597        }
     1598    }
    13631599
    13641600    /*
     
    13701606     * temperature/hz/++ stuff, include it as well (static).
    13711607     */
    1372     if (    pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000)
    1373         &&  pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004))
    1374     {
    1375         pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002));
    1376         pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */
    1377         for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000);
    1378              i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur);
    1379              i++)
    1380             pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
    1381     }
    1382     else
    1383         for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++)
    1384             pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef;
     1608    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0xc0000000), 0);
     1609    if (pCurLeaf)
     1610    {
     1611        if (   pCurLeaf->uEax >= UINT32_C(0xc0000000)
     1612            && pCurLeaf->uEax <= UINT32_C(0xc0000004))
     1613        {
     1614            pCurLeaf->uEax = RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000002));
     1615            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
     1616                                   UINT32_C(0xc0000002), UINT32_C(0xc00fffff));
     1617
     1618            pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
     1619                                          UINT32_C(0xc0000001), 0);
     1620            if (pCurLeaf)
     1621                pCurLeaf->uEdx = 0; /* all features hidden */
     1622        }
     1623        else
     1624            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
     1625                                   UINT32_C(0xc0000000), UINT32_C(0xc00fffff));
     1626    }
    13851627
    13861628    /*
     
    13911633     * Currently we do not support any hypervisor-specific interface.
    13921634     */
    1393     pCPUM->aGuestCpuIdHyper[0].eax = UINT32_C(0x40000001);
    1394     pCPUM->aGuestCpuIdHyper[0].ebx = pCPUM->aGuestCpuIdHyper[0].ecx
    1395                                    = pCPUM->aGuestCpuIdHyper[0].edx = 0x786f4256;   /* 'VBox' */
    1396     pCPUM->aGuestCpuIdHyper[1].eax = 0x656e6f6e;                            /* 'none' */
    1397     pCPUM->aGuestCpuIdHyper[1].ebx = pCPUM->aGuestCpuIdHyper[1].ecx
    1398                                    = pCPUM->aGuestCpuIdHyper[1].edx = 0;    /* Reserved */
     1635    NewLeaf.uLeaf        = UINT32_C(0x40000000);
     1636    NewLeaf.uSubLeaf     = 0;
     1637    NewLeaf.fSubLeafMask = 0;
     1638    NewLeaf.uEax         = UINT32_C(0x40000001);
     1639    NewLeaf.uEbx         = 0x786f4256 /* 'VBox' */;
     1640    NewLeaf.uEcx         = 0x786f4256 /* 'VBox' */;
     1641    NewLeaf.uEdx         = 0x786f4256 /* 'VBox' */;
     1642    NewLeaf.fFlags       = 0;
     1643    rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
     1644    AssertLogRelRCReturn(rc, rc);
     1645
     1646    NewLeaf.uLeaf        = UINT32_C(0x40000001);
     1647    NewLeaf.uEax         = 0x656e6f6e;                            /* 'none' */
     1648    NewLeaf.uEbx         = 0;
     1649    NewLeaf.uEcx         = 0;
     1650    NewLeaf.uEdx         = 0;
     1651    NewLeaf.fFlags       = 0;
     1652    rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
     1653    AssertLogRelRCReturn(rc, rc);
    13991654
    14001655    /*
    14011656     * Mini CPU selection support for making Mac OS X happy.
    14021657     */
    1403     if (pCPUM->enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)
    1404     {
    1405         /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
    1406          * Restrict the reported CPU family+model+stepping of intel CPUs.  This is
    1407          * probably going to be a temporary hack, so don't depend on this.
    1408          * The 1st byte of the value is the stepping, the 2nd byte value is the model
    1409          * number and the 3rd byte value is the family, and the 4th value must be zero.
    1410          */
    1411         uint32_t uMaxIntelFamilyModelStep;
    1412         rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX);
    1413         AssertRCReturn(rc, rc);
    1414         uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pCPUM->aGuestCpuIdStd[1].eax),
    1415                                                                 ASMGetCpuModelIntel(pCPUM->aGuestCpuIdStd[1].eax),
    1416                                                                 ASMGetCpuFamily(pCPUM->aGuestCpuIdStd[1].eax),
     1658    if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
     1659    {
     1660        uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pStdFeatureLeaf->uEax),
     1661                                                                ASMGetCpuModelIntel(pStdFeatureLeaf->uEax),
     1662                                                                ASMGetCpuFamily(pStdFeatureLeaf->uEax),
    14171663                                                                0);
    14181664        if (uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep)
    14191665        {
    1420             uint32_t uNew = pCPUM->aGuestCpuIdStd[1].eax & UINT32_C(0xf0003000);
     1666            uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000);
    14211667            uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf; /* stepping */
    14221668            uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4; /* 4 low model bits */
     
    14261672                uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20;
    14271673            LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x\n",
    1428                     pCPUM->aGuestCpuIdStd[1].eax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
    1429             pCPUM->aGuestCpuIdStd[1].eax = uNew;
     1674                    pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
     1675            pStdFeatureLeaf->uEax = uNew;
    14301676        }
    14311677    }
    14321678
    1433     /*
    1434      * Load CPUID overrides from configuration.
    1435      * Note: Kind of redundant now, but allows unchanged overrides
    1436      */
    1437     /** @cfgm{CPUM/CPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
    1438      * Overrides the CPUID leaf values. */
    1439     PCFGMNODE pOverrideCfg = CFGMR3GetChild(pCpumCfg, "CPUID");
    1440     rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     pOverrideCfg);
    1441     AssertRCReturn(rc, rc);
    1442     rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0],     RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     pOverrideCfg);
    1443     AssertRCReturn(rc, rc);
    1444     rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pOverrideCfg);
    1445     AssertRCReturn(rc, rc);
    1446 
    1447     /*
    1448      * Check if PAE was explicitely enabled by the user.
    1449      */
     1679
     1680    /*
     1681     * Move the MSR and CPUID arrays over on the hypervisor heap, and explode
     1682     * guest CPU features again.
     1683     */
     1684    void *pvFree = pCPUM->GuestInfo.paCpuIdLeavesR3;
     1685    int rc1 = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCPUM, pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves);
     1686    RTMemFree(pvFree);
     1687
     1688    pvFree = pCPUM->GuestInfo.paMsrRangesR3;
     1689    int rc2 = MMHyperDupMem(pVM, pvFree,
     1690                            sizeof(pCPUM->GuestInfo.paMsrRangesR3[0]) * pCPUM->GuestInfo.cMsrRanges, 32,
     1691                            MM_TAG_CPUM_MSRS, (void **)&pCPUM->GuestInfo.paMsrRangesR3);
     1692    RTMemFree(pvFree);
     1693    AssertLogRelRCReturn(rc1, rc1);
     1694    AssertLogRelRCReturn(rc2, rc2);
     1695
     1696    pCPUM->GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paMsrRangesR3);
     1697    pCPUM->GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paMsrRangesR3);
     1698    cpumR3MsrRegStats(pVM);
     1699
     1700    /*
     1701     * Some more configuration that we're applying at the end of everything
     1702     * via the CPUMSetGuestCpuIdFeature API.
     1703     */
     1704
     1705    /* Check if PAE was explicitely enabled by the user. */
    14501706    bool fEnable;
    14511707    rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false);      AssertRCReturn(rc, rc);
     
    14531709        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
    14541710
    1455     /*
    1456      * We don't normally enable NX for raw-mode, so give the user a chance to
    1457      * force it on.
    1458      */
     1711    /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
    14591712    rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false);                 AssertRCReturn(rc, rc);
    14601713    if (fEnable)
    14611714        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    14621715
    1463     /*
    1464      * We don't enable the Hypervisor Present bit by default, but it may
    1465      * be needed by some guests.
    1466      */
     1716    /* We don't enable the Hypervisor Present bit by default, but it may be needed by some guests. */
    14671717    rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableHVP", &fEnable, false);                AssertRCReturn(rc, rc);
    14681718    if (fEnable)
     
    14881738{
    14891739    LogFlow(("CPUMR3Relocate\n"));
     1740
     1741    pVM->cpum.s.GuestInfo.paMsrRangesRC   = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paMsrRangesR3);
     1742    pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
    14901743
    14911744    /* Recheck the guest DRx values in raw-mode. */
     
    15521805 * Used by CPUMR3Reset and CPU hot plugging.
    15531806 *
    1554  * @param   pVCpu               Pointer to the VMCPU.
    1555  */
    1556 VMMR3DECL(void) CPUMR3ResetCpu(PVMCPU pVCpu)
     1807 * @param   pVM         Pointer to the cross context VM structure.
     1808 * @param   pVCpu       Pointer to the cross context virtual CPU structure of
     1809 *                      the CPU that is being reset.  This may differ from the
     1810 *                      current EMT.
     1811 */
     1812VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
    15571813{
    15581814    /** @todo anything different for VCPU > 0? */
     
    16351891                                                        supports all bits, since a zero value here should be read as 0xffbf. */
    16361892
     1893    /*
     1894     * MSRs.
     1895     */
    16371896    /* Init PAT MSR */
    16381897    pCtx->msrPAT                    = UINT64_C(0x0007040600070406); /** @todo correct? */
     
    16421901    Assert(!pCtx->msrEFER);
    16431902
     1903    /* IA32_MISC_ENABLE - not entirely sure what the init/reset state really
     1904       is supposed to be here, just trying provide useful/sensible values. */
     1905    PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, MSR_IA32_MISC_ENABLE);
     1906    if (pRange)
     1907    {
     1908        pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
     1909                                               | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL
     1910                                               | (pVM->cpum.s.GuestFeatures.fMonitorMWait ? MSR_IA32_MISC_ENABLE_MONITOR : 0)
     1911                                               | MSR_IA32_MISC_ENABLE_FAST_STRINGS;
     1912        pRange->fWrIgnMask |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
     1913                            | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
     1914        pRange->fWrGpMask  &= ~pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
     1915    }
     1916
     1917    /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */
     1918
    16441919    /** @todo r=ramshankar: Currently broken for SMP as TMCpuTickSet() expects to be
    16451920     *        called from each EMT while we're getting called by CPUMR3Reset()
    16461921     *        iteratively on the same thread. Fix later.  */
    1647 #if 0
     1922#if 0 /** @todo r=bird: This we will do in TM, not here. */
    16481923    /* TSC must be 0. Intel spec. Table 9-1. "IA-32 Processor States Following Power-up, Reset, or INIT." */
    16491924    CPUMSetGuestMsr(pVCpu, MSR_IA32_TSC, 0);
     
    16731948    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    16741949    {
    1675         CPUMR3ResetCpu(&pVM->aCpus[i]);
     1950        CPUMR3ResetCpu(pVM, &pVM->aCpus[i]);
    16761951
    16771952#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    17252000    SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
    17262001    SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
     2002}
     2003
     2004
     2005static int cpumR3LoadCpuIdOneGuestArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
     2006{
     2007    uint32_t cCpuIds;
     2008    int rc = SSMR3GetU32(pSSM, &cCpuIds);
     2009    if (RT_SUCCESS(rc))
     2010    {
     2011        if (cCpuIds < 64)
     2012        {
     2013            for (uint32_t i = 0; i < cCpuIds; i++)
     2014            {
     2015                CPUMCPUID CpuId;
     2016                rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId));
     2017                if (RT_FAILURE(rc))
     2018                    break;
     2019
     2020                CPUMCPUIDLEAF NewLeaf;
     2021                NewLeaf.uLeaf           = uBase + i;
     2022                NewLeaf.uSubLeaf        = 0;
     2023                NewLeaf.fSubLeafMask    = 0;
     2024                NewLeaf.uEax            = CpuId.eax;
     2025                NewLeaf.uEbx            = CpuId.ebx;
     2026                NewLeaf.uEcx            = CpuId.ecx;
     2027                NewLeaf.uEdx            = CpuId.edx;
     2028                NewLeaf.fFlags          = 0;
     2029                rc = cpumR3CpuIdInsert(ppaLeaves, pcLeaves, &NewLeaf);
     2030            }
     2031        }
     2032        else
     2033            rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
     2034    }
     2035    if (RT_FAILURE(rc))
     2036    {
     2037        RTMemFree(*ppaLeaves);
     2038        *ppaLeaves = NULL;
     2039        *pcLeaves = 0;
     2040    }
     2041    return rc;
     2042}
     2043
     2044
     2045static int cpumR3LoadCpuIdGuestArrays(PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
     2046{
     2047    *ppaLeaves = NULL;
     2048    *pcLeaves = 0;
     2049
     2050    int rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves);
     2051    if (RT_SUCCESS(rc))
     2052        rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves);
     2053    if (RT_SUCCESS(rc))
     2054        rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves);
     2055
     2056    return rc;
    17272057}
    17282058
     
    18092139            && !(aHostRaw##set [1].reg & bit) \
    18102140            && !(aHostOverride##set [1].reg & bit) \
    1811             && !(aGuestOverride##set [1].reg & bit) \
    18122141           ) \
    18132142        { \
     
    18232152            && !(aHostRaw##set [1].reg & bit) \
    18242153            && !(aHostOverride##set [1].reg & bit) \
    1825             && !(aGuestOverride##set [1].reg & bit) \
    18262154           ) \
    18272155            LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
     
    18322160            && !(aHostRaw##set [1].reg & bit) \
    18332161            && !(aHostOverride##set [1].reg & bit) \
    1834             && !(aGuestOverride##set [1].reg & bit) \
    18352162           ) \
    18362163            LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
     
    18452172            && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
    18462173            && !(aHostOverride##set [1].reg & bit) \
    1847             && !(aGuestOverride##set [1].reg & bit) \
    18482174           ) \
    18492175        { \
     
    18602186            && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
    18612187            && !(aHostOverride##set [1].reg & bit) \
    1862             && !(aGuestOverride##set [1].reg & bit) \
    18632188           ) \
    18642189            LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
     
    18702195            && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
    18712196            && !(aHostOverride##set [1].reg & bit) \
    1872             && !(aGuestOverride##set [1].reg & bit) \
    18732197           ) \
    18742198            LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
     
    18852209                 : aHostRawStd[1].reg      & (StdBit)) \
    18862210            && !(aHostOverrideExt[1].reg   & (ExtBit)) \
    1887             && !(aGuestOverrideExt[1].reg  & (ExtBit)) \
    18882211           ) \
    18892212        { \
     
    19012224                 : aHostRawStd[1].reg      & (StdBit)) \
    19022225            && !(aHostOverrideExt[1].reg   & (ExtBit)) \
    1903             && !(aGuestOverrideExt[1].reg  & (ExtBit)) \
    19042226           ) \
    19052227            LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \
     
    19122234                 : aHostRawStd[1].reg      & (StdBit)) \
    19132235            && !(aHostOverrideExt[1].reg   & (ExtBit)) \
    1914             && !(aGuestOverrideExt[1].reg  & (ExtBit)) \
    19152236           ) \
    19162237            LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
     
    19212242     * Load them into stack buffers first.
    19222243     */
    1923     CPUMCPUID   aGuestCpuIdStd[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)];
    1924     uint32_t    cGuestCpuIdStd;
    1925     int rc = SSMR3GetU32(pSSM, &cGuestCpuIdStd); AssertRCReturn(rc, rc);
    1926     if (cGuestCpuIdStd > RT_ELEMENTS(aGuestCpuIdStd))
    1927         return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    1928     SSMR3GetMem(pSSM, &aGuestCpuIdStd[0], cGuestCpuIdStd * sizeof(aGuestCpuIdStd[0]));
    1929 
    1930     CPUMCPUID   aGuestCpuIdExt[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)];
    1931     uint32_t    cGuestCpuIdExt;
    1932     rc = SSMR3GetU32(pSSM, &cGuestCpuIdExt); AssertRCReturn(rc, rc);
    1933     if (cGuestCpuIdExt > RT_ELEMENTS(aGuestCpuIdExt))
    1934         return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    1935     SSMR3GetMem(pSSM, &aGuestCpuIdExt[0], cGuestCpuIdExt * sizeof(aGuestCpuIdExt[0]));
    1936 
    1937     CPUMCPUID   aGuestCpuIdCentaur[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)];
    1938     uint32_t    cGuestCpuIdCentaur;
    1939     rc = SSMR3GetU32(pSSM, &cGuestCpuIdCentaur); AssertRCReturn(rc, rc);
    1940     if (cGuestCpuIdCentaur > RT_ELEMENTS(aGuestCpuIdCentaur))
    1941         return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    1942     SSMR3GetMem(pSSM, &aGuestCpuIdCentaur[0], cGuestCpuIdCentaur * sizeof(aGuestCpuIdCentaur[0]));
     2244    PCPUMCPUIDLEAF paLeaves;
     2245    uint32_t       cLeaves;
     2246    int rc = cpumR3LoadCpuIdGuestArrays(pSSM, uVersion, &paLeaves, &cLeaves);
     2247    AssertRCReturn(rc, rc);
     2248
     2249    /** @todo we'll be leaking paLeaves on error return... */
    19432250
    19442251    CPUMCPUID   GuestCpuIdDef;
     
    19512258    if (cRawStd > RT_ELEMENTS(aRawStd))
    19522259        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    1953     SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
     2260    rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
     2261    AssertRCReturn(rc, rc);
     2262    for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
     2263        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
    19542264
    19552265    CPUMCPUID   aRawExt[32];
     
    19602270    rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
    19612271    AssertRCReturn(rc, rc);
    1962 
    1963     /*
    1964      * Note that we support restoring less than the current amount of standard
    1965      * leaves because we've been allowed more is newer version of VBox.
    1966      *
    1967      * So, pad new entries with the default.
    1968      */
    1969     for (uint32_t i = cGuestCpuIdStd; i < RT_ELEMENTS(aGuestCpuIdStd); i++)
    1970         aGuestCpuIdStd[i] = GuestCpuIdDef;
    1971 
    1972     for (uint32_t i = cGuestCpuIdExt; i < RT_ELEMENTS(aGuestCpuIdExt); i++)
    1973         aGuestCpuIdExt[i] = GuestCpuIdDef;
    1974 
    1975     for (uint32_t i = cGuestCpuIdCentaur; i < RT_ELEMENTS(aGuestCpuIdCentaur); i++)
    1976         aGuestCpuIdCentaur[i] = GuestCpuIdDef;
    1977 
    1978     for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
    1979         ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
    1980 
    19812272    for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
    19822273        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
     
    19992290     * Note! We currently only need the feature leaves, so skip rest.
    20002291     */
    2001     PCFGMNODE   pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/CPUID");
    2002     CPUMCPUID   aGuestOverrideStd[2];
    2003     memcpy(&aGuestOverrideStd[0], &aHostRawStd[0], sizeof(aGuestOverrideStd));
    2004     cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aGuestOverrideStd[0], RT_ELEMENTS(aGuestOverrideStd), pOverrideCfg);
    2005 
    2006     CPUMCPUID   aGuestOverrideExt[2];
    2007     memcpy(&aGuestOverrideExt[0], &aHostRawExt[0], sizeof(aGuestOverrideExt));
    2008     cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aGuestOverrideExt[0], RT_ELEMENTS(aGuestOverrideExt), pOverrideCfg);
    2009 
    2010     pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
     2292    PCFGMNODE   pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
    20112293    CPUMCPUID   aHostOverrideStd[2];
    20122294    memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
     
    22592541     *      "EMU?" - Can this be emulated?
    22602542     */
     2543    CPUMCPUID aGuestCpuIdStd[2];
     2544    RT_ZERO(aGuestCpuIdStd);
     2545    cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]);
     2546
    22612547    /* CPUID(1).ecx */
    22622548    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);    // -> EMU
     
    23282614
    23292615    /* CPUID(0x80000000). */
    2330     if (    aGuestCpuIdExt[0].eax >= UINT32_C(0x80000001)
    2331         &&  aGuestCpuIdExt[0].eax <  UINT32_C(0x8000007f))
     2616    CPUMCPUID aGuestCpuIdExt[2];
     2617    RT_ZERO(aGuestCpuIdExt);
     2618    if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1]))
    23322619    {
    23332620        /** @todo deal with no 0x80000001 on the host. */
     
    24072694     * We're good, commit the CPU ID leaves.
    24082695     */
    2409     memcpy(&pVM->cpum.s.aGuestCpuIdStd[0],     &aGuestCpuIdStd[0],     sizeof(aGuestCpuIdStd));
    2410     memcpy(&pVM->cpum.s.aGuestCpuIdExt[0],     &aGuestCpuIdExt[0],     sizeof(aGuestCpuIdExt));
    2411     memcpy(&pVM->cpum.s.aGuestCpuIdCentaur[0], &aGuestCpuIdCentaur[0], sizeof(aGuestCpuIdCentaur));
    2412     pVM->cpum.s.GuestCpuIdDef = GuestCpuIdDef;
     2696    MMHyperFree(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
     2697    pVM->cpum.s.GuestInfo.paCpuIdLeavesR0 = NIL_RTR0PTR;
     2698    pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR;
     2699    pVM->cpum.s.GuestInfo.DefCpuId = GuestCpuIdDef;
     2700    rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves);
     2701    RTMemFree(paLeaves);
     2702    AssertLogRelRCReturn(rc, rc);
     2703
    24132704
    24142705#undef CPUID_CHECK_RET
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r49072 r49893  
    23982398                        PGMR3ResetCpu(pVM, pVCpu);
    23992399                        TRPMR3ResetCpu(pVCpu);
    2400                         CPUMR3ResetCpu(pVCpu);
     2400                        CPUMR3ResetCpu(pVM, pVCpu);
    24012401                        EMR3ResetCpu(pVCpu);
    24022402                        HMR3ResetCpu(pVCpu);
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r48629 r49893  
    21682168    }
    21692169
     2170    /** @todo query from CPUM. */
    21702171    pVM->pgm.s.GCPhysInvAddrMask = 0;
    21712172    for (uint32_t iBit = cMaxPhysAddrWidth; iBit < 64; iBit++)
  • trunk/src/VBox/VMM/VMMR3/VM.cpp

    r48528 r49893  
    43804380    PDMR3ResetCpu(pVCpu);
    43814381    TRPMR3ResetCpu(pVCpu);
    4382     CPUMR3ResetCpu(pVCpu);
     4382    CPUMR3ResetCpu(pVM, pVCpu);
    43834383    EMR3ResetCpu(pVCpu);
    43844384    HMR3ResetCpu(pVCpu);
  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

    r49147 r49893  
    14201420
    14211421    PGMR3ResetCpu(pVM, pVCpu);
    1422     CPUMR3ResetCpu(pVCpu);
     1422    CPUMR3ResetCpu(pVM, pVCpu);
    14231423
    14241424    return VINF_EM_WAIT_SIPI;
  • trunk/src/VBox/VMM/VMMR3/VMMTests.cpp

    r49383 r49893  
    872872     * Do the experiments.
    873873     */
    874     uint32_t uMsr   = 0xc0011011;
    875     uint64_t uValue = 0x10000;
     874    uint32_t uMsr   = 0x00000277;
     875    uint64_t uValue = UINT64_C(0x0007010600070106);
    876876#if 0
     877    uValue &= ~(RT_BIT_64(17) | RT_BIT_64(16) | RT_BIT_64(15) | RT_BIT_64(14) | RT_BIT_64(13));
     878    uValue |= RT_BIT_64(13);
    877879    rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
    878880                     RCPtrValues, RCPtrValues + sizeof(uint64_t));
    879881    RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
    880882             uMsr, pauValues[0], uValue, pauValues[1], rc);
    881 #endif
     883#elif 1
     884    const uint64_t uOrgValue = uValue;
     885    uint32_t       cChanges = 0;
     886    for (int iBit = 63; iBit >= 58; iBit--)
     887    {
     888        uValue = uOrgValue & ~RT_BIT_64(iBit);
     889        rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
     890                         RCPtrValues, RCPtrValues + sizeof(uint64_t));
     891        RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nclear bit=%u -> %s\n",
     892                 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
     893                 (pauValues[0] ^  pauValues[1]) & RT_BIT_64(iBit) ?  "changed" : "unchanged");
     894        cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
     895
     896        uValue = uOrgValue | RT_BIT_64(iBit);
     897        rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
     898                         RCPtrValues, RCPtrValues + sizeof(uint64_t));
     899        RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nset   bit=%u -> %s\n",
     900                 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
     901                 (pauValues[0] ^  pauValues[1]) & RT_BIT_64(iBit) ?  "changed" : "unchanged");
     902        cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
     903    }
     904    RTPrintf("%u change(s)\n", cChanges);
     905#else
     906    uint64_t fWriteable = 0;
    882907    for (uint32_t i = 0; i <= 63; i++)
    883908    {
    884909        uValue = RT_BIT_64(i);
     910# if 0
     911        if (uValue & (0x7))
     912            continue;
     913# endif
    885914        rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
    886915                         RCPtrValues, RCPtrValues + sizeof(uint64_t));
    887916        RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
    888917                 uMsr, pauValues[0], uValue, pauValues[1], rc);
     918        if (RT_SUCCESS(rc))
     919            fWriteable |= RT_BIT_64(i);
    889920    }
    890921
     
    900931    RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
    901932             uMsr, pauValues[0], uValue, pauValues[1], rc);
     933
     934    uValue = fWriteable;
     935    rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
     936                     RCPtrValues, RCPtrValues + sizeof(uint64_t));
     937    RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc [fWriteable]\n",
     938             uMsr, pauValues[0], uValue, pauValues[1], rc);
     939
     940#endif
    902941
    903942    /*
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette