Changeset 54561 in vbox for trunk/src/VBox
- Timestamp: Feb 27, 2015 4:59:02 PM
- Location: trunk/src/VBox/VMM
- Files: 3 edited
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r53391 → r54561

@@ -5 +5 @@

 /*
- * Copyright (C) 2006-2013 Oracle Corporation
+ * Copyright (C) 2006-2015 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as
…
@@ -67 +67 @@
 *   Defined Constants And Macros                                               *
 *******************************************************************************/

@@ -69..88 removed @@
/** The current saved state version. */
#define CPUM_SAVED_STATE_VERSION                14
/** The current saved state version before using SSMR3PutStruct. */
#define CPUM_SAVED_STATE_VERSION_MEM            13
/** The saved state version before introducing the MSR size field. */
#define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE    12
/** The saved state version of 3.2, 3.1 and 3.3 trunk before the hidden
 * selector register change (CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID). */
#define CPUM_SAVED_STATE_VERSION_VER3_2         11
/** The saved state version of 3.0 and 3.1 trunk before the teleportation
 * changes. */
#define CPUM_SAVED_STATE_VERSION_VER3_0         10
/** The saved state version for the 2.1 trunk before the MSR changes. */
#define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR   9
/** The saved state version of 2.0, used for backwards compatibility. */
#define CPUM_SAVED_STATE_VERSION_VER2_0         8
/** The saved state version of 1.6, used for backwards compatibility. */
#define CPUM_SAVED_STATE_VERSION_VER1_6         6

@@ -89 +69 @@
/**
 * This was used in the saved state up to the early life of version 14.
…
@@ -116 +96 @@
 *   Internal Functions                                                         *
 *******************************************************************************/

@@ -118 removed @@
static int cpumR3CpuIdInit(PVM pVM);

@@ -119 +98 @@
static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
…
@@ -711..786 removed: cpumR3LoadMsrOverrides @@
/**
 * Loads MSR range overrides.
 *
 * This must be called before the MSR ranges are moved from the normal heap to
 * the hyper heap!
 *
 * @returns VBox status code (VMSetError called).
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   pMsrNode    The CFGM node with the MSR overrides.
 */
static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode)
{
    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode))
    {
        /*
         * Assemble a valid MSR range.
         */
        CPUMMSRRANGE MsrRange;
        MsrRange.offCpumCpu = 0;
        MsrRange.fReserved  = 0;

        int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName));
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc);

        rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n",
                              MsrRange.szName, rc);

        rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n",
                              MsrRange.szName, rc);

        char szType[32];
        rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue");
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n",
                              MsrRange.szName, rc);
        if (!RTStrICmp(szType, "FixedValue"))
        {
            MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue;
            MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite;

            rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uValue, 0);
            if (RT_FAILURE(rc))
                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n",
                                  MsrRange.szName, rc);

            rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0);
            if (RT_FAILURE(rc))
                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n",
                                  MsrRange.szName, rc);

            rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0);
            if (RT_FAILURE(rc))
                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n",
                                  MsrRange.szName, rc);
        }
        else
            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
                              "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType);

        /*
         * Insert the range into the table (replaces/splits/shrinks existing
         * MSR ranges).
         */
        rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
                                   &MsrRange);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc);
    }

    return VINF_SUCCESS;
}
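The function above consumes one CFGM child node per MSR range under /CPUM/MSRs/. A minimal sketch of such a subtree, for orientation only (the MSR name and register number here are invented; the key names, types and defaults follow the code):

    /CPUM/MSRs/MyBiosMsr/              node name becomes MsrRange.szName
        First     (u32) = 0x00000123   mandatory
        Last      (u32) = 0x00000124   optional, defaults to First
        Type      (str) = "FixedValue" optional default, and the only type accepted
        Value     (u64) = 0            optional, default 0
        WrGpMask  (u64) = 0            optional, default 0
        WrIgnMask (u64) = 0            optional, default 0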
@@ -788..881 removed: cpumR3LoadCpuIdOverrides @@
/**
 * Loads CPUID leaf overrides.
 *
 * This must be called before the CPUID leaves are moved from the normal
 * heap to the hyper heap!
 *
 * @returns VBox status code (VMSetError called).
 * @param   pVM             Pointer to the cross context VM structure.
 * @param   pParentNode     The CFGM node with the CPUID leaves.
 * @param   pszLabel        How to label the overrides we're loading.
 */
static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel)
{
    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode))
    {
        /*
         * Get the leaf and subleaf numbers.
         */
        char szName[128];
        int rc = CFGMR3GetName(pNode, szName, sizeof(szName));
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc);

        /* The leaf number is either specified directly or thru the node name. */
        uint32_t uLeaf;
        rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf);
        if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        {
            rc = RTStrToUInt32Full(szName, 16, &uLeaf);
            if (rc != VINF_SUCCESS)
                return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS,
                                  "Invalid %s entry: Invalid leaf number: '%s'\n", pszLabel, szName);
        }
        else if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n",
                              pszLabel, szName, rc);

        uint32_t uSubLeaf;
        rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n",
                              pszLabel, szName, rc);

        uint32_t fSubLeafMask;
        rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n",
                              pszLabel, szName, rc);

        /*
         * Look up the specified leaf, since the output register values
         * default to any existing values.  This allows overriding a single
         * register, without needing to know the other values.
         */
        PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves,
                                                   uLeaf, uSubLeaf);
        CPUMCPUIDLEAF Leaf;
        if (pLeaf)
            Leaf = *pLeaf;
        else
            RT_ZERO(Leaf);
        Leaf.uLeaf        = uLeaf;
        Leaf.uSubLeaf     = uSubLeaf;
        Leaf.fSubLeafMask = fSubLeafMask;

        rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n",
                              pszLabel, szName, rc);
        rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n",
                              pszLabel, szName, rc);
        rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n",
                              pszLabel, szName, rc);
        rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n",
                              pszLabel, szName, rc);

        /*
         * Insert the leaf into the table (replaces existing ones).
         */
        rc = cpumR3CpuIdInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves,
                               &Leaf);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc);
    }

    return VINF_SUCCESS;
}
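Correspondingly, a CPUID override is one child node under /CPUM/HostCPUID/ whose name (or explicit 'Leaf' value) is the leaf number in hex. A sketch with an invented value, illustrating the single-register semantics the comment above describes:

    /CPUM/HostCPUID/80000001/     node name parsed as a hex leaf number
        SubLeaf     (u32) = 0     optional, default 0
        SubLeafMask (u32) = 0     optional, default 0
        ecx         (u32) = 0x00000001
        (eax/ebx/edx omitted: they default to the existing leaf's values,
         so only ECX is actually overridden)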
@@ -884..926 removed: cpumR3CpuIdFetchLeafOverride @@
/**
 * Fetches overrides for a CPUID leaf.
 *
 * @returns VBox status code.
 * @param   pLeaf       The leaf to load the overrides into.
 * @param   pCfgNode    The CFGM node containing the overrides
 *                      (/CPUM/HostCPUID/ or /CPUM/CPUID/).
 * @param   iLeaf       The CPUID leaf number.
 */
static int cpumR3CpuIdFetchLeafOverride(PCPUMCPUID pLeaf, PCFGMNODE pCfgNode, uint32_t iLeaf)
{
    PCFGMNODE pLeafNode = CFGMR3GetChildF(pCfgNode, "%RX32", iLeaf);
    if (pLeafNode)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->eax = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->ebx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->ecx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);

        rc = CFGMR3QueryU32(pLeafNode, "edx", &u32);
        if (RT_SUCCESS(rc))
            pLeaf->edx = u32;
        else
            AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc);
    }
    return VINF_SUCCESS;
}


@@ -928..949 removed: cpumR3CpuIdInitLoadOverrideSet @@
/**
 * Load the overrides for a set of CPUID leaves.
 *
 * @returns VBox status code.
 * @param   paLeaves    The leaf array.
 * @param   cLeaves     The number of leaves.
 * @param   uStart      The start leaf number.
 * @param   pCfgNode    The CFGM node containing the overrides
 *                      (/CPUM/HostCPUID/ or /CPUM/CPUID/).
 */
static int cpumR3CpuIdInitLoadOverrideSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
{
    for (uint32_t i = 0; i < cLeaves; i++)
    {
        int rc = cpumR3CpuIdFetchLeafOverride(&paLeaves[i], pCfgNode, uStart + i);
        if (RT_FAILURE(rc))
            return rc;
    }

    return VINF_SUCCESS;
}
@@ -951..969 removed: cpumR3CpuIdInitHostSet @@
/**
 * Init a set of host CPUID leaves.
 *
 * @returns VBox status code.
 * @param   paLeaves    The leaf array.
 * @param   cLeaves     The number of leaves.
 * @param   uStart      The start leaf number.
 * @param   pCfgNode    The /CPUM/HostCPUID/ node.
 */
static int cpumR3CpuIdInitHostSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode)
{
    /* Using the ECX variant for all of them can't hurt... */
    for (uint32_t i = 0; i < cLeaves; i++)
        ASMCpuIdExSlow(uStart + i, 0, 0, 0, &paLeaves[i].eax, &paLeaves[i].ebx, &paLeaves[i].ecx, &paLeaves[i].edx);

    /* Load CPUID leaf override; we currently don't care if the user
       specifies features the host CPU doesn't support. */
    return cpumR3CpuIdInitLoadOverrideSet(uStart, paLeaves, cLeaves, pCfgNode);
}


@@ -971..1040 removed: cpumR3CpuIdInstallAndExplodeLeaves @@
static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCPUM, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves)
{
    /*
     * Install the CPUID information.
     */
    int rc = MMHyperDupMem(pVM, paLeaves, sizeof(paLeaves[0]) * cLeaves, 32,
                           MM_TAG_CPUM_CPUID, (void **)&pCPUM->GuestInfo.paCpuIdLeavesR3);
    AssertLogRelRCReturn(rc, rc);

    pCPUM->GuestInfo.paCpuIdLeavesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
    pCPUM->GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3);
    Assert(MMHyperR0ToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesR0) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
    Assert(MMHyperRCToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesRC) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3);
    /*
     * Explode the guest CPU features.
     */
    rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Adjust the scalable bus frequency according to the CPUID information
     * we're now using.
     */
    if (CPUMMICROARCH_IS_INTEL_CORE7(pVM->cpum.s.GuestFeatures.enmMicroarch))
        pCPUM->GuestInfo.uScalableBusFreq = pCPUM->GuestFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge
                                          ? UINT64_C(100000000)  /* 100MHz */
                                          : UINT64_C(133333333); /* 133MHz */

    /*
     * Populate the legacy arrays.  Currently used for everything, later only
     * for patch manager.
     */
    struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] =
    {
        { pCPUM->aGuestCpuIdStd,     RT_ELEMENTS(pCPUM->aGuestCpuIdStd),     0x00000000 },
        { pCPUM->aGuestCpuIdExt,     RT_ELEMENTS(pCPUM->aGuestCpuIdExt),     0x80000000 },
        { pCPUM->aGuestCpuIdCentaur, RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), 0xc0000000 },
    };
    for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++)
    {
        uint32_t    cLeft       = aOldRanges[i].cCpuIds;
        uint32_t    uLeaf       = aOldRanges[i].uBase + cLeft;
        PCPUMCPUID  pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft];
        while (cLeft-- > 0)
        {
            uLeaf--;
            pLegacyLeaf--;

            PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, uLeaf,
                                                       0 /* uSubLeaf */);
            if (pLeaf)
            {
                pLegacyLeaf->eax = pLeaf->uEax;
                pLegacyLeaf->ebx = pLeaf->uEbx;
                pLegacyLeaf->ecx = pLeaf->uEcx;
                pLegacyLeaf->edx = pLeaf->uEdx;
            }
            else
                *pLegacyLeaf = pCPUM->GuestInfo.DefCpuId;
        }
    }

    pCPUM->GuestCpuIdDef = pCPUM->GuestInfo.DefCpuId;

    return VINF_SUCCESS;
}


@@ -1042..1771 removed: cpumR3CpuIdInit @@
/**
 * Initializes the emulated CPU's cpuid information.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
static int cpumR3CpuIdInit(PVM pVM)
{
    PCPUM       pCPUM    = &pVM->cpum.s;
    PCFGMNODE   pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM");
    int         rc;

#define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \
    if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \
    { \
        LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \
        (a_pLeafReg) &= ~(uint32_t)(fMask); \
    }
#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \
    if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \
    { \
        LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \
        (a_pLeafReg) &= ~(uint32_t)(fBitMask); \
    }
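    /* Illustration (not part of the original file): with a portable level of
     * 1 or higher, a use like
     *     PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE3, X86_CPUID_FEATURE_ECX_SSE3);
     * expands to
     *     if (pCPUM->u8PortableCpuIdLevel >= (1) && ((pStdFeatureLeaf->uEcx) & (X86_CPUID_FEATURE_ECX_SSE3)))
     *     {
     *         LogRel(("PortableCpuId: pStdFeatureLeaf->uEcx[SSE3]: 1 -> 0\n"));
     *         (pStdFeatureLeaf->uEcx) &= ~(uint32_t)(X86_CPUID_FEATURE_ECX_SSE3);
     *     }
     * i.e. the feature bit is stripped from the leaf and the change is logged. */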
    /*
     * Read the configuration.
     */
    /** @cfgm{/CPUM/SyntheticCpu, boolean, false}
     * Enables the Synthetic CPU.  The Vendor ID and Processor Name are
     * completely overridden by VirtualBox custom strings.  Some
     * CPUID information is withheld, like the cache info.
     *
     * This is obsoleted by PortableCpuIdLevel. */
    bool fSyntheticCpu;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &fSyntheticCpu, false);
    AssertRCReturn(rc, rc);

    /** @cfgm{/CPUM/PortableCpuIdLevel, 8-bit, 0, 3, 0}
     * When non-zero, CPUID features that could cause portability issues will be
     * stripped.  The higher the value the more features get stripped.  Higher
     * values should only be used when older CPUs are involved since it may
     * harm performance and maybe also cause problems with specific guests. */
    rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, fSyntheticCpu ? 1 : 0);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/GuestCpuName, string}
     * The name of the CPU we're to emulate.  The default is the host CPU.
     * Note! CPUs other than "host" are currently unsupported. */
    char szCpuName[128];
    rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", szCpuName, sizeof(szCpuName), "host");
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/CMPXCHG16B, boolean, false}
     * Expose CMPXCHG16B to the guest if supported by the host.
     */
    bool fCmpXchg16b;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/MONITOR, boolean, true}
     * Expose MONITOR/MWAIT instructions to the guest.
     */
    bool fMonitor;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
     * Expose MWAIT extended features to the guest.  For now we expose just the
     * MWAIT break on interrupt feature (bit 1).
     */
    bool fMWaitExtensions;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/SSE4.1, boolean, true}
     * Expose SSE4.1 to the guest if available.
     */
    bool fSse41;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "SSE4.1", &fSse41, true);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/SSE4.2, boolean, true}
     * Expose SSE4.2 to the guest if available.
     */
    bool fSse42;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "SSE4.2", &fSse42, true);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
     * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
     * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
     * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
     */
    bool fNt4LeafLimit;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
     * Restrict the reported CPU family+model+stepping of Intel CPUs.  This is
     * probably going to be a temporary hack, so don't depend on this.
     * The 1st byte of the value is the stepping, the 2nd byte value is the model
     * number and the 3rd byte value is the family, and the 4th value must be zero.
     */
    uint32_t uMaxIntelFamilyModelStep;
    rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX);
    AssertLogRelRCReturn(rc, rc);
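    /* Illustration (not part of the original file): to clamp reporting at,
     * say, family 6, model 15, stepping 11 (a Core 2 class CPU), the value
     * would be assembled byte-wise as
     *     MaxIntelFamilyModelStep = 0x00060f0b
     * (stepping 0x0b in byte 0, model 0x0f in byte 1, family 0x06 in byte 2,
     * byte 3 zero). */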
    /*
     * Get the guest CPU data from the database and/or the host.
     */
    rc = cpumR3DbGetCpuInfo(szCpuName, &pCPUM->GuestInfo);
    if (RT_FAILURE(rc))
        return rc == VERR_CPUM_DB_CPU_NOT_FOUND
             ? VMSetError(pVM, rc, RT_SRC_POS,
                          "Info on guest CPU '%s' could not be found. Please, select a different CPU.", szCpuName)
             : rc;

    /** @cfgm{/CPUM/MSRs/[Name]/[First|Last|Type|Value|...],}
     * Overrides the guest MSRs.
     */
    rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs"));

    /** @cfgm{/CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
     * Overrides the CPUID leaf values (from the host CPU usually) used for
     * calculating the guest CPUID leaves.  This can be used to preserve the CPUID
     * values when moving a VM to a different machine.  Another use is restricting
     * (or extending) the feature set exposed to the guest. */
    if (RT_SUCCESS(rc))
        rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID");

    if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */
        rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
                        "Found unsupported configuration node '/CPUM/CPUID/'. "
                        "Please use IMachine::setCPUIDLeaf() instead.");

    /*
     * Pre-explode the CPUID info.
     */
    if (RT_SUCCESS(rc))
        rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
    if (RT_FAILURE(rc))
    {
        RTMemFree(pCPUM->GuestInfo.paCpuIdLeavesR3);
        pCPUM->GuestInfo.paCpuIdLeavesR3 = NULL;
        RTMemFree(pCPUM->GuestInfo.paMsrRangesR3);
        pCPUM->GuestInfo.paMsrRangesR3 = NULL;
        return rc;
    }


    /* ... split this function about here ... */


    /* Cpuid 1:
     * Only report features we can support.
     *
     * Note! When enabling new features the Synthetic CPU and Portable CPUID
     *       options may require adjusting (i.e. stripping what was enabled).
     */
    PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
                                                        1, 0); /* Note! Must refetch when used later. */
    AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2);
    pStdFeatureLeaf->uEdx &= X86_CPUID_FEATURE_EDX_FPU
                           | X86_CPUID_FEATURE_EDX_VME
                           | X86_CPUID_FEATURE_EDX_DE
                           | X86_CPUID_FEATURE_EDX_PSE
                           | X86_CPUID_FEATURE_EDX_TSC
                           | X86_CPUID_FEATURE_EDX_MSR
                           //| X86_CPUID_FEATURE_EDX_PAE   - set later if configured.
                           | X86_CPUID_FEATURE_EDX_MCE
                           | X86_CPUID_FEATURE_EDX_CX8
                           //| X86_CPUID_FEATURE_EDX_APIC  - set by the APIC device if present.
                           /* Note! We don't report sysenter/sysexit support due to our inability to keep the
                                    IOPL part of eflags in sync while in ring 1 (see @bugref{1757}). */
                           //| X86_CPUID_FEATURE_EDX_SEP
                           | X86_CPUID_FEATURE_EDX_MTRR
                           | X86_CPUID_FEATURE_EDX_PGE
                           | X86_CPUID_FEATURE_EDX_MCA
                           | X86_CPUID_FEATURE_EDX_CMOV
                           | X86_CPUID_FEATURE_EDX_PAT
                           | X86_CPUID_FEATURE_EDX_PSE36
                           //| X86_CPUID_FEATURE_EDX_PSN   - no serial number.
                           | X86_CPUID_FEATURE_EDX_CLFSH
                           //| X86_CPUID_FEATURE_EDX_DS    - no debug store.
                           //| X86_CPUID_FEATURE_EDX_ACPI  - not virtualized yet.
                           | X86_CPUID_FEATURE_EDX_MMX
                           | X86_CPUID_FEATURE_EDX_FXSR
                           | X86_CPUID_FEATURE_EDX_SSE
                           | X86_CPUID_FEATURE_EDX_SSE2
                           //| X86_CPUID_FEATURE_EDX_SS    - no self snoop.
                           //| X86_CPUID_FEATURE_EDX_HTT   - no hyperthreading.
                           //| X86_CPUID_FEATURE_EDX_TM    - no thermal monitor.
                           //| X86_CPUID_FEATURE_EDX_PBE   - no pending break enabled.
                           | 0;
    pStdFeatureLeaf->uEcx &= 0
                           | X86_CPUID_FEATURE_ECX_SSE3
                           /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
                           | ((fMonitor && pVM->cCpus == 1) ? X86_CPUID_FEATURE_ECX_MONITOR : 0)
                           //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
                           //| X86_CPUID_FEATURE_ECX_VMX   - not virtualized.
                           //| X86_CPUID_FEATURE_ECX_EST   - no extended speed step.
                           //| X86_CPUID_FEATURE_ECX_TM2   - no thermal monitor 2.
                           | X86_CPUID_FEATURE_ECX_SSSE3
                           //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
                           | (fCmpXchg16b ? X86_CPUID_FEATURE_ECX_CX16 : 0)
                           /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
                           //| X86_CPUID_FEATURE_ECX_TPRUPDATE
                           | (fSse41 ? X86_CPUID_FEATURE_ECX_SSE4_1 : 0)
                           | (fSse42 ? X86_CPUID_FEATURE_ECX_SSE4_2 : 0)
                           /* ECX Bit 21 - x2APIC support - not yet. */
                           //| X86_CPUID_FEATURE_ECX_X2APIC
                           /* ECX Bit 23 - POPCNT instruction. */
                           //| X86_CPUID_FEATURE_ECX_POPCNT
                           | 0;
    if (pCPUM->u8PortableCpuIdLevel > 0)
    {
        PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSSE3,  X86_CPUID_FEATURE_ECX_SSSE3);
        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE3,   X86_CPUID_FEATURE_ECX_SSE3);
        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE4_1, X86_CPUID_FEATURE_ECX_SSE4_1);
        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE4_2, X86_CPUID_FEATURE_ECX_SSE4_2);
        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, CX16,   X86_CPUID_FEATURE_ECX_CX16);
        PORTABLE_DISABLE_FEATURE_BIT(2, pStdFeatureLeaf->uEdx, SSE2,   X86_CPUID_FEATURE_EDX_SSE2);
        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, SSE,    X86_CPUID_FEATURE_EDX_SSE);
        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CLFSH,  X86_CPUID_FEATURE_EDX_CLFSH);
        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CMOV,   X86_CPUID_FEATURE_EDX_CMOV);

        Assert(!(pStdFeatureLeaf->uEdx & ( X86_CPUID_FEATURE_EDX_SEP
                                         | X86_CPUID_FEATURE_EDX_PSN
                                         | X86_CPUID_FEATURE_EDX_DS
                                         | X86_CPUID_FEATURE_EDX_ACPI
                                         | X86_CPUID_FEATURE_EDX_SS
                                         | X86_CPUID_FEATURE_EDX_TM
                                         | X86_CPUID_FEATURE_EDX_PBE
                                         )));
        Assert(!(pStdFeatureLeaf->uEcx & ( X86_CPUID_FEATURE_ECX_PCLMUL
                                         | X86_CPUID_FEATURE_ECX_DTES64
                                         | X86_CPUID_FEATURE_ECX_CPLDS
                                         | X86_CPUID_FEATURE_ECX_VMX
                                         | X86_CPUID_FEATURE_ECX_SMX
                                         | X86_CPUID_FEATURE_ECX_EST
                                         | X86_CPUID_FEATURE_ECX_TM2
                                         | X86_CPUID_FEATURE_ECX_CNTXID
                                         | X86_CPUID_FEATURE_ECX_FMA
                                         | X86_CPUID_FEATURE_ECX_CX16
                                         | X86_CPUID_FEATURE_ECX_TPRUPDATE
                                         | X86_CPUID_FEATURE_ECX_PDCM
                                         | X86_CPUID_FEATURE_ECX_DCA
                                         | X86_CPUID_FEATURE_ECX_MOVBE
                                         | X86_CPUID_FEATURE_ECX_AES
                                         | X86_CPUID_FEATURE_ECX_POPCNT
                                         | X86_CPUID_FEATURE_ECX_XSAVE
                                         | X86_CPUID_FEATURE_ECX_OSXSAVE
                                         | X86_CPUID_FEATURE_ECX_AVX
                                         )));
    }

    /* Cpuid 0x80000001:
     * Only report features we can support.
     *
     * Note! When enabling new features the Synthetic CPU and Portable CPUID
     *       options may require adjusting (i.e. stripping what was enabled).
     *
     * ASSUMES that this is ALWAYS the AMD defined feature set if present.
     */
    PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
                                                        UINT32_C(0x80000001), 0); /* Note! Must refetch when used later. */
    if (pExtFeatureLeaf)
    {
        pExtFeatureLeaf->uEdx &= X86_CPUID_AMD_FEATURE_EDX_FPU
                               | X86_CPUID_AMD_FEATURE_EDX_VME
                               | X86_CPUID_AMD_FEATURE_EDX_DE
                               | X86_CPUID_AMD_FEATURE_EDX_PSE
                               | X86_CPUID_AMD_FEATURE_EDX_TSC
                               | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
                               //| X86_CPUID_AMD_FEATURE_EDX_PAE  - not implemented yet.
                               //| X86_CPUID_AMD_FEATURE_EDX_MCE  - not virtualized yet.
                               | X86_CPUID_AMD_FEATURE_EDX_CX8
                               //| X86_CPUID_AMD_FEATURE_EDX_APIC - set by the APIC device if present.
                               /* Note! We don't report sysenter/sysexit support due to our inability to keep the
                                        IOPL part of eflags in sync while in ring 1 (see @bugref{1757}). */
                               //| X86_CPUID_EXT_FEATURE_EDX_SEP
                               | X86_CPUID_AMD_FEATURE_EDX_MTRR
                               | X86_CPUID_AMD_FEATURE_EDX_PGE
                               | X86_CPUID_AMD_FEATURE_EDX_MCA
                               | X86_CPUID_AMD_FEATURE_EDX_CMOV
                               | X86_CPUID_AMD_FEATURE_EDX_PAT
                               | X86_CPUID_AMD_FEATURE_EDX_PSE36
                               //| X86_CPUID_EXT_FEATURE_EDX_NX   - not virtualized, requires PAE.
                               //| X86_CPUID_AMD_FEATURE_EDX_AXMMX
                               | X86_CPUID_AMD_FEATURE_EDX_MMX
                               | X86_CPUID_AMD_FEATURE_EDX_FXSR
                               | X86_CPUID_AMD_FEATURE_EDX_FFXSR
                               //| X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
                               | X86_CPUID_EXT_FEATURE_EDX_RDTSCP
                               //| X86_CPUID_EXT_FEATURE_EDX_LONG_MODE - turned on when necessary
                               | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
                               | X86_CPUID_AMD_FEATURE_EDX_3DNOW
                               | 0;
        pExtFeatureLeaf->uEcx &= 0
                               //| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
                               //| X86_CPUID_AMD_FEATURE_ECX_CMPL
                               //| X86_CPUID_AMD_FEATURE_ECX_SVM  - not virtualized.
                               //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                               /* Note: This could prevent teleporting from AMD to Intel CPUs! */
                               | X86_CPUID_AMD_FEATURE_ECX_CR8L /* expose lock mov cr0 = mov cr8 hack for guests that can use this feature to access the TPR. */
                               //| X86_CPUID_AMD_FEATURE_ECX_ABM
                               //| X86_CPUID_AMD_FEATURE_ECX_SSE4A
                               //| X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
                               //| X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
                               //| X86_CPUID_AMD_FEATURE_ECX_OSVW
                               //| X86_CPUID_AMD_FEATURE_ECX_IBS
                               //| X86_CPUID_AMD_FEATURE_ECX_SSE5
                               //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
                               //| X86_CPUID_AMD_FEATURE_ECX_WDT
                               | 0;
        if (pCPUM->u8PortableCpuIdLevel > 0)
        {
            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEcx, CR8L,      X86_CPUID_AMD_FEATURE_ECX_CR8L);
            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW,     X86_CPUID_AMD_FEATURE_EDX_3DNOW);
            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW_EX,  X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, FFXSR,     X86_CPUID_AMD_FEATURE_EDX_FFXSR);
            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, RDTSCP,    X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
            PORTABLE_DISABLE_FEATURE_BIT(2, pExtFeatureLeaf->uEcx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
            PORTABLE_DISABLE_FEATURE_BIT(3, pExtFeatureLeaf->uEcx, CMOV,      X86_CPUID_AMD_FEATURE_EDX_CMOV);

            Assert(!(pExtFeatureLeaf->uEcx & ( X86_CPUID_AMD_FEATURE_ECX_CMPL
                                             | X86_CPUID_AMD_FEATURE_ECX_SVM
                                             | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                                             | X86_CPUID_AMD_FEATURE_ECX_CR8L
                                             | X86_CPUID_AMD_FEATURE_ECX_ABM
                                             | X86_CPUID_AMD_FEATURE_ECX_SSE4A
                                             | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
                                             | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
                                             | X86_CPUID_AMD_FEATURE_ECX_OSVW
                                             | X86_CPUID_AMD_FEATURE_ECX_IBS
                                             | X86_CPUID_AMD_FEATURE_ECX_SSE5
                                             | X86_CPUID_AMD_FEATURE_ECX_SKINIT
                                             | X86_CPUID_AMD_FEATURE_ECX_WDT
                                             | UINT32_C(0xffffc000)
                                             )));
            Assert(!(pExtFeatureLeaf->uEdx & ( RT_BIT(10)
                                             | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
                                             | RT_BIT(18)
                                             | RT_BIT(19)
                                             | RT_BIT(21)
                                             | X86_CPUID_AMD_FEATURE_EDX_AXMMX
                                             | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
                                             | RT_BIT(28)
                                             )));
        }
    }

    /*
     * Hide HTT, multicore, SMP, whatever.
     * (APIC-ID := 0 and #LogCpus := 0)
     */
    pStdFeatureLeaf->uEbx &= 0x0000ffff;
#ifdef VBOX_WITH_MULTI_CORE
    if (pVM->cCpus > 1)
    {
        /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU
           core times the number of CPU cores per processor. */
        pStdFeatureLeaf->uEbx |= (pVM->cCpus << 16);
        pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT;  /* necessary for hyper-threading *or* multi-core CPUs */
    }
#endif
    /* Cpuid 2:
     * Intel: Cache and TLB information
     * AMD:   Reserved
     * VIA:   Reserved
     * Safe to expose; restrict the number of calls to 1 for the portable case.
     */
    PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 2, 0);
    if (   pCPUM->u8PortableCpuIdLevel > 0
        && pCurLeaf
        && (pCurLeaf->uEax & 0xff) > 1)
    {
        LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff));
        pCurLeaf->uEax &= UINT32_C(0xfffffffe);
    }

    /* Cpuid 3:
     * Intel: EAX, EBX - reserved (transmeta uses these)
     *        ECX, EDX - Processor Serial Number if available, otherwise reserved
     * AMD:   Reserved
     * VIA:   Reserved
     * Safe to expose
     */
    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 3, 0);
    pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
    if (   !(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN)
        && pCurLeaf)
    {
        pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
        if (pCPUM->u8PortableCpuIdLevel > 0)
            pCurLeaf->uEax = pCurLeaf->uEbx = 0;
    }

    /* Cpuid 4:
     * Intel: Deterministic Cache Parameters Leaf
     *        Note: Depends on the ECX input! -> Feeling rather lazy now, so we just return 0
     * AMD:   Reserved
     * VIA:   Reserved
     * Safe to expose, except for EAX:
     *      Bits 25-14: Maximum number of addressable IDs for logical processors sharing this cache (see note)**
     *      Bits 31-26: Maximum number of processor cores in this physical package**
     * Note: These SMP values are constant regardless of ECX
     */
    CPUMCPUIDLEAF NewLeaf;
    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0);
    if (pCurLeaf)
    {
        NewLeaf.uLeaf        = 4;
        NewLeaf.uSubLeaf     = 0;
        NewLeaf.fSubLeafMask = 0;
        NewLeaf.uEax         = 0;
        NewLeaf.uEbx         = 0;
        NewLeaf.uEcx         = 0;
        NewLeaf.uEdx         = 0;
        NewLeaf.fFlags       = 0;
#ifdef VBOX_WITH_MULTI_CORE
        if (   pVM->cCpus > 1
            && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
        {
            AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
            /* One logical processor with possibly multiple cores. */
            /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
            NewLeaf.uEax |= ((pVM->cCpus - 1) << 26);   /* 6 bits only -> 64 cores! */
        }
#endif
        rc = cpumR3CpuIdInsert(NULL /* pVM */, &pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
        AssertLogRelRCReturn(rc, rc);
    }

    /* Cpuid 5:     Monitor/mwait Leaf
     * Intel: ECX, EDX - reserved
     *        EAX, EBX - Smallest and largest monitor line size
     * AMD:   EDX - reserved
     *        EAX, EBX - Smallest and largest monitor line size
     *        ECX - extensions (ignored for now)
     * VIA:   Reserved
     * Safe to expose
     */
    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 5, 0);
    if (pCurLeaf)
    {
        pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
        if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR))
            pCurLeaf->uEax = pCurLeaf->uEbx = 0;

        pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
        if (fMWaitExtensions)
        {
            pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
            /** @todo for now we just expose host's MWAIT C-states, although conceptually
               it shall be part of our power management virtualization model */
#if 0
            /* MWAIT sub C-states */
            pCurLeaf->uEdx =
                    (0 << 0)  /* 0 in C0 */ |
                    (2 << 4)  /* 2 in C1 */ |
                    (2 << 8)  /* 2 in C2 */ |
                    (2 << 12) /* 2 in C3 */ |
                    (0 << 16) /* 0 in C4 */
                    ;
#endif
        }
        else
            pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
    }
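    /* Illustration (not part of the original file): with VBOX_WITH_MULTI_CORE,
     * an 8-vCPU Intel guest gets leaf 4 EAX = (8 - 1) << 26 = 0x1c000000 from
     * the code above, i.e. bits 31:26 report 7 ("maximum cores per package
     * minus one") while every other field stays zero. */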
    /* Cpuid 0x80000005 & 0x80000006 contain information about L1, L2 & L3 cache and TLB identifiers.
     * Safe to pass on to the guest.
     *
     * Intel: 0x80000005 reserved
     *        0x80000006 L2 cache information
     * AMD:   0x80000005 L1 cache information
     *        0x80000006 L2/L3 cache information
     * VIA:   0x80000005 TLB and L1 cache information
     *        0x80000006 L2 cache information
     */

    /* Cpuid 0x80000007:
     * Intel: Reserved
     * AMD:   EAX, EBX, ECX - reserved
     *        EDX: Advanced Power Management Information
     * VIA:   Reserved
     */
    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000007), 0);
    if (pCurLeaf)
    {
        Assert(pCPUM->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID);

        pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0;

        if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
        {
            /* Only expose the TSC invariant capability bit to the guest. */
            pCurLeaf->uEdx &= 0
                            //| X86_CPUID_AMD_ADVPOWER_EDX_TS
                            //| X86_CPUID_AMD_ADVPOWER_EDX_FID
                            //| X86_CPUID_AMD_ADVPOWER_EDX_VID
                            //| X86_CPUID_AMD_ADVPOWER_EDX_TTP
                            //| X86_CPUID_AMD_ADVPOWER_EDX_TM
                            //| X86_CPUID_AMD_ADVPOWER_EDX_STC
                            //| X86_CPUID_AMD_ADVPOWER_EDX_MC
                            //| X86_CPUID_AMD_ADVPOWER_EDX_HWPSTATE
#if 0
                            /*
                             * We don't expose X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR, because newer
                             * Linux kernels blindly assume that the AMD performance counters work
                             * if this is set for 64 bits guests.  (Can't really find a CPUID feature
                             * bit for them though.)
                             */
                            | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR
#endif
                            | 0;
        }
        else
            pCurLeaf->uEdx = 0;
    }

    /* Cpuid 0x80000008:
     * Intel: EAX: Virtual/Physical address Size
     *        EBX, ECX, EDX - reserved
     * AMD:   EBX, EDX - reserved
     *        EAX: Virtual/Physical/Guest address Size
     *        ECX: Number of cores + APICIdCoreIdSize
     * VIA:   EAX: Virtual/Physical address Size
     *        EBX, ECX, EDX - reserved
     */
    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000008), 0);
    if (pCurLeaf)
    {
        /* Only expose the virtual and physical address sizes to the guest. */
        pCurLeaf->uEax &= UINT32_C(0x0000ffff);
        pCurLeaf->uEbx = pCurLeaf->uEdx = 0;  /* reserved */
        /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu)
         * NC (0-7) Number of cores; 0 equals 1 core */
        pCurLeaf->uEcx = 0;
#ifdef VBOX_WITH_MULTI_CORE
        if (   pVM->cCpus > 1
            && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD)
        {
            /* Legacy method to determine the number of cores. */
            pCurLeaf->uEcx |= (pVM->cCpus - 1);  /* NC: Number of CPU cores - 1; 8 bits */
            pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
                                                 UINT32_C(0x80000001), 0);
            if (pExtFeatureLeaf)
                pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL;
        }
#endif
    }


    /*
     * Limit the number of entries, zapping the remainder.
     *
     * The limits are masking off stuff about power saving and similar; this
     * is perhaps a bit crudely done as there is probably some relatively harmless
     * info too in these leaves (like words about having a constant TSC).
     */
    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0);
    if (pCurLeaf)
    {
        if (pCurLeaf->uEax > 5)
        {
            pCurLeaf->uEax = 5;
            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
                                   pCurLeaf->uEax + 1, UINT32_C(0x000fffff));
        }

        /* NT4 hack, no zapping of extra leaves here. */
        if (fNt4LeafLimit && pCurLeaf->uEax > 3)
            pCurLeaf->uEax = 3;
    }

    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000000), 0);
    if (pCurLeaf)
    {
        if (pCurLeaf->uEax > UINT32_C(0x80000008))
        {
            pCurLeaf->uEax = UINT32_C(0x80000008);
            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
                                   pCurLeaf->uEax + 1, UINT32_C(0x800fffff));
        }
    }
    /*
     * Centaur stuff (VIA).
     *
     * The important part here (we think) is to make sure the 0xc0000000
     * function returns 0xc0000001.  As for the features, we don't currently
     * let on about any of those...  0xc0000002 seems to be some
     * temperature/hz/++ stuff, include it as well (static).
     */
    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0xc0000000), 0);
    if (pCurLeaf)
    {
        if (   pCurLeaf->uEax >= UINT32_C(0xc0000000)
            && pCurLeaf->uEax <= UINT32_C(0xc0000004))
        {
            pCurLeaf->uEax = RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000002));
            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
                                   UINT32_C(0xc0000002), UINT32_C(0xc00fffff));

            pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
                                          UINT32_C(0xc0000001), 0);
            if (pCurLeaf)
                pCurLeaf->uEdx = 0; /* all features hidden */
        }
        else
            cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves,
                                   UINT32_C(0xc0000000), UINT32_C(0xc00fffff));
    }

    /*
     * Hypervisor identification.
     *
     * We only return minimal information, primarily ensuring that the
     * 0x40000000 function returns 0x40000001 and identifying ourselves.
     * The hypervisor-specific interface is supported through GIM, which will
     * modify these leaves if required depending on the GIM provider.
     */
    NewLeaf.uLeaf        = UINT32_C(0x40000000);
    NewLeaf.uSubLeaf     = 0;
    NewLeaf.fSubLeafMask = 0;
    NewLeaf.uEax         = UINT32_C(0x40000001);
    NewLeaf.uEbx         = 0x786f4256 /* 'VBox' */;
    NewLeaf.uEcx         = 0x786f4256 /* 'VBox' */;
    NewLeaf.uEdx         = 0x786f4256 /* 'VBox' */;
    NewLeaf.fFlags       = 0;
    rc = cpumR3CpuIdInsert(NULL /* pVM */, &pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
    AssertLogRelRCReturn(rc, rc);

    NewLeaf.uLeaf        = UINT32_C(0x40000001);
    NewLeaf.uEax         = 0x656e6f6e; /* 'none' */
    NewLeaf.uEbx         = 0;
    NewLeaf.uEcx         = 0;
    NewLeaf.uEdx         = 0;
    NewLeaf.fFlags       = 0;
    rc = cpumR3CpuIdInsert(NULL /* pVM */, &pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
    AssertLogRelRCReturn(rc, rc);
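    /* Illustration (not part of the original file): 0x786f4256 is simply the
     * string "VBox" in little-endian byte order, so a guest probing the leaf
     * can recover it like this:
     *     uint32_t uEbx = 0x786f4256;
     *     char     szSig[5];
     *     memcpy(szSig, &uEbx, 4);
     *     szSig[4] = '\0';            // szSig == "VBox" on x86
     * while EAX = 0x40000001 advertises the highest hypervisor leaf, and
     * 0x656e6f6e in leaf 0x40000001 EAX decodes the same way to "none". */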
    /*
     * Mini CPU selection support for making Mac OS X happy.
     */
    if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
    {
        pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
        uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pStdFeatureLeaf->uEax),
                                                                ASMGetCpuModelIntel(pStdFeatureLeaf->uEax),
                                                                ASMGetCpuFamily(pStdFeatureLeaf->uEax),
                                                                0);
        if (uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep)
        {
            uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000);
            uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf;           /* stepping */
            uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4;    /* 4 low model bits */
            uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) >> 4) << 16;    /* 4 high model bits */
            uNew |= (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf) << 8;    /* 4 low family bits */
            if (RT_BYTE3(uMaxIntelFamilyModelStep) > 0xf)               /* 8 high family bits, using Intel's suggested calculation. */
                uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20;
            LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x)\n",
                    pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));
            pStdFeatureLeaf->uEax = uNew;
        }
    }

    /*
     * MSR fudging.
     */
    /** @cfgm{/CPUM/FudgeMSRs, boolean, true}
     * Fudges some common MSRs if not present in the selected CPU database entry.
     * This is for trying to keep VMs running when moved between different hosts
     * and different CPU vendors. */
    bool fEnable;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "FudgeMSRs", &fEnable, true);    AssertRCReturn(rc, rc);
    if (fEnable)
    {
        rc = cpumR3MsrApplyFudge(pVM);
        AssertLogRelRCReturn(rc, rc);
    }

    /*
     * Move the MSR and CPUID arrays over on the hypervisor heap, and explode
     * guest CPU features again.
     */
    void *pvFree = pCPUM->GuestInfo.paCpuIdLeavesR3;
    int rc1 = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCPUM, pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves);
    RTMemFree(pvFree);

    pvFree = pCPUM->GuestInfo.paMsrRangesR3;
    int rc2 = MMHyperDupMem(pVM, pvFree,
                            sizeof(pCPUM->GuestInfo.paMsrRangesR3[0]) * pCPUM->GuestInfo.cMsrRanges, 32,
                            MM_TAG_CPUM_MSRS, (void **)&pCPUM->GuestInfo.paMsrRangesR3);
    RTMemFree(pvFree);
    AssertLogRelRCReturn(rc1, rc1);
    AssertLogRelRCReturn(rc2, rc2);

    pCPUM->GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paMsrRangesR3);
    pCPUM->GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paMsrRangesR3);
    cpumR3MsrRegStats(pVM);

    /*
     * Some more configuration that we're applying at the end of everything
     * via the CPUMSetGuestCpuIdFeature API.
     */

    /* Check if PAE was explicitly enabled by the user. */
    rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false);    AssertRCReturn(rc, rc);
    if (fEnable)
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);

    /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */
    rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false);    AssertRCReturn(rc, rc);
    if (fEnable)
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);

    /* We don't enable the Hypervisor Present bit by default, but it may be needed by some guests. */
    rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableHVP", &fEnable, false);    AssertRCReturn(rc, rc);
    if (fEnable)
        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP);

#undef PORTABLE_DISABLE_FEATURE_BIT
#undef PORTABLE_CLEAR_BITS_WHEN

    return VINF_SUCCESS;
}
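As background for the family/model/stepping juggling above: CPUID(1).EAX packs the stepping in bits 3:0, model in 7:4, family in 11:8, extended model in 19:16 and extended family in 27:20. A minimal, self-contained decoder sketch (not VirtualBox code; the names here are made up):

#include <stdio.h>
#include <stdint.h>

static void DecodeCpuId1Eax(uint32_t uEax)
{
    uint32_t uStepping = uEax & 0xf;
    uint32_t uModel    = (uEax >> 4) & 0xf;
    uint32_t uFamily   = (uEax >> 8) & 0xf;
    if (uFamily == 0xf)
        uFamily += (uEax >> 20) & 0xff;         /* add the extended family field */
    if (uFamily == 0x6 || uFamily >= 0xf)
        uModel |= ((uEax >> 16) & 0xf) << 4;    /* prepend the extended model field (Intel rule) */
    printf("family %u, model %u, stepping %u\n", uFamily, uModel, uStepping);
}

int main(void)
{
    DecodeCpuId1Eax(0x000206a7); /* e.g. a Sandy Bridge EAX: family 6, model 42, stepping 7 */
    return 0;
}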

/**
 * Applies relocations to data and code managed by this
 * component.  This function will be called at init and
…
@@ -2009..2048 removed: cpumR3SaveCpuId @@
/**
 * Called both in pass 0 and the final pass.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pSSM    The saved state handle.
 */
static void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Save all the CPU ID leaves here so we can check them for compatibility
     * upon loading.
     */
    SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd));
    SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd));

    SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt));
    SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt));

    SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur));
    SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur));

    SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef));

    /*
     * Save a good portion of the raw CPU IDs as well as they may come in
     * handy when validating features for raw mode.
     */
    CPUMCPUID aRawStd[16];
    for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++)
        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
    SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd));
    SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd));

    CPUMCPUID aRawExt[32];
    for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++)
        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
    SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
    SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
}


@@ -2050..2088 removed: cpumR3LoadCpuIdOneGuestArray @@
static int cpumR3LoadCpuIdOneGuestArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
{
    uint32_t cCpuIds;
    int rc = SSMR3GetU32(pSSM, &cCpuIds);
    if (RT_SUCCESS(rc))
    {
        if (cCpuIds < 64)
        {
            for (uint32_t i = 0; i < cCpuIds; i++)
            {
                CPUMCPUID CpuId;
                rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId));
                if (RT_FAILURE(rc))
                    break;

                CPUMCPUIDLEAF NewLeaf;
                NewLeaf.uLeaf        = uBase + i;
                NewLeaf.uSubLeaf     = 0;
                NewLeaf.fSubLeafMask = 0;
                NewLeaf.uEax         = CpuId.eax;
                NewLeaf.uEbx         = CpuId.ebx;
                NewLeaf.uEcx         = CpuId.ecx;
                NewLeaf.uEdx         = CpuId.edx;
                NewLeaf.fFlags       = 0;
                rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &NewLeaf);
            }
        }
        else
            rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }
    if (RT_FAILURE(rc))
    {
        RTMemFree(*ppaLeaves);
        *ppaLeaves = NULL;
        *pcLeaves = 0;
    }
    return rc;
}


@@ -2090..2103 removed: cpumR3LoadCpuIdGuestArrays @@
static int cpumR3LoadCpuIdGuestArrays(PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
{
    *ppaLeaves = NULL;
    *pcLeaves = 0;

    int rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves);
    if (RT_SUCCESS(rc))
        rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves);
    if (RT_SUCCESS(rc))
        rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves);

    return rc;
}
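Read together with cpumR3SaveCpuId() above, the on-disk layout of this CPUID sub-unit is just a flat sequence (a sketch derived from the put/get calls above, not a formal format spec):

    u32 count, aGuestCpuIdStd        raw CPUMCPUID array
    u32 count, aGuestCpuIdExt        raw CPUMCPUID array
    u32 count, aGuestCpuIdCentaur    raw CPUMCPUID array
    GuestCpuIdDef                    one CPUMCPUID (default leaf)
    u32 count, aRawStd[16]           host CPUID leaves 0x00000000..0x0000000f
    u32 count, aRawExt[32]           host CPUID leaves 0x80000000..0x8000001f

which is why each loader reads a count first and rejects anything with 64 or more entries per array.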
@@ -2105.. removed: cpumR3LoadCpuId @@
/**
 * Loads the CPU ID leaves saved by pass 0.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pSSM        The saved state handle.
 * @param   uVersion    The format version.
 */
static int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
{
    AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION);

    /*
     * Define a bunch of macros for simplifying the code.
     */
    /* Generic expression + failure message. */
#define CPUID_CHECK_RET(expr, fmt) \
    do { \
        if (!(expr)) \
        { \
            char *pszMsg = RTStrAPrintf2 fmt; /* lack of variadic macros sucks */ \
            if (fStrictCpuIdChecks) \
            { \
                int rcCpuid = SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, "%s", pszMsg); \
                RTStrFree(pszMsg); \
                return rcCpuid; \
            } \
            LogRel(("CPUM: %s\n", pszMsg)); \
            RTStrFree(pszMsg); \
        } \
    } while (0)
#define CPUID_CHECK_WRN(expr, fmt) \
    do { \
        if (!(expr)) \
            LogRel(fmt); \
    } while (0)

    /* For comparing two values and bitch if they differ. */
#define CPUID_CHECK2_RET(what, host, saved) \
    do { \
        if ((host) != (saved)) \
        { \
            if (fStrictCpuIdChecks) \
                return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
                                         N_(#what " mismatch: host=%#x saved=%#x"), (host), (saved)); \
            LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
        } \
    } while (0)
#define CPUID_CHECK2_WRN(what, host, saved) \
    do { \
        if ((host) != (saved)) \
            LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \
    } while (0)

    /* For checking raw cpu features (raw mode). */
#define CPUID_RAW_FEATURE_RET(set, reg, bit) \
    do { \
        if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
        { \
            if (fStrictCpuIdChecks) \
                return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
                                         N_(#bit " mismatch: host=%d saved=%d"), \
                                         !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) ); \
            LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
                    !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
        } \
    } while (0)
#define CPUID_RAW_FEATURE_WRN(set, reg, bit) \
    do { \
        if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \
            LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \
                    !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \
    } while (0)
#define CPUID_RAW_FEATURE_IGN(set, reg, bit) do { } while (0)

    /* For checking guest features. */
#define CPUID_GST_FEATURE_RET(set, reg, bit) \
    do { \
        if (    (aGuestCpuId##set [1].reg & bit) \
            && !(aHostRaw##set [1].reg & bit) \
            && !(aHostOverride##set [1].reg & bit) \
           ) \
        { \
            if (fStrictCpuIdChecks) \
                return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
                                         N_(#bit " is not supported by the host but has already been exposed to the guest")); \
            LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
        } \
    } while (0)
#define CPUID_GST_FEATURE_WRN(set, reg, bit) \
    do { \
        if (    (aGuestCpuId##set [1].reg & bit) \
            && !(aHostRaw##set [1].reg & bit) \
            && !(aHostOverride##set [1].reg & bit) \
           ) \
            LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
    } while (0)
#define CPUID_GST_FEATURE_EMU(set, reg, bit) \
    do { \
        if (    (aGuestCpuId##set [1].reg & bit) \
            && !(aHostRaw##set [1].reg & bit) \
            && !(aHostOverride##set [1].reg & bit) \
           ) \
            LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
    } while (0)
#define CPUID_GST_FEATURE_IGN(set, reg, bit) do { } while (0)
    /* For checking guest features if AMD guest CPU. */
#define CPUID_GST_AMD_FEATURE_RET(set, reg, bit) \
    do { \
        if (    (aGuestCpuId##set [1].reg & bit) \
            && fGuestAmd \
            && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
            && !(aHostOverride##set [1].reg & bit) \
           ) \
        { \
            if (fStrictCpuIdChecks) \
                return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
                                         N_(#bit " is not supported by the host but has already been exposed to the guest")); \
            LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
        } \
    } while (0)
#define CPUID_GST_AMD_FEATURE_WRN(set, reg, bit) \
    do { \
        if (    (aGuestCpuId##set [1].reg & bit) \
            && fGuestAmd \
            && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
            && !(aHostOverride##set [1].reg & bit) \
           ) \
            LogRel(("CPUM: " #bit " is not supported by the host but has already been exposed to the guest\n")); \
    } while (0)
#define CPUID_GST_AMD_FEATURE_EMU(set, reg, bit) \
    do { \
        if (    (aGuestCpuId##set [1].reg & bit) \
            && fGuestAmd \
            && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
            && !(aHostOverride##set [1].reg & bit) \
           ) \
            LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
    } while (0)
#define CPUID_GST_AMD_FEATURE_IGN(set, reg, bit) do { } while (0)

    /* For checking AMD features which have a corresponding bit in the standard
       range.  (Intel defines very few bits in the extended feature sets.) */
#define CPUID_GST_FEATURE2_RET(reg, ExtBit, StdBit) \
    do { \
        if (    (aGuestCpuIdExt [1].reg & (ExtBit)) \
            && !(fHostAmd \
                 ? aHostRawExt[1].reg & (ExtBit) \
                 : aHostRawStd[1].reg & (StdBit)) \
            && !(aHostOverrideExt[1].reg & (ExtBit)) \
           ) \
        { \
            if (fStrictCpuIdChecks) \
                return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \
                                         N_(#ExtBit " is not supported by the host but has already been exposed to the guest")); \
            LogRel(("CPUM: " #ExtBit " is not supported by the host but has already been exposed to the guest\n")); \
        } \
    } while (0)
#define CPUID_GST_FEATURE2_WRN(reg, ExtBit, StdBit) \
    do { \
        if (    (aGuestCpuIdExt [1].reg & (ExtBit)) \
            && !(fHostAmd \
                 ? aHostRawExt[1].reg & (ExtBit) \
                 : aHostRawStd[1].reg & (StdBit)) \
            && !(aHostOverrideExt[1].reg & (ExtBit)) \
           ) \
            LogRel(("CPUM: " #ExtBit " is not supported by the host but has already been exposed to the guest\n")); \
    } while (0)
#define CPUID_GST_FEATURE2_EMU(reg, ExtBit, StdBit) \
    do { \
        if (    (aGuestCpuIdExt [1].reg & (ExtBit)) \
            && !(fHostAmd \
                 ? aHostRawExt[1].reg & (ExtBit) \
                 : aHostRawStd[1].reg & (StdBit)) \
            && !(aHostOverrideExt[1].reg & (ExtBit)) \
           ) \
            LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
    } while (0)
#define CPUID_GST_FEATURE2_IGN(reg, ExtBit, StdBit) do { } while (0)
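    /* Illustration (not part of the original file): a check such as
     *     CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3)
     * only fires when the saved guest state has the bit set while neither the
     * current host CPUID (aHostRawStd[1].ecx) nor a /CPUM/HostCPUID override
     * (aHostOverrideStd[1].ecx) provides it, i.e. when restoring on a host
     * that lacks a feature the guest has already been shown.  With
     * StrictCpuIdChecks (the default) that fails the load; otherwise it is
     * just logged. */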
    /*
     * Load them into stack buffers first.
     */
    PCPUMCPUIDLEAF paLeaves;
    uint32_t cLeaves;
    int rc = cpumR3LoadCpuIdGuestArrays(pSSM, uVersion, &paLeaves, &cLeaves);
    AssertRCReturn(rc, rc);

    /** @todo we'll be leaking paLeaves on error return... */

    CPUMCPUID GuestCpuIdDef;
    rc = SSMR3GetMem(pSSM, &GuestCpuIdDef, sizeof(GuestCpuIdDef));
    AssertRCReturn(rc, rc);

    CPUMCPUID aRawStd[16];
    uint32_t cRawStd;
    rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc);
    if (cRawStd > RT_ELEMENTS(aRawStd))
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
    AssertRCReturn(rc, rc);
    for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
        ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);

    CPUMCPUID aRawExt[32];
    uint32_t cRawExt;
    rc = SSMR3GetU32(pSSM, &cRawExt); AssertRCReturn(rc, rc);
    if (cRawExt > RT_ELEMENTS(aRawExt))
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
    AssertRCReturn(rc, rc);
    for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);

    /*
     * Get the raw CPU IDs for the current host.
     */
    CPUMCPUID aHostRawStd[16];
    for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++)
        ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx);

    CPUMCPUID aHostRawExt[32];
    for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++)
        ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0,
                       &aHostRawExt[i].eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx);

    /*
     * Get the host and guest overrides so we don't reject the state because
     * some feature was enabled thru these interfaces.
     * Note! We currently only need the feature leaves, so skip rest.
     */
    PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
    CPUMCPUID aHostOverrideStd[2];
    memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
    cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aHostOverrideStd[0], RT_ELEMENTS(aHostOverrideStd), pOverrideCfg);

    CPUMCPUID aHostOverrideExt[2];
    memcpy(&aHostOverrideExt[0], &aHostRawExt[0], sizeof(aHostOverrideExt));
    cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aHostOverrideExt[0], RT_ELEMENTS(aHostOverrideExt), pOverrideCfg);

    /*
     * This can be skipped.
     */
    bool fStrictCpuIdChecks;
    CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"), "StrictCpuIdChecks", &fStrictCpuIdChecks, true);


    /*
     * For raw-mode we'll require that the CPUs are very similar since we don't
     * intercept CPUID instructions for user mode applications.
     */
    if (!HMIsEnabled(pVM))
    {
        /* CPUID(0) */
        CPUID_CHECK_RET(   aHostRawStd[0].ebx == aRawStd[0].ebx
                        && aHostRawStd[0].ecx == aRawStd[0].ecx
                        && aHostRawStd[0].edx == aRawStd[0].edx,
                        (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
                         &aHostRawStd[0].ebx, &aHostRawStd[0].edx, &aHostRawStd[0].ecx,
                         &aRawStd[0].ebx, &aRawStd[0].edx, &aRawStd[0].ecx));
        CPUID_CHECK2_WRN("Std CPUID max leaf",   aHostRawStd[0].eax, aRawStd[0].eax);
        CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3);
        CPUID_CHECK2_WRN("Reserved bits 31:28",  aHostRawExt[1].eax >> 28, aRawExt[1].eax >> 28);

        bool const fIntel = ASMIsIntelCpuEx(aRawStd[0].ebx, aRawStd[0].ecx, aRawStd[0].edx);

        /* CPUID(1).eax */
        CPUID_CHECK2_RET("CPU family",          ASMGetCpuFamily(aHostRawStd[1].eax), ASMGetCpuFamily(aRawStd[1].eax));
        CPUID_CHECK2_RET("CPU model",           ASMGetCpuModel(aHostRawStd[1].eax, fIntel), ASMGetCpuModel(aRawStd[1].eax, fIntel));
        CPUID_CHECK2_WRN("CPU type",            (aHostRawStd[1].eax >> 12) & 3, (aRawStd[1].eax >> 12) & 3 );

        /* CPUID(1).ebx - completely ignore CPU count and APIC ID. */
        CPUID_CHECK2_RET("CPU brand ID",        aHostRawStd[1].ebx & 0xff, aRawStd[1].ebx & 0xff);
        CPUID_CHECK2_WRN("CLFLUSH chunk count", (aHostRawStd[1].ebx >> 8) & 0xff, (aRawStd[1].ebx >> 8) & 0xff);

        /* CPUID(1).ecx */
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL);
        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS);
        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_VMX);
        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_SMX);
        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_EST);
        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TM2);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3);
        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID);
        CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16);
        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);
        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM);
        CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
        CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(17) /*reserved*/);
        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DCA);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2);
        CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT);
        CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(24) /*reserved*/);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX);
        CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(29) /*reserved*/);
        CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(30) /*reserved*/);
        CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_HVP);

        /* CPUID(1).edx */
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC);
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
        CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH);
        CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_DS);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_ACPI);
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX);
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR);
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE);
        CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SS);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_HTT);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_TM);
        CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/);
        CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PBE);

        /* CPUID(2) - config, mostly about caches. ignore. */
        /* CPUID(3) - processor serial number. ignore. */
        /* CPUID(4) - config, cache and topology - takes ECX as input. ignore. */
        /* CPUID(5) - mwait/monitor config. ignore. */
        /* CPUID(6) - power management. ignore. */
        /* CPUID(7) - ???. ignore. */
        /* CPUID(8) - ???. ignore. */
        /* CPUID(9) - DCA. ignore for now. */
        /* CPUID(a) - PeMo info. ignore for now. */
        /* CPUID(b) - topology info - takes ECX as input. ignore. */

        /* CPUID(d) - XCR0 stuff - takes ECX as input. We only warn about the main level (ECX=0) for now. */
        CPUID_CHECK_WRN(   aRawStd[0].eax     <  UINT32_C(0x0000000d)
                        || aHostRawStd[0].eax >= UINT32_C(0x0000000d),
                        ("CPUM: Standard leaf D was present on saved state host, not present on current.\n"));
        if (   aRawStd[0].eax     >= UINT32_C(0x0000000d)
            && aHostRawStd[0].eax >= UINT32_C(0x0000000d))
        {
            CPUID_CHECK2_WRN("Valid low XCR0 bits",            aHostRawStd[0xd].eax, aRawStd[0xd].eax);
            CPUID_CHECK2_WRN("Valid high XCR0 bits",           aHostRawStd[0xd].edx, aRawStd[0xd].edx);
            CPUID_CHECK2_WRN("Current XSAVE/XRSTOR area size", aHostRawStd[0xd].ebx, aRawStd[0xd].ebx);
            CPUID_CHECK2_WRN("Max XSAVE/XRSTOR area size",     aHostRawStd[0xd].ecx, aRawStd[0xd].ecx);
        }

        /* CPUID(0x80000000) - same as CPUID(0) except for eax.
           Note! Intel have/is marking many of the fields here as reserved. We
                 will verify them as if it's an AMD CPU. */
        CPUID_CHECK_RET(   (aHostRawExt[0].eax >= UINT32_C(0x80000001) && aHostRawExt[0].eax <= UINT32_C(0x8000007f))
                        || !(aRawExt[0].eax    >= UINT32_C(0x80000001) && aRawExt[0].eax     <= UINT32_C(0x8000007f)),
                        (N_("Extended leaves was present on saved state host, but is missing on the current\n")));
        if (aRawExt[0].eax >= UINT32_C(0x80000001) && aRawExt[0].eax <= UINT32_C(0x8000007f))
        {
            CPUID_CHECK_RET(   aHostRawExt[0].ebx == aRawExt[0].ebx
                            && aHostRawExt[0].ecx == aRawExt[0].ecx
                            && aHostRawExt[0].edx == aRawExt[0].edx,
                            (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"),
                             &aHostRawExt[0].ebx, &aHostRawExt[0].edx, &aHostRawExt[0].ecx,
                             &aRawExt[0].ebx, &aRawExt[0].edx, &aRawExt[0].ecx));
            CPUID_CHECK2_WRN("Ext CPUID max leaf",  aHostRawExt[0].eax, aRawExt[0].eax);

            /* CPUID(0x80000001).eax - same as CPUID(0).eax. */
            CPUID_CHECK2_RET("CPU family",          ASMGetCpuFamily(aHostRawExt[1].eax), ASMGetCpuFamily(aRawExt[1].eax));
            CPUID_CHECK2_RET("CPU model",           ASMGetCpuModel(aHostRawExt[1].eax, fIntel), ASMGetCpuModel(aRawExt[1].eax, fIntel));
            CPUID_CHECK2_WRN("CPU type",            (aHostRawExt[1].eax >> 12) & 3, (aRawExt[1].eax >> 12) & 3 );
            CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3 );
            CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1].eax >> 28, aRawExt[1].eax >> 28);

            /* CPUID(0x80000001).ebx - Brand ID (maybe), just warn if things differ. */
            CPUID_CHECK2_WRN("CPU BrandID",         aHostRawExt[1].ebx & 0xffff, aRawExt[1].ebx & 0xffff);
            CPUID_CHECK2_WRN("Reserved bits 16:27", (aHostRawExt[1].ebx >> 16) & 0xfff, (aRawExt[1].ebx >> 16) & 0xfff);
            CPUID_CHECK2_WRN("PkgType",             (aHostRawExt[1].ebx >> 28) &   0xf, (aRawExt[1].ebx >> 28) &   0xf);

            /* CPUID(0x80000001).ecx */
            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);
            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);
            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);
            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L);
            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM);
            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);
            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);
            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);
            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW);
            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS);
            CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5);
            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);
            CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT);
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
            CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));

            /* CPUID(0x80000001).edx */
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FPU);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_VME);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_DE);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_TSC);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MSR);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAE);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCE);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CX8);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_APIC);
            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(10) /*reserved*/);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SEP);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MTRR);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PGE);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCA);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CMOV);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAT);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE36);
            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(18) /*reserved*/);
            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(19) /*reserved*/);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(21) /*reserved*/);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MMX);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FXSR);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
            CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(28) /*reserved*/);
            CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
            CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);

            /** @todo verify the rest as well. */
        }
    }

    /*
     * Verify that we can support the features already exposed to the guest on
     * this host.
     *
     * Most of the features we're emulating require intercepting instructions
     * and doing it the slow way, so there is no need to warn when they aren't
     * present in the host CPU.  Thus we use IGN instead of EMU on these.
     *
     * Trailing comments:
     *      "EMU"  - Possible to emulate, could be lots of work and very slow.
     *      "EMU?" - Can this be emulated?
     */
    CPUMCPUID aGuestCpuIdStd[2];
    RT_ZERO(aGuestCpuIdStd);
    cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]);

    /* CPUID(1).ecx */
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);     // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL);   // -> EMU?
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64);   // -> EMU?
    CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS);    // -> EMU?
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_VMX);      // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SMX);      // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_EST);      // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TM2);      // -> EMU?
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3);    // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID);   // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA);      // -> EMU? what's this?
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16);     // -> EMU?
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM);     // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);
    CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(17) /*reserved*/);
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DCA);      // -> EMU?
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1);   // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2);   // -> EMU
    CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE);    // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT);   // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(24) /*reserved*/);
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES);      // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE);    // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE);  // -> EMU
    CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX);      // -> EMU?
    CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(29) /*reserved*/);
    CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(30) /*reserved*/);
    CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_HVP);      // Normally not set by host

    /* CPUID(1).edx */
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE);       // -> EMU?
    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC);      // -> EMU
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR);      // -> EMU
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PAE);
    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8);      // -> EMU?
    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);
    CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);
    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);
    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);
    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);
    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV);     // -> EMU
    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);
    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);
    CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH);    // -> EMU
    CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DS);       // -> EMU?
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_ACPI);     // -> EMU?
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX);      // -> EMU
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR);     // -> EMU
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE);      // -> EMU
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2);     // -> EMU
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SS);       // -> EMU?
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_HTT);      // -> EMU?
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TM);       // -> EMU?
    CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/);    // -> EMU
    CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PBE);      // -> EMU?

    /* CPUID(0x80000000). */
    CPUMCPUID aGuestCpuIdExt[2];
    RT_ZERO(aGuestCpuIdExt);
    if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1]))
    {
        /** @todo deal with no 0x80000001 on the host. */
        bool const fHostAmd  = ASMIsAmdCpuEx(aHostRawStd[0].ebx, aHostRawStd[0].ecx, aHostRawStd[0].edx);
        bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].ebx, aGuestCpuIdExt[0].ecx, aGuestCpuIdExt[0].edx);

        /* CPUID(0x80000001).ecx */
        CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);    // -> EMU
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);     // -> EMU
        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);      // -> EMU
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC); // ???
        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L);     // -> EMU
        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM);      // -> EMU
        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);    // -> EMU
        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU
        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF); // -> EMU
        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW);     // -> EMU?
        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS);      // -> EMU
        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5);     // -> EMU
        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);   // -> EMU
        CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT);      // -> EMU
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));
        CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));

        /* CPUID(0x80000001).edx */
        CPUID_GST_FEATURE2_RET(     edx, X86_CPUID_AMD_FEATURE_EDX_FPU,   X86_CPUID_FEATURE_EDX_FPU);   // -> EMU
        CPUID_GST_FEATURE2_RET(     edx, X86_CPUID_AMD_FEATURE_EDX_VME,   X86_CPUID_FEATURE_EDX_VME);   // -> EMU
        CPUID_GST_FEATURE2_RET(     edx, X86_CPUID_AMD_FEATURE_EDX_DE,    X86_CPUID_FEATURE_EDX_DE);    // -> EMU
        CPUID_GST_FEATURE2_IGN(     edx, X86_CPUID_AMD_FEATURE_EDX_PSE,   X86_CPUID_FEATURE_EDX_PSE);
        CPUID_GST_FEATURE2_RET(     edx, X86_CPUID_AMD_FEATURE_EDX_TSC,   X86_CPUID_FEATURE_EDX_TSC);   // -> EMU
        CPUID_GST_FEATURE2_RET(     edx, X86_CPUID_AMD_FEATURE_EDX_MSR,   X86_CPUID_FEATURE_EDX_MSR);   // -> EMU
        CPUID_GST_FEATURE2_RET(     edx, X86_CPUID_AMD_FEATURE_EDX_PAE,   X86_CPUID_FEATURE_EDX_PAE);
        CPUID_GST_FEATURE2_IGN(     edx, X86_CPUID_AMD_FEATURE_EDX_MCE,   X86_CPUID_FEATURE_EDX_MCE);
        CPUID_GST_FEATURE2_RET(     edx, X86_CPUID_AMD_FEATURE_EDX_CX8,   X86_CPUID_FEATURE_EDX_CX8);   // -> EMU?
        CPUID_GST_FEATURE2_IGN(     edx, X86_CPUID_AMD_FEATURE_EDX_APIC,  X86_CPUID_FEATURE_EDX_APIC);
        CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(10) /*reserved*/);
        CPUID_GST_FEATURE_IGN(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL);                         // On Intel: long mode only.
        CPUID_GST_FEATURE2_IGN(     edx, X86_CPUID_AMD_FEATURE_EDX_MTRR,  X86_CPUID_FEATURE_EDX_MTRR);
        CPUID_GST_FEATURE2_IGN(     edx, X86_CPUID_AMD_FEATURE_EDX_PGE,   X86_CPUID_FEATURE_EDX_PGE);
        CPUID_GST_FEATURE2_IGN(     edx, X86_CPUID_AMD_FEATURE_EDX_MCA,   X86_CPUID_FEATURE_EDX_MCA);
        CPUID_GST_FEATURE2_RET(     edx, X86_CPUID_AMD_FEATURE_EDX_CMOV,  X86_CPUID_FEATURE_EDX_CMOV);  // -> EMU
        CPUID_GST_FEATURE2_IGN(     edx, X86_CPUID_AMD_FEATURE_EDX_PAT,   X86_CPUID_FEATURE_EDX_PAT);
        CPUID_GST_FEATURE2_IGN(     edx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);
        CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(18) /*reserved*/);
        CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(19) /*reserved*/);
        CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);
        CPUID_GST_FEATURE_WRN(    Ext, edx, RT_BIT_32(21) /*reserved*/);
        CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);
        CPUID_GST_FEATURE2_RET(     edx, X86_CPUID_AMD_FEATURE_EDX_MMX,   X86_CPUID_FEATURE_EDX_MMX);   // -> EMU
        CPUID_GST_FEATURE2_RET(     edx, X86_CPUID_AMD_FEATURE_EDX_FXSR,  X86_CPUID_FEATURE_EDX_FXSR);  // -> EMU
        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);
        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);
        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
        CPUID_GST_FEATURE_IGN(    Ext, edx, RT_BIT_32(28) /*reserved*/);
        CPUID_GST_FEATURE_RET(    Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);
        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
        CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);
    }

    /*
     * We're good, commit the CPU ID leaves.
     */
    MMHyperFree(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
    pVM->cpum.s.GuestInfo.paCpuIdLeavesR0 = NIL_RTR0PTR;
    pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR;
    pVM->cpum.s.GuestInfo.DefCpuId        = GuestCpuIdDef;
    rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves);
    RTMemFree(paLeaves);
    AssertLogRelRCReturn(rc, rc);


#undef CPUID_CHECK_RET
#undef CPUID_CHECK_WRN
#undef CPUID_CHECK2_RET
#undef CPUID_CHECK2_WRN
#undef CPUID_RAW_FEATURE_RET
#undef CPUID_RAW_FEATURE_WRN
#undef CPUID_RAW_FEATURE_IGN
#undef CPUID_GST_FEATURE_RET
#undef CPUID_GST_FEATURE_WRN
#undef CPUID_GST_FEATURE_EMU
#undef CPUID_GST_FEATURE_IGN
#undef CPUID_GST_FEATURE2_RET
#undef CPUID_GST_FEATURE2_WRN
#undef CPUID_GST_FEATURE2_EMU
#undef CPUID_GST_FEATURE2_IGN
#undef CPUID_GST_AMD_FEATURE_RET
#undef CPUID_GST_AMD_FEATURE_WRN
#undef CPUID_GST_AMD_FEATURE_EMU
#undef CPUID_GST_AMD_FEATURE_IGN

    return VINF_SUCCESS;
}
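Every CPUID_GST_* feature macro above boils down to the same three-way test: the restore only complains when the guest was already seeing a feature bit that neither the current host CPU nor a /CPUM/HostCPUID override supplies. A minimal stand-alone sketch of that predicate (editor's illustration only; checkGuestFeatureBit and its parameters are hypothetical names, not part of the source):

/* Mirrors the _RET/_WRN macro pattern: fail a strict restore, otherwise warn. */
static int checkGuestFeatureBit(bool fGuestHas, bool fHostHas, bool fOverrideHas, bool fStrict)
{
    if (fGuestHas && !fHostHas && !fOverrideHas)
    {
        if (fStrict)
            return VERR_SSM_LOAD_CPUID_MISMATCH;   /* the _RET variants */
        /* the _WRN/_EMU variants would LogRel() here and carry on */
    }
    return VINF_SUCCESS;
}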
-
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r52106 r54561

/*
 * Copyright (C) 2013-2015 Oracle Corporation      (year range updated from 2013-2014)
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
…

#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/hm.h>    /* added by this changeset */
#include <VBox/vmm/ssm.h>   /* added by this changeset */
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>
…
}


/*
 *
 * Init related code.
 * Init related code.
 * Init related code.
 *
 *
 */
#ifdef VBOX_IN_VMM

/**
 * Loads MSR range overrides.
 *
 * This must be called before the MSR ranges are moved from the normal heap to
 * the hyper heap!
 *
 * @returns VBox status code (VMSetError called).
 * @param   pVM         Pointer to the cross context VM structure.
 * @param   pMsrNode    The CFGM node with the MSR overrides.
 */
static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode)
{
    for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode))
    {
        /*
         * Assemble a valid MSR range.
         */
        CPUMMSRRANGE MsrRange;
        MsrRange.offCpumCpu = 0;
        MsrRange.fReserved  = 0;

        int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName));
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc);

        rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n",
                              MsrRange.szName, rc);

        rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n",
                              MsrRange.szName, rc);

        char szType[32];
        rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue");
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n",
                              MsrRange.szName, rc);
        if (!RTStrICmp(szType, "FixedValue"))
        {
            MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue;
            MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite;

            rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uValue, 0);
            if (RT_FAILURE(rc))
                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n",
                                  MsrRange.szName, rc);

            rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0);
            if (RT_FAILURE(rc))
                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n",
                                  MsrRange.szName, rc);

            rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0);
            if (RT_FAILURE(rc))
                return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n",
                                  MsrRange.szName, rc);
        }
        else
            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
                              "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType);

        /*
         * Insert the range into the table (replaces/splits/shrinks existing
         * MSR ranges).
         */
        rc = cpumR3MsrRangesInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges,
                                   &MsrRange);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc);
    }

    return VINF_SUCCESS;
}
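For illustration, the CFGM subtree consumed by cpumR3LoadMsrOverrides() could be built programmatically like this (a sketch only: the node name "MyFixedMsr" and the MSR number are invented, and the CFGMR3Insert* calls are assumed to have their usual signatures; only the First/Last/Type/Value/WrGpMask/WrIgnMask keys are actually queried by the function above):

/* Builds /CPUM/MSRs/MyFixedMsr/ describing one fixed-value, write-ignoring MSR. */
PCFGMNODE pMsrs = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/MSRs");
PCFGMNODE pMyMsr;
int rc = CFGMR3InsertNode(pMsrs, "MyFixedMsr", &pMyMsr);
if (RT_SUCCESS(rc))
    rc = CFGMR3InsertInteger(pMyMsr, "First", UINT32_C(0x00001234));  /* hypothetical MSR number */
if (RT_SUCCESS(rc))
    rc = CFGMR3InsertInteger(pMyMsr, "Value", UINT64_C(0x1));
/* "Last" defaults to "First", "Type" to "FixedValue", and both write masks
   to zero, per the CFGMR3Query*Def defaults in the function above. */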
1633 */ 1634 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves, 1635 uLeaf, uSubLeaf); 1636 CPUMCPUIDLEAF Leaf; 1637 if (pLeaf) 1638 Leaf = *pLeaf; 1639 else 1640 RT_ZERO(Leaf); 1641 Leaf.uLeaf = uLeaf; 1642 Leaf.uSubLeaf = uSubLeaf; 1643 Leaf.fSubLeafMask = fSubLeafMask; 1644 1645 rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax); 1646 if (RT_FAILURE(rc)) 1647 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n", 1648 pszLabel, szName, rc); 1649 rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx); 1650 if (RT_FAILURE(rc)) 1651 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n", 1652 pszLabel, szName, rc); 1653 rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx); 1654 if (RT_FAILURE(rc)) 1655 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n", 1656 pszLabel, szName, rc); 1657 rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx); 1658 if (RT_FAILURE(rc)) 1659 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n", 1660 pszLabel, szName, rc); 1661 1662 /* 1663 * Insert the leaf into the table (replaces existing ones). 1664 */ 1665 rc = cpumR3CpuIdInsert(NULL /* pVM */, &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves, 1666 &Leaf); 1667 if (RT_FAILURE(rc)) 1668 return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc); 1669 } 1670 1671 return VINF_SUCCESS; 1672 } 1673 1674 1675 1676 /** 1677 * Fetches overrides for a CPUID leaf. 1678 * 1679 * @returns VBox status code. 1680 * @param pLeaf The leaf to load the overrides into. 1681 * @param pCfgNode The CFGM node containing the overrides 1682 * (/CPUM/HostCPUID/ or /CPUM/CPUID/). 1683 * @param iLeaf The CPUID leaf number. 1684 */ 1685 static int cpumR3CpuIdFetchLeafOverride(PCPUMCPUID pLeaf, PCFGMNODE pCfgNode, uint32_t iLeaf) 1686 { 1687 PCFGMNODE pLeafNode = CFGMR3GetChildF(pCfgNode, "%RX32", iLeaf); 1688 if (pLeafNode) 1689 { 1690 uint32_t u32; 1691 int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32); 1692 if (RT_SUCCESS(rc)) 1693 pLeaf->eax = u32; 1694 else 1695 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc); 1696 1697 rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32); 1698 if (RT_SUCCESS(rc)) 1699 pLeaf->ebx = u32; 1700 else 1701 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc); 1702 1703 rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32); 1704 if (RT_SUCCESS(rc)) 1705 pLeaf->ecx = u32; 1706 else 1707 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc); 1708 1709 rc = CFGMR3QueryU32(pLeafNode, "edx", &u32); 1710 if (RT_SUCCESS(rc)) 1711 pLeaf->edx = u32; 1712 else 1713 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc); 1714 1715 } 1716 return VINF_SUCCESS; 1717 } 1718 1719 1720 /** 1721 * Load the overrides for a set of CPUID leaves. 1722 * 1723 * @returns VBox status code. 1724 * @param paLeaves The leaf array. 1725 * @param cLeaves The number of leaves. 1726 * @param uStart The start leaf number. 1727 * @param pCfgNode The CFGM node containing the overrides 1728 * (/CPUM/HostCPUID/ or /CPUM/CPUID/). 
1729 */ 1730 static int cpumR3CpuIdInitLoadOverrideSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode) 1731 { 1732 for (uint32_t i = 0; i < cLeaves; i++) 1733 { 1734 int rc = cpumR3CpuIdFetchLeafOverride(&paLeaves[i], pCfgNode, uStart + i); 1735 if (RT_FAILURE(rc)) 1736 return rc; 1737 } 1738 1739 return VINF_SUCCESS; 1740 } 1741 1742 /** 1743 * Init a set of host CPUID leaves. 1744 * 1745 * @returns VBox status code. 1746 * @param paLeaves The leaf array. 1747 * @param cLeaves The number of leaves. 1748 * @param uStart The start leaf number. 1749 * @param pCfgNode The /CPUM/HostCPUID/ node. 1750 */ 1751 static int cpumR3CpuIdInitHostSet(uint32_t uStart, PCPUMCPUID paLeaves, uint32_t cLeaves, PCFGMNODE pCfgNode) 1752 { 1753 /* Using the ECX variant for all of them can't hurt... */ 1754 for (uint32_t i = 0; i < cLeaves; i++) 1755 ASMCpuIdExSlow(uStart + i, 0, 0, 0, &paLeaves[i].eax, &paLeaves[i].ebx, &paLeaves[i].ecx, &paLeaves[i].edx); 1756 1757 /* Load CPUID leaf override; we currently don't care if the user 1758 specifies features the host CPU doesn't support. */ 1759 return cpumR3CpuIdInitLoadOverrideSet(uStart, paLeaves, cLeaves, pCfgNode); 1760 } 1761 1762 1763 static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCPUM, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves) 1764 { 1765 /* 1766 * Install the CPUID information. 1767 */ 1768 int rc = MMHyperDupMem(pVM, paLeaves, sizeof(paLeaves[0]) * cLeaves, 32, 1769 MM_TAG_CPUM_CPUID, (void **)&pCPUM->GuestInfo.paCpuIdLeavesR3); 1770 1771 AssertLogRelRCReturn(rc, rc); 1772 1773 1774 pCPUM->GuestInfo.paCpuIdLeavesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3); 1775 pCPUM->GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3); 1776 Assert(MMHyperR0ToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesR0) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3); 1777 Assert(MMHyperRCToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesRC) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3); 1778 1779 /* 1780 * Explode the guest CPU features. 1781 */ 1782 rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures); 1783 AssertLogRelRCReturn(rc, rc); 1784 1785 /* 1786 * Adjust the scalable bus frequency according to the CPUID information 1787 * we're now using. 1788 */ 1789 if (CPUMMICROARCH_IS_INTEL_CORE7(pVM->cpum.s.GuestFeatures.enmMicroarch)) 1790 pCPUM->GuestInfo.uScalableBusFreq = pCPUM->GuestFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_SandyBridge 1791 ? UINT64_C(100000000) /* 100MHz */ 1792 : UINT64_C(133333333); /* 133MHz */ 1793 1794 /* 1795 * Populate the legacy arrays. Currently used for everything, later only 1796 * for patch manager. 
1797 */ 1798 struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] = 1799 { 1800 { pCPUM->aGuestCpuIdStd, RT_ELEMENTS(pCPUM->aGuestCpuIdStd), 0x00000000 }, 1801 { pCPUM->aGuestCpuIdExt, RT_ELEMENTS(pCPUM->aGuestCpuIdExt), 0x80000000 }, 1802 { pCPUM->aGuestCpuIdCentaur, RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), 0xc0000000 }, 1803 }; 1804 for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++) 1805 { 1806 uint32_t cLeft = aOldRanges[i].cCpuIds; 1807 uint32_t uLeaf = aOldRanges[i].uBase + cLeft; 1808 PCPUMCPUID pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft]; 1809 while (cLeft-- > 0) 1810 { 1811 uLeaf--; 1812 pLegacyLeaf--; 1813 1814 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, uLeaf, 1815 0 /* uSubLeaf */); 1816 if (pLeaf) 1817 { 1818 pLegacyLeaf->eax = pLeaf->uEax; 1819 pLegacyLeaf->ebx = pLeaf->uEbx; 1820 pLegacyLeaf->ecx = pLeaf->uEcx; 1821 pLegacyLeaf->edx = pLeaf->uEdx; 1822 } 1823 else 1824 *pLegacyLeaf = pCPUM->GuestInfo.DefCpuId; 1825 } 1826 } 1827 1828 pCPUM->GuestCpuIdDef = pCPUM->GuestInfo.DefCpuId; 1829 1830 return VINF_SUCCESS; 1831 } 1832 1833 1834 /** 1835 * Initializes the emulated CPU's cpuid information. 1836 * 1837 * @returns VBox status code. 1838 * @param pVM Pointer to the VM. 1839 */ 1840 int cpumR3CpuIdInit(PVM pVM) 1841 { 1842 PCPUM pCPUM = &pVM->cpum.s; 1843 PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"); 1844 int rc; 1845 1846 #define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \ 1847 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \ 1848 { \ 1849 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \ 1850 (a_pLeafReg) &= ~(uint32_t)(fMask); \ 1851 } 1852 #define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \ 1853 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \ 1854 { \ 1855 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \ 1856 (a_pLeafReg) &= ~(uint32_t)(fBitMask); \ 1857 } 1858 1859 /* 1860 * Read the configuration. 1861 */ 1862 /** @cfgm{/CPUM/SyntheticCpu, boolean, false} 1863 * Enables the Synthetic CPU. The Vendor ID and Processor Name are 1864 * completely overridden by VirtualBox custom strings. Some 1865 * CPUID information is withheld, like the cache info. 1866 * 1867 * This is obsoleted by PortableCpuIdLevel. */ 1868 bool fSyntheticCpu; 1869 rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &fSyntheticCpu, false); 1870 AssertRCReturn(rc, rc); 1871 1872 /** @cfgm{/CPUM/PortableCpuIdLevel, 8-bit, 0, 3, 0} 1873 * When non-zero CPUID features that could cause portability issues will be 1874 * stripped. The higher the value the more features gets stripped. Higher 1875 * values should only be used when older CPUs are involved since it may 1876 * harm performance and maybe also cause problems with specific guests. */ 1877 rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, fSyntheticCpu ? 1 : 0); 1878 AssertLogRelRCReturn(rc, rc); 1879 1880 /** @cfgm{/CPUM/GuestCpuName, string} 1881 * The name of the CPU we're to emulate. The default is the host CPU. 1882 * Note! CPUs other than "host" one is currently unsupported. 
    char szCpuName[128];
    rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", szCpuName, sizeof(szCpuName), "host");
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/CMPXCHG16B, boolean, false}
     * Expose CMPXCHG16B to the guest if supported by the host.
     */
    bool fCmpXchg16b;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/MONITOR, boolean, true}
     * Expose MONITOR/MWAIT instructions to the guest.
     */
    bool fMonitor;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/MWaitExtensions, boolean, false}
     * Expose MWAIT extended features to the guest.  For now we expose just the
     * MWAIT break on interrupt feature (bit 1).
     */
    bool fMWaitExtensions;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/SSE4.1, boolean, true}
     * Expose SSE4.1 to the guest if available.
     */
    bool fSse41;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "SSE4.1", &fSse41, true);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/SSE4.2, boolean, true}
     * Expose SSE4.2 to the guest if available.
     */
    bool fSse42;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "SSE4.2", &fSse42, true);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/NT4LeafLimit, boolean, false}
     * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from
     * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e).
     * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22].
     */
    bool fNt4LeafLimit;
    rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false);
    AssertLogRelRCReturn(rc, rc);

    /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX}
     * Restrict the reported CPU family+model+stepping of Intel CPUs.  This is
     * probably going to be a temporary hack, so don't depend on this.
     * The 1st byte of the value is the stepping, the 2nd byte value is the model
     * number and the 3rd byte value is the family, and the 4th value must be zero.
     */
    uint32_t uMaxIntelFamilyModelStep;
    rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX);
    AssertLogRelRCReturn(rc, rc);
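    /* Editor's illustration of the byte layout just described (made-up numbers):
       capping at family 6 (0x06), model 23 (0x17), stepping 10 (0x0a) - a
       Penryn-era Core 2 - would be
           uMaxIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(0x0a, 0x17, 0x06, 0);
       i.e. 0x0006170a: stepping in the 1st byte, model in the 2nd, family in
       the 3rd, and the mandatory zero in the 4th.  This matches the
       RT_MAKE_U32_FROM_U8() packing applied further down. */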
    /*
     * Get the guest CPU data from the database and/or the host.
     */
    rc = cpumR3DbGetCpuInfo(szCpuName, &pCPUM->GuestInfo);
    if (RT_FAILURE(rc))
        return rc == VERR_CPUM_DB_CPU_NOT_FOUND
             ? VMSetError(pVM, rc, RT_SRC_POS,
                          "Info on guest CPU '%s' could not be found. Please, select a different CPU.", szCpuName)
             : rc;

    /** @cfgm{/CPUM/MSRs/[Name]/[First|Last|Type|Value|...],}
     * Overrides the guest MSRs.
     */
    rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs"));

    /** @cfgm{/CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit}
     * Overrides the CPUID leaf values (from the host CPU usually) used for
     * calculating the guest CPUID leaves.  This can be used to preserve the CPUID
     * values when moving a VM to a different machine.  Another use is restricting
     * (or extending) the feature set exposed to the guest. */
    if (RT_SUCCESS(rc))
        rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID");

    if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */
        rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS,
                        "Found unsupported configuration node '/CPUM/CPUID/'. "
                        "Please use IMachine::setCPUIDLeaf() instead.");

    /*
     * Pre-explode the CPUID info.
     */
    if (RT_SUCCESS(rc))
        rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures);
    if (RT_FAILURE(rc))
    {
        RTMemFree(pCPUM->GuestInfo.paCpuIdLeavesR3);
        pCPUM->GuestInfo.paCpuIdLeavesR3 = NULL;
        RTMemFree(pCPUM->GuestInfo.paMsrRangesR3);
        pCPUM->GuestInfo.paMsrRangesR3 = NULL;
        return rc;
    }


    /* ... split this function about here ... */


    /* Cpuid 1:
     * Only report features we can support.
     *
     * Note! When enabling new features the Synthetic CPU and Portable CPUID
     *       options may require adjusting (i.e. stripping what was enabled).
     */
    PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
                                                        1, 0); /* Note! Must refetch when used later. */
    AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2);
    pStdFeatureLeaf->uEdx &= X86_CPUID_FEATURE_EDX_FPU
                           | X86_CPUID_FEATURE_EDX_VME
                           | X86_CPUID_FEATURE_EDX_DE
                           | X86_CPUID_FEATURE_EDX_PSE
                           | X86_CPUID_FEATURE_EDX_TSC
                           | X86_CPUID_FEATURE_EDX_MSR
                           //| X86_CPUID_FEATURE_EDX_PAE   - set later if configured.
                           | X86_CPUID_FEATURE_EDX_MCE
                           | X86_CPUID_FEATURE_EDX_CX8
                           //| X86_CPUID_FEATURE_EDX_APIC  - set by the APIC device if present.
                           /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see @bugref{1757}) */
                           //| X86_CPUID_FEATURE_EDX_SEP
                           | X86_CPUID_FEATURE_EDX_MTRR
                           | X86_CPUID_FEATURE_EDX_PGE
                           | X86_CPUID_FEATURE_EDX_MCA
                           | X86_CPUID_FEATURE_EDX_CMOV
                           | X86_CPUID_FEATURE_EDX_PAT
                           | X86_CPUID_FEATURE_EDX_PSE36
                           //| X86_CPUID_FEATURE_EDX_PSN   - no serial number.
                           | X86_CPUID_FEATURE_EDX_CLFSH
                           //| X86_CPUID_FEATURE_EDX_DS    - no debug store.
                           //| X86_CPUID_FEATURE_EDX_ACPI  - not virtualized yet.
                           | X86_CPUID_FEATURE_EDX_MMX
                           | X86_CPUID_FEATURE_EDX_FXSR
                           | X86_CPUID_FEATURE_EDX_SSE
                           | X86_CPUID_FEATURE_EDX_SSE2
                           //| X86_CPUID_FEATURE_EDX_SS    - no self snoop.
                           //| X86_CPUID_FEATURE_EDX_HTT   - no hyperthreading.
                           //| X86_CPUID_FEATURE_EDX_TM    - no thermal monitor.
                           //| X86_CPUID_FEATURE_EDX_PBE   - no pending break enabled.
                           | 0;
    pStdFeatureLeaf->uEcx &= 0
                           | X86_CPUID_FEATURE_ECX_SSE3
                           /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. */
                           | ((fMonitor && pVM->cCpus == 1) ? X86_CPUID_FEATURE_ECX_MONITOR : 0)
                           //| X86_CPUID_FEATURE_ECX_CPLDS - no CPL qualified debug store.
                           //| X86_CPUID_FEATURE_ECX_VMX   - not virtualized.
                           //| X86_CPUID_FEATURE_ECX_EST   - no extended speed step.
                           //| X86_CPUID_FEATURE_ECX_TM2   - no thermal monitor 2.
                           | X86_CPUID_FEATURE_ECX_SSSE3
                           //| X86_CPUID_FEATURE_ECX_CNTXID - no L1 context id (MSR++).
                           | (fCmpXchg16b ? X86_CPUID_FEATURE_ECX_CX16 : 0)
                           /* ECX Bit 14 - xTPR Update Control. Processor supports changing IA32_MISC_ENABLES[bit 23]. */
                           //| X86_CPUID_FEATURE_ECX_TPRUPDATE
                           | (fSse41 ? X86_CPUID_FEATURE_ECX_SSE4_1 : 0)
                           | (fSse42 ? X86_CPUID_FEATURE_ECX_SSE4_2 : 0)
                           /* ECX Bit 21 - x2APIC support - not yet. */
                           // | X86_CPUID_FEATURE_ECX_X2APIC
                           /* ECX Bit 23 - POPCNT instruction. */
                           //| X86_CPUID_FEATURE_ECX_POPCNT
                           | 0;
    if (pCPUM->u8PortableCpuIdLevel > 0)
    {
        PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));
        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSSE3,  X86_CPUID_FEATURE_ECX_SSSE3);
        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE3,   X86_CPUID_FEATURE_ECX_SSE3);
        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE4_1, X86_CPUID_FEATURE_ECX_SSE4_1);
        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE4_2, X86_CPUID_FEATURE_ECX_SSE4_2);
        PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, CX16,   X86_CPUID_FEATURE_ECX_CX16);
        PORTABLE_DISABLE_FEATURE_BIT(2, pStdFeatureLeaf->uEdx, SSE2,   X86_CPUID_FEATURE_EDX_SSE2);
        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, SSE,    X86_CPUID_FEATURE_EDX_SSE);
        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CLFSH,  X86_CPUID_FEATURE_EDX_CLFSH);
        PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CMOV,   X86_CPUID_FEATURE_EDX_CMOV);

        Assert(!(pStdFeatureLeaf->uEdx & ( X86_CPUID_FEATURE_EDX_SEP
                                         | X86_CPUID_FEATURE_EDX_PSN
                                         | X86_CPUID_FEATURE_EDX_DS
                                         | X86_CPUID_FEATURE_EDX_ACPI
                                         | X86_CPUID_FEATURE_EDX_SS
                                         | X86_CPUID_FEATURE_EDX_TM
                                         | X86_CPUID_FEATURE_EDX_PBE
                                         )));
        Assert(!(pStdFeatureLeaf->uEcx & ( X86_CPUID_FEATURE_ECX_PCLMUL
                                         | X86_CPUID_FEATURE_ECX_DTES64
                                         | X86_CPUID_FEATURE_ECX_CPLDS
                                         | X86_CPUID_FEATURE_ECX_VMX
                                         | X86_CPUID_FEATURE_ECX_SMX
                                         | X86_CPUID_FEATURE_ECX_EST
                                         | X86_CPUID_FEATURE_ECX_TM2
                                         | X86_CPUID_FEATURE_ECX_CNTXID
                                         | X86_CPUID_FEATURE_ECX_FMA
                                         | X86_CPUID_FEATURE_ECX_CX16
                                         | X86_CPUID_FEATURE_ECX_TPRUPDATE
                                         | X86_CPUID_FEATURE_ECX_PDCM
                                         | X86_CPUID_FEATURE_ECX_DCA
                                         | X86_CPUID_FEATURE_ECX_MOVBE
                                         | X86_CPUID_FEATURE_ECX_AES
                                         | X86_CPUID_FEATURE_ECX_POPCNT
                                         | X86_CPUID_FEATURE_ECX_XSAVE
                                         | X86_CPUID_FEATURE_ECX_OSXSAVE
                                         | X86_CPUID_FEATURE_ECX_AVX
                                         )));
    }

    /* Cpuid 0x80000001:
     * Only report features we can support.
     *
     * Note! When enabling new features the Synthetic CPU and Portable CPUID
     *       options may require adjusting (i.e. stripping what was enabled).
     *
     * ASSUMES that this is ALWAYS the AMD defined feature set if present.
     */
    PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves,
                                                        UINT32_C(0x80000001), 0); /* Note! Must refetch when used later. */
    if (pExtFeatureLeaf)
    {
        pExtFeatureLeaf->uEdx &= X86_CPUID_AMD_FEATURE_EDX_FPU
                               | X86_CPUID_AMD_FEATURE_EDX_VME
                               | X86_CPUID_AMD_FEATURE_EDX_DE
                               | X86_CPUID_AMD_FEATURE_EDX_PSE
                               | X86_CPUID_AMD_FEATURE_EDX_TSC
                               | X86_CPUID_AMD_FEATURE_EDX_MSR //?? this means AMD MSRs..
                               //| X86_CPUID_AMD_FEATURE_EDX_PAE    - not implemented yet.
                               //| X86_CPUID_AMD_FEATURE_EDX_MCE    - not virtualized yet.
                               | X86_CPUID_AMD_FEATURE_EDX_CX8
                               //| X86_CPUID_AMD_FEATURE_EDX_APIC   - set by the APIC device if present.
                               /* Note! we don't report sysenter/sysexit support due to our inability to keep the IOPL part of eflags in sync while in ring 1 (see @bugref{1757}) */
                               //| X86_CPUID_EXT_FEATURE_EDX_SEP
                               | X86_CPUID_AMD_FEATURE_EDX_MTRR
                               | X86_CPUID_AMD_FEATURE_EDX_PGE
                               | X86_CPUID_AMD_FEATURE_EDX_MCA
                               | X86_CPUID_AMD_FEATURE_EDX_CMOV
                               | X86_CPUID_AMD_FEATURE_EDX_PAT
                               | X86_CPUID_AMD_FEATURE_EDX_PSE36
                               //| X86_CPUID_EXT_FEATURE_EDX_NX     - not virtualized, requires PAE.
                               //| X86_CPUID_AMD_FEATURE_EDX_AXMMX
                               | X86_CPUID_AMD_FEATURE_EDX_MMX
                               | X86_CPUID_AMD_FEATURE_EDX_FXSR
                               | X86_CPUID_AMD_FEATURE_EDX_FFXSR
                               //| X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
                               | X86_CPUID_EXT_FEATURE_EDX_RDTSCP
                               //| X86_CPUID_EXT_FEATURE_EDX_LONG_MODE - turned on when necessary
                               | X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX
                               | X86_CPUID_AMD_FEATURE_EDX_3DNOW
                               | 0;
        pExtFeatureLeaf->uEcx &= 0
                               //| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
                               //| X86_CPUID_AMD_FEATURE_ECX_CMPL
                               //| X86_CPUID_AMD_FEATURE_ECX_SVM    - not virtualized.
                               //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                               /* Note: This could prevent teleporting from AMD to Intel CPUs! */
                               | X86_CPUID_AMD_FEATURE_ECX_CR8L /* expose lock mov cr0 = mov cr8 hack for guests that can use this feature to access the TPR. */
                               //| X86_CPUID_AMD_FEATURE_ECX_ABM
                               //| X86_CPUID_AMD_FEATURE_ECX_SSE4A
                               //| X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
                               //| X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
                               //| X86_CPUID_AMD_FEATURE_ECX_OSVW
                               //| X86_CPUID_AMD_FEATURE_ECX_IBS
                               //| X86_CPUID_AMD_FEATURE_ECX_SSE5
                               //| X86_CPUID_AMD_FEATURE_ECX_SKINIT
                               //| X86_CPUID_AMD_FEATURE_ECX_WDT
                               | 0;
        if (pCPUM->u8PortableCpuIdLevel > 0)
        {
            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEcx, CR8L,      X86_CPUID_AMD_FEATURE_ECX_CR8L);
            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW,     X86_CPUID_AMD_FEATURE_EDX_3DNOW);
            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW_EX,  X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);
            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, FFXSR,     X86_CPUID_AMD_FEATURE_EDX_FFXSR);
            PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, RDTSCP,    X86_CPUID_EXT_FEATURE_EDX_RDTSCP);
            PORTABLE_DISABLE_FEATURE_BIT(2, pExtFeatureLeaf->uEcx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);
            PORTABLE_DISABLE_FEATURE_BIT(3, pExtFeatureLeaf->uEcx, CMOV,      X86_CPUID_AMD_FEATURE_EDX_CMOV);

            Assert(!(pExtFeatureLeaf->uEcx & ( X86_CPUID_AMD_FEATURE_ECX_CMPL
                                             | X86_CPUID_AMD_FEATURE_ECX_SVM
                                             | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                                             | X86_CPUID_AMD_FEATURE_ECX_CR8L
                                             | X86_CPUID_AMD_FEATURE_ECX_ABM
                                             | X86_CPUID_AMD_FEATURE_ECX_SSE4A
                                             | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE
                                             | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF
                                             | X86_CPUID_AMD_FEATURE_ECX_OSVW
                                             | X86_CPUID_AMD_FEATURE_ECX_IBS
                                             | X86_CPUID_AMD_FEATURE_ECX_SSE5
                                             | X86_CPUID_AMD_FEATURE_ECX_SKINIT
                                             | X86_CPUID_AMD_FEATURE_ECX_WDT
                                             | UINT32_C(0xffffc000)
                                             )));
            Assert(!(pExtFeatureLeaf->uEdx & ( RT_BIT(10)
                                             | X86_CPUID_EXT_FEATURE_EDX_SYSCALL
                                             | RT_BIT(18)
                                             | RT_BIT(19)
                                             | RT_BIT(21)
                                             | X86_CPUID_AMD_FEATURE_EDX_AXMMX
                                             | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB
                                             | RT_BIT(28)
                                             )));
        }
    }

    /*
     * Hide HTT, multicore, SMP, whatever.
     * (APIC-ID := 0 and #LogCpus := 0)
     */
    pStdFeatureLeaf->uEbx &= 0x0000ffff;
#ifdef VBOX_WITH_MULTI_CORE
    if (pVM->cCpus > 1)
    {
        /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */
        pStdFeatureLeaf->uEbx |= (pVM->cCpus << 16);
        pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT;  /* necessary for hyper-threading *or* multi-core CPUs */
    }
#endif

    /* Cpuid 2:
     * Intel: Cache and TLB information
     * AMD:   Reserved
     * VIA:   Reserved
     * Safe to expose; restrict the number of calls to 1 for the portable case.
     */
    PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 2, 0);
    if (   pCPUM->u8PortableCpuIdLevel > 0
        && pCurLeaf
        && (pCurLeaf->uEax & 0xff) > 1)
    {
        LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff));
        pCurLeaf->uEax &= UINT32_C(0xfffffffe);
    }

    /* Cpuid 3:
     * Intel: EAX, EBX - reserved (transmeta uses these)
     *        ECX, EDX - Processor Serial Number if available, otherwise reserved
     * AMD:   Reserved
     * VIA:   Reserved
     * Safe to expose
     */
    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 3, 0);
    pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
    if (   !(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN)
        && pCurLeaf)
    {
        pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
        if (pCPUM->u8PortableCpuIdLevel > 0)
            pCurLeaf->uEax = pCurLeaf->uEbx = 0;
    }

    /* Cpuid 4:
     * Intel: Deterministic Cache Parameters Leaf
     *        Note: Depends on the ECX input! -> Feeling rather lazy now, so we just return 0
     * AMD:   Reserved
     * VIA:   Reserved
     * Safe to expose, except for EAX:
     *      Bits 25-14: Maximum number of addressable IDs for logical processors sharing this cache (see note)**
     *      Bits 31-26: Maximum number of processor cores in this physical package**
     * Note: These SMP values are constant regardless of ECX
     */
    CPUMCPUIDLEAF NewLeaf;
    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0);
    if (pCurLeaf)
    {
        NewLeaf.uLeaf        = 4;
        NewLeaf.uSubLeaf     = 0;
        NewLeaf.fSubLeafMask = 0;
        NewLeaf.uEax         = 0;
        NewLeaf.uEbx         = 0;
        NewLeaf.uEcx         = 0;
        NewLeaf.uEdx         = 0;
        NewLeaf.fFlags       = 0;
#ifdef VBOX_WITH_MULTI_CORE
        if (   pVM->cCpus > 1
            && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL)
        {
            AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);
            /* One logical processor with possibly multiple cores. */
            /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */
            NewLeaf.uEax |= ((pVM->cCpus - 1) << 26);   /* 6 bits only -> 64 cores! */
        }
#endif
        rc = cpumR3CpuIdInsert(NULL /* pVM */, &pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf);
        AssertLogRelRCReturn(rc, rc);
    }
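    /* Editor's note - worked example: with 4 vCPUs on an Intel guest the
       statement above yields NewLeaf.uEax = (4 - 1) << 26 = 0x0c000000,
       i.e. EAX[31:26] = 3, which CPUID defines as cores per package minus
       one.  The field is only 6 bits wide, hence the AssertReturn capping
       pVM->cCpus at 64. */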
    /* Cpuid 5:     Monitor/mwait Leaf
     * Intel: ECX, EDX - reserved
     *        EAX, EBX - Smallest and largest monitor line size
     * AMD:   EDX - reserved
     *        EAX, EBX - Smallest and largest monitor line size
     *        ECX - extensions (ignored for now)
     * VIA:   Reserved
     * Safe to expose
     */
    pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 5, 0);
    if (pCurLeaf)
    {
        pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0);
        if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR))
            pCurLeaf->uEax = pCurLeaf->uEbx = 0;

        pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
        if (fMWaitExtensions)
        {
            pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;
            /** @todo for now we just expose host's MWAIT C-states, although conceptually
               it shall be part of our power management virtualization model */
#if 0
            /* MWAIT sub C-states */
            pCurLeaf->uEdx =
                    (0 << 0)  /* 0 in C0 */ |
                    (2 << 4)  /* 2 in C1 */ |
                    (2 << 8)  /* 2 in C2 */ |
                    (2 << 12) /* 2 in C3 */ |
                    (0 << 16) /* 0 in C4 */
                    ;
#endif
        }
        else
            pCurLeaf->uEcx = pCurLeaf->uEdx = 0;
    }
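    /* Editor's note: X86_CPUID_MWAIT_ECX_EXT (ECX bit 0) advertises that the
       MONITOR/MWAIT extension enumeration is valid at all, and
       X86_CPUID_MWAIT_ECX_BREAKIRQIF0 (ECX bit 1) that an interrupt can break
       MWAIT even while interrupts are disabled - the one extension guest idle
       loops commonly depend on, which is why exactly these two bits are set
       above when /CPUM/MWaitExtensions is enabled. */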
2347 */ 2348 | X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR 2349 #endif 2350 | 0; 2351 } 2352 else 2353 pCurLeaf->uEdx = 0; 2354 } 2355 2356 /* Cpuid 0x800000008: 2357 * Intel: EAX: Virtual/Physical address Size 2358 * EBX, ECX, EDX - reserved 2359 * AMD: EBX, EDX - reserved 2360 * EAX: Virtual/Physical/Guest address Size 2361 * ECX: Number of cores + APICIdCoreIdSize 2362 * VIA: EAX: Virtual/Physical address Size 2363 * EBX, ECX, EDX - reserved 2364 */ 2365 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000008), 0); 2366 if (pCurLeaf) 2367 { 2368 /* Only expose the virtual and physical address sizes to the guest. */ 2369 pCurLeaf->uEax &= UINT32_C(0x0000ffff); 2370 pCurLeaf->uEbx = pCurLeaf->uEdx = 0; /* reserved */ 2371 /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu) 2372 * NC (0-7) Number of cores; 0 equals 1 core */ 2373 pCurLeaf->uEcx = 0; 2374 #ifdef VBOX_WITH_MULTI_CORE 2375 if ( pVM->cCpus > 1 2376 && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 2377 { 2378 /* Legacy method to determine the number of cores. */ 2379 pCurLeaf->uEcx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */ 2380 pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 2381 UINT32_C(0x80000001), 0); 2382 if (pExtFeatureLeaf) 2383 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL; 2384 } 2385 #endif 2386 } 2387 2388 2389 /* 2390 * Limit it the number of entries, zapping the remainder. 2391 * 2392 * The limits are masking off stuff about power saving and similar, this 2393 * is perhaps a bit crudely done as there is probably some relatively harmless 2394 * info too in these leaves (like words about having a constant TSC). 2395 */ 2396 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0); 2397 if (pCurLeaf) 2398 { 2399 if (pCurLeaf->uEax > 5) 2400 { 2401 pCurLeaf->uEax = 5; 2402 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 2403 pCurLeaf->uEax + 1, UINT32_C(0x000fffff)); 2404 } 2405 2406 /* NT4 hack, no zapping of extra leaves here. */ 2407 if (fNt4LeafLimit && pCurLeaf->uEax > 3) 2408 pCurLeaf->uEax = 3; 2409 } 2410 2411 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000000), 0); 2412 if (pCurLeaf) 2413 { 2414 if (pCurLeaf->uEax > UINT32_C(0x80000008)) 2415 { 2416 pCurLeaf->uEax = UINT32_C(0x80000008); 2417 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 2418 pCurLeaf->uEax + 1, UINT32_C(0x800fffff)); 2419 } 2420 } 2421 2422 /* 2423 * Centaur stuff (VIA). 2424 * 2425 * The important part here (we think) is to make sure the 0xc0000000 2426 * function returns 0xc0000001. As for the features, we don't currently 2427 * let on about any of those... 0xc0000002 seems to be some 2428 * temperature/hz/++ stuff, include it as well (static). 
2429 */ 2430 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0xc0000000), 0); 2431 if (pCurLeaf) 2432 { 2433 if ( pCurLeaf->uEax >= UINT32_C(0xc0000000) 2434 && pCurLeaf->uEax <= UINT32_C(0xc0000004)) 2435 { 2436 pCurLeaf->uEax = RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000002)); 2437 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 2438 UINT32_C(0xc0000002), UINT32_C(0xc00fffff)); 2439 2440 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 2441 UINT32_C(0xc0000001), 0); 2442 if (pCurLeaf) 2443 pCurLeaf->uEdx = 0; /* all features hidden */ 2444 } 2445 else 2446 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 2447 UINT32_C(0xc0000000), UINT32_C(0xc00fffff)); 2448 } 2449 2450 /* 2451 * Hypervisor identification. 2452 * 2453 * We only return minimal information, primarily ensuring that the 2454 * 0x40000000 function returns 0x40000001 and identifying ourselves. 2455 * Hypervisor-specific interface is supported through GIM which will 2456 * modify these leaves if required depending on the GIM provider. 2457 */ 2458 NewLeaf.uLeaf = UINT32_C(0x40000000); 2459 NewLeaf.uSubLeaf = 0; 2460 NewLeaf.fSubLeafMask = 0; 2461 NewLeaf.uEax = UINT32_C(0x40000001); 2462 NewLeaf.uEbx = 0x786f4256 /* 'VBox' */; 2463 NewLeaf.uEcx = 0x786f4256 /* 'VBox' */; 2464 NewLeaf.uEdx = 0x786f4256 /* 'VBox' */; 2465 NewLeaf.fFlags = 0; 2466 rc = cpumR3CpuIdInsert(NULL /* pVM */, &pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf); 2467 AssertLogRelRCReturn(rc, rc); 2468 2469 NewLeaf.uLeaf = UINT32_C(0x40000001); 2470 NewLeaf.uEax = 0x656e6f6e; /* 'none' */ 2471 NewLeaf.uEbx = 0; 2472 NewLeaf.uEcx = 0; 2473 NewLeaf.uEdx = 0; 2474 NewLeaf.fFlags = 0; 2475 rc = cpumR3CpuIdInsert(NULL /* pVM */, &pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf); 2476 AssertLogRelRCReturn(rc, rc); 2477 2478 /* 2479 * Mini CPU selection support for making Mac OS X happy. 2480 */ 2481 if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL) 2482 { 2483 pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0); 2484 uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pStdFeatureLeaf->uEax), 2485 ASMGetCpuModelIntel(pStdFeatureLeaf->uEax), 2486 ASMGetCpuFamily(pStdFeatureLeaf->uEax), 2487 0); 2488 if (uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep) 2489 { 2490 uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000); 2491 uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf; /* stepping */ 2492 uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4; /* 4 low model bits */ 2493 uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) >> 4) << 16; /* 4 high model bits */ 2494 uNew |= (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf) << 8; /* 4 low family bits */ 2495 if (RT_BYTE3(uMaxIntelFamilyModelStep) > 0xf) /* 8 high family bits, using intel's suggested calculation. */ 2496 uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20; 2497 LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x\n", 2498 pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep)); 2499 pStdFeatureLeaf->uEax = uNew; 2500 } 2501 } 2502 2503 /* 2504 * MSR fudging. 
2505 */ 2506 /** @cfgm{/CPUM/FudgeMSRs, boolean, true} 2507 * Fudges some common MSRs if not present in the selected CPU database entry. 2508 * This is for trying to keep VMs running when moved between different hosts 2509 * and different CPU vendors. */ 2510 bool fEnable; 2511 rc = CFGMR3QueryBoolDef(pCpumCfg, "FudgeMSRs", &fEnable, true); AssertRCReturn(rc, rc); 2512 if (fEnable) 2513 { 2514 rc = cpumR3MsrApplyFudge(pVM); 2515 AssertLogRelRCReturn(rc, rc); 2516 } 2517 2518 /* 2519 * Move the MSR and CPUID arrays over on the hypervisor heap, and explode 2520 * guest CPU features again. 2521 */ 2522 void *pvFree = pCPUM->GuestInfo.paCpuIdLeavesR3; 2523 int rc1 = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCPUM, pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves); 2524 RTMemFree(pvFree); 2525 2526 pvFree = pCPUM->GuestInfo.paMsrRangesR3; 2527 int rc2 = MMHyperDupMem(pVM, pvFree, 2528 sizeof(pCPUM->GuestInfo.paMsrRangesR3[0]) * pCPUM->GuestInfo.cMsrRanges, 32, 2529 MM_TAG_CPUM_MSRS, (void **)&pCPUM->GuestInfo.paMsrRangesR3); 2530 RTMemFree(pvFree); 2531 AssertLogRelRCReturn(rc1, rc1); 2532 AssertLogRelRCReturn(rc2, rc2); 2533 2534 pCPUM->GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paMsrRangesR3); 2535 pCPUM->GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paMsrRangesR3); 2536 cpumR3MsrRegStats(pVM); 2537 2538 /* 2539 * Some more configuration that we're applying at the end of everything 2540 * via the CPUMSetGuestCpuIdFeature API. 2541 */ 2542 2543 /* Check if PAE was explicitely enabled by the user. */ 2544 rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false); AssertRCReturn(rc, rc); 2545 if (fEnable) 2546 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE); 2547 2548 /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */ 2549 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false); AssertRCReturn(rc, rc); 2550 if (fEnable) 2551 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 2552 2553 /* We don't enable the Hypervisor Present bit by default, but it may be needed by some guests. */ 2554 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableHVP", &fEnable, false); AssertRCReturn(rc, rc); 2555 if (fEnable) 2556 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP); 2557 2558 #undef PORTABLE_DISABLE_FEATURE_BIT 2559 #undef PORTABLE_CLEAR_BITS_WHEN 2560 2561 return VINF_SUCCESS; 2562 } 2563 2564 2565 2566 /* 2567 * 2568 * 2569 * Saved state related code. 2570 * Saved state related code. 2571 * Saved state related code. 2572 * 2573 * 2574 */ 2575 2576 /** 2577 * Called both in pass 0 and the final pass. 2578 * 2579 * @param pVM Pointer to the VM. 2580 * @param pSSM The saved state handle. 2581 */ 2582 void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM) 2583 { 2584 /* 2585 * Save all the CPU ID leaves here so we can check them for compatibility 2586 * upon loading. 
2587 */ 2588 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)); 2589 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdStd[0], sizeof(pVM->cpum.s.aGuestCpuIdStd)); 2590 2591 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)); 2592 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdExt[0], sizeof(pVM->cpum.s.aGuestCpuIdExt)); 2593 2594 SSMR3PutU32(pSSM, RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)); 2595 SSMR3PutMem(pSSM, &pVM->cpum.s.aGuestCpuIdCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdCentaur)); 2596 2597 SSMR3PutMem(pSSM, &pVM->cpum.s.GuestCpuIdDef, sizeof(pVM->cpum.s.GuestCpuIdDef)); 2598 2599 /* 2600 * Save a good portion of the raw CPU IDs as well as they may come in 2601 * handy when validating features for raw mode. 2602 */ 2603 CPUMCPUID aRawStd[16]; 2604 for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++) 2605 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx); 2606 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd)); 2607 SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd)); 2608 2609 CPUMCPUID aRawExt[32]; 2610 for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++) 2611 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx); 2612 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt)); 2613 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt)); 2614 } 2615 2616 2617 static int cpumR3LoadCpuIdOneGuestArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves) 2618 { 2619 uint32_t cCpuIds; 2620 int rc = SSMR3GetU32(pSSM, &cCpuIds); 2621 if (RT_SUCCESS(rc)) 2622 { 2623 if (cCpuIds < 64) 2624 { 2625 for (uint32_t i = 0; i < cCpuIds; i++) 2626 { 2627 CPUMCPUID CpuId; 2628 rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId)); 2629 if (RT_FAILURE(rc)) 2630 break; 2631 2632 CPUMCPUIDLEAF NewLeaf; 2633 NewLeaf.uLeaf = uBase + i; 2634 NewLeaf.uSubLeaf = 0; 2635 NewLeaf.fSubLeafMask = 0; 2636 NewLeaf.uEax = CpuId.eax; 2637 NewLeaf.uEbx = CpuId.ebx; 2638 NewLeaf.uEcx = CpuId.ecx; 2639 NewLeaf.uEdx = CpuId.edx; 2640 NewLeaf.fFlags = 0; 2641 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &NewLeaf); 2642 } 2643 } 2644 else 2645 rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 2646 } 2647 if (RT_FAILURE(rc)) 2648 { 2649 RTMemFree(*ppaLeaves); 2650 *ppaLeaves = NULL; 2651 *pcLeaves = 0; 2652 } 2653 return rc; 2654 } 2655 2656 2657 static int cpumR3LoadCpuIdGuestArrays(PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves) 2658 { 2659 *ppaLeaves = NULL; 2660 *pcLeaves = 0; 2661 2662 int rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves); 2663 if (RT_SUCCESS(rc)) 2664 rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves); 2665 if (RT_SUCCESS(rc)) 2666 rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves); 2667 2668 return rc; 2669 } 2670 2671 2672 /** 2673 * Loads the CPU ID leaves saved by pass 0. 2674 * 2675 * @returns VBox status code. 2676 * @param pVM Pointer to the VM. 2677 * @param pSSM The saved state handle. 2678 * @param uVersion The format version. 2679 */ 2680 int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion) 2681 { 2682 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION); 2683 2684 /* 2685 * Define a bunch of macros for simplifying the code. 2686 */ 2687 /* Generic expression + failure message. 
*/ 2688 #define CPUID_CHECK_RET(expr, fmt) \ 2689 do { \ 2690 if (!(expr)) \ 2691 { \ 2692 char *pszMsg = RTStrAPrintf2 fmt; /* lack of variadic macros sucks */ \ 2693 if (fStrictCpuIdChecks) \ 2694 { \ 2695 int rcCpuid = SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, "%s", pszMsg); \ 2696 RTStrFree(pszMsg); \ 2697 return rcCpuid; \ 2698 } \ 2699 LogRel(("CPUM: %s\n", pszMsg)); \ 2700 RTStrFree(pszMsg); \ 2701 } \ 2702 } while (0) 2703 #define CPUID_CHECK_WRN(expr, fmt) \ 2704 do { \ 2705 if (!(expr)) \ 2706 LogRel(fmt); \ 2707 } while (0) 2708 2709 /* For comparing two values and bitch if they differs. */ 2710 #define CPUID_CHECK2_RET(what, host, saved) \ 2711 do { \ 2712 if ((host) != (saved)) \ 2713 { \ 2714 if (fStrictCpuIdChecks) \ 2715 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \ 2716 N_(#what " mismatch: host=%#x saved=%#x"), (host), (saved)); \ 2717 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \ 2718 } \ 2719 } while (0) 2720 #define CPUID_CHECK2_WRN(what, host, saved) \ 2721 do { \ 2722 if ((host) != (saved)) \ 2723 LogRel(("CPUM: " #what " differs: host=%#x saved=%#x\n", (host), (saved))); \ 2724 } while (0) 2725 2726 /* For checking raw cpu features (raw mode). */ 2727 #define CPUID_RAW_FEATURE_RET(set, reg, bit) \ 2728 do { \ 2729 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \ 2730 { \ 2731 if (fStrictCpuIdChecks) \ 2732 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \ 2733 N_(#bit " mismatch: host=%d saved=%d"), \ 2734 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) ); \ 2735 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \ 2736 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \ 2737 } \ 2738 } while (0) 2739 #define CPUID_RAW_FEATURE_WRN(set, reg, bit) \ 2740 do { \ 2741 if ((aHostRaw##set [1].reg & bit) != (aRaw##set [1].reg & bit)) \ 2742 LogRel(("CPUM: " #bit" differs: host=%d saved=%d\n", \ 2743 !!(aHostRaw##set [1].reg & (bit)), !!(aRaw##set [1].reg & (bit)) )); \ 2744 } while (0) 2745 #define CPUID_RAW_FEATURE_IGN(set, reg, bit) do { } while (0) 2746 2747 /* For checking guest features. */ 2748 #define CPUID_GST_FEATURE_RET(set, reg, bit) \ 2749 do { \ 2750 if ( (aGuestCpuId##set [1].reg & bit) \ 2751 && !(aHostRaw##set [1].reg & bit) \ 2752 && !(aHostOverride##set [1].reg & bit) \ 2753 ) \ 2754 { \ 2755 if (fStrictCpuIdChecks) \ 2756 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \ 2757 N_(#bit " is not supported by the host but has already exposed to the guest")); \ 2758 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \ 2759 } \ 2760 } while (0) 2761 #define CPUID_GST_FEATURE_WRN(set, reg, bit) \ 2762 do { \ 2763 if ( (aGuestCpuId##set [1].reg & bit) \ 2764 && !(aHostRaw##set [1].reg & bit) \ 2765 && !(aHostOverride##set [1].reg & bit) \ 2766 ) \ 2767 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \ 2768 } while (0) 2769 #define CPUID_GST_FEATURE_EMU(set, reg, bit) \ 2770 do { \ 2771 if ( (aGuestCpuId##set [1].reg & bit) \ 2772 && !(aHostRaw##set [1].reg & bit) \ 2773 && !(aHostOverride##set [1].reg & bit) \ 2774 ) \ 2775 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. 
This may impact performance.\n")); \ 2776 } while (0) 2777 #define CPUID_GST_FEATURE_IGN(set, reg, bit) do { } while (0) 2778 2779 /* For checking guest features if AMD guest CPU. */ 2780 #define CPUID_GST_AMD_FEATURE_RET(set, reg, bit) \ 2781 do { \ 2782 if ( (aGuestCpuId##set [1].reg & bit) \ 2783 && fGuestAmd \ 2784 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \ 2785 && !(aHostOverride##set [1].reg & bit) \ 2786 ) \ 2787 { \ 2788 if (fStrictCpuIdChecks) \ 2789 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \ 2790 N_(#bit " is not supported by the host but has already exposed to the guest")); \ 2791 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \ 2792 } \ 2793 } while (0) 2794 #define CPUID_GST_AMD_FEATURE_WRN(set, reg, bit) \ 2795 do { \ 2796 if ( (aGuestCpuId##set [1].reg & bit) \ 2797 && fGuestAmd \ 2798 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \ 2799 && !(aHostOverride##set [1].reg & bit) \ 2800 ) \ 2801 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \ 2802 } while (0) 2803 #define CPUID_GST_AMD_FEATURE_EMU(set, reg, bit) \ 2804 do { \ 2805 if ( (aGuestCpuId##set [1].reg & bit) \ 2806 && fGuestAmd \ 2807 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \ 2808 && !(aHostOverride##set [1].reg & bit) \ 2809 ) \ 2810 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \ 2811 } while (0) 2812 #define CPUID_GST_AMD_FEATURE_IGN(set, reg, bit) do { } while (0) 2813 2814 /* For checking AMD features which have a corresponding bit in the standard 2815 range. (Intel defines very few bits in the extended feature sets.) */ 2816 #define CPUID_GST_FEATURE2_RET(reg, ExtBit, StdBit) \ 2817 do { \ 2818 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \ 2819 && !(fHostAmd \ 2820 ? aHostRawExt[1].reg & (ExtBit) \ 2821 : aHostRawStd[1].reg & (StdBit)) \ 2822 && !(aHostOverrideExt[1].reg & (ExtBit)) \ 2823 ) \ 2824 { \ 2825 if (fStrictCpuIdChecks) \ 2826 return SSMR3SetLoadError(pSSM, VERR_SSM_LOAD_CPUID_MISMATCH, RT_SRC_POS, \ 2827 N_(#ExtBit " is not supported by the host but has already exposed to the guest")); \ 2828 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \ 2829 } \ 2830 } while (0) 2831 #define CPUID_GST_FEATURE2_WRN(reg, ExtBit, StdBit) \ 2832 do { \ 2833 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \ 2834 && !(fHostAmd \ 2835 ? aHostRawExt[1].reg & (ExtBit) \ 2836 : aHostRawStd[1].reg & (StdBit)) \ 2837 && !(aHostOverrideExt[1].reg & (ExtBit)) \ 2838 ) \ 2839 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \ 2840 } while (0) 2841 #define CPUID_GST_FEATURE2_EMU(reg, ExtBit, StdBit) \ 2842 do { \ 2843 if ( (aGuestCpuIdExt [1].reg & (ExtBit)) \ 2844 && !(fHostAmd \ 2845 ? aHostRawExt[1].reg & (ExtBit) \ 2846 : aHostRawStd[1].reg & (StdBit)) \ 2847 && !(aHostOverrideExt[1].reg & (ExtBit)) \ 2848 ) \ 2849 LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \ 2850 } while (0) 2851 #define CPUID_GST_FEATURE2_IGN(reg, ExtBit, StdBit) do { } while (0) 2852 2853 /* 2854 * Load them into stack buffers first. 
2855 */ 2856 PCPUMCPUIDLEAF paLeaves; 2857 uint32_t cLeaves; 2858 int rc = cpumR3LoadCpuIdGuestArrays(pSSM, uVersion, &paLeaves, &cLeaves); 2859 AssertRCReturn(rc, rc); 2860 2861 /** @todo we'll be leaking paLeaves on error return... */ 2862 2863 CPUMCPUID GuestCpuIdDef; 2864 rc = SSMR3GetMem(pSSM, &GuestCpuIdDef, sizeof(GuestCpuIdDef)); 2865 AssertRCReturn(rc, rc); 2866 2867 CPUMCPUID aRawStd[16]; 2868 uint32_t cRawStd; 2869 rc = SSMR3GetU32(pSSM, &cRawStd); AssertRCReturn(rc, rc); 2870 if (cRawStd > RT_ELEMENTS(aRawStd)) 2871 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 2872 rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0])); 2873 AssertRCReturn(rc, rc); 2874 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++) 2875 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx); 2876 2877 CPUMCPUID aRawExt[32]; 2878 uint32_t cRawExt; 2879 rc = SSMR3GetU32(pSSM, &cRawExt); AssertRCReturn(rc, rc); 2880 if (cRawExt > RT_ELEMENTS(aRawExt)) 2881 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 2882 rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0])); 2883 AssertRCReturn(rc, rc); 2884 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++) 2885 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx); 2886 2887 /* 2888 * Get the raw CPU IDs for the current host. 2889 */ 2890 CPUMCPUID aHostRawStd[16]; 2891 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++) 2892 ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx); 2893 2894 CPUMCPUID aHostRawExt[32]; 2895 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++) 2896 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, 2897 &aHostRawExt[i].eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx); 2898 2899 /* 2900 * Get the host and guest overrides so we don't reject the state because 2901 * some feature was enabled thru these interfaces. 2902 * Note! We currently only need the feature leaves, so skip rest. 2903 */ 2904 PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID"); 2905 CPUMCPUID aHostOverrideStd[2]; 2906 memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd)); 2907 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aHostOverrideStd[0], RT_ELEMENTS(aHostOverrideStd), pOverrideCfg); 2908 2909 CPUMCPUID aHostOverrideExt[2]; 2910 memcpy(&aHostOverrideExt[0], &aHostRawExt[0], sizeof(aHostOverrideExt)); 2911 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aHostOverrideExt[0], RT_ELEMENTS(aHostOverrideExt), pOverrideCfg); 2912 2913 /* 2914 * This can be skipped. 2915 */ 2916 bool fStrictCpuIdChecks; 2917 CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"), "StrictCpuIdChecks", &fStrictCpuIdChecks, true); 2918 2919 2920 2921 /* 2922 * For raw-mode we'll require that the CPUs are very similar since we don't 2923 * intercept CPUID instructions for user mode applications. 
2924 */ 2925 if (!HMIsEnabled(pVM)) 2926 { 2927 /* CPUID(0) */ 2928 CPUID_CHECK_RET( aHostRawStd[0].ebx == aRawStd[0].ebx 2929 && aHostRawStd[0].ecx == aRawStd[0].ecx 2930 && aHostRawStd[0].edx == aRawStd[0].edx, 2931 (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"), 2932 &aHostRawStd[0].ebx, &aHostRawStd[0].edx, &aHostRawStd[0].ecx, 2933 &aRawStd[0].ebx, &aRawStd[0].edx, &aRawStd[0].ecx)); 2934 CPUID_CHECK2_WRN("Std CPUID max leaf", aHostRawStd[0].eax, aRawStd[0].eax); 2935 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3); 2936 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1].eax >> 28, aRawExt[1].eax >> 28); 2937 2938 bool const fIntel = ASMIsIntelCpuEx(aRawStd[0].ebx, aRawStd[0].ecx, aRawStd[0].edx); 2939 2940 /* CPUID(1).eax */ 2941 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawStd[1].eax), ASMGetCpuFamily(aRawStd[1].eax)); 2942 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawStd[1].eax, fIntel), ASMGetCpuModel(aRawStd[1].eax, fIntel)); 2943 CPUID_CHECK2_WRN("CPU type", (aHostRawStd[1].eax >> 12) & 3, (aRawStd[1].eax >> 12) & 3 ); 2944 2945 /* CPUID(1).ebx - completely ignore CPU count and APIC ID. */ 2946 CPUID_CHECK2_RET("CPU brand ID", aHostRawStd[1].ebx & 0xff, aRawStd[1].ebx & 0xff); 2947 CPUID_CHECK2_WRN("CLFLUSH chunk count", (aHostRawStd[1].ebx >> 8) & 0xff, (aRawStd[1].ebx >> 8) & 0xff); 2948 2949 /* CPUID(1).ecx */ 2950 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3); 2951 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL); 2952 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64); 2953 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR); 2954 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS); 2955 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_VMX); 2956 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_SMX); 2957 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_EST); 2958 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TM2); 2959 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3); 2960 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID); 2961 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ ); 2962 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA); 2963 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16); 2964 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE); 2965 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM); 2966 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/); 2967 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(17) /*reserved*/); 2968 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DCA); 2969 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1); 2970 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2); 2971 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC); 2972 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE); 2973 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT); 2974 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(24) /*reserved*/); 2975 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES); 2976 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE); 2977 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE); 2978 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX); 2979 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(29) /*reserved*/); 2980 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(30) /*reserved*/); 2981 CPUID_RAW_FEATURE_RET(Std, ecx, 
X86_CPUID_FEATURE_ECX_HVP); 2982 2983 /* CPUID(1).edx */ 2984 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU); 2985 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME); 2986 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE); 2987 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE); 2988 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC); 2989 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR); 2990 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAE); 2991 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE); 2992 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8); 2993 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC); 2994 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/); 2995 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP); 2996 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR); 2997 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE); 2998 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA); 2999 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV); 3000 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT); 3001 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36); 3002 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN); 3003 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH); 3004 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/); 3005 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_DS); 3006 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_ACPI); 3007 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX); 3008 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR); 3009 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE); 3010 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2); 3011 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SS); 3012 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_HTT); 3013 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_TM); 3014 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/); 3015 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PBE); 3016 3017 /* CPUID(2) - config, mostly about caches. ignore. */ 3018 /* CPUID(3) - processor serial number. ignore. */ 3019 /* CPUID(4) - config, cache and topology - takes ECX as input. ignore. */ 3020 /* CPUID(5) - mwait/monitor config. ignore. */ 3021 /* CPUID(6) - power management. ignore. */ 3022 /* CPUID(7) - ???. ignore. */ 3023 /* CPUID(8) - ???. ignore. */ 3024 /* CPUID(9) - DCA. ignore for now. */ 3025 /* CPUID(a) - PeMo info. ignore for now. */ 3026 /* CPUID(b) - topology info - takes ECX as input. ignore. */ 3027 3028 /* CPUID(d) - XCR0 stuff - takes ECX as input. We only warn about the main level (ECX=0) for now. */ 3029 CPUID_CHECK_WRN( aRawStd[0].eax < UINT32_C(0x0000000d) 3030 || aHostRawStd[0].eax >= UINT32_C(0x0000000d), 3031 ("CPUM: Standard leaf D was present on saved state host, not present on current.\n")); 3032 if ( aRawStd[0].eax >= UINT32_C(0x0000000d) 3033 && aHostRawStd[0].eax >= UINT32_C(0x0000000d)) 3034 { 3035 CPUID_CHECK2_WRN("Valid low XCR0 bits", aHostRawStd[0xd].eax, aRawStd[0xd].eax); 3036 CPUID_CHECK2_WRN("Valid high XCR0 bits", aHostRawStd[0xd].edx, aRawStd[0xd].edx); 3037 CPUID_CHECK2_WRN("Current XSAVE/XRSTOR area size", aHostRawStd[0xd].ebx, aRawStd[0xd].ebx); 3038 CPUID_CHECK2_WRN("Max XSAVE/XRSTOR area size", aHostRawStd[0xd].ecx, aRawStd[0xd].ecx); 3039 } 3040 3041 /* CPUID(0x80000000) - same as CPUID(0) except for eax. 3042 Note! 
Intel have/is marking many of the fields here as reserved. We 3043 will verify them as if it's an AMD CPU. */ 3044 CPUID_CHECK_RET( (aHostRawExt[0].eax >= UINT32_C(0x80000001) && aHostRawExt[0].eax <= UINT32_C(0x8000007f)) 3045 || !(aRawExt[0].eax >= UINT32_C(0x80000001) && aRawExt[0].eax <= UINT32_C(0x8000007f)), 3046 (N_("Extended leaves was present on saved state host, but is missing on the current\n"))); 3047 if (aRawExt[0].eax >= UINT32_C(0x80000001) && aRawExt[0].eax <= UINT32_C(0x8000007f)) 3048 { 3049 CPUID_CHECK_RET( aHostRawExt[0].ebx == aRawExt[0].ebx 3050 && aHostRawExt[0].ecx == aRawExt[0].ecx 3051 && aHostRawExt[0].edx == aRawExt[0].edx, 3052 (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"), 3053 &aHostRawExt[0].ebx, &aHostRawExt[0].edx, &aHostRawExt[0].ecx, 3054 &aRawExt[0].ebx, &aRawExt[0].edx, &aRawExt[0].ecx)); 3055 CPUID_CHECK2_WRN("Ext CPUID max leaf", aHostRawExt[0].eax, aRawExt[0].eax); 3056 3057 /* CPUID(0x80000001).eax - same as CPUID(0).eax. */ 3058 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawExt[1].eax), ASMGetCpuFamily(aRawExt[1].eax)); 3059 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawExt[1].eax, fIntel), ASMGetCpuModel(aRawExt[1].eax, fIntel)); 3060 CPUID_CHECK2_WRN("CPU type", (aHostRawExt[1].eax >> 12) & 3, (aRawExt[1].eax >> 12) & 3 ); 3061 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3 ); 3062 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1].eax >> 28, aRawExt[1].eax >> 28); 3063 3064 /* CPUID(0x80000001).ebx - Brand ID (maybe), just warn if things differs. */ 3065 CPUID_CHECK2_WRN("CPU BrandID", aHostRawExt[1].ebx & 0xffff, aRawExt[1].ebx & 0xffff); 3066 CPUID_CHECK2_WRN("Reserved bits 16:27", (aHostRawExt[1].ebx >> 16) & 0xfff, (aRawExt[1].ebx >> 16) & 0xfff); 3067 CPUID_CHECK2_WRN("PkgType", (aHostRawExt[1].ebx >> 28) & 0xf, (aRawExt[1].ebx >> 28) & 0xf); 3068 3069 /* CPUID(0x80000001).ecx */ 3070 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); 3071 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL); 3072 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM); 3073 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC); 3074 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L); 3075 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM); 3076 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); 3077 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE); 3078 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF); 3079 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW); 3080 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS); 3081 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5); 3082 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); 3083 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT); 3084 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(14)); 3085 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(15)); 3086 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(16)); 3087 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(17)); 3088 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(18)); 3089 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(19)); 3090 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(20)); 3091 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(21)); 3092 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(22)); 3093 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(23)); 3094 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(24)); 3095 
CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(25)); 3096 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(26)); 3097 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(27)); 3098 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(28)); 3099 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(29)); 3100 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(30)); 3101 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(31)); 3102 3103 /* CPUID(0x80000001).edx */ 3104 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FPU); 3105 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_VME); 3106 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_DE); 3107 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE); 3108 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_TSC); 3109 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MSR); 3110 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAE); 3111 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCE); 3112 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CX8); 3113 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_APIC); 3114 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(10) /*reserved*/); 3115 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SEP); 3116 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MTRR); 3117 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PGE); 3118 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCA); 3119 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CMOV); 3120 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAT); 3121 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE36); 3122 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(18) /*reserved*/); 3123 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(19) /*reserved*/); 3124 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX); 3125 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(21) /*reserved*/); 3126 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX); 3127 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MMX); 3128 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FXSR); 3129 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR); 3130 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB); 3131 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 3132 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(28) /*reserved*/); 3133 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE); 3134 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX); 3135 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW); 3136 3137 /** @todo verify the rest as well. */ 3138 } 3139 } 3140 3141 3142 3143 /* 3144 * Verify that we can support the features already exposed to the guest on 3145 * this host. 3146 * 3147 * Most of the features we're emulating requires intercepting instruction 3148 * and doing it the slow way, so there is no need to warn when they aren't 3149 * present in the host CPU. Thus we use IGN instead of EMU on these. 3150 * 3151 * Trailing comments: 3152 * "EMU" - Possible to emulate, could be lots of work and very slow. 3153 * "EMU?" - Can this be emulated? 3154 */ 3155 CPUMCPUID aGuestCpuIdStd[2]; 3156 RT_ZERO(aGuestCpuIdStd); 3157 cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]); 3158 3159 /* CPUID(1).ecx */ 3160 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU 3161 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL); // -> EMU? 
3162 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64); // -> EMU? 3163 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR); 3164 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS); // -> EMU? 3165 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_VMX); // -> EMU 3166 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SMX); // -> EMU 3167 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_EST); // -> EMU 3168 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TM2); // -> EMU? 3169 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3); // -> EMU 3170 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID); // -> EMU 3171 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ ); 3172 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA); // -> EMU? what's this? 3173 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16); // -> EMU? 3174 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU 3175 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM); // -> EMU 3176 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/); 3177 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(17) /*reserved*/); 3178 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DCA); // -> EMU? 3179 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1); // -> EMU 3180 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2); // -> EMU 3181 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC); 3182 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE); // -> EMU 3183 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT); // -> EMU 3184 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(24) /*reserved*/); 3185 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES); // -> EMU 3186 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE); // -> EMU 3187 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU 3188 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX); // -> EMU? 3189 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(29) /*reserved*/); 3190 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(30) /*reserved*/); 3191 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_HVP); // Normally not set by host 3192 3193 /* CPUID(1).edx */ 3194 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU); 3195 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME); 3196 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE); // -> EMU? 3197 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE); 3198 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC); // -> EMU 3199 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR); // -> EMU 3200 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PAE); 3201 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE); 3202 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8); // -> EMU? 
3203 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC); 3204 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/); 3205 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP); 3206 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR); 3207 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE); 3208 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA); 3209 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU 3210 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT); 3211 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36); 3212 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN); 3213 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH); // -> EMU 3214 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/); 3215 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DS); // -> EMU? 3216 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_ACPI); // -> EMU? 3217 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX); // -> EMU 3218 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU 3219 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE); // -> EMU 3220 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2); // -> EMU 3221 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SS); // -> EMU? 3222 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_HTT); // -> EMU? 3223 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TM); // -> EMU? 3224 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/); // -> EMU 3225 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PBE); // -> EMU? 3226 3227 /* CPUID(0x80000000). */ 3228 CPUMCPUID aGuestCpuIdExt[2]; 3229 RT_ZERO(aGuestCpuIdExt); 3230 if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1])) 3231 { 3232 /** @todo deal with no 0x80000001 on the host. */ 3233 bool const fHostAmd = ASMIsAmdCpuEx(aHostRawStd[0].ebx, aHostRawStd[0].ecx, aHostRawStd[0].edx); 3234 bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].ebx, aGuestCpuIdExt[0].ecx, aGuestCpuIdExt[0].edx); 3235 3236 /* CPUID(0x80000001).ecx */ 3237 CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); // -> EMU 3238 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL); // -> EMU 3239 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM); // -> EMU 3240 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ??? 3241 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L); // -> EMU 3242 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM); // -> EMU 3243 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); // -> EMU 3244 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU 3245 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU 3246 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW); // -> EMU? 
3247 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS); // -> EMU 3248 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5); // -> EMU 3249 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); // -> EMU 3250 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT); // -> EMU 3251 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(14)); 3252 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(15)); 3253 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(16)); 3254 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(17)); 3255 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(18)); 3256 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(19)); 3257 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(20)); 3258 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(21)); 3259 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(22)); 3260 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(23)); 3261 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(24)); 3262 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(25)); 3263 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(26)); 3264 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(27)); 3265 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(28)); 3266 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(29)); 3267 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(30)); 3268 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(31)); 3269 3270 /* CPUID(0x80000001).edx */ 3271 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_FPU, X86_CPUID_FEATURE_EDX_FPU); // -> EMU 3272 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_VME, X86_CPUID_FEATURE_EDX_VME); // -> EMU 3273 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_DE, X86_CPUID_FEATURE_EDX_DE); // -> EMU 3274 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PSE, X86_CPUID_FEATURE_EDX_PSE); 3275 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_TSC, X86_CPUID_FEATURE_EDX_TSC); // -> EMU 3276 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_MSR, X86_CPUID_FEATURE_EDX_MSR); // -> EMU 3277 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_PAE, X86_CPUID_FEATURE_EDX_PAE); 3278 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MCE, X86_CPUID_FEATURE_EDX_MCE); 3279 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_CX8, X86_CPUID_FEATURE_EDX_CX8); // -> EMU? 3280 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_APIC, X86_CPUID_FEATURE_EDX_APIC); 3281 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(10) /*reserved*/); 3282 CPUID_GST_FEATURE_IGN( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL); // On Intel: long mode only. 
3283 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MTRR, X86_CPUID_FEATURE_EDX_MTRR); 3284 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PGE, X86_CPUID_FEATURE_EDX_PGE); 3285 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MCA, X86_CPUID_FEATURE_EDX_MCA); 3286 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_CMOV, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU 3287 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PAT, X86_CPUID_FEATURE_EDX_PAT); 3288 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36); 3289 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(18) /*reserved*/); 3290 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(19) /*reserved*/); 3291 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX); 3292 CPUID_GST_FEATURE_WRN( Ext, edx, RT_BIT_32(21) /*reserved*/); 3293 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX); 3294 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_MMX, X86_CPUID_FEATURE_EDX_MMX); // -> EMU 3295 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_FXSR, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU 3296 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR); 3297 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB); 3298 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 3299 CPUID_GST_FEATURE_IGN( Ext, edx, RT_BIT_32(28) /*reserved*/); 3300 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE); 3301 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX); 3302 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW); 3303 } 3304 3305 /* 3306 * We're good, commit the CPU ID leaves. 3307 */ 3308 MMHyperFree(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3); 3309 pVM->cpum.s.GuestInfo.paCpuIdLeavesR0 = NIL_RTR0PTR; 3310 pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR; 3311 pVM->cpum.s.GuestInfo.DefCpuId = GuestCpuIdDef; 3312 rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves); 3313 RTMemFree(paLeaves); 3314 AssertLogRelRCReturn(rc, rc); 3315 3316 3317 #undef CPUID_CHECK_RET 3318 #undef CPUID_CHECK_WRN 3319 #undef CPUID_CHECK2_RET 3320 #undef CPUID_CHECK2_WRN 3321 #undef CPUID_RAW_FEATURE_RET 3322 #undef CPUID_RAW_FEATURE_WRN 3323 #undef CPUID_RAW_FEATURE_IGN 3324 #undef CPUID_GST_FEATURE_RET 3325 #undef CPUID_GST_FEATURE_WRN 3326 #undef CPUID_GST_FEATURE_EMU 3327 #undef CPUID_GST_FEATURE_IGN 3328 #undef CPUID_GST_FEATURE2_RET 3329 #undef CPUID_GST_FEATURE2_WRN 3330 #undef CPUID_GST_FEATURE2_EMU 3331 #undef CPUID_GST_FEATURE2_IGN 3332 #undef CPUID_GST_AMD_FEATURE_RET 3333 #undef CPUID_GST_AMD_FEATURE_WRN 3334 #undef CPUID_GST_AMD_FEATURE_EMU 3335 #undef CPUID_GST_AMD_FEATURE_IGN 3336 3337 return VINF_SUCCESS; 3338 } 3339 3340 #endif /* VBOX_IN_VMM */ -
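
(An aside on the "Mini CPU selection support" hunk above: below is a minimal standalone sketch, outside VirtualBox, of how CPUID(1).EAX packs the Intel family/model/stepping values that uMaxIntelFamilyModelStep clamps. It mirrors the recombination done in the changeset; the function name PackIntelFms and the sample values are illustrative assumptions, not code from this changeset.)

#include <stdint.h>
#include <stdio.h>

/* Pack a display family/model/stepping triple into the CPUID(1).EAX layout:
   bits 3:0 stepping, 7:4 low model nibble, 11:8 low family nibble,
   19:16 extended model, 27:20 extended family (using the same
   family - (family & 0xf) convention as the code above). */
static uint32_t PackIntelFms(uint32_t uFamily, uint32_t uModel, uint32_t uStepping)
{
    uint32_t uEax = uStepping & 0xf;                        /* stepping */
    uEax |= (uModel & 0xf) << 4;                            /* 4 low model bits */
    uEax |= (uModel >> 4) << 16;                            /* 4 high model bits */
    uEax |= (uFamily & 0xf) << 8;                           /* 4 low family bits */
    if (uFamily > 0xf)                                      /* 8 high family bits */
        uEax |= ((uFamily - (uFamily & 0xf)) & 0xff) << 20;
    return uEax;
}

int main(void)
{
    /* Family 6, model 0x17, stepping 6 -- a Penryn-class signature. */
    printf("%#x\n", PackIntelFms(6, 0x17, 6)); /* prints 0x10676 */
    return 0;
}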
trunk/src/VBox/VMM/include/CPUMInternal.h
r53630 r54561 118 118 #endif 119 119 #endif 120 121 122 /** @name CPUM Saved State Version. 123 * @{ */ 124 /** The current saved state version. */ 125 #define CPUM_SAVED_STATE_VERSION 14 126 /** The current saved state version before using SSMR3PutStruct. */ 127 #define CPUM_SAVED_STATE_VERSION_MEM 13 128 /** The saved state version before introducing the MSR size field. */ 129 #define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE 12 130 /** The saved state version of 3.2, 3.1 and 3.3 trunk before the hidden 131 * selector register change (CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID). */ 132 #define CPUM_SAVED_STATE_VERSION_VER3_2 11 133 /** The saved state version of 3.0 and 3.1 trunk before the teleportation 134 * changes. */ 135 #define CPUM_SAVED_STATE_VERSION_VER3_0 10 136 /** The saved state version for the 2.1 trunk before the MSR changes. */ 137 #define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR 9 138 /** The saved state version of 2.0, used for backwards compatibility. */ 139 #define CPUM_SAVED_STATE_VERSION_VER2_0 8 140 /** The saved state version of 1.6, used for backwards compatibility. */ 141 #define CPUM_SAVED_STATE_VERSION_VER1_6 6 142 /** @} */ 143 120 144 121 145 … … 587 611 void cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast); 588 612 int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures); 613 int cpumR3CpuIdInit(PVM pVM); 614 void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM); 615 int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion); 616 589 617 int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo); 590 618 int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
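
(The CPUID_GST_FEATURE_* macro family used by cpumR3LoadCpuId, whose prototype the header hunk above now exports, all reduce to one acceptance rule per feature bit. Below is a hedged restatement in plain C with hypothetical names, written to match the macro bodies shown in the diff; it is not part of the changeset.)

#include <stdint.h>

/* Returns 0 if the bit is acceptable, 1 if it only merits a warning, and
   -1 if a strict load must be rejected: the guest was already given a
   feature that neither the current host CPU nor a CPUM/HostCPUID
   override provides. */
static int CheckGuestFeatureBit(uint32_t fGuestReg, uint32_t fHostReg,
                                uint32_t fOverrideReg, uint32_t fBit, int fStrict)
{
    if (   (fGuestReg & fBit)
        && !(fHostReg & fBit)
        && !(fOverrideReg & fBit))
        return fStrict ? -1 /* refuse the saved state */ : 1 /* log and continue */;
    return 0;
}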
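
(A closing illustration for the hypervisor-identification hunk: why the magic value 0x786f4256 that the leaf 0x40000000 code above stores in EBX/ECX/EDX shows up as the string "VBox" inside a guest. Plain ISO C, nothing VirtualBox-specific assumed.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint32_t uReg = UINT32_C(0x786f4256); /* value placed in EBX/ECX/EDX above */
    char sz[5];
    memcpy(sz, &uReg, sizeof(uReg));      /* x86 is little-endian: bytes 0x56 0x42 0x6f 0x78 */
    sz[4] = '\0';
    printf("%s\n", sz);                   /* prints "VBox" */
    return 0;
}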