Changeset 49893 in vbox for trunk/src/VBox/VMM/VMMR3
- Timestamp: Dec 13, 2013, 12:40:20 AM (11 years ago)
- Location: trunk/src/VBox/VMM/VMMR3
- Files: 2 added, 6 edited
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
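Editor's preface to the diff below: the core of this change retires CPUM's fixed CPUID tables (aGuestCpuIdStd, aGuestCpuIdExt, aGuestCpuIdCentaur) in favour of one sorted array of CPUMCPUIDLEAF entries plus a table of CPUMMSRRANGE descriptors, both managed by helpers that presumably live in the two files this changeset adds. A minimal sketch of the lookup idea follows; the linear scan and the sub-leaf test are this editor's assumptions, not the real cpumR3CpuIdGetLeaf:

    /* Sketch: find a guest CPUID leaf in the leaf array; NULL means the
       caller falls back to the default leaf (GuestInfo.DefCpuId). */
    static PCPUMCPUIDLEAF cpumCpuIdLookupSketch(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves,
                                                uint32_t uLeaf, uint32_t uSubLeaf)
    {
        for (uint32_t i = 0; i < cLeaves; i++) /* linear for clarity; sorted input would also permit a binary search */
            if (   paLeaves[i].uLeaf == uLeaf
                && paLeaves[i].uSubLeaf == (uSubLeaf & paLeaves[i].fSubLeafMask))
                return &paLeaves[i];
        return NULL;
    }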
r49538 r49893 55 55 #include <VBox/err.h> 56 56 #include <VBox/log.h> 57 #include <iprt/asm-amd64-x86.h> 57 58 #include <iprt/assert.h> 58 #include <iprt/asm-amd64-x86.h> 59 #include <iprt/cpuset.h> 60 #include <iprt/mem.h> 61 #include <iprt/mp.h> 59 62 #include <iprt/string.h> 60 #include <iprt/mp.h>61 #include <iprt/cpuset.h>62 63 #include "internal/pgm.h" 63 64 … … 115 116 * Internal Functions * 116 117 *******************************************************************************/ 117 static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);118 118 static int cpumR3CpuIdInit(PVM pVM); 119 119 static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass); … … 581 581 582 582 /* 583 * Assert alignment and sizes.583 * Assert alignment, sizes and tables. 584 584 */ 585 585 AssertCompileMemberAlignment(VM, cpum.s, 32); … … 592 592 AssertCompileMemberAlignment(VMCPU, cpum.s, 64); 593 593 AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64); 594 #ifdef VBOX_STRICT 595 int rc2 = cpumR3MsrStrictInitChecks(); 596 AssertRCReturn(rc2, rc2); 597 #endif 594 598 595 599 /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */ 596 600 pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum); 597 601 Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.offCPUMCPU0 == (uintptr_t)&pVM->aCpus[0].cpum); 602 598 603 599 604 /* Calculate the offset from CPUMCPU to CPUM. */ … … 647 652 648 653 /* 649 * Detect the host CPU vendor. 650 * (The guest CPU vendor is re-detected later on.) 651 */ 652 uint32_t uEAX, uEBX, uECX, uEDX; 653 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX); 654 pVM->cpum.s.enmHostCpuVendor = cpumR3DetectVendor(uEAX, uEBX, uECX, uEDX); 655 pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor; 654 * Gather info about the host CPU. 655 */ 656 PCPUMCPUIDLEAF paLeaves; 657 uint32_t cLeaves; 658 int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves); 659 AssertLogRelRCReturn(rc, rc); 660 661 rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &pVM->cpum.s.HostFeatures); 662 RTMemFree(paLeaves); 663 AssertLogRelRCReturn(rc, rc); 664 pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor; 656 665 657 666 /* … … 662 671 * Register saved state data item. 663 672 */ 664 intrc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),665 666 667 673 rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM), 674 NULL, cpumR3LiveExec, NULL, 675 NULL, cpumR3SaveExec, NULL, 676 cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone); 668 677 if (RT_FAILURE(rc)) 669 678 return rc; … … 700 709 701 710 /** 702 * Detect the CPU vendor give n the 703 * 704 * @returns The vendor. 705 * @param uEAX EAX from CPUID(0). 706 * @param uEBX EBX from CPUID(0). 707 * @param uECX ECX from CPUID(0). 708 * @param uEDX EDX from CPUID(0). 709 */ 710 static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX) 711 { 712 if (ASMIsValidStdRange(uEAX)) 713 { 714 if (ASMIsAmdCpuEx(uEBX, uECX, uEDX)) 715 return CPUMCPUVENDOR_AMD; 716 717 if (ASMIsIntelCpuEx(uEBX, uECX, uEDX)) 718 return CPUMCPUVENDOR_INTEL; 719 720 if (ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX)) 721 return CPUMCPUVENDOR_VIA; 722 723 /** @todo detect the other buggers... */ 724 } 725 726 return CPUMCPUVENDOR_UNKNOWN; 711 * Loads MSR range overrides. 712 * 713 * This must be called before the MSR ranges are moved from the normal heap to 714 * the hyper heap! 
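 *
 * (Editor's illustration of such an override subtree; the keys match the
 *  code below, while the MSR number and value are only an example:)
 *
 *      /CPUM/MSRs/MtrrDefType/
 *          First     = 0x2ff          ; mandatory, first MSR of the range
 *          Last      = 0x2ff          ; optional, defaults to First
 *          Type      = "FixedValue"   ; the only type recognised below
 *          Value     = 0x806          ; initial/read value, default 0
 *          WrGpMask  = 0              ; writes to set bits raise #GP, default 0
 *          WrIgnMask = 0              ; writes to set bits are ignored, default 0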
715 * 716 * @returns VBox status code (VMSetError called). 717 * @param pVM Pointer to the cross context VM structure 718 * @param pMsrNode The CFGM node with the MSR overrides. 719 */ 720 static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode) 721 { 722 for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode)) 723 { 724 /* 725 * Assemble a valid MSR range. 726 */ 727 CPUMMSRRANGE MsrRange; 728 MsrRange.offCpumCpu = 0; 729 MsrRange.fReserved = 0; 730 731 int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName)); 732 if (RT_FAILURE(rc)) 733 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc); 734 735 rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst); 736 if (RT_FAILURE(rc)) 737 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n", 738 MsrRange.szName, rc); 739 740 rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst); 741 if (RT_FAILURE(rc)) 742 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n", 743 MsrRange.szName, rc); 744 745 char szType[32]; 746 rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue"); 747 if (RT_FAILURE(rc)) 748 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n", 749 MsrRange.szName, rc); 750 if (!RTStrICmp(szType, "FixedValue")) 751 { 752 MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue; 753 MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite; 754 755 rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uInitOrReadValue, 0); 756 if (RT_FAILURE(rc)) 757 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n", 758 MsrRange.szName, rc); 759 760 rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0); 761 if (RT_FAILURE(rc)) 762 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n", 763 MsrRange.szName, rc); 764 765 rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0); 766 if (RT_FAILURE(rc)) 767 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n", 768 MsrRange.szName, rc); 769 } 770 else 771 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, 772 "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType); 773 774 /* 775 * Insert the range into the table (replaces/splits/shrinks existing 776 * MSR ranges). 777 */ 778 rc = cpumR3MsrRangesInsert(&pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges, &MsrRange); 779 if (RT_FAILURE(rc)) 780 return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc); 781 } 782 783 return VINF_SUCCESS; 727 784 } 785 786 787 /** 788 * Loads CPUID leaf overrides. 789 * 790 * This must be called before the CPUID leaves are moved from the normal 791 * heap to the hyper heap! 792 * 793 * @returns VBox status code (VMSetError called). 794 * @param pVM Pointer to the cross context VM structure 795 * @param pParentNode The CFGM node with the CPUID leaves. 796 * @param pszLabel How to label the overrides we're loading. 797 */ 798 static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel) 799 { 800 for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode)) 801 { 802 /* 803 * Get the leaf and subleaf numbers. 
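 * (Editor's note: per the code below, a node is either named after the
 *  leaf in hex, e.g. /CPUM/HostCPUID/80000001/ with an 'ecx' value to
 *  override just that register, or it carries explicit 'Leaf' and
 *  'SubLeaf' values.)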
804 */ 805 char szName[128]; 806 int rc = CFGMR3GetName(pNode, szName, sizeof(szName)); 807 if (RT_FAILURE(rc)) 808 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc); 809 810 /* The leaf number is either specified directly or thru the node name. */ 811 uint32_t uLeaf; 812 rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf); 813 if (rc == VERR_CFGM_VALUE_NOT_FOUND) 814 { 815 rc = RTStrToUInt32Full(szName, 16, &uLeaf); 816 if (rc != VINF_SUCCESS) 817 return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS, 818 "Invalid %s entry: Invalid leaf number: '%s' \n", pszLabel, szName); 819 } 820 else if (RT_FAILURE(rc)) 821 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n", 822 pszLabel, szName, rc); 823 824 uint32_t uSubLeaf; 825 rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0); 826 if (RT_FAILURE(rc)) 827 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n", 828 pszLabel, szName, rc); 829 830 uint32_t fSubLeafMask; 831 rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0); 832 if (RT_FAILURE(rc)) 833 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n", 834 pszLabel, szName, rc); 835 836 /* 837 * Look up the specified leaf, since the output register values 838 * defaults to any existing values. This allows overriding a single 839 * register, without needing to know the other values. 840 */ 841 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves, 842 uLeaf, uSubLeaf); 843 CPUMCPUIDLEAF Leaf; 844 if (pLeaf) 845 Leaf = *pLeaf; 846 else 847 RT_ZERO(Leaf); 848 Leaf.uLeaf = uLeaf; 849 Leaf.uSubLeaf = uSubLeaf; 850 Leaf.fSubLeafMask = fSubLeafMask; 851 852 rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax); 853 if (RT_FAILURE(rc)) 854 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n", 855 pszLabel, szName, rc); 856 rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx); 857 if (RT_FAILURE(rc)) 858 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n", 859 pszLabel, szName, rc); 860 rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx); 861 if (RT_FAILURE(rc)) 862 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n", 863 pszLabel, szName, rc); 864 rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx); 865 if (RT_FAILURE(rc)) 866 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n", 867 pszLabel, szName, rc); 868 869 /* 870 * Insert the leaf into the table (replaces existing ones). 871 */ 872 rc = cpumR3CpuIdInsert(&pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves, &Leaf); 873 if (RT_FAILURE(rc)) 874 return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc); 875 } 876 877 return VINF_SUCCESS; 878 } 879 728 880 729 881 … … 815 967 816 968 969 static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCPUM, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves) 970 { 971 /* 972 * Install the CPUID information. 
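 * (Editor's note: MMHyperDupMem below copies the ring-3 array onto the
 *  hyper heap so that ring-0 and raw-mode code can address it as well;
 *  the paCpuIdLeavesR0/RC pointers are then derived from the R3 one.)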
973 */ 974 int rc = MMHyperDupMem(pVM, paLeaves, sizeof(paLeaves[0]) * cLeaves, 32, 975 MM_TAG_CPUM_CPUID, (void **)&pCPUM->GuestInfo.paCpuIdLeavesR3); 976 977 AssertLogRelRCReturn(rc, rc); 978 979 pCPUM->GuestInfo.paCpuIdLeavesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3); 980 pCPUM->GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3); 981 Assert(MMHyperR0ToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesR0) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3); 982 Assert(MMHyperRCToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesRC) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3); 983 984 /* 985 * Explode the guest CPU features. 986 */ 987 rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures); 988 AssertLogRelRCReturn(rc, rc); 989 990 991 /* 992 * Populate the legacy arrays. Currently used for everything, later only 993 * for patch manager. 994 */ 995 struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] = 996 { 997 { pCPUM->aGuestCpuIdStd, RT_ELEMENTS(pCPUM->aGuestCpuIdStd), 0x00000000 }, 998 { pCPUM->aGuestCpuIdExt, RT_ELEMENTS(pCPUM->aGuestCpuIdExt), 0x80000000 }, 999 { pCPUM->aGuestCpuIdCentaur, RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), 0xc0000000 }, 1000 { pCPUM->aGuestCpuIdHyper, RT_ELEMENTS(pCPUM->aGuestCpuIdHyper), 0x40000000 }, 1001 }; 1002 for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++) 1003 { 1004 uint32_t cLeft = aOldRanges[i].cCpuIds; 1005 uint32_t uLeaf = aOldRanges[i].uBase + cLeft; 1006 PCPUMCPUID pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft]; 1007 while (cLeft-- > 0) 1008 { 1009 uLeaf--; 1010 pLegacyLeaf--; 1011 1012 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, uLeaf, 0); 1013 if (pLeaf) 1014 { 1015 pLegacyLeaf->eax = pLeaf->uEax; 1016 pLegacyLeaf->ebx = pLeaf->uEbx; 1017 pLegacyLeaf->ecx = pLeaf->uEcx; 1018 pLegacyLeaf->edx = pLeaf->uEdx; 1019 } 1020 else 1021 *pLegacyLeaf = pCPUM->GuestInfo.DefCpuId; 1022 } 1023 } 1024 1025 pCPUM->GuestCpuIdDef = pCPUM->GuestInfo.DefCpuId; 1026 1027 return VINF_SUCCESS; 1028 } 1029 1030 817 1031 /** 818 1032 * Initializes the emulated CPU's cpuid information. 
… … 825 1039 PCPUM pCPUM = &pVM->cpum.s; 826 1040 PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"); 827 uint32_t i;828 1041 int rc; 829 1042 830 #define PORTABLE_CLEAR_BITS_WHEN(Lvl, LeafSuffReg, FeatNm, fMask, uValue) \831 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg& (fMask)) == (uValue) ) \1043 #define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \ 1044 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \ 832 1045 { \ 833 LogRel(("PortableCpuId: " # LeafSuffReg "[" #FeatNm "]: %#x -> 0\n", pCPUM->aGuestCpuId##LeafSuffReg& (fMask))); \834 pCPUM->aGuestCpuId##LeafSuffReg&= ~(uint32_t)(fMask); \835 } 836 #define PORTABLE_DISABLE_FEATURE_BIT(Lvl, LeafSuffReg, FeatNm, fBitMask) \837 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg& (fBitMask)) ) \1046 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \ 1047 (a_pLeafReg) &= ~(uint32_t)(fMask); \ 1048 } 1049 #define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \ 1050 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \ 838 1051 { \ 839 LogRel(("PortableCpuId: " # LeafSuffReg "[" #FeatNm "]: 1 -> 0\n")); \840 pCPUM->aGuestCpuId##LeafSuffReg&= ~(uint32_t)(fBitMask); \1052 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \ 1053 (a_pLeafReg) &= ~(uint32_t)(fBitMask); \ 841 1054 } 842 1055 … … 847 1060 * Enables the Synthetic CPU. The Vendor ID and Processor Name are 848 1061 * completely overridden by VirtualBox custom strings. Some 849 * CPUID information is withheld, like the cache info. */ 850 rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &pCPUM->fSyntheticCpu, false); 1062 * CPUID information is withheld, like the cache info. 1063 * 1064 * This is obsoleted by PortableCpuIdLevel. */ 1065 bool fSyntheticCpu; 1066 rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &fSyntheticCpu, false); 851 1067 AssertRCReturn(rc, rc); 852 1068 … … 856 1072 * values should only be used when older CPUs are involved since it may 857 1073 * harm performance and maybe also cause problems with specific guests. */ 858 rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, 0); 859 AssertRCReturn(rc, rc); 860 861 AssertLogRelReturn(!pCPUM->fSyntheticCpu || !pCPUM->u8PortableCpuIdLevel, VERR_CPUM_INCOMPATIBLE_CONFIG); 862 863 /* 864 * Get the host CPUID leaves and redetect the guest CPU vendor (could've 865 * been overridden). 866 */ 867 /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit} 868 * Overrides the host CPUID leaf values used for calculating the guest CPUID 869 * leaves. This can be used to preserve the CPUID values when moving a VM to a 870 * different machine. Another use is restricting (or extending) the feature set 871 * exposed to the guest. 
*/ 872 PCFGMNODE pHostOverrideCfg = CFGMR3GetChild(pCpumCfg, "HostCPUID"); 873 rc = cpumR3CpuIdInitHostSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0], RT_ELEMENTS(pCPUM->aGuestCpuIdStd), pHostOverrideCfg); 874 AssertRCReturn(rc, rc); 875 rc = cpumR3CpuIdInitHostSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0], RT_ELEMENTS(pCPUM->aGuestCpuIdExt), pHostOverrideCfg); 876 AssertRCReturn(rc, rc); 877 rc = cpumR3CpuIdInitHostSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pHostOverrideCfg); 878 AssertRCReturn(rc, rc); 879 880 pCPUM->enmGuestCpuVendor = cpumR3DetectVendor(pCPUM->aGuestCpuIdStd[0].eax, pCPUM->aGuestCpuIdStd[0].ebx, 881 pCPUM->aGuestCpuIdStd[0].ecx, pCPUM->aGuestCpuIdStd[0].edx); 882 883 /* 884 * Determine the default leaf. 885 * 886 * Intel returns values of the highest standard function, while AMD 887 * returns zeros. VIA on the other hand seems to returning nothing or 888 * perhaps some random garbage, we don't try to duplicate this behavior. 889 */ 890 ASMCpuIdExSlow(pCPUM->aGuestCpuIdStd[0].eax + 10, 0, 0, 0, /** @todo r=bird: Use the host value here in case of overrides and more than 10 leaves being stripped already. */ 891 &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx, 892 &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx); 1074 rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, fSyntheticCpu ? 1 : 0); 1075 AssertLogRelRCReturn(rc, rc); 1076 1077 /** @cfgm{CPUM/GuestCpuName, string} 1078 * The name of of the CPU we're to emulate. The default is the host CPU. 1079 * Note! CPUs other than "host" one is currently unsupported. */ 1080 char szCpuName[128]; 1081 rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", szCpuName, sizeof(szCpuName), "host"); 1082 AssertLogRelRCReturn(rc, rc); 893 1083 894 1084 /** @cfgm{/CPUM/CMPXCHG16B, boolean, false} … … 896 1086 */ 897 1087 bool fCmpXchg16b; 898 rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false); AssertRCReturn(rc, rc); 1088 rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false); 1089 AssertLogRelRCReturn(rc, rc); 899 1090 900 1091 /** @cfgm{/CPUM/MONITOR, boolean, true} … … 902 1093 */ 903 1094 bool fMonitor; 904 rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true); AssertRCReturn(rc, rc); 905 906 /* Cpuid 1 & 0x80000001: 1095 rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true); 1096 AssertLogRelRCReturn(rc, rc); 1097 1098 /** @cfgm{/CPUM/MWaitExtensions, boolean, false} 1099 * Expose MWAIT extended features to the guest. For now we expose just MWAIT 1100 * break on interrupt feature (bit 1). 1101 */ 1102 bool fMWaitExtensions; 1103 rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false); 1104 AssertLogRelRCReturn(rc, rc); 1105 1106 /** @cfgm{/CPUM/NT4LeafLimit, boolean, false} 1107 * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from 1108 * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e). 1109 * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22]. 1110 */ 1111 bool fNt4LeafLimit; 1112 rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false); 1113 AssertLogRelRCReturn(rc, rc); 1114 1115 /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX} 1116 * Restrict the reported CPU family+model+stepping of intel CPUs. This is 1117 * probably going to be a temporary hack, so don't depend on this. 
1118 * The 1st byte of the value is the stepping, the 2nd byte value is the model 1119 * number and the 3rd byte value is the family, and the 4th value must be zero. 1120 */ 1121 uint32_t uMaxIntelFamilyModelStep; 1122 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX); 1123 AssertLogRelRCReturn(rc, rc); 1124 1125 /* 1126 * Get the guest CPU data from the database and/or the host. 1127 */ 1128 rc = cpumR3DbGetCpuInfo(szCpuName, &pCPUM->GuestInfo); 1129 if (RT_FAILURE(rc)) 1130 return rc == VERR_CPUM_DB_CPU_NOT_FOUND 1131 ? VMSetError(pVM, rc, RT_SRC_POS, 1132 "Info on guest CPU '%s' could not be found. Please, select a different CPU.", szCpuName) 1133 : rc; 1134 1135 /** @cfgm{CPUM/MSRs/[Name]/[First|Last|Type|Value|...],} 1136 * Overrides the guest MSRs. 1137 */ 1138 rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs")); 1139 1140 /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit} 1141 * Overrides the CPUID leaf values (from the host CPU usually) used for 1142 * calculating the guest CPUID leaves. This can be used to preserve the CPUID 1143 * values when moving a VM to a different machine. Another use is restricting 1144 * (or extending) the feature set exposed to the guest. */ 1145 if (RT_SUCCESS(rc)) 1146 rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID"); 1147 1148 if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */ 1149 rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS, 1150 "Found unsupported configuration node '/CPUM/CPUID/'. " 1151 "Please use IMachine::setCPUIDLeaf() instead."); 1152 1153 /* 1154 * Pre-exploded the CPUID info. 1155 */ 1156 if (RT_SUCCESS(rc)) 1157 rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures); 1158 if (RT_FAILURE(rc)) 1159 { 1160 RTMemFree(pCPUM->GuestInfo.paCpuIdLeavesR3); 1161 pCPUM->GuestInfo.paCpuIdLeavesR3 = NULL; 1162 RTMemFree(pCPUM->GuestInfo.paMsrRangesR3); 1163 pCPUM->GuestInfo.paMsrRangesR3 = NULL; 1164 return rc; 1165 } 1166 1167 1168 /* ... split this function about here ... */ 1169 1170 1171 PCPUMCPUIDLEAF pStdLeaf0 = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0); 1172 AssertLogRelReturn(pStdLeaf0, VERR_CPUM_IPE_2); 1173 1174 1175 /* Cpuid 1: 907 1176 * Only report features we can support. 908 1177 * … … 910 1179 * options may require adjusting (i.e. stripping what was enabled). 911 1180 */ 912 pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU 1181 PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0); 1182 AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2); 1183 pStdFeatureLeaf->uEdx &= X86_CPUID_FEATURE_EDX_FPU 913 1184 | X86_CPUID_FEATURE_EDX_VME 914 1185 | X86_CPUID_FEATURE_EDX_DE … … 941 1212 //| X86_CPUID_FEATURE_EDX_PBE - no pending break enabled. 942 1213 | 0; 943 p CPUM->aGuestCpuIdStd[1].ecx&= 01214 pStdFeatureLeaf->uEcx &= 0 944 1215 | X86_CPUID_FEATURE_ECX_SSE3 945 1216 /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. 
*/ … … 961 1232 if (pCPUM->u8PortableCpuIdLevel > 0) 962 1233 { 963 PORTABLE_CLEAR_BITS_WHEN(1, Std[1].eax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));964 PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);965 PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSE3, X86_CPUID_FEATURE_ECX_SSE3);966 PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, CX16, X86_CPUID_FEATURE_ECX_CX16);967 PORTABLE_DISABLE_FEATURE_BIT(2, Std[1].edx, SSE2, X86_CPUID_FEATURE_EDX_SSE2);968 PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, SSE, X86_CPUID_FEATURE_EDX_SSE);969 PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);970 PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CMOV, X86_CPUID_FEATURE_EDX_CMOV);971 972 Assert(!(p CPUM->aGuestCpuIdStd[1].edx& ( X86_CPUID_FEATURE_EDX_SEP1234 PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12)); 1235 PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3); 1236 PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE3, X86_CPUID_FEATURE_ECX_SSE3); 1237 PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, CX16, X86_CPUID_FEATURE_ECX_CX16); 1238 PORTABLE_DISABLE_FEATURE_BIT(2, pStdFeatureLeaf->uEdx, SSE2, X86_CPUID_FEATURE_EDX_SSE2); 1239 PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, SSE, X86_CPUID_FEATURE_EDX_SSE); 1240 PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH); 1241 PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CMOV, X86_CPUID_FEATURE_EDX_CMOV); 1242 1243 Assert(!(pStdFeatureLeaf->uEdx & ( X86_CPUID_FEATURE_EDX_SEP 973 1244 | X86_CPUID_FEATURE_EDX_PSN 974 1245 | X86_CPUID_FEATURE_EDX_DS … … 978 1249 | X86_CPUID_FEATURE_EDX_PBE 979 1250 ))); 980 Assert(!(p CPUM->aGuestCpuIdStd[1].ecx& ( X86_CPUID_FEATURE_ECX_PCLMUL1251 Assert(!(pStdFeatureLeaf->uEcx & ( X86_CPUID_FEATURE_ECX_PCLMUL 981 1252 | X86_CPUID_FEATURE_ECX_DTES64 982 1253 | X86_CPUID_FEATURE_ECX_CPLDS … … 1008 1279 * ASSUMES that this is ALWAYS the AMD defined feature set if present. 
1009 1280 */ 1010 pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU 1281 PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1282 UINT32_C(0x80000001), 0); 1283 if (pExtFeatureLeaf) 1284 { 1285 pExtFeatureLeaf->uEdx &= X86_CPUID_AMD_FEATURE_EDX_FPU 1011 1286 | X86_CPUID_AMD_FEATURE_EDX_VME 1012 1287 | X86_CPUID_AMD_FEATURE_EDX_DE … … 1037 1312 | X86_CPUID_AMD_FEATURE_EDX_3DNOW 1038 1313 | 0; 1039 pCPUM->aGuestCpuIdExt[1].ecx&= 01314 pExtFeatureLeaf->uEcx &= 0 1040 1315 //| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF 1041 1316 //| X86_CPUID_AMD_FEATURE_ECX_CMPL … … 1054 1329 //| X86_CPUID_AMD_FEATURE_ECX_WDT 1055 1330 | 0; 1056 if (pCPUM->u8PortableCpuIdLevel > 0) 1057 { 1058 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].ecx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L); 1059 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW); 1060 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX); 1061 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR); 1062 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 1063 PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); 1064 PORTABLE_DISABLE_FEATURE_BIT(3, Ext[1].ecx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV); 1065 1066 Assert(!(pCPUM->aGuestCpuIdExt[1].ecx & ( X86_CPUID_AMD_FEATURE_ECX_CMPL 1067 | X86_CPUID_AMD_FEATURE_ECX_SVM 1068 | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC 1069 | X86_CPUID_AMD_FEATURE_ECX_CR8L 1070 | X86_CPUID_AMD_FEATURE_ECX_ABM 1071 | X86_CPUID_AMD_FEATURE_ECX_SSE4A 1072 | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE 1073 | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF 1074 | X86_CPUID_AMD_FEATURE_ECX_OSVW 1075 | X86_CPUID_AMD_FEATURE_ECX_IBS 1076 | X86_CPUID_AMD_FEATURE_ECX_SSE5 1077 | X86_CPUID_AMD_FEATURE_ECX_SKINIT 1078 | X86_CPUID_AMD_FEATURE_ECX_WDT 1079 | UINT32_C(0xffffc000) 1080 ))); 1081 Assert(!(pCPUM->aGuestCpuIdExt[1].edx & ( RT_BIT(10) 1082 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL 1083 | RT_BIT(18) 1084 | RT_BIT(19) 1085 | RT_BIT(21) 1086 | X86_CPUID_AMD_FEATURE_EDX_AXMMX 1087 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB 1088 | RT_BIT(28) 1089 ))); 1090 } 1091 1092 /* 1093 * Apply the Synthetic CPU modifications. (TODO: move this up) 1094 */ 1095 if (pCPUM->fSyntheticCpu) 1096 { 1097 static const char s_szVendor[13] = "VirtualBox "; 1098 static const char s_szProcessor[48] = "VirtualBox SPARCx86 Processor v1000 "; /* includes null terminator */ 1099 1100 pCPUM->enmGuestCpuVendor = CPUMCPUVENDOR_SYNTHETIC; 1101 1102 /* Limit the nr of standard leaves; 5 for monitor/mwait */ 1103 pCPUM->aGuestCpuIdStd[0].eax = RT_MIN(pCPUM->aGuestCpuIdStd[0].eax, 5); 1104 1105 /* 0: Vendor */ 1106 pCPUM->aGuestCpuIdStd[0].ebx = pCPUM->aGuestCpuIdExt[0].ebx = ((uint32_t *)s_szVendor)[0]; 1107 pCPUM->aGuestCpuIdStd[0].ecx = pCPUM->aGuestCpuIdExt[0].ecx = ((uint32_t *)s_szVendor)[2]; 1108 pCPUM->aGuestCpuIdStd[0].edx = pCPUM->aGuestCpuIdExt[0].edx = ((uint32_t *)s_szVendor)[1]; 1109 1110 /* 1.eax: Version information. 
family : model : stepping */ 1111 pCPUM->aGuestCpuIdStd[1].eax = (0xf << 8) + (0x1 << 4) + 1; 1112 1113 /* Leaves 2 - 4 are Intel only - zero them out */ 1114 memset(&pCPUM->aGuestCpuIdStd[2], 0, sizeof(pCPUM->aGuestCpuIdStd[2])); 1115 memset(&pCPUM->aGuestCpuIdStd[3], 0, sizeof(pCPUM->aGuestCpuIdStd[3])); 1116 memset(&pCPUM->aGuestCpuIdStd[4], 0, sizeof(pCPUM->aGuestCpuIdStd[4])); 1117 1118 /* Leaf 5 = monitor/mwait */ 1119 1120 /* Limit the nr of extended leaves: 0x80000008 to include the max virtual and physical address size (64 bits guests). */ 1121 pCPUM->aGuestCpuIdExt[0].eax = RT_MIN(pCPUM->aGuestCpuIdExt[0].eax, 0x80000008); 1122 /* AMD only - set to zero. */ 1123 pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0; 1124 1125 /* 0x800000001: shared feature bits are set dynamically. */ 1126 memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1])); 1127 1128 /* 0x800000002-4: Processor Name String Identifier. */ 1129 pCPUM->aGuestCpuIdExt[2].eax = ((uint32_t *)s_szProcessor)[0]; 1130 pCPUM->aGuestCpuIdExt[2].ebx = ((uint32_t *)s_szProcessor)[1]; 1131 pCPUM->aGuestCpuIdExt[2].ecx = ((uint32_t *)s_szProcessor)[2]; 1132 pCPUM->aGuestCpuIdExt[2].edx = ((uint32_t *)s_szProcessor)[3]; 1133 pCPUM->aGuestCpuIdExt[3].eax = ((uint32_t *)s_szProcessor)[4]; 1134 pCPUM->aGuestCpuIdExt[3].ebx = ((uint32_t *)s_szProcessor)[5]; 1135 pCPUM->aGuestCpuIdExt[3].ecx = ((uint32_t *)s_szProcessor)[6]; 1136 pCPUM->aGuestCpuIdExt[3].edx = ((uint32_t *)s_szProcessor)[7]; 1137 pCPUM->aGuestCpuIdExt[4].eax = ((uint32_t *)s_szProcessor)[8]; 1138 pCPUM->aGuestCpuIdExt[4].ebx = ((uint32_t *)s_szProcessor)[9]; 1139 pCPUM->aGuestCpuIdExt[4].ecx = ((uint32_t *)s_szProcessor)[10]; 1140 pCPUM->aGuestCpuIdExt[4].edx = ((uint32_t *)s_szProcessor)[11]; 1141 1142 /* 0x800000005-7 - reserved -> zero */ 1143 memset(&pCPUM->aGuestCpuIdExt[5], 0, sizeof(pCPUM->aGuestCpuIdExt[5])); 1144 memset(&pCPUM->aGuestCpuIdExt[6], 0, sizeof(pCPUM->aGuestCpuIdExt[6])); 1145 memset(&pCPUM->aGuestCpuIdExt[7], 0, sizeof(pCPUM->aGuestCpuIdExt[7])); 1146 1147 /* 0x800000008: only the max virtual and physical address size. 
*/ 1148 pCPUM->aGuestCpuIdExt[8].ecx = pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */ 1331 if (pCPUM->u8PortableCpuIdLevel > 0) 1332 { 1333 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEcx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L); 1334 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW); 1335 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX); 1336 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR); 1337 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 1338 PORTABLE_DISABLE_FEATURE_BIT(2, pExtFeatureLeaf->uEcx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); 1339 PORTABLE_DISABLE_FEATURE_BIT(3, pExtFeatureLeaf->uEcx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV); 1340 1341 Assert(!(pExtFeatureLeaf->uEcx & ( X86_CPUID_AMD_FEATURE_ECX_CMPL 1342 | X86_CPUID_AMD_FEATURE_ECX_SVM 1343 | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC 1344 | X86_CPUID_AMD_FEATURE_ECX_CR8L 1345 | X86_CPUID_AMD_FEATURE_ECX_ABM 1346 | X86_CPUID_AMD_FEATURE_ECX_SSE4A 1347 | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE 1348 | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF 1349 | X86_CPUID_AMD_FEATURE_ECX_OSVW 1350 | X86_CPUID_AMD_FEATURE_ECX_IBS 1351 | X86_CPUID_AMD_FEATURE_ECX_SSE5 1352 | X86_CPUID_AMD_FEATURE_ECX_SKINIT 1353 | X86_CPUID_AMD_FEATURE_ECX_WDT 1354 | UINT32_C(0xffffc000) 1355 ))); 1356 Assert(!(pExtFeatureLeaf->uEdx & ( RT_BIT(10) 1357 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL 1358 | RT_BIT(18) 1359 | RT_BIT(19) 1360 | RT_BIT(21) 1361 | X86_CPUID_AMD_FEATURE_EDX_AXMMX 1362 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB 1363 | RT_BIT(28) 1364 ))); 1365 } 1149 1366 } 1150 1367 … … 1153 1370 * (APIC-ID := 0 and #LogCpus := 0) 1154 1371 */ 1155 p CPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;1372 pStdFeatureLeaf->uEbx &= 0x0000ffff; 1156 1373 #ifdef VBOX_WITH_MULTI_CORE 1157 if ( pCPUM->enmGuestCpuVendor != CPUMCPUVENDOR_SYNTHETIC 1158 && pVM->cCpus > 1) 1374 if (pVM->cCpus > 1) 1159 1375 { 1160 1376 /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */ 1161 p CPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCpus << 16);1162 p CPUM->aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_HTT; /* necessary for hyper-threading *or* multi-core CPUs */1377 pStdFeatureLeaf->uEbx |= (pVM->cCpus << 16); 1378 pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT; /* necessary for hyper-threading *or* multi-core CPUs */ 1163 1379 } 1164 1380 #endif … … 1170 1386 * Safe to expose; restrict the number of calls to 1 for the portable case. 
1171 1387 */ 1172 if ( pCPUM->u8PortableCpuIdLevel > 0 1173 && pCPUM->aGuestCpuIdStd[0].eax >= 2 1174 && (pCPUM->aGuestCpuIdStd[2].eax & 0xff) > 1) 1175 { 1176 LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCPUM->aGuestCpuIdStd[2].eax & 0xff)); 1177 pCPUM->aGuestCpuIdStd[2].eax &= UINT32_C(0xfffffffe); 1388 PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 2, 0); 1389 if ( pCPUM->u8PortableCpuIdLevel > 0 1390 && pCurLeaf 1391 && (pCurLeaf->uEax & 0xff) > 1) 1392 { 1393 LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff)); 1394 pCurLeaf->uEax &= UINT32_C(0xfffffffe); 1178 1395 } 1179 1396 … … 1185 1402 * Safe to expose 1186 1403 */ 1187 if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN)) 1188 { 1189 pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0; 1404 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 3, 0); 1405 if ( !(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN) 1406 && pCurLeaf) 1407 { 1408 pCurLeaf->uEcx = pCurLeaf->uEdx = 0; 1190 1409 if (pCPUM->u8PortableCpuIdLevel > 0) 1191 pC PUM->aGuestCpuIdStd[3].eax = pCPUM->aGuestCpuIdStd[3].ebx = 0;1410 pCurLeaf->uEax = pCurLeaf->uEbx = 0; 1192 1411 } 1193 1412 … … 1202 1421 * Note: These SMP values are constant regardless of ECX 1203 1422 */ 1204 pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0; 1205 pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0; 1423 CPUMCPUIDLEAF NewLeaf; 1424 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0); 1425 if (pCurLeaf) 1426 { 1427 NewLeaf.uLeaf = 4; 1428 NewLeaf.uSubLeaf = 0; 1429 NewLeaf.fSubLeafMask = 0; 1430 NewLeaf.uEax = 0; 1431 NewLeaf.uEbx = 0; 1432 NewLeaf.uEcx = 0; 1433 NewLeaf.uEdx = 0; 1434 NewLeaf.fFlags = 0; 1206 1435 #ifdef VBOX_WITH_MULTI_CORE 1207 if ( pVM->cCpus > 11208 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)1209 {1210 AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);1211 /* One logical processor with possibly multiple cores. */1212 /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */1213 pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCpus - 1) << 26); /* 6 bits only -> 64 cores! */1214 }1436 if ( pVM->cCpus > 1 1437 && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL) 1438 { 1439 AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS); 1440 /* One logical processor with possibly multiple cores. */ 1441 /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */ 1442 NewLeaf.uEax |= ((pVM->cCpus - 1) << 26); /* 6 bits only -> 64 cores! */ 1443 } 1215 1444 #endif 1445 rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf); 1446 AssertLogRelRCReturn(rc, rc); 1447 } 1216 1448 1217 1449 /* Cpuid 5: Monitor/mwait Leaf … … 1224 1456 * Safe to expose 1225 1457 */ 1226 if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR)) 1227 pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0; 1228 1229 pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0; 1230 /** @cfgm{/CPUM/MWaitExtensions, boolean, false} 1231 * Expose MWAIT extended features to the guest. For now we expose 1232 * just MWAIT break on interrupt feature (bit 1). 
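 * (Editor's note, hypothetical VM name: from the host side this CFGM key
 *  is typically reachable as 'VBoxManage setextradata MyVM
 *  VBoxInternal/CPUM/MWaitExtensions 1'.)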
1233 */ 1234 bool fMWaitExtensions; 1235 rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false); AssertRCReturn(rc, rc); 1236 if (fMWaitExtensions) 1237 { 1238 pCPUM->aGuestCpuIdStd[5].ecx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0; 1239 /** @todo: for now we just expose host's MWAIT C-states, although conceptually 1240 it shall be part of our power management virtualization model */ 1458 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0); 1459 if (pCurLeaf) 1460 { 1461 if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR)) 1462 pCurLeaf->uEax = pCurLeaf->uEbx = 0; 1463 1464 pCurLeaf->uEcx = pCurLeaf->uEdx = 0; 1465 if (fMWaitExtensions) 1466 { 1467 pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0; 1468 /** @todo: for now we just expose host's MWAIT C-states, although conceptually 1469 it shall be part of our power management virtualization model */ 1241 1470 #if 0 1242 /* MWAIT sub C-states */1243 pCPUM->aGuestCpuIdStd[5].edx =1244 (0 << 0) /* 0 in C0 */ |1245 (2 << 4) /* 2 in C1 */ |1246 (2 << 8) /* 2 in C2 */ |1247 (2 << 12) /* 2 in C3 */ |1248 (0 << 16) /* 0 in C4 */1249 ;1471 /* MWAIT sub C-states */ 1472 pCurLeaf->uEdx = 1473 (0 << 0) /* 0 in C0 */ | 1474 (2 << 4) /* 2 in C1 */ | 1475 (2 << 8) /* 2 in C2 */ | 1476 (2 << 12) /* 2 in C3 */ | 1477 (0 << 16) /* 0 in C4 */ 1478 ; 1250 1479 #endif 1251 } 1252 else 1253 pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0; 1480 } 1481 else 1482 pCurLeaf->uEcx = pCurLeaf->uEdx = 0; 1483 } 1254 1484 1255 1485 /* Cpuid 0x800000005 & 0x800000006 contain information about L1, L2 & L3 cache and TLB identifiers. … … 1270 1500 * VIA: Reserved 1271 1501 */ 1272 if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007)) 1273 { 1274 Assert(pVM->cpum.s.enmGuestCpuVendor != CPUMCPUVENDOR_INVALID); 1275 1276 pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0; 1277 1278 if (pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) 1502 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000007), 0); 1503 if (pCurLeaf) 1504 { 1505 Assert(pCPUM->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID); 1506 1507 pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0; 1508 1509 if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 1279 1510 { 1280 1511 /* Only expose the TSC invariant capability bit to the guest. */ 1281 pC PUM->aGuestCpuIdExt[7].edx&= 01512 pCurLeaf->uEdx &= 0 1282 1513 //| X86_CPUID_AMD_ADVPOWER_EDX_TS 1283 1514 //| X86_CPUID_AMD_ADVPOWER_EDX_FID … … 1300 1531 } 1301 1532 else 1302 pC PUM->aGuestCpuIdExt[7].edx= 0;1533 pCurLeaf->uEdx = 0; 1303 1534 } 1304 1535 … … 1312 1543 * EBX, ECX, EDX - reserved 1313 1544 */ 1314 if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008)) 1545 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000008), 0); 1546 if (pCurLeaf) 1315 1547 { 1316 1548 /* Only expose the virtual and physical address sizes to the guest. 
*/ 1317 pC PUM->aGuestCpuIdExt[8].eax &= UINT32_C(0x0000ffff);1318 pC PUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */1549 pCurLeaf->uEax &= UINT32_C(0x0000ffff); 1550 pCurLeaf->uEbx = pCurLeaf->uEdx = 0; /* reserved */ 1319 1551 /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu) 1320 1552 * NC (0-7) Number of cores; 0 equals 1 core */ 1321 pC PUM->aGuestCpuIdExt[8].ecx = 0;1553 pCurLeaf->uEcx = 0; 1322 1554 #ifdef VBOX_WITH_MULTI_CORE 1323 1555 if ( pVM->cCpus > 1 1324 && p VM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)1556 && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 1325 1557 { 1326 1558 /* Legacy method to determine the number of cores. */ 1327 pCPUM->aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_CMPL; 1328 pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */ 1559 pCurLeaf->uEcx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */ 1560 if (pExtFeatureLeaf) 1561 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL; 1329 1562 } 1330 1563 #endif 1331 1564 } 1332 1565 1333 /** @cfgm{/CPUM/NT4LeafLimit, boolean, false} 1334 * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from 1335 * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e). 1336 * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22]. 1337 */ 1338 bool fNt4LeafLimit; 1339 rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false); AssertRCReturn(rc, rc); 1340 if (fNt4LeafLimit && pCPUM->aGuestCpuIdStd[0].eax > 3) 1341 pCPUM->aGuestCpuIdStd[0].eax = 3; 1342 1343 /* 1344 * Limit it the number of entries and fill the remaining with the defaults. 1566 1567 /* 1568 * Limit it the number of entries, zapping the remainder. 1345 1569 * 1346 1570 * The limits are masking off stuff about power saving and similar, this … … 1348 1572 * info too in these leaves (like words about having a constant TSC). 1349 1573 */ 1350 if (pCPUM->aGuestCpuIdStd[0].eax > 5) 1351 pCPUM->aGuestCpuIdStd[0].eax = 5; 1352 for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++) 1353 pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef; 1354 1355 if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008)) 1356 pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008); 1357 for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000) 1358 ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1 1359 : 0; 1360 i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt); 1361 i++) 1362 pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef; 1574 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0); 1575 if (pCurLeaf) 1576 { 1577 if (pCurLeaf->uEax > 5) 1578 { 1579 pCurLeaf->uEax = 5; 1580 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 1581 UINT32_C(0x00000006), UINT32_C(0x000fffff)); 1582 } 1583 1584 /* NT4 hack, no zapping of extra leaves here. 
*/ 1585 if (fNt4LeafLimit && pCurLeaf->uEax > 3) 1586 pCurLeaf->uEax = 3; 1587 } 1588 1589 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000000), 0); 1590 if (pCurLeaf) 1591 { 1592 if (pCurLeaf->uEax > UINT32_C(0x80000008)) 1593 { 1594 pCurLeaf->uEax = UINT32_C(0x80000008); 1595 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 1596 UINT32_C(0x80000008), UINT32_C(0x800fffff)); 1597 } 1598 } 1363 1599 1364 1600 /* … … 1370 1606 * temperature/hz/++ stuff, include it as well (static). 1371 1607 */ 1372 if ( pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000) 1373 && pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004)) 1374 { 1375 pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002)); 1376 pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */ 1377 for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000); 1378 i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); 1379 i++) 1380 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef; 1381 } 1382 else 1383 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++) 1384 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef; 1608 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0xc0000000), 0); 1609 if (pCurLeaf) 1610 { 1611 if ( pCurLeaf->uEax >= UINT32_C(0xc0000000) 1612 && pCurLeaf->uEax <= UINT32_C(0xc0000004)) 1613 { 1614 pCurLeaf->uEax = RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000002)); 1615 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 1616 UINT32_C(0xc0000002), UINT32_C(0xc00fffff)); 1617 1618 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1619 UINT32_C(0xc0000001), 0); 1620 if (pCurLeaf) 1621 pCurLeaf->uEdx = 0; /* all features hidden */ 1622 } 1623 else 1624 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 1625 UINT32_C(0xc0000000), UINT32_C(0xc00fffff)); 1626 } 1385 1627 1386 1628 /* … … 1391 1633 * Currently we do not support any hypervisor-specific interface. 1392 1634 */ 1393 pCPUM->aGuestCpuIdHyper[0].eax = UINT32_C(0x40000001); 1394 pCPUM->aGuestCpuIdHyper[0].ebx = pCPUM->aGuestCpuIdHyper[0].ecx 1395 = pCPUM->aGuestCpuIdHyper[0].edx = 0x786f4256; /* 'VBox' */ 1396 pCPUM->aGuestCpuIdHyper[1].eax = 0x656e6f6e; /* 'none' */ 1397 pCPUM->aGuestCpuIdHyper[1].ebx = pCPUM->aGuestCpuIdHyper[1].ecx 1398 = pCPUM->aGuestCpuIdHyper[1].edx = 0; /* Reserved */ 1635 NewLeaf.uLeaf = UINT32_C(0x40000000); 1636 NewLeaf.uSubLeaf = 0; 1637 NewLeaf.fSubLeafMask = 0; 1638 NewLeaf.uEax = UINT32_C(0x40000001); 1639 NewLeaf.uEbx = 0x786f4256 /* 'VBox' */; 1640 NewLeaf.uEcx = 0x786f4256 /* 'VBox' */; 1641 NewLeaf.uEdx = 0x786f4256 /* 'VBox' */; 1642 NewLeaf.fFlags = 0; 1643 rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf); 1644 AssertLogRelRCReturn(rc, rc); 1645 1646 NewLeaf.uLeaf = UINT32_C(0x40000001); 1647 NewLeaf.uEax = 0x656e6f6e; /* 'none' */ 1648 NewLeaf.uEbx = 0; 1649 NewLeaf.uEcx = 0; 1650 NewLeaf.uEdx = 0; 1651 NewLeaf.fFlags = 0; 1652 rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf); 1653 AssertLogRelRCReturn(rc, rc); 1399 1654 1400 1655 /* 1401 1656 * Mini CPU selection support for making Mac OS X happy. 
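 * (Editor's worked example of the packing documented at the @cfgm note
 *  above: MaxIntelFamilyModelStep = 0x000f0601 caps the reported ID at
 *  family 0xf, model 6, stepping 1; byte 0 = stepping, byte 1 = model,
 *  byte 2 = family, byte 3 = zero.)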
1402 1657 */ 1403 if (pCPUM->enmGuestCpuVendor == CPUMCPUVENDOR_INTEL) 1404 { 1405 /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX} 1406 * Restrict the reported CPU family+model+stepping of intel CPUs. This is 1407 * probably going to be a temporary hack, so don't depend on this. 1408 * The 1st byte of the value is the stepping, the 2nd byte value is the model 1409 * number and the 3rd byte value is the family, and the 4th value must be zero. 1410 */ 1411 uint32_t uMaxIntelFamilyModelStep; 1412 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX); 1413 AssertRCReturn(rc, rc); 1414 uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pCPUM->aGuestCpuIdStd[1].eax), 1415 ASMGetCpuModelIntel(pCPUM->aGuestCpuIdStd[1].eax), 1416 ASMGetCpuFamily(pCPUM->aGuestCpuIdStd[1].eax), 1658 if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL) 1659 { 1660 uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pStdFeatureLeaf->uEax), 1661 ASMGetCpuModelIntel(pStdFeatureLeaf->uEax), 1662 ASMGetCpuFamily(pStdFeatureLeaf->uEax), 1417 1663 0); 1418 1664 if (uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep) 1419 1665 { 1420 uint32_t uNew = p CPUM->aGuestCpuIdStd[1].eax & UINT32_C(0xf0003000);1666 uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000); 1421 1667 uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf; /* stepping */ 1422 1668 uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4; /* 4 low model bits */ … … 1426 1672 uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20; 1427 1673 LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x\n", 1428 p CPUM->aGuestCpuIdStd[1].eax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));1429 p CPUM->aGuestCpuIdStd[1].eax = uNew;1674 pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep)); 1675 pStdFeatureLeaf->uEax = uNew; 1430 1676 } 1431 1677 } 1432 1678 1433 /* 1434 * Load CPUID overrides from configuration. 1435 * Note: Kind of redundant now, but allows unchanged overrides 1436 */ 1437 /** @cfgm{CPUM/CPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit} 1438 * Overrides the CPUID leaf values. */ 1439 PCFGMNODE pOverrideCfg = CFGMR3GetChild(pCpumCfg, "CPUID"); 1440 rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0], RT_ELEMENTS(pCPUM->aGuestCpuIdStd), pOverrideCfg); 1441 AssertRCReturn(rc, rc); 1442 rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0], RT_ELEMENTS(pCPUM->aGuestCpuIdExt), pOverrideCfg); 1443 AssertRCReturn(rc, rc); 1444 rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pOverrideCfg); 1445 AssertRCReturn(rc, rc); 1446 1447 /* 1448 * Check if PAE was explicitely enabled by the user. 1449 */ 1679 1680 /* 1681 * Move the MSR and CPUID arrays over on the hypervisor heap, and explode 1682 * guest CPU features again. 
1683 */ 1684 void *pvFree = pCPUM->GuestInfo.paCpuIdLeavesR3; 1685 int rc1 = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCPUM, pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves); 1686 RTMemFree(pvFree); 1687 1688 pvFree = pCPUM->GuestInfo.paMsrRangesR3; 1689 int rc2 = MMHyperDupMem(pVM, pvFree, 1690 sizeof(pCPUM->GuestInfo.paMsrRangesR3[0]) * pCPUM->GuestInfo.cMsrRanges, 32, 1691 MM_TAG_CPUM_MSRS, (void **)&pCPUM->GuestInfo.paMsrRangesR3); 1692 RTMemFree(pvFree); 1693 AssertLogRelRCReturn(rc1, rc1); 1694 AssertLogRelRCReturn(rc2, rc2); 1695 1696 pCPUM->GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paMsrRangesR3); 1697 pCPUM->GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paMsrRangesR3); 1698 cpumR3MsrRegStats(pVM); 1699 1700 /* 1701 * Some more configuration that we're applying at the end of everything 1702 * via the CPUMSetGuestCpuIdFeature API. 1703 */ 1704 1705 /* Check if PAE was explicitely enabled by the user. */ 1450 1706 bool fEnable; 1451 1707 rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false); AssertRCReturn(rc, rc); … … 1453 1709 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE); 1454 1710 1455 /* 1456 * We don't normally enable NX for raw-mode, so give the user a chance to 1457 * force it on. 1458 */ 1711 /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */ 1459 1712 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false); AssertRCReturn(rc, rc); 1460 1713 if (fEnable) 1461 1714 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 1462 1715 1463 /* 1464 * We don't enable the Hypervisor Present bit by default, but it may 1465 * be needed by some guests. 1466 */ 1716 /* We don't enable the Hypervisor Present bit by default, but it may be needed by some guests. */ 1467 1717 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableHVP", &fEnable, false); AssertRCReturn(rc, rc); 1468 1718 if (fEnable) … … 1488 1738 { 1489 1739 LogFlow(("CPUMR3Relocate\n")); 1740 1741 pVM->cpum.s.GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paMsrRangesR3); 1742 pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3); 1490 1743 1491 1744 /* Recheck the guest DRx values in raw-mode. */ … … 1552 1805 * Used by CPUMR3Reset and CPU hot plugging. 1553 1806 * 1554 * @param pVCpu Pointer to the VMCPU. 1555 */ 1556 VMMR3DECL(void) CPUMR3ResetCpu(PVMCPU pVCpu) 1807 * @param pVM Pointer to the cross context VM structure. 1808 * @param pVCpu Pointer to the cross context virtual CPU structure of 1809 * the CPU that is being reset. This may differ from the 1810 * current EMT. 1811 */ 1812 VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu) 1557 1813 { 1558 1814 /** @todo anything different for VCPU > 0? */ … … 1635 1891 supports all bits, since a zero value here should be read as 0xffbf. */ 1636 1892 1893 /* 1894 * MSRs. 1895 */ 1637 1896 /* Init PAT MSR */ 1638 1897 pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */ … … 1642 1901 Assert(!pCtx->msrEFER); 1643 1902 1903 /* IA32_MISC_ENABLE - not entirely sure what the init/reset state really 1904 is supposed to be here, just trying provide useful/sensible values. */ 1905 PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, MSR_IA32_MISC_ENABLE); 1906 if (pRange) 1907 { 1908 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = MSR_IA32_MISC_ENABLE_BTS_UNAVAIL 1909 | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL 1910 | (pVM->cpum.s.GuestFeatures.fMonitorMWait ? 
MSR_IA32_MISC_ENABLE_MONITOR : 0)
1911 | MSR_IA32_MISC_ENABLE_FAST_STRINGS;
1912 pRange->fWrIgnMask |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
1913 | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
1914 pRange->fWrGpMask &= ~pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
1915 }
1916
1917 /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */
1918
1644 1919 /** @todo r=ramshankar: Currently broken for SMP as TMCpuTickSet() expects to be
1645 1920 * called from each EMT while we're getting called by CPUMR3Reset()
1646 1921 * iteratively on the same thread. Fix later. */
1647 #if 0
1922 #if 0 /** @todo r=bird: This we will do in TM, not here. */
1648 1923 /* TSC must be 0. Intel spec. Table 9-1. "IA-32 Processor States Following Power-up, Reset, or INIT." */
1649 1924 CPUMSetGuestMsr(pVCpu, MSR_IA32_TSC, 0);
… …
1673 1948 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1674 1949 {
1675 CPUMR3ResetCpu(&pVM->aCpus[i]);
1950 CPUMR3ResetCpu(pVM, &pVM->aCpus[i]);
1676 1951
1677 1952 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
… …
1725 2000 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt));
1726 2001 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt));
2002 }
2003
2004
2005 static int cpumR3LoadCpuIdOneGuestArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
2006 {
2007 uint32_t cCpuIds;
2008 int rc = SSMR3GetU32(pSSM, &cCpuIds);
2009 if (RT_SUCCESS(rc))
2010 {
2011 if (cCpuIds < 64)
2012 {
2013 for (uint32_t i = 0; i < cCpuIds; i++)
2014 {
2015 CPUMCPUID CpuId;
2016 rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId));
2017 if (RT_FAILURE(rc))
2018 break;
2019
2020 CPUMCPUIDLEAF NewLeaf;
2021 NewLeaf.uLeaf = uBase + i;
2022 NewLeaf.uSubLeaf = 0;
2023 NewLeaf.fSubLeafMask = 0;
2024 NewLeaf.uEax = CpuId.eax;
2025 NewLeaf.uEbx = CpuId.ebx;
2026 NewLeaf.uEcx = CpuId.ecx;
2027 NewLeaf.uEdx = CpuId.edx;
2028 NewLeaf.fFlags = 0;
2029 rc = cpumR3CpuIdInsert(ppaLeaves, pcLeaves, &NewLeaf);
2030 }
2031 }
2032 else
2033 rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
2034 }
2035 if (RT_FAILURE(rc))
2036 {
2037 RTMemFree(*ppaLeaves);
2038 *ppaLeaves = NULL;
2039 *pcLeaves = 0;
2040 }
2041 return rc;
2042 }
2043
2044
2045 static int cpumR3LoadCpuIdGuestArrays(PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
2046 {
2047 *ppaLeaves = NULL;
2048 *pcLeaves = 0;
2049
2050 int rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves);
2051 if (RT_SUCCESS(rc))
2052 rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves);
2053 if (RT_SUCCESS(rc))
2054 rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves);
2055
2056 return rc;
1727 2057 }
1728 2058
… …
1809 2139 && !(aHostRaw##set [1].reg & bit) \
1810 2140 && !(aHostOverride##set [1].reg & bit) \
1811 && !(aGuestOverride##set [1].reg & bit) \
1812 2141 ) \
1813 2142 { \
… …
1823 2152 && !(aHostRaw##set [1].reg & bit) \
1824 2153 && !(aHostOverride##set [1].reg & bit) \
1825 && !(aGuestOverride##set [1].reg & bit) \
1826 2154 ) \
1827 2155 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
… …
1832 2160 && !(aHostRaw##set [1].reg & bit) \
1833 2161 && !(aHostOverride##set [1].reg & bit) \
1834 && !(aGuestOverride##set [1].reg & bit) \
1835 2162 ) \
1836 2163 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
… …
1845 2172 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
1846 2173 && !(aHostOverride##set [1].reg & bit) \
1847 && !(aGuestOverride##set [1].reg & bit) \
1848 2174 ) \
1849 2175 { \
… …
1860 2186 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
1861 2187 && !(aHostOverride##set [1].reg & bit) \
1862 && !(aGuestOverride##set [1].reg & bit) \
1863 2188 ) \
1864 2189 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \
… …
1870 2195 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \
1871 2196 && !(aHostOverride##set [1].reg & bit) \
1872 && !(aGuestOverride##set [1].reg & bit) \
1873 2197 ) \
1874 2198 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
… …
1885 2209 : aHostRawStd[1].reg & (StdBit)) \
1886 2210 && !(aHostOverrideExt[1].reg & (ExtBit)) \
1887 && !(aGuestOverrideExt[1].reg & (ExtBit)) \
1888 2211 ) \
1889 2212 { \
… …
1901 2224 : aHostRawStd[1].reg & (StdBit)) \
1902 2225 && !(aHostOverrideExt[1].reg & (ExtBit)) \
1903 && !(aGuestOverrideExt[1].reg & (ExtBit)) \
1904 2226 ) \
1905 2227 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \
… …
1912 2234 : aHostRawStd[1].reg & (StdBit)) \
1913 2235 && !(aHostOverrideExt[1].reg & (ExtBit)) \
1914 && !(aGuestOverrideExt[1].reg & (ExtBit)) \
1915 2236 ) \
1916 2237 LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \
… …
1921 2242 * Load them into stack buffers first.
1922 2243 */
1923 CPUMCPUID aGuestCpuIdStd[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)];
1924 uint32_t cGuestCpuIdStd;
1925 int rc = SSMR3GetU32(pSSM, &cGuestCpuIdStd); AssertRCReturn(rc, rc);
1926 if (cGuestCpuIdStd > RT_ELEMENTS(aGuestCpuIdStd))
1927 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1928 SSMR3GetMem(pSSM, &aGuestCpuIdStd[0], cGuestCpuIdStd * sizeof(aGuestCpuIdStd[0]));
1929
1930 CPUMCPUID aGuestCpuIdExt[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)];
1931 uint32_t cGuestCpuIdExt;
1932 rc = SSMR3GetU32(pSSM, &cGuestCpuIdExt); AssertRCReturn(rc, rc);
1933 if (cGuestCpuIdExt > RT_ELEMENTS(aGuestCpuIdExt))
1934 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1935 SSMR3GetMem(pSSM, &aGuestCpuIdExt[0], cGuestCpuIdExt * sizeof(aGuestCpuIdExt[0]));
1936
1937 CPUMCPUID aGuestCpuIdCentaur[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)];
1938 uint32_t cGuestCpuIdCentaur;
1939 rc = SSMR3GetU32(pSSM, &cGuestCpuIdCentaur); AssertRCReturn(rc, rc);
1940 if (cGuestCpuIdCentaur > RT_ELEMENTS(aGuestCpuIdCentaur))
1941 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1942 SSMR3GetMem(pSSM, &aGuestCpuIdCentaur[0], cGuestCpuIdCentaur * sizeof(aGuestCpuIdCentaur[0]));
2244 PCPUMCPUIDLEAF paLeaves;
2245 uint32_t cLeaves;
2246 int rc = cpumR3LoadCpuIdGuestArrays(pSSM, uVersion, &paLeaves, &cLeaves);
2247 AssertRCReturn(rc, rc);
2248
2249 /** @todo we'll be leaking paLeaves on error return... */
1943 2250
1944 2251 CPUMCPUID GuestCpuIdDef;
… …
1951 2258 if (cRawStd > RT_ELEMENTS(aRawStd))
1952 2259 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1953 SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
2260 rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0]));
2261 AssertRCReturn(rc, rc);
2262 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
2263 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
1954 2264
1955 2265 CPUMCPUID aRawExt[32];
… …
1960 2270 rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0]));
1961 2271 AssertRCReturn(rc, rc);
1962
1963 /*
1964 * Note that we support restoring less than the current amount of standard
1965 * leaves because we've been allowed more is newer version of VBox.
1966 *
1967 * So, pad new entries with the default.
1968 */
1969 for (uint32_t i = cGuestCpuIdStd; i < RT_ELEMENTS(aGuestCpuIdStd); i++)
1970 aGuestCpuIdStd[i] = GuestCpuIdDef;
1971
1972 for (uint32_t i = cGuestCpuIdExt; i < RT_ELEMENTS(aGuestCpuIdExt); i++)
1973 aGuestCpuIdExt[i] = GuestCpuIdDef;
1974
1975 for (uint32_t i = cGuestCpuIdCentaur; i < RT_ELEMENTS(aGuestCpuIdCentaur); i++)
1976 aGuestCpuIdCentaur[i] = GuestCpuIdDef;
1977
1978 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)
1979 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);
1980
1981 2272 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++)
1982 2273 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);
… …
1999 2290 * Note! We currently only need the feature leaves, so skip rest.
2000 2291 */
2001 PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/CPUID");
2002 CPUMCPUID aGuestOverrideStd[2];
2003 memcpy(&aGuestOverrideStd[0], &aHostRawStd[0], sizeof(aGuestOverrideStd));
2004 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aGuestOverrideStd[0], RT_ELEMENTS(aGuestOverrideStd), pOverrideCfg);
2005
2006 CPUMCPUID aGuestOverrideExt[2];
2007 memcpy(&aGuestOverrideExt[0], &aHostRawExt[0], sizeof(aGuestOverrideExt));
2008 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aGuestOverrideExt[0], RT_ELEMENTS(aGuestOverrideExt), pOverrideCfg);
2009
2010 pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
2292 PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID");
2011 2293 CPUMCPUID aHostOverrideStd[2];
2012 2294 memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd));
… …
2259 2541 * "EMU?" - Can this be emulated?
2260 2542 */
2543 CPUMCPUID aGuestCpuIdStd[2];
2544 RT_ZERO(aGuestCpuIdStd);
2545 cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]);
2546
2261 2547 /* CPUID(1).ecx */
2262 2548 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU
… …
2328 2614
2329 2615 /* CPUID(0x80000000). */
2330 if ( aGuestCpuIdExt[0].eax >= UINT32_C(0x80000001)
2331 && aGuestCpuIdExt[0].eax < UINT32_C(0x8000007f))
2616 CPUMCPUID aGuestCpuIdExt[2];
2617 RT_ZERO(aGuestCpuIdExt);
2618 if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1]))
2332 2619 {
2333 2620 /** @todo deal with no 0x80000001 on the host. */
… …
2407 2694 * We're good, commit the CPU ID leaves.
2408 2695 */
2409 memcpy(&pVM->cpum.s.aGuestCpuIdStd[0], &aGuestCpuIdStd[0], sizeof(aGuestCpuIdStd));
2410 memcpy(&pVM->cpum.s.aGuestCpuIdExt[0], &aGuestCpuIdExt[0], sizeof(aGuestCpuIdExt));
2411 memcpy(&pVM->cpum.s.aGuestCpuIdCentaur[0], &aGuestCpuIdCentaur[0], sizeof(aGuestCpuIdCentaur));
2412 pVM->cpum.s.GuestCpuIdDef = GuestCpuIdDef;
2696 MMHyperFree(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
2697 pVM->cpum.s.GuestInfo.paCpuIdLeavesR0 = NIL_RTR0PTR;
2698 pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR;
2699 pVM->cpum.s.GuestInfo.DefCpuId = GuestCpuIdDef;
2700 rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves);
2701 RTMemFree(paLeaves);
2702 AssertLogRelRCReturn(rc, rc);
2703
2413 2704
2414 2705 #undef CPUID_CHECK_RET
-
trunk/src/VBox/VMM/VMMR3/EM.cpp
r49072 r49893
2398 2398 PGMR3ResetCpu(pVM, pVCpu);
2399 2399 TRPMR3ResetCpu(pVCpu);
2400 CPUMR3ResetCpu(pVCpu);
2400 CPUMR3ResetCpu(pVM, pVCpu);
2401 2401 EMR3ResetCpu(pVCpu);
2402 2402 HMR3ResetCpu(pVCpu);
-
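CPUMR3ResetCpu now takes the VM handle as well as the per-CPU structure; with the MSR ranges and CPUID leaves held in VM-wide state, the per-CPU reset path needs pVM to reach them. Every caller in EM.cpp, VM.cpp and VMM.cpp (below) is updated the same way, matching the CPUMR3Reset loop in this changeset:

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        CPUMR3ResetCpu(pVM, &pVM->aCpus[i]); /* was: CPUMR3ResetCpu(&pVM->aCpus[i]); */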
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r48629 r49893
2168 2168 }
2169 2169
2170
2171 /** @todo query from CPUM. */
2170 2172 pVM->pgm.s.GCPhysInvAddrMask = 0;
2171 2173 for (uint32_t iBit = cMaxPhysAddrWidth; iBit < 64; iBit++)
-
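The loop above continues past the visible hunk, but from its bounds it evidently sets every bit from cMaxPhysAddrWidth up to bit 63 in GCPhysInvAddrMask, i.e. the address bits a guest-physical address must never carry. Assuming only that cMaxPhysAddrWidth lies between 1 and 64, the same mask can be computed with a single shift; a small sketch (CalcInvPhysAddrMask is a made-up name):

    #include <stdint.h>

    /* Mask with bits [cMaxPhysAddrWidth..63] set and all lower bits clear;
       e.g. a 36-bit physical address width yields 0xfffffff000000000. */
    static uint64_t CalcInvPhysAddrMask(uint32_t cMaxPhysAddrWidth)
    {
        return cMaxPhysAddrWidth >= 64 ? 0 : UINT64_MAX << cMaxPhysAddrWidth;
    }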
trunk/src/VBox/VMM/VMMR3/VM.cpp
r48528 r49893
4380 4380 PDMR3ResetCpu(pVCpu);
4381 4381 TRPMR3ResetCpu(pVCpu);
4382 CPUMR3ResetCpu(pVCpu);
4382 CPUMR3ResetCpu(pVM, pVCpu);
4383 4383 EMR3ResetCpu(pVCpu);
4384 4384 HMR3ResetCpu(pVCpu);
-
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r49147 r49893
1420 1420
1421 1421 PGMR3ResetCpu(pVM, pVCpu);
1422 CPUMR3ResetCpu(pVCpu);
1422 CPUMR3ResetCpu(pVM, pVCpu);
1423 1423
1424 1424 return VINF_EM_WAIT_SIPI;
-
trunk/src/VBox/VMM/VMMR3/VMMTests.cpp
r49383 r49893
872 872 * Do the experiments.
873 873 */
874 uint32_t uMsr = 0xc0011011;
875 uint64_t uValue = 0x10000;
874 uint32_t uMsr = 0x00000277;
875 uint64_t uValue = UINT64_C(0x0007010600070106);
876 876 #if 0
877 uValue &= ~(RT_BIT_64(17) | RT_BIT_64(16) | RT_BIT_64(15) | RT_BIT_64(14) | RT_BIT_64(13));
878 uValue |= RT_BIT_64(13);
877 879 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
878 880 RCPtrValues, RCPtrValues + sizeof(uint64_t));
879 881 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
880 882 uMsr, pauValues[0], uValue, pauValues[1], rc);
881 #endif
883 #elif 1
884 const uint64_t uOrgValue = uValue;
885 uint32_t cChanges = 0;
886 for (int iBit = 63; iBit >= 58; iBit--)
887 {
888 uValue = uOrgValue & ~RT_BIT_64(iBit);
889 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
890 RCPtrValues, RCPtrValues + sizeof(uint64_t));
891 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nclear bit=%u -> %s\n",
892 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
893 (pauValues[0] ^ pauValues[1]) & RT_BIT_64(iBit) ? "changed" : "unchanged");
894 cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
895
896 uValue = uOrgValue | RT_BIT_64(iBit);
897 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
898 RCPtrValues, RCPtrValues + sizeof(uint64_t));
899 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nset bit=%u -> %s\n",
900 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit,
901 (pauValues[0] ^ pauValues[1]) & RT_BIT_64(iBit) ? "changed" : "unchanged");
902 cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]);
903 }
904 RTPrintf("%u change(s)\n", cChanges);
905 #else
906 uint64_t fWriteable = 0;
882 907 for (uint32_t i = 0; i <= 63; i++)
883 908 {
884 909 uValue = RT_BIT_64(i);
910 # if 0
911 if (uValue & (0x7))
912 continue;
913 # endif
885 914 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
886 915 RCPtrValues, RCPtrValues + sizeof(uint64_t));
887 916 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
888 917 uMsr, pauValues[0], uValue, pauValues[1], rc);
918 if (RT_SUCCESS(rc))
919 fWriteable |= RT_BIT_64(i);
889 920 }
… …
900 931 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n",
901 932 uMsr, pauValues[0], uValue, pauValues[1], rc);
933
934 uValue = fWriteable;
935 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue),
936 RCPtrValues, RCPtrValues + sizeof(uint64_t));
937 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc [fWriteable]\n",
938 uMsr, pauValues[0], uValue, pauValues[1], rc);
939
940 #endif
902 941
903 942 /*
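The rewritten experiment retargets the probe from the AMD-range MSR 0xc0011011 to MSR 0x00000277, which is IA32_PAT, and seeds it with the documented PAT power-up value 0x0007010600070106. The technique is the same throughout: write a value with one bit flipped, read the register back through the raw-mode context helper, and record which bits stick. A condensed, self-contained sketch of that probing idea follows; RdMsr and TryWrMsr are hypothetical ring-0 helpers standing in for the VMMR3CallRC plumbing above, and on real hardware a rejected WRMSR raises #GP rather than returning an error code:

    #include <stdint.h>

    extern uint64_t RdMsr(uint32_t uMsr);                   /* hypothetical */
    extern int      TryWrMsr(uint32_t uMsr, uint64_t uVal); /* hypothetical, 0 on success */

    /* Returns a mask of the MSR bits that accept being set via WRMSR. */
    static uint64_t ProbeWritableBits(uint32_t uMsr)
    {
        uint64_t const uSaved     = RdMsr(uMsr);
        uint64_t       fWriteable = 0;
        for (unsigned iBit = 0; iBit < 64; iBit++)
        {
            uint64_t const fBit = UINT64_C(1) << iBit;
            if (   TryWrMsr(uMsr, uSaved | fBit) == 0
                && (RdMsr(uMsr) & fBit))
                fWriteable |= fBit;
        }
        TryWrMsr(uMsr, uSaved); /* restore the original value */
        return fWriteable;
    }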