- Timestamp: Dec 13, 2013 12:40:20 AM
- Location: trunk/src/VBox/VMM
- Files: 4 added, 18 edited
trunk/src/VBox/VMM/Makefile.kmk
r49282 r49893
  77   77  endif
  78   78
       79  ifdef VBOX_WITH_NEW_MSR_CODE
       80   VMM_COMMON_DEFS += VBOX_WITH_NEW_MSR_CODE
       81  endif
       82
  79   83
  80   84  #
  …    …
 118  122      VMMR3/CFGM.cpp \
 119  123      VMMR3/CPUM.cpp \
      124      VMMR3/CPUMR3CpuId.cpp \
      125      VMMR3/CPUMR3Db.cpp \
 120  126      VMMR3/CPUMDbg.cpp \
 121  127      VMMR3/DBGF.cpp \
  …    …
 187  193      ,) \
 188  194      VMMAll/CPUMAllRegs.cpp \
      195      VMMAll/CPUMAllMsrs.cpp \
 189  196      VMMAll/CPUMStack.cpp \
 190  197      VMMAll/DBGFAll.cpp \
  …    …
 438  445      VMMRZ/VMMRZ.cpp \
 439  446      VMMAll/CPUMAllRegs.cpp \
      447      VMMAll/CPUMAllMsrs.cpp \
 440  448      VMMAll/DBGFAll.cpp \
 441  449      VMMAll/IEMAll.cpp \
  …    …
 540  548      VMMRZ/VMMRZ.cpp \
 541  549      VMMAll/CPUMAllRegs.cpp \
      550      VMMAll/CPUMAllMsrs.cpp \
 542  551      VMMAll/CPUMStack.cpp \
 543  552      VMMAll/DBGFAll.cpp \
  …    …
 610  619  LIBRARIES += SSMStandalone
 611  620  SSMStandalone_TEMPLATE = VBOXR3EXE
 612       SSMStandalone_DEFS = IN_VMM_R3 IN_VMM_STATIC SSM_STANDALONE
      621  SSMStandalone_DEFS = IN_VMM_R3 IN_VMM_STATIC SSM_STANDALONE CPUM_DB_STANDALONE
 613  622  SSMStandalone_INCS = include
 614       SSMStandalone_SOURCES = VMMR3/SSM.cpp
      623  SSMStandalone_SOURCES = \
      624      VMMR3/SSM.cpp \
      625      VMMR3/CPUMR3Db.cpp
 615  626  endif # !VBOX_ONLY_EXTPACKS
 616  627
  …    …
 704  715  endif # bird wants good stacks
 705  716
      717
      718  # Alias the CPU database entries.
      719  $(foreach base,$(notdir $(basename $(wildcard $(PATH_SUB_CURRENT)/VMMR3/cpus/*.h))), $(eval $(base).o $(base).obj: CPUMR3Db.o))
      720
      721
 706  722  include $(FILE_KBUILD_SUB_FOOTER)
 707  723
  …    …
 732  748  LegacyandAMD64.o LegacyandAMD64.obj: 32BitToAMD64.o PAEToAMD64.o
 733  749  AMD64andLegacy.o AMD64andLegacy.obj: AMD64To32Bit.o AMD64ToPAE.o
      750
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r49849 r49893 870 870 } 871 871 872 #ifndef VBOX_WITH_NEW_MSR_CODE 872 873 873 874 /** … … 1585 1586 } 1586 1587 1588 #endif /* !VBOX_WITH_NEW_MSR_CODE */ 1589 1587 1590 1588 1591 VMMDECL(RTGCPTR) CPUMGetGuestIDTR(PVMCPU pVCpu, uint16_t *pcbLimit) … … 1849 1852 { 1850 1853 return pVCpu->cpum.s.Guest.msrEFER; 1854 } 1855 1856 1857 /** 1858 * Looks up a CPUID leaf in the CPUID leaf array. 1859 * 1860 * @returns Pointer to the leaf if found, NULL if not. 1861 * 1862 * @param pVM Pointer to the cross context VM structure. 1863 * @param uLeaf The leaf to get. 1864 * @param uSubLeaf The subleaf, if applicable. Just pass 0 if it 1865 * isn't. 1866 */ 1867 PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf) 1868 { 1869 unsigned iEnd = pVM->cpum.s.GuestInfo.cCpuIdLeaves; 1870 if (iEnd) 1871 { 1872 unsigned iStart = 0; 1873 PCPUMCPUIDLEAF paLeaves = pVM->cpum.s.GuestInfo.CTX_SUFF(paCpuIdLeaves); 1874 for (;;) 1875 { 1876 unsigned i = iStart + (iEnd - iStart) / 2U; 1877 if (uLeaf < paLeaves[i].uLeaf) 1878 { 1879 if (i <= iStart) 1880 return NULL; 1881 iEnd = i; 1882 } 1883 else if (uLeaf > paLeaves[i].uLeaf) 1884 { 1885 i += 1; 1886 if (i >= iEnd) 1887 return NULL; 1888 iStart = i; 1889 } 1890 else 1891 { 1892 uSubLeaf &= paLeaves[i].fSubLeafMask; 1893 if (uSubLeaf != paLeaves[i].uSubLeaf) 1894 { 1895 /* Find the right subleaf. We return the last one before 1896 uSubLeaf if we don't find an exact match. */ 1897 if (uSubLeaf < paLeaves[i].uSubLeaf) 1898 while ( i > 0 1899 && uLeaf == paLeaves[i].uLeaf 1900 && uSubLeaf < paLeaves[i].uSubLeaf) 1901 i--; 1902 else 1903 while ( i + 1 < pVM->cpum.s.GuestInfo.cCpuIdLeaves 1904 && uLeaf == paLeaves[i + 1].uLeaf 1905 && uSubLeaf >= paLeaves[i + 1].uSubLeaf) 1906 i++; 1907 } 1908 return &paLeaves[i]; 1909 } 1910 } 1911 } 1912 1913 return NULL; 1851 1914 } 1852 1915 … … 1895 1958 if ( iLeaf == 4 1896 1959 && cCurrentCacheIndex < 3 1897 && pVM->cpum.s. 
enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)1960 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL) 1898 1961 { 1899 1962 uint32_t type, level, sharing, linesize, … … 1997 2060 VMMDECL(void) CPUMSetGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature) 1998 2061 { 2062 PCPUMCPUIDLEAF pLeaf; 2063 1999 2064 switch (enmFeature) 2000 2065 { … … 2003 2068 */ 2004 2069 case CPUMCPUIDFEATURE_APIC: 2005 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2006 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC; 2007 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 2008 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) 2009 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC; 2070 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 2071 if (pLeaf) 2072 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC; 2073 2074 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2075 if ( pLeaf 2076 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 2077 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC; 2078 2079 pVM->cpum.s.GuestFeatures.fApic = 1; 2010 2080 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled APIC\n")); 2011 2081 break; … … 2015 2085 */ 2016 2086 case CPUMCPUIDFEATURE_X2APIC: 2017 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2018 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_X2APIC; 2087 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 2088 if (pLeaf) 2089 pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC; 2090 pVM->cpum.s.GuestFeatures.fX2Apic = 1; 2019 2091 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n")); 2020 2092 break; … … 2025 2097 */ 2026 2098 case CPUMCPUIDFEATURE_SEP: 2027 { 2028 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SEP)) 2099 if (!pVM->cpum.s.HostFeatures.fSysEnter) 2029 2100 { 2030 2101 AssertMsgFailed(("ERROR: Can't turn on SEP when the host doesn't support it!!\n")); … … 2032 2103 } 2033 2104 2034 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2035 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP; 2105 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 2106 if (pLeaf) 2107 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP; 2108 pVM->cpum.s.GuestFeatures.fSysEnter = 1; 2036 2109 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n")); 2037 2110 break; 2038 }2039 2111 2040 2112 /* … … 2043 2115 */ 2044 2116 case CPUMCPUIDFEATURE_SYSCALL: 2045 {2046 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x800000012047 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL))2117 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2118 if ( !pLeaf 2119 || !pVM->cpum.s.HostFeatures.fSysCall) 2048 2120 { 2049 2121 #if HC_ARCH_BITS == 32 2050 /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL not set it seems in 32 bits mode. 2051 * Even when the cpu is capable of doing so in 64 bits mode. 2052 */ 2053 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001 2054 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) 2055 || !(ASMCpuId_EDX(1) & X86_CPUID_EXT_FEATURE_EDX_SYSCALL)) 2122 /* X86_CPUID_EXT_FEATURE_EDX_SYSCALL not set it seems in 32-bit 2123 mode by Intel, even when the cpu is capable of doing so in 2124 64-bit mode. Long mode requires syscall support. */ 2125 if (!pVM->cpum.s.HostFeatures.fLongMode) 2056 2126 #endif 2057 2127 { … … 2060 2130 } 2061 2131 } 2132 2062 2133 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. 
*/ 2063 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL; 2134 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL; 2135 pVM->cpum.s.GuestFeatures.fSysCall = 1; 2064 2136 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n")); 2065 2137 break; 2066 }2067 2138 2068 2139 /* … … 2071 2142 */ 2072 2143 case CPUMCPUIDFEATURE_PAE: 2073 { 2074 if (!(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_PAE)) 2144 if (!pVM->cpum.s.HostFeatures.fPae) 2075 2145 { 2076 2146 LogRel(("CPUM: WARNING! Can't turn on PAE when the host doesn't support it!\n")); … … 2078 2148 } 2079 2149 2080 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2081 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE; 2082 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 2083 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) 2084 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE; 2150 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 2151 if (pLeaf) 2152 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE; 2153 2154 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2155 if ( pLeaf 2156 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 2157 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE; 2158 2159 pVM->cpum.s.GuestFeatures.fPae = 1; 2085 2160 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAE\n")); 2086 2161 break; 2087 }2088 2162 2089 2163 /* … … 2092 2166 */ 2093 2167 case CPUMCPUIDFEATURE_LONG_MODE: 2094 {2095 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x800000012096 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))2168 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2169 if ( !pLeaf 2170 || !pVM->cpum.s.HostFeatures.fLongMode) 2097 2171 { 2098 2172 LogRel(("CPUM: WARNING! Can't turn on LONG MODE when the host doesn't support it!\n")); … … 2101 2175 2102 2176 /* Valid for both Intel and AMD. */ 2103 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; 2177 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; 2178 pVM->cpum.s.GuestFeatures.fLongMode = 1; 2104 2179 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n")); 2105 2180 break; 2106 }2107 2181 2108 2182 /* … … 2111 2185 */ 2112 2186 case CPUMCPUIDFEATURE_NX: 2113 {2114 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x800000012115 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_NX))2187 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2188 if ( !pLeaf 2189 || !pVM->cpum.s.HostFeatures.fNoExecute) 2116 2190 { 2117 2191 LogRel(("CPUM: WARNING! Can't turn on NX/XD when the host doesn't support it!\n")); … … 2120 2194 2121 2195 /* Valid for both Intel and AMD. */ 2122 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_NX; 2196 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX; 2197 pVM->cpum.s.GuestFeatures.fNoExecute = 1; 2123 2198 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n")); 2124 2199 break; 2125 } 2200 2126 2201 2127 2202 /* … … 2130 2205 */ 2131 2206 case CPUMCPUIDFEATURE_LAHF: 2132 {2133 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x800000012134 || !(ASMCpuId_ECX(0x80000001) & X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF))2207 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2208 if ( !pLeaf 2209 || !pVM->cpum.s.HostFeatures.fLahfSahf) 2135 2210 { 2136 2211 LogRel(("CPUM: WARNING! 
Can't turn on LAHF/SAHF when the host doesn't support it!\n")); … … 2139 2214 2140 2215 /* Valid for both Intel and AMD. */ 2141 pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; 2216 pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; 2217 pVM->cpum.s.GuestFeatures.fLahfSahf = 1; 2142 2218 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n")); 2143 2219 break; 2144 } 2145 2220 2221 /* 2222 * Set the page attribute table bit. This is alternative page level 2223 * cache control that doesn't much matter when everything is 2224 * virtualized, though it may when passing thru device memory. 2225 */ 2146 2226 case CPUMCPUIDFEATURE_PAT: 2147 { 2148 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2149 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAT; 2150 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 2151 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) 2152 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAT; 2227 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 2228 if (pLeaf) 2229 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT; 2230 2231 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2232 if ( pLeaf 2233 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 2234 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT; 2235 2236 pVM->cpum.s.GuestFeatures.fPat = 1; 2153 2237 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled PAT\n")); 2154 2238 break; 2155 }2156 2239 2157 2240 /* … … 2160 2243 */ 2161 2244 case CPUMCPUIDFEATURE_RDTSCP: 2162 {2163 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x800000012164 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)2165 || 2245 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2246 if ( !pLeaf 2247 || !pVM->cpum.s.HostFeatures.fRdTscP 2248 || pVM->cpum.s.u8PortableCpuIdLevel > 0) 2166 2249 { 2167 2250 if (!pVM->cpum.s.u8PortableCpuIdLevel) … … 2171 2254 2172 2255 /* Valid for both Intel and AMD. 
*/ 2173 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP; 2256 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP; 2257 pVM->cpum.s.HostFeatures.fRdTscP = 1; 2174 2258 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n")); 2175 2259 break; 2176 }2177 2260 2178 2261 /* … … 2180 2263 */ 2181 2264 case CPUMCPUIDFEATURE_HVP: 2182 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2265 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 2266 if (pLeaf) 2183 2267 pVM->cpum.s.aGuestCpuIdStd[1].ecx |= X86_CPUID_FEATURE_ECX_HVP; 2268 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1; 2184 2269 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n")); 2185 2270 break; … … 2189 2274 break; 2190 2275 } 2276 2191 2277 for (VMCPUID i = 0; i < pVM->cCpus; i++) 2192 2278 { … … 2208 2294 switch (enmFeature) 2209 2295 { 2296 case CPUMCPUIDFEATURE_APIC: return pVM->cpum.s.GuestFeatures.fApic; 2297 case CPUMCPUIDFEATURE_X2APIC: return pVM->cpum.s.GuestFeatures.fX2Apic; 2298 case CPUMCPUIDFEATURE_SYSCALL: return pVM->cpum.s.GuestFeatures.fSysCall; 2299 case CPUMCPUIDFEATURE_SEP: return pVM->cpum.s.GuestFeatures.fSysEnter; 2300 case CPUMCPUIDFEATURE_PAE: return pVM->cpum.s.GuestFeatures.fPae; 2301 case CPUMCPUIDFEATURE_NX: return pVM->cpum.s.GuestFeatures.fNoExecute; 2302 case CPUMCPUIDFEATURE_LAHF: return pVM->cpum.s.GuestFeatures.fLahfSahf; 2303 case CPUMCPUIDFEATURE_LONG_MODE: return pVM->cpum.s.GuestFeatures.fLongMode; 2304 case CPUMCPUIDFEATURE_PAT: return pVM->cpum.s.GuestFeatures.fPat; 2305 case CPUMCPUIDFEATURE_RDTSCP: return pVM->cpum.s.GuestFeatures.fRdTscP; 2306 case CPUMCPUIDFEATURE_HVP: return pVM->cpum.s.GuestFeatures.fHypervisorPresent; 2307 2308 case CPUMCPUIDFEATURE_INVALID: 2309 case CPUMCPUIDFEATURE_32BIT_HACK: 2310 break; 2311 } 2312 AssertFailed(); 2313 return false; 2314 } 2315 2316 2317 /** 2318 * Clears a CPUID feature bit. 2319 * 2320 * @param pVM Pointer to the VM. 2321 * @param enmFeature The feature to clear. 
2322 */ 2323 VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature) 2324 { 2325 PCPUMCPUIDLEAF pLeaf; 2326 switch (enmFeature) 2327 { 2328 case CPUMCPUIDFEATURE_APIC: 2329 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 2330 if (pLeaf) 2331 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC; 2332 2333 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2334 if ( pLeaf 2335 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 2336 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC; 2337 2338 pVM->cpum.s.GuestFeatures.fApic = 0; 2339 Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n")); 2340 break; 2341 2342 case CPUMCPUIDFEATURE_X2APIC: 2343 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 2344 if (pLeaf) 2345 pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC; 2346 pVM->cpum.s.GuestFeatures.fX2Apic = 0; 2347 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n")); 2348 break; 2349 2210 2350 case CPUMCPUIDFEATURE_PAE: 2211 { 2212 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2213 return !!(pVM->cpum.s.aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PAE); 2214 break; 2215 } 2216 2217 case CPUMCPUIDFEATURE_NX: 2218 { 2219 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) 2220 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_NX); 2221 } 2222 2223 case CPUMCPUIDFEATURE_SYSCALL: 2224 { 2225 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) 2226 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_SYSCALL); 2227 } 2351 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 2352 if (pLeaf) 2353 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE; 2354 2355 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2356 if ( pLeaf 2357 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 2358 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE; 2359 2360 pVM->cpum.s.GuestFeatures.fPae = 0; 2361 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n")); 2362 break; 2363 2364 case CPUMCPUIDFEATURE_PAT: 2365 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 2366 if (pLeaf) 2367 pVM->cpum.s.aGuestCpuIdStd[1].edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT; 2368 2369 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2370 if ( pLeaf 2371 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 2372 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT; 2373 2374 pVM->cpum.s.GuestFeatures.fPat = 0; 2375 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n")); 2376 break; 2377 2378 case CPUMCPUIDFEATURE_LONG_MODE: 2379 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2380 if (pLeaf) 2381 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; 2382 pVM->cpum.s.GuestFeatures.fLongMode = 0; 2383 break; 2384 2385 case CPUMCPUIDFEATURE_LAHF: 2386 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2387 if (pLeaf) 2388 pVM->cpum.s.aGuestCpuIdExt[1].ecx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; 2389 pVM->cpum.s.GuestFeatures.fLahfSahf = 0; 2390 break; 2228 2391 2229 2392 case CPUMCPUIDFEATURE_RDTSCP: 2230 { 2231 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) 2232 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 2233 break; 2234 } 2235 2236 case CPUMCPUIDFEATURE_LONG_MODE: 2237 { 2238 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 
0x80000001) 2239 return !!(pVM->cpum.s.aGuestCpuIdExt[1].edx & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE); 2240 break; 2241 } 2393 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 2394 if (pLeaf) 2395 pVM->cpum.s.aGuestCpuIdExt[1].edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP; 2396 pVM->cpum.s.GuestFeatures.fRdTscP = 0; 2397 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n")); 2398 break; 2399 2400 case CPUMCPUIDFEATURE_HVP: 2401 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 2402 if (pLeaf) 2403 pVM->cpum.s.aGuestCpuIdStd[1].ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP; 2404 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0; 2405 break; 2242 2406 2243 2407 default: … … 2245 2409 break; 2246 2410 } 2247 return false; 2248 } 2249 2250 2251 /** 2252 * Clears a CPUID feature bit. 2253 * 2254 * @param pVM Pointer to the VM. 2255 * @param enmFeature The feature to clear. 2256 */ 2257 VMMDECL(void) CPUMClearGuestCpuIdFeature(PVM pVM, CPUMCPUIDFEATURE enmFeature) 2258 { 2259 switch (enmFeature) 2260 { 2261 /* 2262 * Set the APIC bit in both feature masks. 2263 */ 2264 case CPUMCPUIDFEATURE_APIC: 2265 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2266 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC; 2267 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 2268 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) 2269 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC; 2270 Log(("CPUM: ClearGuestCpuIdFeature: Disabled APIC\n")); 2271 break; 2272 2273 /* 2274 * Clear the x2APIC bit in the standard feature mask. 2275 */ 2276 case CPUMCPUIDFEATURE_X2APIC: 2277 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2278 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_X2APIC; 2279 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n")); 2280 break; 2281 2282 case CPUMCPUIDFEATURE_PAE: 2283 { 2284 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2285 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE; 2286 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 2287 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) 2288 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE; 2289 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAE!\n")); 2290 break; 2291 } 2292 2293 case CPUMCPUIDFEATURE_PAT: 2294 { 2295 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2296 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAT; 2297 if ( pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001 2298 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) 2299 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT; 2300 Log(("CPUM: ClearGuestCpuIdFeature: Disabled PAT!\n")); 2301 break; 2302 } 2303 2304 case CPUMCPUIDFEATURE_LONG_MODE: 2305 { 2306 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) 2307 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; 2308 break; 2309 } 2310 2311 case CPUMCPUIDFEATURE_LAHF: 2312 { 2313 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) 2314 pVM->cpum.s.aGuestCpuIdExt[1].ecx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; 2315 break; 2316 } 2317 2318 case CPUMCPUIDFEATURE_RDTSCP: 2319 { 2320 if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001) 2321 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP; 2322 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n")); 2323 break; 2324 } 2325 2326 case CPUMCPUIDFEATURE_HVP: 2327 if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1) 2328 pVM->cpum.s.aGuestCpuIdStd[1].ecx &= ~X86_CPUID_FEATURE_ECX_HVP; 2329 break; 2330 2331 
default: 2332 AssertMsgFailed(("enmFeature=%d\n", enmFeature)); 2333 break; 2334 } 2411 2335 2412 for (VMCPUID i = 0; i < pVM->cCpus; i++) 2336 2413 { … … 2349 2426 VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM) 2350 2427 { 2351 return pVM->cpum.s.enmHostCpuVendor;2428 return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor; 2352 2429 } 2353 2430 … … 2361 2438 VMMDECL(CPUMCPUVENDOR) CPUMGetGuestCpuVendor(PVM pVM) 2362 2439 { 2363 return pVM->cpum.s.enmGuestCpuVendor;2440 return (CPUMCPUVENDOR)pVM->cpum.s.GuestFeatures.enmCpuVendor; 2364 2441 } 2365 2442 -
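The heart of the CPUMAllRegs.cpp changes above is cpumCpuIdGetLeaf(): a binary search over a leaf array kept sorted by (uLeaf, uSubLeaf), which falls back to the nearest entry of the same leaf when the exact subleaf is missing. A minimal, self-contained sketch of that lookup strategy follows; the struct and names are simplified stand-ins for illustration, not the real CPUMCPUIDLEAF:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for CPUMCPUIDLEAF (illustration only). */
    typedef struct EXLEAF
    {
        uint32_t uLeaf;        /* CPUID leaf (EAX input). */
        uint32_t uSubLeaf;     /* Subleaf (ECX input). */
        uint32_t fSubLeafMask; /* Which ECX bits matter; 0 = subleaf ignored. */
        uint32_t uEax, uEbx, uEcx, uEdx;
    } EXLEAF;

    /* Binary search by leaf, then walk to the best subleaf, settling for
       the nearest entry of the same leaf when there is no exact match. */
    static const EXLEAF *exGetLeaf(const EXLEAF *paLeaves, unsigned cLeaves,
                                   uint32_t uLeaf, uint32_t uSubLeaf)
    {
        unsigned iStart = 0, iEnd = cLeaves;
        while (iStart < iEnd)
        {
            unsigned i = iStart + (iEnd - iStart) / 2;
            if (uLeaf < paLeaves[i].uLeaf)
                iEnd = i;
            else if (uLeaf > paLeaves[i].uLeaf)
                iStart = i + 1;
            else
            {
                uSubLeaf &= paLeaves[i].fSubLeafMask;
                if (uSubLeaf < paLeaves[i].uSubLeaf)
                    while (i > 0 && paLeaves[i - 1].uLeaf == uLeaf && uSubLeaf < paLeaves[i].uSubLeaf)
                        i--;
                else
                    while (i + 1 < cLeaves && paLeaves[i + 1].uLeaf == uLeaf && uSubLeaf >= paLeaves[i + 1].uSubLeaf)
                        i++;
                return &paLeaves[i];
            }
        }
        return NULL; /* Leaf not present at all. */
    }

    int main(void)
    {
        static const EXLEAF s_aLeaves[] = /* must stay sorted by (leaf, subleaf) */
        {
            { 0x00000000, 0, 0,          5, 0, 0, 0 },
            { 0x00000004, 0, UINT32_MAX, 1, 0, 0, 0 },
            { 0x00000004, 1, UINT32_MAX, 2, 0, 0, 0 },
            { 0x80000001, 0, 0,          0, 0, 0, 0 },
        };
        const EXLEAF *p = exGetLeaf(s_aLeaves, 4, 0x00000004, 7);
        printf("leaf 4, subleaf 7 -> entry with subleaf %u\n", (unsigned)(p ? p->uSubLeaf : ~0U));
        return 0;
    }

Keeping the array sorted is what lets the override and insert paths in CPUM.cpp (further down) combine cheaply with this O(log n) lookup.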
trunk/src/VBox/VMM/VMMAll/MMAll.cpp
r45640 r49893
 568  568      TAG2STR(CFGM_USER);
 569  569
      570      TAG2STR(CPUM_CTX);
      571      TAG2STR(CPUM_CPUID);
      572      TAG2STR(CPUM_MSRS);
      573
 570  574      TAG2STR(CSAM);
 571  575      TAG2STR(CSAM_PATCH);
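The three new MM_TAG_CPUM_* tags plug into MM's tag-to-string table, which is generated case-by-case with a stringification macro so the enum name and the printed string cannot drift apart. A tiny self-contained sketch of the pattern (the enum and names here are illustrative):

    #include <stdio.h>

    /* Illustrative tag enum; the real MMTAG list is much longer. */
    typedef enum EXTAG { EXTAG_CPUM_CTX, EXTAG_CPUM_CPUID, EXTAG_CPUM_MSRS } EXTAG;

    static const char *exTag2Str(EXTAG enmTag)
    {
        switch (enmTag)
        {
    #define TAG2STR(Tag) case EXTAG_##Tag: return #Tag
            TAG2STR(CPUM_CTX);
            TAG2STR(CPUM_CPUID);
            TAG2STR(CPUM_MSRS);
    #undef TAG2STR
            default: return "unknown";
        }
    }

    int main(void)
    {
        printf("%s\n", exTag2Str(EXTAG_CPUM_MSRS)); /* prints "CPUM_MSRS" */
        return 0;
    }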
trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp
r44528 r49893
 322  322  }
 323  323
      324
 324  325  /**
 325  326   * Wrapper for mmHyperAllocInternal
  …    …
 327  328  VMMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
 328  329  {
 329           int rc;
 330
 331           rc = mmHyperLock(pVM);
      330      int rc = mmHyperLock(pVM);
 332  331      AssertRCReturn(rc, rc);
  …    …
 339  338      return rc;
 340  339  }
      340
      341
      342  /**
      343   * Duplicates a block of memory.
      344   */
      345  VMMDECL(int) MMHyperDupMem(PVM pVM, const void *pvSrc, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
      346  {
      347      int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
      348      if (RT_SUCCESS(rc))
      349          memcpy(*ppv, pvSrc, cb);
      350      return rc;
      351  }
      352
 341  353
 342  354  /**
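MMHyperDupMem() is deliberately a thin wrapper: allocate, then copy only on success. The same shape in portable C, with malloc() standing in for the hyper-heap allocator and plain ints for VBox status codes (both assumptions for the sketch; the real function also honors uAlignment):

    #include <stdlib.h>
    #include <string.h>

    /* Duplicate a block of memory: allocate first, copy only on success. */
    static int exDupMem(const void *pvSrc, size_t cb, void **ppv)
    {
        void *pv = malloc(cb);          /* MMHyperAlloc() in the real code. */
        if (!pv)
            return -1;                  /* VERR_NO_MEMORY equivalent. */
        memcpy(pv, pvSrc, cb);
        *ppv = pv;
        return 0;                       /* VINF_SUCCESS equivalent. */
    }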
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r49623 r49893 76 76 #endif 77 77 78 /** 79 * CPUID bits to unify among all cores. 80 */ 81 static struct 82 { 83 uint32_t uLeaf; /**< Leaf to check. */ 84 uint32_t ecx; /**< which bits in ecx to unify between CPUs. */ 85 uint32_t edx; /**< which bits in edx to unify between CPUs. */ 86 } 87 const g_aCpuidUnifyBits[] = 88 { 89 { 90 0x00000001, 91 X86_CPUID_FEATURE_ECX_CX16 | X86_CPUID_FEATURE_ECX_MONITOR, 92 X86_CPUID_FEATURE_EDX_CX8 93 } 94 }; 95 96 78 97 79 98 /******************************************************************************* … … 114 133 115 134 /** 135 * 136 * 116 137 * Check the CPUID features of this particular CPU and disable relevant features 117 138 * for the guest which do not exist on this CPU. We have seen systems where the … … 127 148 static DECLCALLBACK(void) cpumR0CheckCpuid(RTCPUID idCpu, void *pvUser1, void *pvUser2) 128 149 { 150 PVM pVM = (PVM)pvUser1; 151 PCPUM pCPUM = &pVM->cpum.s; 152 129 153 NOREF(idCpu); NOREF(pvUser2); 130 131 struct 132 { 133 uint32_t uLeave; /* leave to check */ 134 uint32_t ecx; /* which bits in ecx to unify between CPUs */ 135 uint32_t edx; /* which bits in edx to unify between CPUs */ 136 } aCpuidUnify[] 137 = 138 { 139 { 0x00000001, X86_CPUID_FEATURE_ECX_CX16 140 | X86_CPUID_FEATURE_ECX_MONITOR, 141 X86_CPUID_FEATURE_EDX_CX8 } 142 }; 143 PVM pVM = (PVM)pvUser1; 144 PCPUM pCPUM = &pVM->cpum.s; 145 for (uint32_t i = 0; i < RT_ELEMENTS(aCpuidUnify); i++) 146 { 147 uint32_t uLeave = aCpuidUnify[i].uLeave; 154 for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++) 155 { 156 /* Note! Cannot use cpumCpuIdGetLeaf from here because we're not 157 necessarily in the VM process context. So, we using the 158 legacy arrays as temporary storage. */ 159 160 uint32_t uLeaf = g_aCpuidUnifyBits[i].uLeaf; 161 PCPUMCPUID pLegacyLeaf; 162 if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)) 163 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf]; 164 else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)) 165 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)]; 166 else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)) 167 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)]; 168 else 169 continue; 170 148 171 uint32_t eax, ebx, ecx, edx; 149 150 ASMCpuId_Idx_ECX(uLeave, 0, &eax, &ebx, &ecx, &edx); 151 PCPUMCPUID paLeaves; 152 if (uLeave < 0x80000000) 153 paLeaves = &pCPUM->aGuestCpuIdStd[uLeave - 0x00000000]; 154 else if (uLeave < 0xc0000000) 155 paLeaves = &pCPUM->aGuestCpuIdExt[uLeave - 0x80000000]; 156 else 157 paLeaves = &pCPUM->aGuestCpuIdCentaur[uLeave - 0xc0000000]; 158 /* unify important bits */ 159 ASMAtomicAndU32(&paLeaves->ecx, ecx | ~aCpuidUnify[i].ecx); 160 ASMAtomicAndU32(&paLeaves->edx, edx | ~aCpuidUnify[i].edx); 172 ASMCpuIdExSlow(uLeaf, 0, 0, 0, &eax, &ebx, &ecx, &edx); 173 174 ASMAtomicAndU32(&pLegacyLeaf->ecx, ecx | ~g_aCpuidUnifyBits[i].ecx); 175 ASMAtomicAndU32(&pLegacyLeaf->edx, edx | ~g_aCpuidUnifyBits[i].edx); 161 176 } 162 177 } … … 260 275 } 261 276 277 /* 278 * Unify/cross check some CPUID feature bits on all available CPU cores 279 * and threads. We've seen CPUs where the monitor support differed. 280 * 281 * Because the hyper heap isn't always mapped into ring-0, we cannot 282 * access it from a RTMpOnAll callback. We use the legacy CPUID arrays 283 * as temp ring-0 accessible memory instead, ASSUMING that they're all 284 * up to date when we get here. 
285 */ 262 286 RTMpOnAll(cpumR0CheckCpuid, pVM, NULL); 287 288 for (uint32_t i = 0; i < RT_ELEMENTS(g_aCpuidUnifyBits); i++) 289 { 290 uint32_t uLeaf = g_aCpuidUnifyBits[i].uLeaf; 291 PCPUMCPUIDLEAF pLeaf = cpumCpuIdGetLeaf(pVM, uLeaf, 0); 292 if (pLeaf) 293 { 294 PCPUMCPUID pLegacyLeaf; 295 if (uLeaf < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)) 296 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdStd[uLeaf]; 297 else if (uLeaf - UINT32_C(0x80000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)) 298 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdExt[uLeaf - UINT32_C(0x80000000)]; 299 else if (uLeaf - UINT32_C(0xc0000000) < RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)) 300 pLegacyLeaf = &pVM->cpum.s.aGuestCpuIdCentaur[uLeaf - UINT32_C(0xc0000000)]; 301 else 302 continue; 303 304 pLeaf->uEcx = pLegacyLeaf->ecx; 305 pLeaf->uEdx = pLegacyLeaf->edx; 306 } 307 } 308 263 309 } 264 310 -
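cpumR0CheckCpuid() runs once on every CPU (via RTMpOnAll) and atomically ANDs that CPU's feature bits into a shared value, masked so that only the bits being unified can ever be cleared; a bit inside the mask survives only if every core reports it. A compact C11 sketch of the idea (the mask value and names are invented for the example):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bits worth unifying across cores (stand-ins for CX16/MONITOR etc.). */
    #define EX_UNIFY_ECX_MASK  UINT32_C(0x00002008)

    /* Shared CPUID leaf value, seeded with the boot CPU's result. */
    static _Atomic uint32_t g_uSharedEcx;

    /* Invoked once per CPU (via RTMpOnAll in the real code).  AND-ing with
       (own bits | ~mask) clears a masked bit iff this CPU lacks it, while
       leaving all unmasked bits untouched. */
    static void exUnifyOnThisCpu(uint32_t uMyEcx)
    {
        atomic_fetch_and(&g_uSharedEcx, uMyEcx | ~EX_UNIFY_ECX_MASK);
    }

    int main(void)
    {
        atomic_store(&g_uSharedEcx, UINT32_C(0xffffffff)); /* boot CPU: all set. */
        exUnifyOnThisCpu(UINT32_C(0xffffdff7));            /* this CPU lacks bits 3 and 13. */
        printf("unified ecx: %#x\n", (unsigned)atomic_load(&g_uSharedEcx));
        return 0;
    }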
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r49890 r49893
 5899  5899              switch (pMsr->u32Msr)
 5900  5900              {
 5901                    case MSR_K8_TSC_AUX:   CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
       5901              case MSR_K8_TSC_AUX:   CPUMR0SetGuestTscAux(pVCpu, pMsr->u64Value); break;
 5902  5902              case MSR_K8_LSTAR:     pMixedCtx->msrLSTAR = pMsr->u64Value; break;
 5903  5903              case MSR_K6_STAR:      pMixedCtx->msrSTAR  = pMsr->u64Value; break;
  …     …
 8147  8147          AssertRC(rc2);
 8148  8148          Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS));
 8149                uint64_t u64GuestTscAuxMsr;
 8150                rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAuxMsr);
 8151                AssertRC(rc2);
 8152                hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, u64GuestTscAuxMsr, true /* fUpdateHostMsr */);
       8149          hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMR0GetGuestTscAux(pVCpu), true /* fUpdateHostMsr */);
 8153  8150      }
 8154  8151      else
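The VMX auto load/store MSR area that hmR0VmxAddAutoLoadStoreMsr() maintains is an array of 128-bit hardware entries (32-bit MSR index, 32-bit reserved, 64-bit value); adding an MSR means updating an existing entry in place or appending a new one. A simplified sketch of that add-or-update pattern (struct name, limits and return convention are illustrative):

    #include <stdint.h>

    /* One entry in the VMX auto load/store MSR area (hardware layout). */
    typedef struct EXVMXMSR
    {
        uint32_t u32Msr;       /* MSR index. */
        uint32_t u32Reserved;  /* Must be zero. */
        uint64_t u64Value;     /* Value to load/store. */
    } EXVMXMSR;

    /* Update the MSR in place if already listed, else append.
       Returns 0 on success, -1 if the fixed-size area is full. */
    static int exAddAutoLoadStoreMsr(EXVMXMSR *paMsrs, uint32_t *pcMsrs, uint32_t cMax,
                                     uint32_t u32Msr, uint64_t u64Value)
    {
        for (uint32_t i = 0; i < *pcMsrs; i++)
            if (paMsrs[i].u32Msr == u32Msr)
            {
                paMsrs[i].u64Value = u64Value;
                return 0;
            }
        if (*pcMsrs >= cMax)
            return -1;
        paMsrs[*pcMsrs].u32Msr      = u32Msr;
        paMsrs[*pcMsrs].u32Reserved = 0;
        paMsrs[*pcMsrs].u64Value    = u64Value;
        (*pcMsrs)++;
        return 0;
    }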
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r49538 r49893 55 55 #include <VBox/err.h> 56 56 #include <VBox/log.h> 57 #include <iprt/asm-amd64-x86.h> 57 58 #include <iprt/assert.h> 58 #include <iprt/asm-amd64-x86.h> 59 #include <iprt/cpuset.h> 60 #include <iprt/mem.h> 61 #include <iprt/mp.h> 59 62 #include <iprt/string.h> 60 #include <iprt/mp.h>61 #include <iprt/cpuset.h>62 63 #include "internal/pgm.h" 63 64 … … 115 116 * Internal Functions * 116 117 *******************************************************************************/ 117 static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);118 118 static int cpumR3CpuIdInit(PVM pVM); 119 119 static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass); … … 581 581 582 582 /* 583 * Assert alignment and sizes.583 * Assert alignment, sizes and tables. 584 584 */ 585 585 AssertCompileMemberAlignment(VM, cpum.s, 32); … … 592 592 AssertCompileMemberAlignment(VMCPU, cpum.s, 64); 593 593 AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64); 594 #ifdef VBOX_STRICT 595 int rc2 = cpumR3MsrStrictInitChecks(); 596 AssertRCReturn(rc2, rc2); 597 #endif 594 598 595 599 /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */ 596 600 pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum); 597 601 Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.offCPUMCPU0 == (uintptr_t)&pVM->aCpus[0].cpum); 602 598 603 599 604 /* Calculate the offset from CPUMCPU to CPUM. */ … … 647 652 648 653 /* 649 * Detect the host CPU vendor. 650 * (The guest CPU vendor is re-detected later on.) 651 */ 652 uint32_t uEAX, uEBX, uECX, uEDX; 653 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX); 654 pVM->cpum.s.enmHostCpuVendor = cpumR3DetectVendor(uEAX, uEBX, uECX, uEDX); 655 pVM->cpum.s.enmGuestCpuVendor = pVM->cpum.s.enmHostCpuVendor; 654 * Gather info about the host CPU. 655 */ 656 PCPUMCPUIDLEAF paLeaves; 657 uint32_t cLeaves; 658 int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves); 659 AssertLogRelRCReturn(rc, rc); 660 661 rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &pVM->cpum.s.HostFeatures); 662 RTMemFree(paLeaves); 663 AssertLogRelRCReturn(rc, rc); 664 pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor; 656 665 657 666 /* … … 662 671 * Register saved state data item. 663 672 */ 664 intrc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),665 666 667 673 rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM), 674 NULL, cpumR3LiveExec, NULL, 675 NULL, cpumR3SaveExec, NULL, 676 cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone); 668 677 if (RT_FAILURE(rc)) 669 678 return rc; … … 700 709 701 710 /** 702 * Detect the CPU vendor give n the 703 * 704 * @returns The vendor. 705 * @param uEAX EAX from CPUID(0). 706 * @param uEBX EBX from CPUID(0). 707 * @param uECX ECX from CPUID(0). 708 * @param uEDX EDX from CPUID(0). 709 */ 710 static CPUMCPUVENDOR cpumR3DetectVendor(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX) 711 { 712 if (ASMIsValidStdRange(uEAX)) 713 { 714 if (ASMIsAmdCpuEx(uEBX, uECX, uEDX)) 715 return CPUMCPUVENDOR_AMD; 716 717 if (ASMIsIntelCpuEx(uEBX, uECX, uEDX)) 718 return CPUMCPUVENDOR_INTEL; 719 720 if (ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX)) 721 return CPUMCPUVENDOR_VIA; 722 723 /** @todo detect the other buggers... */ 724 } 725 726 return CPUMCPUVENDOR_UNKNOWN; 711 * Loads MSR range overrides. 712 * 713 * This must be called before the MSR ranges are moved from the normal heap to 714 * the hyper heap! 
715 * 716 * @returns VBox status code (VMSetError called). 717 * @param pVM Pointer to the cross context VM structure 718 * @param pMsrNode The CFGM node with the MSR overrides. 719 */ 720 static int cpumR3LoadMsrOverrides(PVM pVM, PCFGMNODE pMsrNode) 721 { 722 for (PCFGMNODE pNode = CFGMR3GetFirstChild(pMsrNode); pNode; pNode = CFGMR3GetNextChild(pNode)) 723 { 724 /* 725 * Assemble a valid MSR range. 726 */ 727 CPUMMSRRANGE MsrRange; 728 MsrRange.offCpumCpu = 0; 729 MsrRange.fReserved = 0; 730 731 int rc = CFGMR3GetName(pNode, MsrRange.szName, sizeof(MsrRange.szName)); 732 if (RT_FAILURE(rc)) 733 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry (name is probably too long): %Rrc\n", rc); 734 735 rc = CFGMR3QueryU32(pNode, "First", &MsrRange.uFirst); 736 if (RT_FAILURE(rc)) 737 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying mandatory 'First' value: %Rrc\n", 738 MsrRange.szName, rc); 739 740 rc = CFGMR3QueryU32Def(pNode, "Last", &MsrRange.uLast, MsrRange.uFirst); 741 if (RT_FAILURE(rc)) 742 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Last' value: %Rrc\n", 743 MsrRange.szName, rc); 744 745 char szType[32]; 746 rc = CFGMR3QueryStringDef(pNode, "Type", szType, sizeof(szType), "FixedValue"); 747 if (RT_FAILURE(rc)) 748 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Type' value: %Rrc\n", 749 MsrRange.szName, rc); 750 if (!RTStrICmp(szType, "FixedValue")) 751 { 752 MsrRange.enmRdFn = kCpumMsrRdFn_FixedValue; 753 MsrRange.enmWrFn = kCpumMsrWrFn_IgnoreWrite; 754 755 rc = CFGMR3QueryU64Def(pNode, "Value", &MsrRange.uInitOrReadValue, 0); 756 if (RT_FAILURE(rc)) 757 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'Value' value: %Rrc\n", 758 MsrRange.szName, rc); 759 760 rc = CFGMR3QueryU64Def(pNode, "WrGpMask", &MsrRange.fWrGpMask, 0); 761 if (RT_FAILURE(rc)) 762 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrGpMask' value: %Rrc\n", 763 MsrRange.szName, rc); 764 765 rc = CFGMR3QueryU64Def(pNode, "WrIgnMask", &MsrRange.fWrIgnMask, 0); 766 if (RT_FAILURE(rc)) 767 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid MSR entry '%s': Error querying 'WrIgnMask' value: %Rrc\n", 768 MsrRange.szName, rc); 769 } 770 else 771 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, 772 "Invalid MSR entry '%s': Unknown type '%s'\n", MsrRange.szName, szType); 773 774 /* 775 * Insert the range into the table (replaces/splits/shrinks existing 776 * MSR ranges). 777 */ 778 rc = cpumR3MsrRangesInsert(&pVM->cpum.s.GuestInfo.paMsrRangesR3, &pVM->cpum.s.GuestInfo.cMsrRanges, &MsrRange); 779 if (RT_FAILURE(rc)) 780 return VMSetError(pVM, rc, RT_SRC_POS, "Error adding MSR entry '%s': %Rrc\n", MsrRange.szName, rc); 781 } 782 783 return VINF_SUCCESS; 727 784 } 785 786 787 /** 788 * Loads CPUID leaf overrides. 789 * 790 * This must be called before the CPUID leaves are moved from the normal 791 * heap to the hyper heap! 792 * 793 * @returns VBox status code (VMSetError called). 794 * @param pVM Pointer to the cross context VM structure 795 * @param pParentNode The CFGM node with the CPUID leaves. 796 * @param pszLabel How to label the overrides we're loading. 797 */ 798 static int cpumR3LoadCpuIdOverrides(PVM pVM, PCFGMNODE pParentNode, const char *pszLabel) 799 { 800 for (PCFGMNODE pNode = CFGMR3GetFirstChild(pParentNode); pNode; pNode = CFGMR3GetNextChild(pNode)) 801 { 802 /* 803 * Get the leaf and subleaf numbers. 
804 */ 805 char szName[128]; 806 int rc = CFGMR3GetName(pNode, szName, sizeof(szName)); 807 if (RT_FAILURE(rc)) 808 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry (name is probably too long): %Rrc\n", pszLabel, rc); 809 810 /* The leaf number is either specified directly or thru the node name. */ 811 uint32_t uLeaf; 812 rc = CFGMR3QueryU32(pNode, "Leaf", &uLeaf); 813 if (rc == VERR_CFGM_VALUE_NOT_FOUND) 814 { 815 rc = RTStrToUInt32Full(szName, 16, &uLeaf); 816 if (rc != VINF_SUCCESS) 817 return VMSetError(pVM, VERR_INVALID_NAME, RT_SRC_POS, 818 "Invalid %s entry: Invalid leaf number: '%s' \n", pszLabel, szName); 819 } 820 else if (RT_FAILURE(rc)) 821 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'Leaf' value: %Rrc\n", 822 pszLabel, szName, rc); 823 824 uint32_t uSubLeaf; 825 rc = CFGMR3QueryU32Def(pNode, "SubLeaf", &uSubLeaf, 0); 826 if (RT_FAILURE(rc)) 827 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeaf' value: %Rrc\n", 828 pszLabel, szName, rc); 829 830 uint32_t fSubLeafMask; 831 rc = CFGMR3QueryU32Def(pNode, "SubLeafMask", &fSubLeafMask, 0); 832 if (RT_FAILURE(rc)) 833 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'SubLeafMask' value: %Rrc\n", 834 pszLabel, szName, rc); 835 836 /* 837 * Look up the specified leaf, since the output register values 838 * defaults to any existing values. This allows overriding a single 839 * register, without needing to know the other values. 840 */ 841 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, pVM->cpum.s.GuestInfo.cCpuIdLeaves, 842 uLeaf, uSubLeaf); 843 CPUMCPUIDLEAF Leaf; 844 if (pLeaf) 845 Leaf = *pLeaf; 846 else 847 RT_ZERO(Leaf); 848 Leaf.uLeaf = uLeaf; 849 Leaf.uSubLeaf = uSubLeaf; 850 Leaf.fSubLeafMask = fSubLeafMask; 851 852 rc = CFGMR3QueryU32Def(pNode, "eax", &Leaf.uEax, Leaf.uEax); 853 if (RT_FAILURE(rc)) 854 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'eax' value: %Rrc\n", 855 pszLabel, szName, rc); 856 rc = CFGMR3QueryU32Def(pNode, "ebx", &Leaf.uEbx, Leaf.uEbx); 857 if (RT_FAILURE(rc)) 858 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ebx' value: %Rrc\n", 859 pszLabel, szName, rc); 860 rc = CFGMR3QueryU32Def(pNode, "ecx", &Leaf.uEcx, Leaf.uEcx); 861 if (RT_FAILURE(rc)) 862 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'ecx' value: %Rrc\n", 863 pszLabel, szName, rc); 864 rc = CFGMR3QueryU32Def(pNode, "edx", &Leaf.uEdx, Leaf.uEdx); 865 if (RT_FAILURE(rc)) 866 return VMSetError(pVM, rc, RT_SRC_POS, "Invalid %s entry '%s': Error querying 'edx' value: %Rrc\n", 867 pszLabel, szName, rc); 868 869 /* 870 * Insert the leaf into the table (replaces existing ones). 871 */ 872 rc = cpumR3CpuIdInsert(&pVM->cpum.s.GuestInfo.paCpuIdLeavesR3, &pVM->cpum.s.GuestInfo.cCpuIdLeaves, &Leaf); 873 if (RT_FAILURE(rc)) 874 return VMSetError(pVM, rc, RT_SRC_POS, "Error adding CPUID leaf entry '%s': %Rrc\n", szName, rc); 875 } 876 877 return VINF_SUCCESS; 878 } 879 728 880 729 881 … … 815 967 816 968 969 static int cpumR3CpuIdInstallAndExplodeLeaves(PVM pVM, PCPUM pCPUM, PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves) 970 { 971 /* 972 * Install the CPUID information. 
973 */ 974 int rc = MMHyperDupMem(pVM, paLeaves, sizeof(paLeaves[0]) * cLeaves, 32, 975 MM_TAG_CPUM_CPUID, (void **)&pCPUM->GuestInfo.paCpuIdLeavesR3); 976 977 AssertLogRelRCReturn(rc, rc); 978 979 pCPUM->GuestInfo.paCpuIdLeavesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3); 980 pCPUM->GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paCpuIdLeavesR3); 981 Assert(MMHyperR0ToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesR0) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3); 982 Assert(MMHyperRCToR3(pVM, pCPUM->GuestInfo.paCpuIdLeavesRC) == (void *)pCPUM->GuestInfo.paCpuIdLeavesR3); 983 984 /* 985 * Explode the guest CPU features. 986 */ 987 rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures); 988 AssertLogRelRCReturn(rc, rc); 989 990 991 /* 992 * Populate the legacy arrays. Currently used for everything, later only 993 * for patch manager. 994 */ 995 struct { PCPUMCPUID paCpuIds; uint32_t cCpuIds, uBase; } aOldRanges[] = 996 { 997 { pCPUM->aGuestCpuIdStd, RT_ELEMENTS(pCPUM->aGuestCpuIdStd), 0x00000000 }, 998 { pCPUM->aGuestCpuIdExt, RT_ELEMENTS(pCPUM->aGuestCpuIdExt), 0x80000000 }, 999 { pCPUM->aGuestCpuIdCentaur, RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), 0xc0000000 }, 1000 { pCPUM->aGuestCpuIdHyper, RT_ELEMENTS(pCPUM->aGuestCpuIdHyper), 0x40000000 }, 1001 }; 1002 for (uint32_t i = 0; i < RT_ELEMENTS(aOldRanges); i++) 1003 { 1004 uint32_t cLeft = aOldRanges[i].cCpuIds; 1005 uint32_t uLeaf = aOldRanges[i].uBase + cLeft; 1006 PCPUMCPUID pLegacyLeaf = &aOldRanges[i].paCpuIds[cLeft]; 1007 while (cLeft-- > 0) 1008 { 1009 uLeaf--; 1010 pLegacyLeaf--; 1011 1012 PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, uLeaf, 0); 1013 if (pLeaf) 1014 { 1015 pLegacyLeaf->eax = pLeaf->uEax; 1016 pLegacyLeaf->ebx = pLeaf->uEbx; 1017 pLegacyLeaf->ecx = pLeaf->uEcx; 1018 pLegacyLeaf->edx = pLeaf->uEdx; 1019 } 1020 else 1021 *pLegacyLeaf = pCPUM->GuestInfo.DefCpuId; 1022 } 1023 } 1024 1025 pCPUM->GuestCpuIdDef = pCPUM->GuestInfo.DefCpuId; 1026 1027 return VINF_SUCCESS; 1028 } 1029 1030 817 1031 /** 818 1032 * Initializes the emulated CPU's cpuid information. 
… … 825 1039 PCPUM pCPUM = &pVM->cpum.s; 826 1040 PCFGMNODE pCpumCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM"); 827 uint32_t i;828 1041 int rc; 829 1042 830 #define PORTABLE_CLEAR_BITS_WHEN(Lvl, LeafSuffReg, FeatNm, fMask, uValue) \831 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg& (fMask)) == (uValue) ) \1043 #define PORTABLE_CLEAR_BITS_WHEN(Lvl, a_pLeafReg, FeatNm, fMask, uValue) \ 1044 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fMask)) == (uValue) ) \ 832 1045 { \ 833 LogRel(("PortableCpuId: " # LeafSuffReg "[" #FeatNm "]: %#x -> 0\n", pCPUM->aGuestCpuId##LeafSuffReg& (fMask))); \834 pCPUM->aGuestCpuId##LeafSuffReg&= ~(uint32_t)(fMask); \835 } 836 #define PORTABLE_DISABLE_FEATURE_BIT(Lvl, LeafSuffReg, FeatNm, fBitMask) \837 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && (pCPUM->aGuestCpuId##LeafSuffReg& (fBitMask)) ) \1046 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: %#x -> 0\n", (a_pLeafReg) & (fMask))); \ 1047 (a_pLeafReg) &= ~(uint32_t)(fMask); \ 1048 } 1049 #define PORTABLE_DISABLE_FEATURE_BIT(Lvl, a_pLeafReg, FeatNm, fBitMask) \ 1050 if ( pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((a_pLeafReg) & (fBitMask)) ) \ 838 1051 { \ 839 LogRel(("PortableCpuId: " # LeafSuffReg "[" #FeatNm "]: 1 -> 0\n")); \840 pCPUM->aGuestCpuId##LeafSuffReg&= ~(uint32_t)(fBitMask); \1052 LogRel(("PortableCpuId: " #a_pLeafReg "[" #FeatNm "]: 1 -> 0\n")); \ 1053 (a_pLeafReg) &= ~(uint32_t)(fBitMask); \ 841 1054 } 842 1055 … … 847 1060 * Enables the Synthetic CPU. The Vendor ID and Processor Name are 848 1061 * completely overridden by VirtualBox custom strings. Some 849 * CPUID information is withheld, like the cache info. */ 850 rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &pCPUM->fSyntheticCpu, false); 1062 * CPUID information is withheld, like the cache info. 1063 * 1064 * This is obsoleted by PortableCpuIdLevel. */ 1065 bool fSyntheticCpu; 1066 rc = CFGMR3QueryBoolDef(pCpumCfg, "SyntheticCpu", &fSyntheticCpu, false); 851 1067 AssertRCReturn(rc, rc); 852 1068 … … 856 1072 * values should only be used when older CPUs are involved since it may 857 1073 * harm performance and maybe also cause problems with specific guests. */ 858 rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, 0); 859 AssertRCReturn(rc, rc); 860 861 AssertLogRelReturn(!pCPUM->fSyntheticCpu || !pCPUM->u8PortableCpuIdLevel, VERR_CPUM_INCOMPATIBLE_CONFIG); 862 863 /* 864 * Get the host CPUID leaves and redetect the guest CPU vendor (could've 865 * been overridden). 866 */ 867 /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit} 868 * Overrides the host CPUID leaf values used for calculating the guest CPUID 869 * leaves. This can be used to preserve the CPUID values when moving a VM to a 870 * different machine. Another use is restricting (or extending) the feature set 871 * exposed to the guest. 
*/ 872 PCFGMNODE pHostOverrideCfg = CFGMR3GetChild(pCpumCfg, "HostCPUID"); 873 rc = cpumR3CpuIdInitHostSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0], RT_ELEMENTS(pCPUM->aGuestCpuIdStd), pHostOverrideCfg); 874 AssertRCReturn(rc, rc); 875 rc = cpumR3CpuIdInitHostSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0], RT_ELEMENTS(pCPUM->aGuestCpuIdExt), pHostOverrideCfg); 876 AssertRCReturn(rc, rc); 877 rc = cpumR3CpuIdInitHostSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pHostOverrideCfg); 878 AssertRCReturn(rc, rc); 879 880 pCPUM->enmGuestCpuVendor = cpumR3DetectVendor(pCPUM->aGuestCpuIdStd[0].eax, pCPUM->aGuestCpuIdStd[0].ebx, 881 pCPUM->aGuestCpuIdStd[0].ecx, pCPUM->aGuestCpuIdStd[0].edx); 882 883 /* 884 * Determine the default leaf. 885 * 886 * Intel returns values of the highest standard function, while AMD 887 * returns zeros. VIA on the other hand seems to returning nothing or 888 * perhaps some random garbage, we don't try to duplicate this behavior. 889 */ 890 ASMCpuIdExSlow(pCPUM->aGuestCpuIdStd[0].eax + 10, 0, 0, 0, /** @todo r=bird: Use the host value here in case of overrides and more than 10 leaves being stripped already. */ 891 &pCPUM->GuestCpuIdDef.eax, &pCPUM->GuestCpuIdDef.ebx, 892 &pCPUM->GuestCpuIdDef.ecx, &pCPUM->GuestCpuIdDef.edx); 1074 rc = CFGMR3QueryU8Def(pCpumCfg, "PortableCpuIdLevel", &pCPUM->u8PortableCpuIdLevel, fSyntheticCpu ? 1 : 0); 1075 AssertLogRelRCReturn(rc, rc); 1076 1077 /** @cfgm{CPUM/GuestCpuName, string} 1078 * The name of of the CPU we're to emulate. The default is the host CPU. 1079 * Note! CPUs other than "host" one is currently unsupported. */ 1080 char szCpuName[128]; 1081 rc = CFGMR3QueryStringDef(pCpumCfg, "GuestCpuName", szCpuName, sizeof(szCpuName), "host"); 1082 AssertLogRelRCReturn(rc, rc); 893 1083 894 1084 /** @cfgm{/CPUM/CMPXCHG16B, boolean, false} … … 896 1086 */ 897 1087 bool fCmpXchg16b; 898 rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false); AssertRCReturn(rc, rc); 1088 rc = CFGMR3QueryBoolDef(pCpumCfg, "CMPXCHG16B", &fCmpXchg16b, false); 1089 AssertLogRelRCReturn(rc, rc); 899 1090 900 1091 /** @cfgm{/CPUM/MONITOR, boolean, true} … … 902 1093 */ 903 1094 bool fMonitor; 904 rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true); AssertRCReturn(rc, rc); 905 906 /* Cpuid 1 & 0x80000001: 1095 rc = CFGMR3QueryBoolDef(pCpumCfg, "MONITOR", &fMonitor, true); 1096 AssertLogRelRCReturn(rc, rc); 1097 1098 /** @cfgm{/CPUM/MWaitExtensions, boolean, false} 1099 * Expose MWAIT extended features to the guest. For now we expose just MWAIT 1100 * break on interrupt feature (bit 1). 1101 */ 1102 bool fMWaitExtensions; 1103 rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false); 1104 AssertLogRelRCReturn(rc, rc); 1105 1106 /** @cfgm{/CPUM/NT4LeafLimit, boolean, false} 1107 * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from 1108 * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e). 1109 * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22]. 1110 */ 1111 bool fNt4LeafLimit; 1112 rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false); 1113 AssertLogRelRCReturn(rc, rc); 1114 1115 /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX} 1116 * Restrict the reported CPU family+model+stepping of intel CPUs. This is 1117 * probably going to be a temporary hack, so don't depend on this. 
1118 * The 1st byte of the value is the stepping, the 2nd byte value is the model 1119 * number and the 3rd byte value is the family, and the 4th value must be zero. 1120 */ 1121 uint32_t uMaxIntelFamilyModelStep; 1122 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX); 1123 AssertLogRelRCReturn(rc, rc); 1124 1125 /* 1126 * Get the guest CPU data from the database and/or the host. 1127 */ 1128 rc = cpumR3DbGetCpuInfo(szCpuName, &pCPUM->GuestInfo); 1129 if (RT_FAILURE(rc)) 1130 return rc == VERR_CPUM_DB_CPU_NOT_FOUND 1131 ? VMSetError(pVM, rc, RT_SRC_POS, 1132 "Info on guest CPU '%s' could not be found. Please, select a different CPU.", szCpuName) 1133 : rc; 1134 1135 /** @cfgm{CPUM/MSRs/[Name]/[First|Last|Type|Value|...],} 1136 * Overrides the guest MSRs. 1137 */ 1138 rc = cpumR3LoadMsrOverrides(pVM, CFGMR3GetChild(pCpumCfg, "MSRs")); 1139 1140 /** @cfgm{CPUM/HostCPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit} 1141 * Overrides the CPUID leaf values (from the host CPU usually) used for 1142 * calculating the guest CPUID leaves. This can be used to preserve the CPUID 1143 * values when moving a VM to a different machine. Another use is restricting 1144 * (or extending) the feature set exposed to the guest. */ 1145 if (RT_SUCCESS(rc)) 1146 rc = cpumR3LoadCpuIdOverrides(pVM, CFGMR3GetChild(pCpumCfg, "HostCPUID"), "HostCPUID"); 1147 1148 if (RT_SUCCESS(rc) && CFGMR3GetChild(pCpumCfg, "CPUID")) /* 2nd override, now discontinued. */ 1149 rc = VMSetError(pVM, VERR_CFGM_CONFIG_UNKNOWN_NODE, RT_SRC_POS, 1150 "Found unsupported configuration node '/CPUM/CPUID/'. " 1151 "Please use IMachine::setCPUIDLeaf() instead."); 1152 1153 /* 1154 * Pre-exploded the CPUID info. 1155 */ 1156 if (RT_SUCCESS(rc)) 1157 rc = cpumR3CpuIdExplodeFeatures(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, &pCPUM->GuestFeatures); 1158 if (RT_FAILURE(rc)) 1159 { 1160 RTMemFree(pCPUM->GuestInfo.paCpuIdLeavesR3); 1161 pCPUM->GuestInfo.paCpuIdLeavesR3 = NULL; 1162 RTMemFree(pCPUM->GuestInfo.paMsrRangesR3); 1163 pCPUM->GuestInfo.paMsrRangesR3 = NULL; 1164 return rc; 1165 } 1166 1167 1168 /* ... split this function about here ... */ 1169 1170 1171 PCPUMCPUIDLEAF pStdLeaf0 = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0); 1172 AssertLogRelReturn(pStdLeaf0, VERR_CPUM_IPE_2); 1173 1174 1175 /* Cpuid 1: 907 1176 * Only report features we can support. 908 1177 * … … 910 1179 * options may require adjusting (i.e. stripping what was enabled). 911 1180 */ 912 pCPUM->aGuestCpuIdStd[1].edx &= X86_CPUID_FEATURE_EDX_FPU 1181 PCPUMCPUIDLEAF pStdFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1, 0); 1182 AssertLogRelReturn(pStdFeatureLeaf, VERR_CPUM_IPE_2); 1183 pStdFeatureLeaf->uEdx &= X86_CPUID_FEATURE_EDX_FPU 913 1184 | X86_CPUID_FEATURE_EDX_VME 914 1185 | X86_CPUID_FEATURE_EDX_DE … … 941 1212 //| X86_CPUID_FEATURE_EDX_PBE - no pending break enabled. 942 1213 | 0; 943 p CPUM->aGuestCpuIdStd[1].ecx&= 01214 pStdFeatureLeaf->uEcx &= 0 944 1215 | X86_CPUID_FEATURE_ECX_SSE3 945 1216 /* Can't properly emulate monitor & mwait with guest SMP; force the guest to use hlt for idling VCPUs. 
*/ … … 961 1232 if (pCPUM->u8PortableCpuIdLevel > 0) 962 1233 { 963 PORTABLE_CLEAR_BITS_WHEN(1, Std[1].eax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12));964 PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3);965 PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, SSE3, X86_CPUID_FEATURE_ECX_SSE3);966 PORTABLE_DISABLE_FEATURE_BIT(1, Std[1].ecx, CX16, X86_CPUID_FEATURE_ECX_CX16);967 PORTABLE_DISABLE_FEATURE_BIT(2, Std[1].edx, SSE2, X86_CPUID_FEATURE_EDX_SSE2);968 PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, SSE, X86_CPUID_FEATURE_EDX_SSE);969 PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH);970 PORTABLE_DISABLE_FEATURE_BIT(3, Std[1].edx, CMOV, X86_CPUID_FEATURE_EDX_CMOV);971 972 Assert(!(p CPUM->aGuestCpuIdStd[1].edx& ( X86_CPUID_FEATURE_EDX_SEP1234 PORTABLE_CLEAR_BITS_WHEN(1, pStdFeatureLeaf->uEax, ProcessorType, (UINT32_C(3) << 12), (UINT32_C(2) << 12)); 1235 PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSSE3, X86_CPUID_FEATURE_ECX_SSSE3); 1236 PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, SSE3, X86_CPUID_FEATURE_ECX_SSE3); 1237 PORTABLE_DISABLE_FEATURE_BIT(1, pStdFeatureLeaf->uEcx, CX16, X86_CPUID_FEATURE_ECX_CX16); 1238 PORTABLE_DISABLE_FEATURE_BIT(2, pStdFeatureLeaf->uEdx, SSE2, X86_CPUID_FEATURE_EDX_SSE2); 1239 PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, SSE, X86_CPUID_FEATURE_EDX_SSE); 1240 PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CLFSH, X86_CPUID_FEATURE_EDX_CLFSH); 1241 PORTABLE_DISABLE_FEATURE_BIT(3, pStdFeatureLeaf->uEdx, CMOV, X86_CPUID_FEATURE_EDX_CMOV); 1242 1243 Assert(!(pStdFeatureLeaf->uEdx & ( X86_CPUID_FEATURE_EDX_SEP 973 1244 | X86_CPUID_FEATURE_EDX_PSN 974 1245 | X86_CPUID_FEATURE_EDX_DS … … 978 1249 | X86_CPUID_FEATURE_EDX_PBE 979 1250 ))); 980 Assert(!(p CPUM->aGuestCpuIdStd[1].ecx& ( X86_CPUID_FEATURE_ECX_PCLMUL1251 Assert(!(pStdFeatureLeaf->uEcx & ( X86_CPUID_FEATURE_ECX_PCLMUL 981 1252 | X86_CPUID_FEATURE_ECX_DTES64 982 1253 | X86_CPUID_FEATURE_ECX_CPLDS … … 1008 1279 * ASSUMES that this is ALWAYS the AMD defined feature set if present. 
1009 1280 */ 1010 pCPUM->aGuestCpuIdExt[1].edx &= X86_CPUID_AMD_FEATURE_EDX_FPU 1281 PCPUMCPUIDLEAF pExtFeatureLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1282 UINT32_C(0x80000001), 0); 1283 if (pExtFeatureLeaf) 1284 { 1285 pExtFeatureLeaf->uEdx &= X86_CPUID_AMD_FEATURE_EDX_FPU 1011 1286 | X86_CPUID_AMD_FEATURE_EDX_VME 1012 1287 | X86_CPUID_AMD_FEATURE_EDX_DE … … 1037 1312 | X86_CPUID_AMD_FEATURE_EDX_3DNOW 1038 1313 | 0; 1039 pCPUM->aGuestCpuIdExt[1].ecx&= 01314 pExtFeatureLeaf->uEcx &= 0 1040 1315 //| X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF 1041 1316 //| X86_CPUID_AMD_FEATURE_ECX_CMPL … … 1054 1329 //| X86_CPUID_AMD_FEATURE_ECX_WDT 1055 1330 | 0; 1056 if (pCPUM->u8PortableCpuIdLevel > 0) 1057 { 1058 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].ecx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L); 1059 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW); 1060 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX); 1061 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR); 1062 PORTABLE_DISABLE_FEATURE_BIT(1, Ext[1].edx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 1063 PORTABLE_DISABLE_FEATURE_BIT(2, Ext[1].ecx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); 1064 PORTABLE_DISABLE_FEATURE_BIT(3, Ext[1].ecx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV); 1065 1066 Assert(!(pCPUM->aGuestCpuIdExt[1].ecx & ( X86_CPUID_AMD_FEATURE_ECX_CMPL 1067 | X86_CPUID_AMD_FEATURE_ECX_SVM 1068 | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC 1069 | X86_CPUID_AMD_FEATURE_ECX_CR8L 1070 | X86_CPUID_AMD_FEATURE_ECX_ABM 1071 | X86_CPUID_AMD_FEATURE_ECX_SSE4A 1072 | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE 1073 | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF 1074 | X86_CPUID_AMD_FEATURE_ECX_OSVW 1075 | X86_CPUID_AMD_FEATURE_ECX_IBS 1076 | X86_CPUID_AMD_FEATURE_ECX_SSE5 1077 | X86_CPUID_AMD_FEATURE_ECX_SKINIT 1078 | X86_CPUID_AMD_FEATURE_ECX_WDT 1079 | UINT32_C(0xffffc000) 1080 ))); 1081 Assert(!(pCPUM->aGuestCpuIdExt[1].edx & ( RT_BIT(10) 1082 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL 1083 | RT_BIT(18) 1084 | RT_BIT(19) 1085 | RT_BIT(21) 1086 | X86_CPUID_AMD_FEATURE_EDX_AXMMX 1087 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB 1088 | RT_BIT(28) 1089 ))); 1090 } 1091 1092 /* 1093 * Apply the Synthetic CPU modifications. (TODO: move this up) 1094 */ 1095 if (pCPUM->fSyntheticCpu) 1096 { 1097 static const char s_szVendor[13] = "VirtualBox "; 1098 static const char s_szProcessor[48] = "VirtualBox SPARCx86 Processor v1000 "; /* includes null terminator */ 1099 1100 pCPUM->enmGuestCpuVendor = CPUMCPUVENDOR_SYNTHETIC; 1101 1102 /* Limit the nr of standard leaves; 5 for monitor/mwait */ 1103 pCPUM->aGuestCpuIdStd[0].eax = RT_MIN(pCPUM->aGuestCpuIdStd[0].eax, 5); 1104 1105 /* 0: Vendor */ 1106 pCPUM->aGuestCpuIdStd[0].ebx = pCPUM->aGuestCpuIdExt[0].ebx = ((uint32_t *)s_szVendor)[0]; 1107 pCPUM->aGuestCpuIdStd[0].ecx = pCPUM->aGuestCpuIdExt[0].ecx = ((uint32_t *)s_szVendor)[2]; 1108 pCPUM->aGuestCpuIdStd[0].edx = pCPUM->aGuestCpuIdExt[0].edx = ((uint32_t *)s_szVendor)[1]; 1109 1110 /* 1.eax: Version information. 
family : model : stepping */ 1111 pCPUM->aGuestCpuIdStd[1].eax = (0xf << 8) + (0x1 << 4) + 1; 1112 1113 /* Leaves 2 - 4 are Intel only - zero them out */ 1114 memset(&pCPUM->aGuestCpuIdStd[2], 0, sizeof(pCPUM->aGuestCpuIdStd[2])); 1115 memset(&pCPUM->aGuestCpuIdStd[3], 0, sizeof(pCPUM->aGuestCpuIdStd[3])); 1116 memset(&pCPUM->aGuestCpuIdStd[4], 0, sizeof(pCPUM->aGuestCpuIdStd[4])); 1117 1118 /* Leaf 5 = monitor/mwait */ 1119 1120 /* Limit the nr of extended leaves: 0x80000008 to include the max virtual and physical address size (64 bits guests). */ 1121 pCPUM->aGuestCpuIdExt[0].eax = RT_MIN(pCPUM->aGuestCpuIdExt[0].eax, 0x80000008); 1122 /* AMD only - set to zero. */ 1123 pCPUM->aGuestCpuIdExt[0].ebx = pCPUM->aGuestCpuIdExt[0].ecx = pCPUM->aGuestCpuIdExt[0].edx = 0; 1124 1125 /* 0x800000001: shared feature bits are set dynamically. */ 1126 memset(&pCPUM->aGuestCpuIdExt[1], 0, sizeof(pCPUM->aGuestCpuIdExt[1])); 1127 1128 /* 0x800000002-4: Processor Name String Identifier. */ 1129 pCPUM->aGuestCpuIdExt[2].eax = ((uint32_t *)s_szProcessor)[0]; 1130 pCPUM->aGuestCpuIdExt[2].ebx = ((uint32_t *)s_szProcessor)[1]; 1131 pCPUM->aGuestCpuIdExt[2].ecx = ((uint32_t *)s_szProcessor)[2]; 1132 pCPUM->aGuestCpuIdExt[2].edx = ((uint32_t *)s_szProcessor)[3]; 1133 pCPUM->aGuestCpuIdExt[3].eax = ((uint32_t *)s_szProcessor)[4]; 1134 pCPUM->aGuestCpuIdExt[3].ebx = ((uint32_t *)s_szProcessor)[5]; 1135 pCPUM->aGuestCpuIdExt[3].ecx = ((uint32_t *)s_szProcessor)[6]; 1136 pCPUM->aGuestCpuIdExt[3].edx = ((uint32_t *)s_szProcessor)[7]; 1137 pCPUM->aGuestCpuIdExt[4].eax = ((uint32_t *)s_szProcessor)[8]; 1138 pCPUM->aGuestCpuIdExt[4].ebx = ((uint32_t *)s_szProcessor)[9]; 1139 pCPUM->aGuestCpuIdExt[4].ecx = ((uint32_t *)s_szProcessor)[10]; 1140 pCPUM->aGuestCpuIdExt[4].edx = ((uint32_t *)s_szProcessor)[11]; 1141 1142 /* 0x800000005-7 - reserved -> zero */ 1143 memset(&pCPUM->aGuestCpuIdExt[5], 0, sizeof(pCPUM->aGuestCpuIdExt[5])); 1144 memset(&pCPUM->aGuestCpuIdExt[6], 0, sizeof(pCPUM->aGuestCpuIdExt[6])); 1145 memset(&pCPUM->aGuestCpuIdExt[7], 0, sizeof(pCPUM->aGuestCpuIdExt[7])); 1146 1147 /* 0x800000008: only the max virtual and physical address size. 
*/ 1148 pCPUM->aGuestCpuIdExt[8].ecx = pCPUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */ 1331 if (pCPUM->u8PortableCpuIdLevel > 0) 1332 { 1333 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEcx, CR8L, X86_CPUID_AMD_FEATURE_ECX_CR8L); 1334 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW, X86_CPUID_AMD_FEATURE_EDX_3DNOW); 1335 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, 3DNOW_EX, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX); 1336 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, FFXSR, X86_CPUID_AMD_FEATURE_EDX_FFXSR); 1337 PORTABLE_DISABLE_FEATURE_BIT(1, pExtFeatureLeaf->uEdx, RDTSCP, X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 1338 PORTABLE_DISABLE_FEATURE_BIT(2, pExtFeatureLeaf->uEcx, LAHF_SAHF, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); 1339 PORTABLE_DISABLE_FEATURE_BIT(3, pExtFeatureLeaf->uEcx, CMOV, X86_CPUID_AMD_FEATURE_EDX_CMOV); 1340 1341 Assert(!(pExtFeatureLeaf->uEcx & ( X86_CPUID_AMD_FEATURE_ECX_CMPL 1342 | X86_CPUID_AMD_FEATURE_ECX_SVM 1343 | X86_CPUID_AMD_FEATURE_ECX_EXT_APIC 1344 | X86_CPUID_AMD_FEATURE_ECX_CR8L 1345 | X86_CPUID_AMD_FEATURE_ECX_ABM 1346 | X86_CPUID_AMD_FEATURE_ECX_SSE4A 1347 | X86_CPUID_AMD_FEATURE_ECX_MISALNSSE 1348 | X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF 1349 | X86_CPUID_AMD_FEATURE_ECX_OSVW 1350 | X86_CPUID_AMD_FEATURE_ECX_IBS 1351 | X86_CPUID_AMD_FEATURE_ECX_SSE5 1352 | X86_CPUID_AMD_FEATURE_ECX_SKINIT 1353 | X86_CPUID_AMD_FEATURE_ECX_WDT 1354 | UINT32_C(0xffffc000) 1355 ))); 1356 Assert(!(pExtFeatureLeaf->uEdx & ( RT_BIT(10) 1357 | X86_CPUID_EXT_FEATURE_EDX_SYSCALL 1358 | RT_BIT(18) 1359 | RT_BIT(19) 1360 | RT_BIT(21) 1361 | X86_CPUID_AMD_FEATURE_EDX_AXMMX 1362 | X86_CPUID_EXT_FEATURE_EDX_PAGE1GB 1363 | RT_BIT(28) 1364 ))); 1365 } 1149 1366 } 1150 1367 … … 1153 1370 * (APIC-ID := 0 and #LogCpus := 0) 1154 1371 */ 1155 p CPUM->aGuestCpuIdStd[1].ebx &= 0x0000ffff;1372 pStdFeatureLeaf->uEbx &= 0x0000ffff; 1156 1373 #ifdef VBOX_WITH_MULTI_CORE 1157 if ( pCPUM->enmGuestCpuVendor != CPUMCPUVENDOR_SYNTHETIC 1158 && pVM->cCpus > 1) 1374 if (pVM->cCpus > 1) 1159 1375 { 1160 1376 /* If CPUID Fn0000_0001_EDX[HTT] = 1 then LogicalProcessorCount is the number of threads per CPU core times the number of CPU cores per processor */ 1161 p CPUM->aGuestCpuIdStd[1].ebx |= (pVM->cCpus << 16);1162 p CPUM->aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_HTT; /* necessary for hyper-threading *or* multi-core CPUs */1377 pStdFeatureLeaf->uEbx |= (pVM->cCpus << 16); 1378 pStdFeatureLeaf->uEdx |= X86_CPUID_FEATURE_EDX_HTT; /* necessary for hyper-threading *or* multi-core CPUs */ 1163 1379 } 1164 1380 #endif … … 1170 1386 * Safe to expose; restrict the number of calls to 1 for the portable case. 
1171 1387 */ 1172 if ( pCPUM->u8PortableCpuIdLevel > 0 1173 && pCPUM->aGuestCpuIdStd[0].eax >= 2 1174 && (pCPUM->aGuestCpuIdStd[2].eax & 0xff) > 1) 1175 { 1176 LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCPUM->aGuestCpuIdStd[2].eax & 0xff)); 1177 pCPUM->aGuestCpuIdStd[2].eax &= UINT32_C(0xfffffffe); 1388 PCPUMCPUIDLEAF pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 2, 0); 1389 if ( pCPUM->u8PortableCpuIdLevel > 0 1390 && pCurLeaf 1391 && (pCurLeaf->uEax & 0xff) > 1) 1392 { 1393 LogRel(("PortableCpuId: Std[2].al: %d -> 1\n", pCurLeaf->uEax & 0xff)); 1394 pCurLeaf->uEax &= UINT32_C(0xfffffffe); 1178 1395 } 1179 1396 … … 1185 1402 * Safe to expose 1186 1403 */ 1187 if (!(pCPUM->aGuestCpuIdStd[1].edx & X86_CPUID_FEATURE_EDX_PSN)) 1188 { 1189 pCPUM->aGuestCpuIdStd[3].ecx = pCPUM->aGuestCpuIdStd[3].edx = 0; 1404 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 3, 0); 1405 if ( !(pStdFeatureLeaf->uEdx & X86_CPUID_FEATURE_EDX_PSN) 1406 && pCurLeaf) 1407 { 1408 pCurLeaf->uEcx = pCurLeaf->uEdx = 0; 1190 1409 if (pCPUM->u8PortableCpuIdLevel > 0) 1191 pC PUM->aGuestCpuIdStd[3].eax = pCPUM->aGuestCpuIdStd[3].ebx = 0;1410 pCurLeaf->uEax = pCurLeaf->uEbx = 0; 1192 1411 } 1193 1412 … … 1202 1421 * Note: These SMP values are constant regardless of ECX 1203 1422 */ 1204 pCPUM->aGuestCpuIdStd[4].ecx = pCPUM->aGuestCpuIdStd[4].edx = 0; 1205 pCPUM->aGuestCpuIdStd[4].eax = pCPUM->aGuestCpuIdStd[4].ebx = 0; 1423 CPUMCPUIDLEAF NewLeaf; 1424 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0); 1425 if (pCurLeaf) 1426 { 1427 NewLeaf.uLeaf = 4; 1428 NewLeaf.uSubLeaf = 0; 1429 NewLeaf.fSubLeafMask = 0; 1430 NewLeaf.uEax = 0; 1431 NewLeaf.uEbx = 0; 1432 NewLeaf.uEcx = 0; 1433 NewLeaf.uEdx = 0; 1434 NewLeaf.fFlags = 0; 1206 1435 #ifdef VBOX_WITH_MULTI_CORE 1207 if ( pVM->cCpus > 11208 && pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_INTEL)1209 {1210 AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS);1211 /* One logical processor with possibly multiple cores. */1212 /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */1213 pCPUM->aGuestCpuIdStd[4].eax |= ((pVM->cCpus - 1) << 26); /* 6 bits only -> 64 cores! */1214 }1436 if ( pVM->cCpus > 1 1437 && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL) 1438 { 1439 AssertReturn(pVM->cCpus <= 64, VERR_TOO_MANY_CPUS); 1440 /* One logical processor with possibly multiple cores. */ 1441 /* See http://www.intel.com/Assets/PDF/appnote/241618.pdf p. 29 */ 1442 NewLeaf.uEax |= ((pVM->cCpus - 1) << 26); /* 6 bits only -> 64 cores! */ 1443 } 1215 1444 #endif 1445 rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf); 1446 AssertLogRelRCReturn(rc, rc); 1447 } 1216 1448 1217 1449 /* Cpuid 5: Monitor/mwait Leaf … … 1224 1456 * Safe to expose 1225 1457 */ 1226 if (!(pCPUM->aGuestCpuIdStd[1].ecx & X86_CPUID_FEATURE_ECX_MONITOR)) 1227 pCPUM->aGuestCpuIdStd[5].eax = pCPUM->aGuestCpuIdStd[5].ebx = 0; 1228 1229 pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0; 1230 /** @cfgm{/CPUM/MWaitExtensions, boolean, false} 1231 * Expose MWAIT extended features to the guest. For now we expose 1232 * just MWAIT break on interrupt feature (bit 1). 
1233 */ 1234 bool fMWaitExtensions; 1235 rc = CFGMR3QueryBoolDef(pCpumCfg, "MWaitExtensions", &fMWaitExtensions, false); AssertRCReturn(rc, rc); 1236 if (fMWaitExtensions) 1237 { 1238 pCPUM->aGuestCpuIdStd[5].ecx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0; 1239 /** @todo: for now we just expose host's MWAIT C-states, although conceptually 1240 it shall be part of our power management virtualization model */ 1458 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 4, 0); 1459 if (pCurLeaf) 1460 { 1461 if (!(pStdFeatureLeaf->uEcx & X86_CPUID_FEATURE_ECX_MONITOR)) 1462 pCurLeaf->uEax = pCurLeaf->uEbx = 0; 1463 1464 pCurLeaf->uEcx = pCurLeaf->uEdx = 0; 1465 if (fMWaitExtensions) 1466 { 1467 pCurLeaf->uEcx = X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0; 1468 /** @todo: for now we just expose host's MWAIT C-states, although conceptually 1469 it shall be part of our power management virtualization model */ 1241 1470 #if 0 1242 /* MWAIT sub C-states */1243 pCPUM->aGuestCpuIdStd[5].edx =1244 (0 << 0) /* 0 in C0 */ |1245 (2 << 4) /* 2 in C1 */ |1246 (2 << 8) /* 2 in C2 */ |1247 (2 << 12) /* 2 in C3 */ |1248 (0 << 16) /* 0 in C4 */1249 ;1471 /* MWAIT sub C-states */ 1472 pCurLeaf->uEdx = 1473 (0 << 0) /* 0 in C0 */ | 1474 (2 << 4) /* 2 in C1 */ | 1475 (2 << 8) /* 2 in C2 */ | 1476 (2 << 12) /* 2 in C3 */ | 1477 (0 << 16) /* 0 in C4 */ 1478 ; 1250 1479 #endif 1251 } 1252 else 1253 pCPUM->aGuestCpuIdStd[5].ecx = pCPUM->aGuestCpuIdStd[5].edx = 0; 1480 } 1481 else 1482 pCurLeaf->uEcx = pCurLeaf->uEdx = 0; 1483 } 1254 1484 1255 1485 /* Cpuid 0x800000005 & 0x800000006 contain information about L1, L2 & L3 cache and TLB identifiers. … … 1270 1500 * VIA: Reserved 1271 1501 */ 1272 if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000007)) 1273 { 1274 Assert(pVM->cpum.s.enmGuestCpuVendor != CPUMCPUVENDOR_INVALID); 1275 1276 pCPUM->aGuestCpuIdExt[7].eax = pCPUM->aGuestCpuIdExt[7].ebx = pCPUM->aGuestCpuIdExt[7].ecx = 0; 1277 1278 if (pVM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD) 1502 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000007), 0); 1503 if (pCurLeaf) 1504 { 1505 Assert(pCPUM->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID); 1506 1507 pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0; 1508 1509 if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 1279 1510 { 1280 1511 /* Only expose the TSC invariant capability bit to the guest. */ 1281 pC PUM->aGuestCpuIdExt[7].edx&= 01512 pCurLeaf->uEdx &= 0 1282 1513 //| X86_CPUID_AMD_ADVPOWER_EDX_TS 1283 1514 //| X86_CPUID_AMD_ADVPOWER_EDX_FID … … 1300 1531 } 1301 1532 else 1302 pC PUM->aGuestCpuIdExt[7].edx= 0;1533 pCurLeaf->uEdx = 0; 1303 1534 } 1304 1535 … … 1312 1543 * EBX, ECX, EDX - reserved 1313 1544 */ 1314 if (pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000008)) 1545 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000008), 0); 1546 if (pCurLeaf) 1315 1547 { 1316 1548 /* Only expose the virtual and physical address sizes to the guest. 
*/ 1317 pC PUM->aGuestCpuIdExt[8].eax &= UINT32_C(0x0000ffff);1318 pC PUM->aGuestCpuIdExt[8].ebx = pCPUM->aGuestCpuIdExt[8].edx = 0; /* reserved */1549 pCurLeaf->uEax &= UINT32_C(0x0000ffff); 1550 pCurLeaf->uEbx = pCurLeaf->uEdx = 0; /* reserved */ 1319 1551 /* Set APICIdCoreIdSize to zero (use legacy method to determine the number of cores per cpu) 1320 1552 * NC (0-7) Number of cores; 0 equals 1 core */ 1321 pC PUM->aGuestCpuIdExt[8].ecx = 0;1553 pCurLeaf->uEcx = 0; 1322 1554 #ifdef VBOX_WITH_MULTI_CORE 1323 1555 if ( pVM->cCpus > 1 1324 && p VM->cpum.s.enmGuestCpuVendor == CPUMCPUVENDOR_AMD)1556 && pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 1325 1557 { 1326 1558 /* Legacy method to determine the number of cores. */ 1327 pCPUM->aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_CMPL; 1328 pCPUM->aGuestCpuIdExt[8].ecx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */ 1559 pCurLeaf->uEcx |= (pVM->cCpus - 1); /* NC: Number of CPU cores - 1; 8 bits */ 1560 if (pExtFeatureLeaf) 1561 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL; 1329 1562 } 1330 1563 #endif 1331 1564 } 1332 1565 1333 /** @cfgm{/CPUM/NT4LeafLimit, boolean, false} 1334 * Limit the number of standard CPUID leaves to 0..3 to prevent NT4 from 1335 * bugchecking with MULTIPROCESSOR_CONFIGURATION_NOT_SUPPORTED (0x3e). 1336 * This option corresponds somewhat to IA32_MISC_ENABLES.BOOT_NT4[bit 22]. 1337 */ 1338 bool fNt4LeafLimit; 1339 rc = CFGMR3QueryBoolDef(pCpumCfg, "NT4LeafLimit", &fNt4LeafLimit, false); AssertRCReturn(rc, rc); 1340 if (fNt4LeafLimit && pCPUM->aGuestCpuIdStd[0].eax > 3) 1341 pCPUM->aGuestCpuIdStd[0].eax = 3; 1342 1343 /* 1344 * Limit it the number of entries and fill the remaining with the defaults. 1566 1567 /* 1568 * Limit it the number of entries, zapping the remainder. 1345 1569 * 1346 1570 * The limits are masking off stuff about power saving and similar, this … … 1348 1572 * info too in these leaves (like words about having a constant TSC). 1349 1573 */ 1350 if (pCPUM->aGuestCpuIdStd[0].eax > 5) 1351 pCPUM->aGuestCpuIdStd[0].eax = 5; 1352 for (i = pCPUM->aGuestCpuIdStd[0].eax + 1; i < RT_ELEMENTS(pCPUM->aGuestCpuIdStd); i++) 1353 pCPUM->aGuestCpuIdStd[i] = pCPUM->GuestCpuIdDef; 1354 1355 if (pCPUM->aGuestCpuIdExt[0].eax > UINT32_C(0x80000008)) 1356 pCPUM->aGuestCpuIdExt[0].eax = UINT32_C(0x80000008); 1357 for (i = pCPUM->aGuestCpuIdExt[0].eax >= UINT32_C(0x80000000) 1358 ? pCPUM->aGuestCpuIdExt[0].eax - UINT32_C(0x80000000) + 1 1359 : 0; 1360 i < RT_ELEMENTS(pCPUM->aGuestCpuIdExt); 1361 i++) 1362 pCPUM->aGuestCpuIdExt[i] = pCPUM->GuestCpuIdDef; 1574 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 0, 0); 1575 if (pCurLeaf) 1576 { 1577 if (pCurLeaf->uEax > 5) 1578 { 1579 pCurLeaf->uEax = 5; 1580 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 1581 UINT32_C(0x00000006), UINT32_C(0x000fffff)); 1582 } 1583 1584 /* NT4 hack, no zapping of extra leaves here. 
*/ 1585 if (fNt4LeafLimit && pCurLeaf->uEax > 3) 1586 pCurLeaf->uEax = 3; 1587 } 1588 1589 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0x80000000), 0); 1590 if (pCurLeaf) 1591 { 1592 if (pCurLeaf->uEax > UINT32_C(0x80000008)) 1593 { 1594 pCurLeaf->uEax = UINT32_C(0x80000008); 1595 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 1596 UINT32_C(0x80000008), UINT32_C(0x800fffff)); 1597 } 1598 } 1363 1599 1364 1600 /* … … 1370 1606 * temperature/hz/++ stuff, include it as well (static). 1371 1607 */ 1372 if ( pCPUM->aGuestCpuIdCentaur[0].eax >= UINT32_C(0xc0000000) 1373 && pCPUM->aGuestCpuIdCentaur[0].eax <= UINT32_C(0xc0000004)) 1374 { 1375 pCPUM->aGuestCpuIdCentaur[0].eax = RT_MIN(pCPUM->aGuestCpuIdCentaur[0].eax, UINT32_C(0xc0000002)); 1376 pCPUM->aGuestCpuIdCentaur[1].edx = 0; /* all features hidden */ 1377 for (i = pCPUM->aGuestCpuIdCentaur[0].eax - UINT32_C(0xc0000000); 1378 i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); 1379 i++) 1380 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef; 1381 } 1382 else 1383 for (i = 0; i < RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur); i++) 1384 pCPUM->aGuestCpuIdCentaur[i] = pCPUM->GuestCpuIdDef; 1608 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, UINT32_C(0xc0000000), 0); 1609 if (pCurLeaf) 1610 { 1611 if ( pCurLeaf->uEax >= UINT32_C(0xc0000000) 1612 && pCurLeaf->uEax <= UINT32_C(0xc0000004)) 1613 { 1614 pCurLeaf->uEax = RT_MIN(pCurLeaf->uEax, UINT32_C(0xc0000002)); 1615 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 1616 UINT32_C(0xc0000002), UINT32_C(0xc00fffff)); 1617 1618 pCurLeaf = cpumR3CpuIdGetLeaf(pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves, 1619 UINT32_C(0xc0000001), 0); 1620 if (pCurLeaf) 1621 pCurLeaf->uEdx = 0; /* all features hidden */ 1622 } 1623 else 1624 cpumR3CpuIdRemoveRange(pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, 1625 UINT32_C(0xc0000000), UINT32_C(0xc00fffff)); 1626 } 1385 1627 1386 1628 /* … … 1391 1633 * Currently we do not support any hypervisor-specific interface. 1392 1634 */ 1393 pCPUM->aGuestCpuIdHyper[0].eax = UINT32_C(0x40000001); 1394 pCPUM->aGuestCpuIdHyper[0].ebx = pCPUM->aGuestCpuIdHyper[0].ecx 1395 = pCPUM->aGuestCpuIdHyper[0].edx = 0x786f4256; /* 'VBox' */ 1396 pCPUM->aGuestCpuIdHyper[1].eax = 0x656e6f6e; /* 'none' */ 1397 pCPUM->aGuestCpuIdHyper[1].ebx = pCPUM->aGuestCpuIdHyper[1].ecx 1398 = pCPUM->aGuestCpuIdHyper[1].edx = 0; /* Reserved */ 1635 NewLeaf.uLeaf = UINT32_C(0x40000000); 1636 NewLeaf.uSubLeaf = 0; 1637 NewLeaf.fSubLeafMask = 0; 1638 NewLeaf.uEax = UINT32_C(0x40000001); 1639 NewLeaf.uEbx = 0x786f4256 /* 'VBox' */; 1640 NewLeaf.uEcx = 0x786f4256 /* 'VBox' */; 1641 NewLeaf.uEdx = 0x786f4256 /* 'VBox' */; 1642 NewLeaf.fFlags = 0; 1643 rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf); 1644 AssertLogRelRCReturn(rc, rc); 1645 1646 NewLeaf.uLeaf = UINT32_C(0x40000001); 1647 NewLeaf.uEax = 0x656e6f6e; /* 'none' */ 1648 NewLeaf.uEbx = 0; 1649 NewLeaf.uEcx = 0; 1650 NewLeaf.uEdx = 0; 1651 NewLeaf.fFlags = 0; 1652 rc = cpumR3CpuIdInsert(&pCPUM->GuestInfo.paCpuIdLeavesR3, &pCPUM->GuestInfo.cCpuIdLeaves, &NewLeaf); 1653 AssertLogRelRCReturn(rc, rc); 1399 1654 1400 1655 /* 1401 1656 * Mini CPU selection support for making Mac OS X happy. 
1402 1657 */ 1403 if (pCPUM->enmGuestCpuVendor == CPUMCPUVENDOR_INTEL) 1404 { 1405 /** @cfgm{/CPUM/MaxIntelFamilyModelStep, uint32_t, UINT32_MAX} 1406 * Restrict the reported CPU family+model+stepping of intel CPUs. This is 1407 * probably going to be a temporary hack, so don't depend on this. 1408 * The 1st byte of the value is the stepping, the 2nd byte value is the model 1409 * number and the 3rd byte value is the family, and the 4th value must be zero. 1410 */ 1411 uint32_t uMaxIntelFamilyModelStep; 1412 rc = CFGMR3QueryU32Def(pCpumCfg, "MaxIntelFamilyModelStep", &uMaxIntelFamilyModelStep, UINT32_MAX); 1413 AssertRCReturn(rc, rc); 1414 uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pCPUM->aGuestCpuIdStd[1].eax), 1415 ASMGetCpuModelIntel(pCPUM->aGuestCpuIdStd[1].eax), 1416 ASMGetCpuFamily(pCPUM->aGuestCpuIdStd[1].eax), 1658 if (pCPUM->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_INTEL) 1659 { 1660 uint32_t uCurIntelFamilyModelStep = RT_MAKE_U32_FROM_U8(ASMGetCpuStepping(pStdFeatureLeaf->uEax), 1661 ASMGetCpuModelIntel(pStdFeatureLeaf->uEax), 1662 ASMGetCpuFamily(pStdFeatureLeaf->uEax), 1417 1663 0); 1418 1664 if (uMaxIntelFamilyModelStep < uCurIntelFamilyModelStep) 1419 1665 { 1420 uint32_t uNew = p CPUM->aGuestCpuIdStd[1].eax & UINT32_C(0xf0003000);1666 uint32_t uNew = pStdFeatureLeaf->uEax & UINT32_C(0xf0003000); 1421 1667 uNew |= RT_BYTE1(uMaxIntelFamilyModelStep) & 0xf; /* stepping */ 1422 1668 uNew |= (RT_BYTE2(uMaxIntelFamilyModelStep) & 0xf) << 4; /* 4 low model bits */ … … 1426 1672 uNew |= ( (RT_BYTE3(uMaxIntelFamilyModelStep) - (RT_BYTE3(uMaxIntelFamilyModelStep) & 0xf)) & 0xff ) << 20; 1427 1673 LogRel(("CPU: CPUID(0).EAX %#x -> %#x (uMaxIntelFamilyModelStep=%#x, uCurIntelFamilyModelStep=%#x\n", 1428 p CPUM->aGuestCpuIdStd[1].eax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep));1429 p CPUM->aGuestCpuIdStd[1].eax = uNew;1674 pStdFeatureLeaf->uEax, uNew, uMaxIntelFamilyModelStep, uCurIntelFamilyModelStep)); 1675 pStdFeatureLeaf->uEax = uNew; 1430 1676 } 1431 1677 } 1432 1678 1433 /* 1434 * Load CPUID overrides from configuration. 1435 * Note: Kind of redundant now, but allows unchanged overrides 1436 */ 1437 /** @cfgm{CPUM/CPUID/[000000xx|800000xx|c000000x]/[eax|ebx|ecx|edx],32-bit} 1438 * Overrides the CPUID leaf values. */ 1439 PCFGMNODE pOverrideCfg = CFGMR3GetChild(pCpumCfg, "CPUID"); 1440 rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &pCPUM->aGuestCpuIdStd[0], RT_ELEMENTS(pCPUM->aGuestCpuIdStd), pOverrideCfg); 1441 AssertRCReturn(rc, rc); 1442 rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &pCPUM->aGuestCpuIdExt[0], RT_ELEMENTS(pCPUM->aGuestCpuIdExt), pOverrideCfg); 1443 AssertRCReturn(rc, rc); 1444 rc = cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0xc0000000), &pCPUM->aGuestCpuIdCentaur[0], RT_ELEMENTS(pCPUM->aGuestCpuIdCentaur), pOverrideCfg); 1445 AssertRCReturn(rc, rc); 1446 1447 /* 1448 * Check if PAE was explicitely enabled by the user. 1449 */ 1679 1680 /* 1681 * Move the MSR and CPUID arrays over on the hypervisor heap, and explode 1682 * guest CPU features again. 
1683 */ 1684 void *pvFree = pCPUM->GuestInfo.paCpuIdLeavesR3; 1685 int rc1 = cpumR3CpuIdInstallAndExplodeLeaves(pVM, pCPUM, pCPUM->GuestInfo.paCpuIdLeavesR3, pCPUM->GuestInfo.cCpuIdLeaves); 1686 RTMemFree(pvFree); 1687 1688 pvFree = pCPUM->GuestInfo.paMsrRangesR3; 1689 int rc2 = MMHyperDupMem(pVM, pvFree, 1690 sizeof(pCPUM->GuestInfo.paMsrRangesR3[0]) * pCPUM->GuestInfo.cMsrRanges, 32, 1691 MM_TAG_CPUM_MSRS, (void **)&pCPUM->GuestInfo.paMsrRangesR3); 1692 RTMemFree(pvFree); 1693 AssertLogRelRCReturn(rc1, rc1); 1694 AssertLogRelRCReturn(rc2, rc2); 1695 1696 pCPUM->GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, pCPUM->GuestInfo.paMsrRangesR3); 1697 pCPUM->GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pCPUM->GuestInfo.paMsrRangesR3); 1698 cpumR3MsrRegStats(pVM); 1699 1700 /* 1701 * Some more configuration that we're applying at the end of everything 1702 * via the CPUMSetGuestCpuIdFeature API. 1703 */ 1704 1705 /* Check if PAE was explicitely enabled by the user. */ 1450 1706 bool fEnable; 1451 1707 rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "EnablePAE", &fEnable, false); AssertRCReturn(rc, rc); … … 1453 1709 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE); 1454 1710 1455 /* 1456 * We don't normally enable NX for raw-mode, so give the user a chance to 1457 * force it on. 1458 */ 1711 /* We don't normally enable NX for raw-mode, so give the user a chance to force it on. */ 1459 1712 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableNX", &fEnable, false); AssertRCReturn(rc, rc); 1460 1713 if (fEnable) 1461 1714 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 1462 1715 1463 /* 1464 * We don't enable the Hypervisor Present bit by default, but it may 1465 * be needed by some guests. 1466 */ 1716 /* We don't enable the Hypervisor Present bit by default, but it may be needed by some guests. */ 1467 1717 rc = CFGMR3QueryBoolDef(pCpumCfg, "EnableHVP", &fEnable, false); AssertRCReturn(rc, rc); 1468 1718 if (fEnable) … … 1488 1738 { 1489 1739 LogFlow(("CPUMR3Relocate\n")); 1740 1741 pVM->cpum.s.GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paMsrRangesR3); 1742 pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3); 1490 1743 1491 1744 /* Recheck the guest DRx values in raw-mode. */ … … 1552 1805 * Used by CPUMR3Reset and CPU hot plugging. 1553 1806 * 1554 * @param pVCpu Pointer to the VMCPU. 1555 */ 1556 VMMR3DECL(void) CPUMR3ResetCpu(PVMCPU pVCpu) 1807 * @param pVM Pointer to the cross context VM structure. 1808 * @param pVCpu Pointer to the cross context virtual CPU structure of 1809 * the CPU that is being reset. This may differ from the 1810 * current EMT. 1811 */ 1812 VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu) 1557 1813 { 1558 1814 /** @todo anything different for VCPU > 0? */ … … 1635 1891 supports all bits, since a zero value here should be read as 0xffbf. */ 1636 1892 1893 /* 1894 * MSRs. 1895 */ 1637 1896 /* Init PAT MSR */ 1638 1897 pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */ … … 1642 1901 Assert(!pCtx->msrEFER); 1643 1902 1903 /* IA32_MISC_ENABLE - not entirely sure what the init/reset state really 1904 is supposed to be here, just trying provide useful/sensible values. */ 1905 PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, MSR_IA32_MISC_ENABLE); 1906 if (pRange) 1907 { 1908 pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = MSR_IA32_MISC_ENABLE_BTS_UNAVAIL 1909 | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL 1910 | (pVM->cpum.s.GuestFeatures.fMonitorMWait ? 
MSR_IA32_MISC_ENABLE_MONITOR : 0) 1911 | MSR_IA32_MISC_ENABLE_FAST_STRINGS; 1912 pRange->fWrIgnMask |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL 1913 | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL; 1914 pRange->fWrGpMask &= ~pVCpu->cpum.s.GuestMsrs.msr.MiscEnable; 1915 } 1916 1917 /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */ 1918 1644 1919 /** @todo r=ramshankar: Currently broken for SMP as TMCpuTickSet() expects to be 1645 1920 * called from each EMT while we're getting called by CPUMR3Reset() 1646 1921 * iteratively on the same thread. Fix later. */ 1647 #if 0 1922 #if 0 /** @todo r=bird: This we will do in TM, not here. */ 1648 1923 /* TSC must be 0. Intel spec. Table 9-1. "IA-32 Processor States Following Power-up, Reset, or INIT." */ 1649 1924 CPUMSetGuestMsr(pVCpu, MSR_IA32_TSC, 0); … … 1673 1948 for (VMCPUID i = 0; i < pVM->cCpus; i++) 1674 1949 { 1675 CPUMR3ResetCpu( &pVM->aCpus[i]);1950 CPUMR3ResetCpu(pVM, &pVM->aCpus[i]); 1676 1951 1677 1952 #ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 1725 2000 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt)); 1726 2001 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt)); 2002 } 2003 2004 2005 static int cpumR3LoadCpuIdOneGuestArray(PSSMHANDLE pSSM, uint32_t uBase, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves) 2006 { 2007 uint32_t cCpuIds; 2008 int rc = SSMR3GetU32(pSSM, &cCpuIds); 2009 if (RT_SUCCESS(rc)) 2010 { 2011 if (cCpuIds < 64) 2012 { 2013 for (uint32_t i = 0; i < cCpuIds; i++) 2014 { 2015 CPUMCPUID CpuId; 2016 rc = SSMR3GetMem(pSSM, &CpuId, sizeof(CpuId)); 2017 if (RT_FAILURE(rc)) 2018 break; 2019 2020 CPUMCPUIDLEAF NewLeaf; 2021 NewLeaf.uLeaf = uBase + i; 2022 NewLeaf.uSubLeaf = 0; 2023 NewLeaf.fSubLeafMask = 0; 2024 NewLeaf.uEax = CpuId.eax; 2025 NewLeaf.uEbx = CpuId.ebx; 2026 NewLeaf.uEcx = CpuId.ecx; 2027 NewLeaf.uEdx = CpuId.edx; 2028 NewLeaf.fFlags = 0; 2029 rc = cpumR3CpuIdInsert(ppaLeaves, pcLeaves, &NewLeaf); 2030 } 2031 } 2032 else 2033 rc = VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 2034 } 2035 if (RT_FAILURE(rc)) 2036 { 2037 RTMemFree(*ppaLeaves); 2038 *ppaLeaves = NULL; 2039 *pcLeaves = 0; 2040 } 2041 return rc; 2042 } 2043 2044 2045 static int cpumR3LoadCpuIdGuestArrays(PSSMHANDLE pSSM, uint32_t uVersion, PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves) 2046 { 2047 *ppaLeaves = NULL; 2048 *pcLeaves = 0; 2049 2050 int rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x00000000), ppaLeaves, pcLeaves); 2051 if (RT_SUCCESS(rc)) 2052 rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0x80000000), ppaLeaves, pcLeaves); 2053 if (RT_SUCCESS(rc)) 2054 rc = cpumR3LoadCpuIdOneGuestArray(pSSM, UINT32_C(0xc0000000), ppaLeaves, pcLeaves); 2055 2056 return rc; 1727 2057 } 1728 2058 … … 1809 2139 && !(aHostRaw##set [1].reg & bit) \ 1810 2140 && !(aHostOverride##set [1].reg & bit) \ 1811 && !(aGuestOverride##set [1].reg & bit) \1812 2141 ) \ 1813 2142 { \ … … 1823 2152 && !(aHostRaw##set [1].reg & bit) \ 1824 2153 && !(aHostOverride##set [1].reg & bit) \ 1825 && !(aGuestOverride##set [1].reg & bit) \1826 2154 ) \ 1827 2155 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \ … … 1832 2160 && !(aHostRaw##set [1].reg & bit) \ 1833 2161 && !(aHostOverride##set [1].reg & bit) \ 1834 && !(aGuestOverride##set [1].reg & bit) \1835 2162 ) \ 1836 2163 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. 
This may impact performance.\n")); \ … … 1845 2172 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \ 1846 2173 && !(aHostOverride##set [1].reg & bit) \ 1847 && !(aGuestOverride##set [1].reg & bit) \1848 2174 ) \ 1849 2175 { \ … … 1860 2186 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \ 1861 2187 && !(aHostOverride##set [1].reg & bit) \ 1862 && !(aGuestOverride##set [1].reg & bit) \1863 2188 ) \ 1864 2189 LogRel(("CPUM: " #bit " is not supported by the host but has already exposed to the guest\n")); \ … … 1870 2195 && (!fGuestAmd || !(aHostRaw##set [1].reg & bit)) \ 1871 2196 && !(aHostOverride##set [1].reg & bit) \ 1872 && !(aGuestOverride##set [1].reg & bit) \1873 2197 ) \ 1874 2198 LogRel(("CPUM: Warning - " #bit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \ … … 1885 2209 : aHostRawStd[1].reg & (StdBit)) \ 1886 2210 && !(aHostOverrideExt[1].reg & (ExtBit)) \ 1887 && !(aGuestOverrideExt[1].reg & (ExtBit)) \1888 2211 ) \ 1889 2212 { \ … … 1901 2224 : aHostRawStd[1].reg & (StdBit)) \ 1902 2225 && !(aHostOverrideExt[1].reg & (ExtBit)) \ 1903 && !(aGuestOverrideExt[1].reg & (ExtBit)) \1904 2226 ) \ 1905 2227 LogRel(("CPUM: " #ExtBit " is not supported by the host but has already exposed to the guest\n")); \ … … 1912 2234 : aHostRawStd[1].reg & (StdBit)) \ 1913 2235 && !(aHostOverrideExt[1].reg & (ExtBit)) \ 1914 && !(aGuestOverrideExt[1].reg & (ExtBit)) \1915 2236 ) \ 1916 2237 LogRel(("CPUM: Warning - " #ExtBit " is not supported by the host but already exposed to the guest. This may impact performance.\n")); \ … … 1921 2242 * Load them into stack buffers first. 1922 2243 */ 1923 CPUMCPUID aGuestCpuIdStd[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdStd)]; 1924 uint32_t cGuestCpuIdStd; 1925 int rc = SSMR3GetU32(pSSM, &cGuestCpuIdStd); AssertRCReturn(rc, rc); 1926 if (cGuestCpuIdStd > RT_ELEMENTS(aGuestCpuIdStd)) 1927 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 1928 SSMR3GetMem(pSSM, &aGuestCpuIdStd[0], cGuestCpuIdStd * sizeof(aGuestCpuIdStd[0])); 1929 1930 CPUMCPUID aGuestCpuIdExt[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdExt)]; 1931 uint32_t cGuestCpuIdExt; 1932 rc = SSMR3GetU32(pSSM, &cGuestCpuIdExt); AssertRCReturn(rc, rc); 1933 if (cGuestCpuIdExt > RT_ELEMENTS(aGuestCpuIdExt)) 1934 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 1935 SSMR3GetMem(pSSM, &aGuestCpuIdExt[0], cGuestCpuIdExt * sizeof(aGuestCpuIdExt[0])); 1936 1937 CPUMCPUID aGuestCpuIdCentaur[RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdCentaur)]; 1938 uint32_t cGuestCpuIdCentaur; 1939 rc = SSMR3GetU32(pSSM, &cGuestCpuIdCentaur); AssertRCReturn(rc, rc); 1940 if (cGuestCpuIdCentaur > RT_ELEMENTS(aGuestCpuIdCentaur)) 1941 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 1942 SSMR3GetMem(pSSM, &aGuestCpuIdCentaur[0], cGuestCpuIdCentaur * sizeof(aGuestCpuIdCentaur[0])); 2244 PCPUMCPUIDLEAF paLeaves; 2245 uint32_t cLeaves; 2246 int rc = cpumR3LoadCpuIdGuestArrays(pSSM, uVersion, &paLeaves, &cLeaves); 2247 AssertRCReturn(rc, rc); 2248 2249 /** @todo we'll be leaking paLeaves on error return... 
*/ 1943 2250 1944 2251 CPUMCPUID GuestCpuIdDef; … … 1951 2258 if (cRawStd > RT_ELEMENTS(aRawStd)) 1952 2259 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED; 1953 SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0])); 2260 rc = SSMR3GetMem(pSSM, &aRawStd[0], cRawStd * sizeof(aRawStd[0])); 2261 AssertRCReturn(rc, rc); 2262 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++) 2263 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx); 1954 2264 1955 2265 CPUMCPUID aRawExt[32]; … … 1960 2270 rc = SSMR3GetMem(pSSM, &aRawExt[0], cRawExt * sizeof(aRawExt[0])); 1961 2271 AssertRCReturn(rc, rc); 1962 1963 /*1964 * Note that we support restoring less than the current amount of standard1965 * leaves because we've been allowed more is newer version of VBox.1966 *1967 * So, pad new entries with the default.1968 */1969 for (uint32_t i = cGuestCpuIdStd; i < RT_ELEMENTS(aGuestCpuIdStd); i++)1970 aGuestCpuIdStd[i] = GuestCpuIdDef;1971 1972 for (uint32_t i = cGuestCpuIdExt; i < RT_ELEMENTS(aGuestCpuIdExt); i++)1973 aGuestCpuIdExt[i] = GuestCpuIdDef;1974 1975 for (uint32_t i = cGuestCpuIdCentaur; i < RT_ELEMENTS(aGuestCpuIdCentaur); i++)1976 aGuestCpuIdCentaur[i] = GuestCpuIdDef;1977 1978 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++)1979 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);1980 1981 2272 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++) 1982 2273 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx); … … 1999 2290 * Note! We currently only need the feature leaves, so skip rest. 2000 2291 */ 2001 PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/CPUID"); 2002 CPUMCPUID aGuestOverrideStd[2]; 2003 memcpy(&aGuestOverrideStd[0], &aHostRawStd[0], sizeof(aGuestOverrideStd)); 2004 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x00000000), &aGuestOverrideStd[0], RT_ELEMENTS(aGuestOverrideStd), pOverrideCfg); 2005 2006 CPUMCPUID aGuestOverrideExt[2]; 2007 memcpy(&aGuestOverrideExt[0], &aHostRawExt[0], sizeof(aGuestOverrideExt)); 2008 cpumR3CpuIdInitLoadOverrideSet(UINT32_C(0x80000000), &aGuestOverrideExt[0], RT_ELEMENTS(aGuestOverrideExt), pOverrideCfg); 2009 2010 pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID"); 2292 PCFGMNODE pOverrideCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/HostCPUID"); 2011 2293 CPUMCPUID aHostOverrideStd[2]; 2012 2294 memcpy(&aHostOverrideStd[0], &aHostRawStd[0], sizeof(aHostOverrideStd)); … … 2259 2541 * "EMU?" - Can this be emulated? 2260 2542 */ 2543 CPUMCPUID aGuestCpuIdStd[2]; 2544 RT_ZERO(aGuestCpuIdStd); 2545 cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, 1, 0, &aGuestCpuIdStd[1]); 2546 2261 2547 /* CPUID(1).ecx */ 2262 2548 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU … … 2328 2614 2329 2615 /* CPUID(0x80000000). */ 2330 if ( aGuestCpuIdExt[0].eax >= UINT32_C(0x80000001) 2331 && aGuestCpuIdExt[0].eax < UINT32_C(0x8000007f)) 2616 CPUMCPUID aGuestCpuIdExt[2]; 2617 RT_ZERO(aGuestCpuIdExt); 2618 if (cpumR3CpuIdGetLeafLegacy(paLeaves, cLeaves, UINT32_C(0x80000001), 0, &aGuestCpuIdExt[1])) 2332 2619 { 2333 2620 /** @todo deal with no 0x80000001 on the host. */ … … 2407 2694 * We're good, commit the CPU ID leaves. 
2408 2695 */ 2409 memcpy(&pVM->cpum.s.aGuestCpuIdStd[0], &aGuestCpuIdStd[0], sizeof(aGuestCpuIdStd)); 2410 memcpy(&pVM->cpum.s.aGuestCpuIdExt[0], &aGuestCpuIdExt[0], sizeof(aGuestCpuIdExt)); 2411 memcpy(&pVM->cpum.s.aGuestCpuIdCentaur[0], &aGuestCpuIdCentaur[0], sizeof(aGuestCpuIdCentaur)); 2412 pVM->cpum.s.GuestCpuIdDef = GuestCpuIdDef; 2696 MMHyperFree(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3); 2697 pVM->cpum.s.GuestInfo.paCpuIdLeavesR0 = NIL_RTR0PTR; 2698 pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = NIL_RTRCPTR; 2699 pVM->cpum.s.GuestInfo.DefCpuId = GuestCpuIdDef; 2700 rc = cpumR3CpuIdInstallAndExplodeLeaves(pVM, &pVM->cpum.s, paLeaves, cLeaves); 2701 RTMemFree(paLeaves); 2702 AssertLogRelRCReturn(rc, rc); 2703 2413 2704 2414 2705 #undef CPUID_CHECK_RET -
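The CPUM.cpp hunks above manage the guest CPUID table through cpumR3CpuIdInsert() and cpumR3CpuIdRemoveRange(), whose definitions live in the newly added VMMR3/CPUMR3CpuId.cpp and are not part of this changeset view. A minimal sketch of what the remove operation has to do, assuming the leaf array is kept sorted by uLeaf as the lookup code requires (the name is illustrative, not the actual implementation):

static void sketchCpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves,
                                   uint32_t uFirst, uint32_t uLast)
{
    /* Find the first leaf at or above uFirst and the first leaf above uLast. */
    uint32_t iFirst = 0;
    while (iFirst < *pcLeaves && paLeaves[iFirst].uLeaf < uFirst)
        iFirst++;
    uint32_t iEnd = iFirst;
    while (iEnd < *pcLeaves && paLeaves[iEnd].uLeaf <= uLast)
        iEnd++;

    /* Close the gap, keeping the array sorted and dense. */
    if (iEnd > iFirst)
    {
        memmove(&paLeaves[iFirst], &paLeaves[iEnd],
                (*pcLeaves - iEnd) * sizeof(paLeaves[0]));
        *pcLeaves -= iEnd - iFirst;
    }
}

This is why the limiting code can simply cap EAX of leaf 0 (or 0x80000000) and then drop everything in the now-unreachable range, instead of padding the old fixed-size arrays with the default leaf.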
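The hypervisor leaf hunk stores the constant 0x786f4256 in EBX, ECX and EDX with the comment 'VBox'. CPUID signature strings are packed little-endian, four characters per 32-bit register, so the constant really is the string:

    /* 'V'=0x56, 'B'=0x42, 'o'=0x6f, 'x'=0x78, packed little-endian: */
    uint32_t uVBox = 'V' | ('B' << 8) | ('o' << 16) | ((uint32_t)'x' << 24);
    /* uVBox == 0x786f4256, the value assigned to NewLeaf.uEbx/uEcx/uEdx. */

The removed synthetic-CPU code relied on the same layout when it copied s_szVendor and s_szProcessor into the leaves one dword at a time.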
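The MaxIntelFamilyModelStep hunk packs stepping, model and family into the low three bytes of a uint32_t and rebuilds leaf 1 EAX from them. A worked example of the architectural EAX layout it manipulates (stepping in bits 3:0, model in bits 7:4, family in bits 11:8, extended model in bits 19:16, extended family in bits 27:20); the variable names are illustrative:

    /* Rebuild CPUID(1).EAX for family 6, model 23 (0x17), stepping 6. */
    uint32_t uFamily = 6, uModel = 23, uStepping = 6;
    uint32_t uEax = uStepping & 0xf;                    /* stepping, bits 3:0    */
    uEax |= (uModel & 0xf) << 4;                        /* low model, bits 7:4   */
    uEax |= (uModel >> 4) << 16;                        /* ext model, bits 19:16 */
    uEax |= (uFamily < 0xf ? uFamily : 0xf) << 8;       /* family, bits 11:8     */
    uEax |= (uFamily > 0xf ? uFamily - 0xf : 0) << 20;  /* ext family            */
    /* uEax == 0x00010676 here, a Penryn-style signature. */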
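The feature masking above also leans on the PORTABLE_DISABLE_FEATURE_BIT(Lvl, Reg, FeatNm, fBitMask) macro, defined earlier in CPUM.cpp and outside this view. A plausible shape, assuming it keys off the u8PortableCpuIdLevel setting the way its call sites suggest (an assumption, not the verbatim macro):

#define PORTABLE_DISABLE_FEATURE_BIT(Lvl, Reg, FeatNm, fBitMask) \
    do { \
        /* Clear the feature bit once the portability level reaches Lvl. */ \
        if (pCPUM->u8PortableCpuIdLevel >= (Lvl) && ((Reg) & (fBitMask))) \
        { \
            LogRel(("PortableCpuId: " #FeatNm " -> 0\n")); \
            (Reg) &= ~(uint32_t)(fBitMask); \
        } \
    } while (0)

The Assert blocks that follow the calls then double-check that bits which must never reach the guest really are clear in the leaf.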
trunk/src/VBox/VMM/VMMR3/EM.cpp
r49072 r49893 2398 2398 PGMR3ResetCpu(pVM, pVCpu); 2399 2399 TRPMR3ResetCpu(pVCpu); 2400 CPUMR3ResetCpu(pVCpu); 2400 CPUMR3ResetCpu(pVM, pVCpu); 2401 2401 EMR3ResetCpu(pVCpu); 2402 2402 HMR3ResetCpu(pVCpu); -
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r48629 r49893 2168 2168 } 2169 2169 2170 /** @todo query from CPUM. */ 2170 2171 pVM->pgm.s.GCPhysInvAddrMask = 0; 2171 2172 for (uint32_t iBit = cMaxPhysAddrWidth; iBit < 64; iBit++) -
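The PGM.cpp hunk adds a @todo and shows the start of the loop that builds GCPhysInvAddrMask from the CPU's maximum physical address width; the loop body with the actual bit setting falls outside the hunk. What it presumably computes, worked through for a 36-bit address width:

    /* Set every bit from cMaxPhysAddrWidth (36 here) up to 63. */
    uint64_t GCPhysInvAddrMask = 0;
    for (uint32_t iBit = 36; iBit < 64; iBit++)
        GCPhysInvAddrMask |= RT_BIT_64(iBit);
    /* GCPhysInvAddrMask == UINT64_C(0xfffffff000000000): any guest
       physical address with one of these bits set is out of range. */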
trunk/src/VBox/VMM/VMMR3/VM.cpp
r48528 r49893 4380 4380 PDMR3ResetCpu(pVCpu); 4381 4381 TRPMR3ResetCpu(pVCpu); 4382 CPUMR3ResetCpu(pVCpu); 4382 CPUMR3ResetCpu(pVM, pVCpu); 4383 4383 EMR3ResetCpu(pVCpu); 4384 4384 HMR3ResetCpu(pVCpu); -
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r49147 r49893 1420 1420 1421 1421 PGMR3ResetCpu(pVM, pVCpu); 1422 CPUMR3ResetCpu(pVCpu); 1422 CPUMR3ResetCpu(pVM, pVCpu); 1423 1423 1424 1424 return VINF_EM_WAIT_SIPI; -
trunk/src/VBox/VMM/VMMR3/VMMTests.cpp
r49383 r49893 872 872 * Do the experiments. 873 873 */ 874 uint32_t uMsr = 0xc0011011; 875 uint64_t uValue = 0x10000; 874 uint32_t uMsr = 0x00000277; 875 uint64_t uValue = UINT64_C(0x0007010600070106); 876 876 #if 0 877 uValue &= ~(RT_BIT_64(17) | RT_BIT_64(16) | RT_BIT_64(15) | RT_BIT_64(14) | RT_BIT_64(13)); 878 uValue |= RT_BIT_64(13); 877 879 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue), 878 880 RCPtrValues, RCPtrValues + sizeof(uint64_t)); 879 881 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n", 880 882 uMsr, pauValues[0], uValue, pauValues[1], rc); 881 #endif 883 #elif 1 884 const uint64_t uOrgValue = uValue; 885 uint32_t cChanges = 0; 886 for (int iBit = 63; iBit >= 58; iBit--) 887 { 888 uValue = uOrgValue & ~RT_BIT_64(iBit); 889 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue), 890 RCPtrValues, RCPtrValues + sizeof(uint64_t)); 891 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nclear bit=%u -> %s\n", 892 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit, 893 (pauValues[0] ^ pauValues[1]) & RT_BIT_64(iBit) ? "changed" : "unchanged"); 894 cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]); 895 896 uValue = uOrgValue | RT_BIT_64(iBit); 897 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue), 898 RCPtrValues, RCPtrValues + sizeof(uint64_t)); 899 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\nset bit=%u -> %s\n", 900 uMsr, pauValues[0], uValue, pauValues[1], rc, iBit, 901 (pauValues[0] ^ pauValues[1]) & RT_BIT_64(iBit) ? "changed" : "unchanged"); 902 cChanges += RT_BOOL(pauValues[0] ^ pauValues[1]); 903 } 904 RTPrintf("%u change(s)\n", cChanges); 905 #else 906 uint64_t fWriteable = 0; 882 907 for (uint32_t i = 0; i <= 63; i++) 883 908 { 884 909 uValue = RT_BIT_64(i); 910 # if 0 911 if (uValue & (0x7)) 912 continue; 913 # endif 885 914 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue), 886 915 RCPtrValues, RCPtrValues + sizeof(uint64_t)); 887 916 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n", 888 917 uMsr, pauValues[0], uValue, pauValues[1], rc); 918 if (RT_SUCCESS(rc)) 919 fWriteable |= RT_BIT_64(i); 889 920 } 890 921 … 900 931 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc\n", 901 932 uMsr, pauValues[0], uValue, pauValues[1], rc); 933 934 uValue = fWriteable; 935 rc = VMMR3CallRC(pVM, RCPtrEP, 6, pVM->pVMRC, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue), 936 RCPtrValues, RCPtrValues + sizeof(uint64_t)); 937 RTPrintf("uMsr=%#010x before=%#018llx written=%#018llx after=%#018llx rc=%Rrc [fWriteable]\n", 938 uMsr, pauValues[0], uValue, pauValues[1], rc); 939 940 #endif 902 941 903 942 /* -
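The experiment now targets MSR 0x00000277, i.e. IA32_PAT, and the value UINT64_C(0x0007010600070106) follows PAT's layout of eight byte-wide memory-type fields; that is also why the #elif 1 branch probes bits 63..58, which sit in the topmost, mostly reserved field. A short decode of the test value, assuming the standard PAT encoding:

    /* Pull the eight PAT entries (PA0..PA7) out of the test value; only
       the low 3 bits of each byte carry a memory type. */
    uint64_t uPat = UINT64_C(0x0007010600070106);
    for (unsigned iEntry = 0; iEntry < 8; iEntry++)
        RTPrintf("PA%u = %u\n", iEntry, (unsigned)((uPat >> (iEntry * 8)) & 0x7));
    /* -> 6 (WB), 1 (WC), 7 (UC-), 0 (UC), repeated for PA4..PA7. */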
trunk/src/VBox/VMM/VMMRC/VMMRC.cpp
r49147 r49893 5 5 6 6 /* 7 * Copyright (C) 2006-2012 Oracle Corporation 7 * Copyright (C) 2006-2013 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … 49 49 static DECLCALLBACK(int) vmmGCTestTmpPFHandlerCorruptFS(PVM pVM, PCPUMCTXCORE pRegFrame); 50 50 DECLASM(bool) vmmRCSafeMsrRead(uint32_t uMsr, uint64_t *pu64Value); 51 DECLASM(bool) vmmRCSafeMsrWrite(uint32_t uMsr, uint64_t u64Value); 51 52 52 53 … 378 379 379 380 380 381 /** 382 * Tries to write the given value to an MSR, returns the effect and restores the 383 * original value. 384 * 385 * This is called directly via VMMR3CallRC. 386 * 387 * @returns VBox status code. 388 * @param pVM The VM handle. 389 * @param uMsr The MSR to write to. 390 * @param u32ValueLow The low part of the value to write. 391 * @param u32ValueHi The high part of the value to write. 392 * @param puValueBefore The value before writing. 393 * @param puValueAfter The value read back after writing. 394 */ 395 extern "C" VMMRCDECL(int) 396 VMMRCTestTestWriteMsr(PVM pVM, uint32_t uMsr, uint32_t u32ValueLow, uint32_t u32ValueHi, 397 uint64_t *puValueBefore, uint64_t *puValueAfter) 398 { 399 AssertPtrReturn(puValueBefore, VERR_INVALID_POINTER); 400 AssertPtrReturn(puValueAfter, VERR_INVALID_POINTER); 401 ASMIntDisable(); 402 403 int rc = VINF_SUCCESS; 404 uint64_t uValueBefore = UINT64_MAX; 405 uint64_t uValueAfter = UINT64_MAX; 406 if (vmmRCSafeMsrRead(uMsr, &uValueBefore)) 407 { 408 if (!vmmRCSafeMsrWrite(uMsr, RT_MAKE_U64(u32ValueLow, u32ValueHi))) 409 rc = VERR_WRITE_PROTECT; 410 if (!vmmRCSafeMsrRead(uMsr, &uValueAfter) && RT_SUCCESS(rc)) 411 rc = VERR_READ_ERROR; 412 vmmRCSafeMsrWrite(uMsr, uValueBefore); 413 } 414 else 415 rc = VERR_ACCESS_DENIED; 416 417 *puValueBefore = uValueBefore; 418 *puValueAfter = uValueAfter; 419 return rc; 420 } 421 422 380 423 381 424 /** -
trunk/src/VBox/VMM/VMMRC/VMMRCA.asm
r49362 r49893 235 235 pushf 236 236 cli 237 push esi 238 push edi 239 push ebx 240 push ebp 237 241 238 242 mov ecx, [ebp + 8] ; The MSR to read. … … 247 251 mov [ecx + 4], edx 248 252 253 mov eax, 1 254 .return: 255 pop ebp 256 pop ebx 257 pop edi 258 pop esi 249 259 popf 250 mov eax, 1251 260 leave 252 261 ret 253 262 254 263 .trapped: 255 popf256 264 mov eax, 0 257 leave 258 ret 265 jmp .return 259 266 ENDPROC vmmRCSafeMsrRead 260 267 … … 271 278 pushf 272 279 cli 280 push esi 281 push edi 282 push ebx 283 push ebp 273 284 274 285 mov ecx, [ebp + 8] ; The MSR to write to. … … 279 290 wrmsr 280 291 292 mov eax, 1 293 .return: 294 pop ebp 295 pop ebx 296 pop edi 297 pop esi 281 298 popf 282 mov eax, 1283 299 leave 284 300 ret 285 301 286 302 .trapped: 287 popf288 303 mov eax, 0 289 leave 290 ret 304 jmp .return 291 305 ENDPROC vmmRCSafeMsrWrite 292 306 -
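The VMMRCA.asm hunks change vmmRCSafeMsrRead and vmmRCSafeMsrWrite to save and restore ESI, EDI, EBX and EBP around the potentially trapping RDMSR/WRMSR and to funnel both the normal and the .trapped exit through a single epilogue. Under the 32-bit C calling convention those registers are callee-saved, so resuming at .trapped with clobbered registers could previously hand corrupted state back to the C caller (presumably the #GP trap-and-resume path cannot guarantee their contents). The C-side contracts, as declared in the VMMRC.cpp hunk:

    DECLASM(bool) vmmRCSafeMsrRead(uint32_t uMsr, uint64_t *pu64Value);
    DECLASM(bool) vmmRCSafeMsrWrite(uint32_t uMsr, uint64_t u64Value);
    /* Both return false if the RDMSR/WRMSR trapped, true on success;
       EBX/ESI/EDI/EBP must come back exactly as the caller left them. */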
trunk/src/VBox/VMM/include/CPUMInternal.h
r49019 r49893 22 22 # include <VBox/cdefs.h> 23 23 # include <VBox/types.h> 24 # include <VBox/vmm/stam.h> 24 25 # include <iprt/x86.h> 25 26 #else … … 108 109 #endif 109 110 #endif 111 112 113 /** 114 * MSR read functions. 115 */ 116 typedef enum CPUMMSRRDFN 117 { 118 /** Invalid zero value. */ 119 kCpumMsrRdFn_Invalid = 0, 120 /** Return the CPUMMSRRANGE::uInitOrReadValue. */ 121 kCpumMsrRdFn_FixedValue, 122 /** Alias to the MSR range starting at the MSR given by 123 * CPUMMSRRANGE::uInitOrReadValue. Must be used in pair with 124 * kCpumMsrWrFn_MsrAlias. */ 125 kCpumMsrRdFn_MsrAlias, 126 /** Write only register, GP all read attempts. */ 127 kCpumMsrRdFn_WriteOnly, 128 129 kCpumMsrRdFn_Ia32P5McAddr, 130 kCpumMsrRdFn_Ia32P5McType, 131 kCpumMsrRdFn_Ia32TimestampCounter, 132 kCpumMsrRdFn_Ia32ApicBase, 133 kCpumMsrRdFn_Ia32FeatureControl, 134 kCpumMsrRdFn_Ia32SmmMonitorCtl, 135 kCpumMsrRdFn_Ia32PmcN, 136 kCpumMsrRdFn_Ia32MonitorFilterLineSize, 137 kCpumMsrRdFn_Ia32MPerf, 138 kCpumMsrRdFn_Ia32APerf, 139 kCpumMsrRdFn_Ia32MtrrCap, /**< Takes real CPU value for reference. */ 140 kCpumMsrRdFn_Ia32MtrrPhysBaseN, /**< Takes register number. */ 141 kCpumMsrRdFn_Ia32MtrrPhysMaskN, /**< Takes register number. */ 142 kCpumMsrRdFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */ 143 kCpumMsrRdFn_Ia32MtrrDefType, 144 kCpumMsrRdFn_Ia32Pat, 145 kCpumMsrRdFn_Ia32SysEnterCs, 146 kCpumMsrRdFn_Ia32SysEnterEsp, 147 kCpumMsrRdFn_Ia32SysEnterEip, 148 kCpumMsrRdFn_Ia32McgCap, 149 kCpumMsrRdFn_Ia32McgStatus, 150 kCpumMsrRdFn_Ia32McgCtl, 151 kCpumMsrRdFn_Ia32DebugCtl, 152 kCpumMsrRdFn_Ia32SmrrPhysBase, 153 kCpumMsrRdFn_Ia32SmrrPhysMask, 154 kCpumMsrRdFn_Ia32PlatformDcaCap, 155 kCpumMsrRdFn_Ia32CpuDcaCap, 156 kCpumMsrRdFn_Ia32Dca0Cap, 157 kCpumMsrRdFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */ 158 kCpumMsrRdFn_Ia32PerfStatus, /**< Range value returned. */ 159 kCpumMsrRdFn_Ia32PerfCtl, /**< Range value returned. */ 160 kCpumMsrRdFn_Ia32FixedCtrN, /**< Takes register number of start of range. */ 161 kCpumMsrRdFn_Ia32PerfCapabilities, /**< Takes reference value. */ 162 kCpumMsrRdFn_Ia32FixedCtrCtrl, 163 kCpumMsrRdFn_Ia32PerfGlobalStatus, /**< Takes reference value. */ 164 kCpumMsrRdFn_Ia32PerfGlobalCtrl, 165 kCpumMsrRdFn_Ia32PerfGlobalOvfCtrl, 166 kCpumMsrRdFn_Ia32PebsEnable, 167 kCpumMsrRdFn_Ia32ClockModulation, /**< Range value returned. */ 168 kCpumMsrRdFn_Ia32ThermInterrupt, /**< Range value returned. */ 169 kCpumMsrRdFn_Ia32ThermStatus, /**< Range value returned. */ 170 kCpumMsrRdFn_Ia32Therm2Ctl, /**< Range value returned. */ 171 kCpumMsrRdFn_Ia32MiscEnable, /**< Range value returned. */ 172 kCpumMsrRdFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */ 173 kCpumMsrRdFn_Ia32McNCtl2, /**< Takes register number of start of range. */ 174 kCpumMsrRdFn_Ia32DsArea, 175 kCpumMsrRdFn_Ia32TscDeadline, 176 kCpumMsrRdFn_Ia32X2ApicN, 177 kCpumMsrRdFn_Ia32VmxBase, /**< Takes real value as reference. */ 178 kCpumMsrRdFn_Ia32VmxPinbasedCtls, /**< Takes real value as reference. */ 179 kCpumMsrRdFn_Ia32VmxProcbasedCtls, /**< Takes real value as reference. */ 180 kCpumMsrRdFn_Ia32VmxExitCtls, /**< Takes real value as reference. */ 181 kCpumMsrRdFn_Ia32VmxEntryCtls, /**< Takes real value as reference. */ 182 kCpumMsrRdFn_Ia32VmxMisc, /**< Takes real value as reference. */ 183 kCpumMsrRdFn_Ia32VmxCr0Fixed0, /**< Takes real value as reference. */ 184 kCpumMsrRdFn_Ia32VmxCr0Fixed1, /**< Takes real value as reference. */ 185 kCpumMsrRdFn_Ia32VmxCr4Fixed0, /**< Takes real value as reference. 
*/ 186 kCpumMsrRdFn_Ia32VmxCr4Fixed1, /**< Takes real value as reference. */ 187 kCpumMsrRdFn_Ia32VmxVmcsEnum, /**< Takes real value as reference. */ 188 kCpumMsrRdFn_Ia32VmxProcBasedCtls2, /**< Takes real value as reference. */ 189 kCpumMsrRdFn_Ia32VmxEptVpidCap, /**< Takes real value as reference. */ 190 kCpumMsrRdFn_Ia32VmxTruePinbasedCtls, /**< Takes real value as reference. */ 191 kCpumMsrRdFn_Ia32VmxTrueProcbasedCtls, /**< Takes real value as reference. */ 192 kCpumMsrRdFn_Ia32VmxTrueExitCtls, /**< Takes real value as reference. */ 193 kCpumMsrRdFn_Ia32VmxTrueEntryCtls, /**< Takes real value as reference. */ 194 195 kCpumMsrRdFn_Amd64Efer, 196 kCpumMsrRdFn_Amd64SyscallTarget, 197 kCpumMsrRdFn_Amd64LongSyscallTarget, 198 kCpumMsrRdFn_Amd64CompSyscallTarget, 199 kCpumMsrRdFn_Amd64SyscallFlagMask, 200 kCpumMsrRdFn_Amd64FsBase, 201 kCpumMsrRdFn_Amd64GsBase, 202 kCpumMsrRdFn_Amd64KernelGsBase, 203 kCpumMsrRdFn_Amd64TscAux, 204 205 kCpumMsrRdFn_IntelEblCrPowerOn, 206 kCpumMsrRdFn_IntelPlatformInfo100MHz, 207 kCpumMsrRdFn_IntelPlatformInfo133MHz, 208 kCpumMsrRdFn_IntelPkgCStConfigControl, 209 kCpumMsrRdFn_IntelPmgIoCaptureBase, 210 kCpumMsrRdFn_IntelLastBranchFromToN, 211 kCpumMsrRdFn_IntelLastBranchFromN, 212 kCpumMsrRdFn_IntelLastBranchToN, 213 kCpumMsrRdFn_IntelLastBranchTos, 214 kCpumMsrRdFn_IntelBblCrCtl, 215 kCpumMsrRdFn_IntelBblCrCtl3, 216 kCpumMsrRdFn_IntelI7TemperatureTarget, /**< Range value returned. */ 217 kCpumMsrRdFn_IntelI7MsrOffCoreResponseN,/**< Takes register number. */ 218 kCpumMsrRdFn_IntelI7MiscPwrMgmt, 219 kCpumMsrRdFn_IntelP6CrN, 220 kCpumMsrRdFn_IntelCpuId1FeatureMaskEcdx, 221 kCpumMsrRdFn_IntelCpuId1FeatureMaskEax, 222 kCpumMsrRdFn_IntelCpuId80000001FeatureMaskEcdx, 223 kCpumMsrRdFn_IntelI7SandyAesNiCtl, 224 kCpumMsrRdFn_IntelI7TurboRatioLimit, /**< Returns range value. */ 225 kCpumMsrRdFn_IntelI7LbrSelect, 226 kCpumMsrRdFn_IntelI7SandyErrorControl, 227 kCpumMsrRdFn_IntelI7VirtualLegacyWireCap,/**< Returns range value. */ 228 kCpumMsrRdFn_IntelI7PowerCtl, 229 kCpumMsrRdFn_IntelI7SandyPebsNumAlt, 230 kCpumMsrRdFn_IntelI7PebsLdLat, 231 kCpumMsrRdFn_IntelI7PkgCnResidencyN, /**< Takes C-state number. */ 232 kCpumMsrRdFn_IntelI7CoreCnResidencyN, /**< Takes C-state number. */ 233 kCpumMsrRdFn_IntelI7SandyVrCurrentConfig,/**< Takes real value as reference. */ 234 kCpumMsrRdFn_IntelI7SandyVrMiscConfig, /**< Takes real value as reference. */ 235 kCpumMsrRdFn_IntelI7SandyRaplPowerUnit, /**< Takes real value as reference. */ 236 kCpumMsrRdFn_IntelI7SandyPkgCnIrtlN, /**< Takes real value as reference. */ 237 kCpumMsrRdFn_IntelI7SandyPkgC2Residency, /**< Takes real value as reference. */ 238 kCpumMsrRdFn_IntelI7RaplPkgPowerLimit, /**< Takes real value as reference. */ 239 kCpumMsrRdFn_IntelI7RaplPkgEnergyStatus, /**< Takes real value as reference. */ 240 kCpumMsrRdFn_IntelI7RaplPkgPerfStatus, /**< Takes real value as reference. */ 241 kCpumMsrRdFn_IntelI7RaplPkgPowerInfo, /**< Takes real value as reference. */ 242 kCpumMsrRdFn_IntelI7RaplDramPowerLimit, /**< Takes real value as reference. */ 243 kCpumMsrRdFn_IntelI7RaplDramEnergyStatus,/**< Takes real value as reference. */ 244 kCpumMsrRdFn_IntelI7RaplDramPerfStatus, /**< Takes real value as reference. */ 245 kCpumMsrRdFn_IntelI7RaplDramPowerInfo, /**< Takes real value as reference. */ 246 kCpumMsrRdFn_IntelI7RaplPp0PowerLimit, /**< Takes real value as reference. */ 247 kCpumMsrRdFn_IntelI7RaplPp0EnergyStatus, /**< Takes real value as reference. */ 248 kCpumMsrRdFn_IntelI7RaplPp0Policy, /**< Takes real value as reference. 
*/ 249 kCpumMsrRdFn_IntelI7RaplPp0PerfStatus, /**< Takes real value as reference. */ 250 kCpumMsrRdFn_IntelI7RaplPp1PowerLimit, /**< Takes real value as reference. */ 251 kCpumMsrRdFn_IntelI7RaplPp1EnergyStatus, /**< Takes real value as reference. */ 252 kCpumMsrRdFn_IntelI7RaplPp1Policy, /**< Takes real value as reference. */ 253 254 kCpumMsrRdFn_P6LastBranchFromIp, 255 kCpumMsrRdFn_P6LastBranchToIp, 256 kCpumMsrRdFn_P6LastIntFromIp, 257 kCpumMsrRdFn_P6LastIntToIp, 258 259 kCpumMsrRdFn_AmdFam15hTscRate, 260 kCpumMsrRdFn_AmdFam15hLwpCfg, 261 kCpumMsrRdFn_AmdFam15hLwpCbAddr, 262 kCpumMsrRdFn_AmdFam10hMc4MiscN, 263 kCpumMsrRdFn_AmdK8PerfCtlN, 264 kCpumMsrRdFn_AmdK8PerfCtrN, 265 kCpumMsrRdFn_AmdK8SysCfg, /**< Range value returned. */ 266 kCpumMsrRdFn_AmdK8HwCr, 267 kCpumMsrRdFn_AmdK8IorrBaseN, 268 kCpumMsrRdFn_AmdK8IorrMaskN, 269 kCpumMsrRdFn_AmdK8TopOfMemN, 270 kCpumMsrRdFn_AmdK8NbCfg1, 271 kCpumMsrRdFn_AmdK8McXcptRedir, 272 kCpumMsrRdFn_AmdK8CpuNameN, 273 kCpumMsrRdFn_AmdK8HwThermalCtrl, /**< Range value returned. */ 274 kCpumMsrRdFn_AmdK8SwThermalCtrl, 275 kCpumMsrRdFn_AmdK8McCtlMaskN, 276 kCpumMsrRdFn_AmdK8SmiOnIoTrapN, 277 kCpumMsrRdFn_AmdK8SmiOnIoTrapCtlSts, 278 kCpumMsrRdFn_AmdK8IntPendingMessage, 279 kCpumMsrRdFn_AmdK8SmiTriggerIoCycle, 280 kCpumMsrRdFn_AmdFam10hMmioCfgBaseAddr, 281 kCpumMsrRdFn_AmdFam10hTrapCtlMaybe, 282 kCpumMsrRdFn_AmdFam10hPStateCurLimit, /**< Returns range value. */ 283 kCpumMsrRdFn_AmdFam10hPStateControl, /**< Returns range value. */ 284 kCpumMsrRdFn_AmdFam10hPStateStatus, /**< Returns range value. */ 285 kCpumMsrRdFn_AmdFam10hPStateN, /**< Returns range value. This isn't an register index! */ 286 kCpumMsrRdFn_AmdFam10hCofVidControl, /**< Returns range value. */ 287 kCpumMsrRdFn_AmdFam10hCofVidStatus, /**< Returns range value. */ 288 kCpumMsrRdFn_AmdFam10hCStateIoBaseAddr, 289 kCpumMsrRdFn_AmdFam10hCpuWatchdogTimer, 290 kCpumMsrRdFn_AmdK8SmmBase, 291 kCpumMsrRdFn_AmdK8SmmAddr, 292 kCpumMsrRdFn_AmdK8SmmMask, 293 kCpumMsrRdFn_AmdK8VmCr, 294 kCpumMsrRdFn_AmdK8IgnNe, 295 kCpumMsrRdFn_AmdK8SmmCtl, 296 kCpumMsrRdFn_AmdK8VmHSavePa, 297 kCpumMsrRdFn_AmdFam10hVmLockKey, 298 kCpumMsrRdFn_AmdFam10hSmmLockKey, 299 kCpumMsrRdFn_AmdFam10hLocalSmiStatus, 300 kCpumMsrRdFn_AmdFam10hOsVisWrkIdLength, 301 kCpumMsrRdFn_AmdFam10hOsVisWrkStatus, 302 kCpumMsrRdFn_AmdFam16hL2IPerfCtlN, 303 kCpumMsrRdFn_AmdFam16hL2IPerfCtrN, 304 kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtlN, 305 kCpumMsrRdFn_AmdFam15hNorthbridgePerfCtrN, 306 kCpumMsrRdFn_AmdK7MicrocodeCtl, /**< Returns range value. */ 307 kCpumMsrRdFn_AmdK7ClusterIdMaybe, /**< Returns range value. */ 308 kCpumMsrRdFn_AmdK8CpuIdCtlStd07hEbax, 309 kCpumMsrRdFn_AmdK8CpuIdCtlStd06hEcx, 310 kCpumMsrRdFn_AmdK8CpuIdCtlStd01hEdcx, 311 kCpumMsrRdFn_AmdK8CpuIdCtlExt01hEdcx, 312 kCpumMsrRdFn_AmdK7DebugStatusMaybe, 313 kCpumMsrRdFn_AmdK7BHTraceBaseMaybe, 314 kCpumMsrRdFn_AmdK7BHTracePtrMaybe, 315 kCpumMsrRdFn_AmdK7BHTraceLimitMaybe, 316 kCpumMsrRdFn_AmdK7HardwareDebugToolCfgMaybe, 317 kCpumMsrRdFn_AmdK7FastFlushCountMaybe, 318 kCpumMsrRdFn_AmdK7NodeId, 319 kCpumMsrRdFn_AmdK7DrXAddrMaskN, /**< Takes register index. 
*/ 320 kCpumMsrRdFn_AmdK7Dr0DataMatchMaybe, 321 kCpumMsrRdFn_AmdK7Dr0DataMaskMaybe, 322 kCpumMsrRdFn_AmdK7LoadStoreCfg, 323 kCpumMsrRdFn_AmdK7InstrCacheCfg, 324 kCpumMsrRdFn_AmdK7DataCacheCfg, 325 kCpumMsrRdFn_AmdK7BusUnitCfg, 326 kCpumMsrRdFn_AmdK7DebugCtl2Maybe, 327 kCpumMsrRdFn_AmdFam15hFpuCfg, 328 kCpumMsrRdFn_AmdFam15hDecoderCfg, 329 kCpumMsrRdFn_AmdFam10hBusUnitCfg2, 330 kCpumMsrRdFn_AmdFam15hCombUnitCfg, 331 kCpumMsrRdFn_AmdFam15hCombUnitCfg2, 332 kCpumMsrRdFn_AmdFam15hCombUnitCfg3, 333 kCpumMsrRdFn_AmdFam15hExecUnitCfg, 334 kCpumMsrRdFn_AmdFam15hLoadStoreCfg2, 335 kCpumMsrRdFn_AmdFam10hIbsFetchCtl, 336 kCpumMsrRdFn_AmdFam10hIbsFetchLinAddr, 337 kCpumMsrRdFn_AmdFam10hIbsFetchPhysAddr, 338 kCpumMsrRdFn_AmdFam10hIbsOpExecCtl, 339 kCpumMsrRdFn_AmdFam10hIbsOpRip, 340 kCpumMsrRdFn_AmdFam10hIbsOpData, 341 kCpumMsrRdFn_AmdFam10hIbsOpData2, 342 kCpumMsrRdFn_AmdFam10hIbsOpData3, 343 kCpumMsrRdFn_AmdFam10hIbsDcLinAddr, 344 kCpumMsrRdFn_AmdFam10hIbsDcPhysAddr, 345 kCpumMsrRdFn_AmdFam10hIbsCtl, 346 kCpumMsrRdFn_AmdFam14hIbsBrTarget, 347 348 /** End of valid MSR read function indexes. */ 349 kCpumMsrRdFn_End 350 } CPUMMSRRDFN; 351 352 /** 353 * MSR write functions. 354 */ 355 typedef enum CPUMMSRWRFN 356 { 357 /** Invalid zero value. */ 358 kCpumMsrWrFn_Invalid = 0, 359 /** Writes are ignored, the fWrGpMask is observed though. */ 360 kCpumMsrWrFn_IgnoreWrite, 361 /** Writes cause GP(0) to be raised, the fWrGpMask should be UINT64_MAX. */ 362 kCpumMsrWrFn_ReadOnly, 363 /** Alias to the MSR range starting at the MSR given by 364 * CPUMMSRRANGE::uInitOrReadValue. Must be used in pair with 365 * kCpumMsrRdFn_MsrAlias. */ 366 kCpumMsrWrFn_MsrAlias, 367 368 kCpumMsrWrFn_Ia32P5McAddr, 369 kCpumMsrWrFn_Ia32P5McType, 370 kCpumMsrWrFn_Ia32TimestampCounter, 371 kCpumMsrWrFn_Ia32ApicBase, 372 kCpumMsrWrFn_Ia32FeatureControl, 373 kCpumMsrWrFn_Ia32BiosUpdateTrigger, 374 kCpumMsrWrFn_Ia32SmmMonitorCtl, 375 kCpumMsrWrFn_Ia32PmcN, 376 kCpumMsrWrFn_Ia32MonitorFilterLineSize, 377 kCpumMsrWrFn_Ia32MPerf, 378 kCpumMsrWrFn_Ia32APerf, 379 kCpumMsrWrFn_Ia32MtrrPhysBaseN, /**< Takes register number. */ 380 kCpumMsrWrFn_Ia32MtrrPhysMaskN, /**< Takes register number. */ 381 kCpumMsrWrFn_Ia32MtrrFixed, /**< Takes CPUMCPU offset. */ 382 kCpumMsrWrFn_Ia32MtrrDefType, 383 kCpumMsrWrFn_Ia32Pat, 384 kCpumMsrWrFn_Ia32SysEnterCs, 385 kCpumMsrWrFn_Ia32SysEnterEsp, 386 kCpumMsrWrFn_Ia32SysEnterEip, 387 kCpumMsrWrFn_Ia32McgStatus, 388 kCpumMsrWrFn_Ia32McgCtl, 389 kCpumMsrWrFn_Ia32DebugCtl, 390 kCpumMsrWrFn_Ia32SmrrPhysBase, 391 kCpumMsrWrFn_Ia32SmrrPhysMask, 392 kCpumMsrWrFn_Ia32PlatformDcaCap, 393 kCpumMsrWrFn_Ia32Dca0Cap, 394 kCpumMsrWrFn_Ia32PerfEvtSelN, /**< Range value indicates the register number. */ 395 kCpumMsrWrFn_Ia32PerfCtl, 396 kCpumMsrWrFn_Ia32FixedCtrN, /**< Takes register number of start of range. */ 397 kCpumMsrWrFn_Ia32PerfCapabilities, 398 kCpumMsrWrFn_Ia32FixedCtrCtrl, 399 kCpumMsrWrFn_Ia32PerfGlobalStatus, 400 kCpumMsrWrFn_Ia32PerfGlobalCtrl, 401 kCpumMsrWrFn_Ia32PerfGlobalOvfCtrl, 402 kCpumMsrWrFn_Ia32PebsEnable, 403 kCpumMsrWrFn_Ia32ClockModulation, 404 kCpumMsrWrFn_Ia32ThermInterrupt, 405 kCpumMsrWrFn_Ia32ThermStatus, 406 kCpumMsrWrFn_Ia32Therm2Ctl, 407 kCpumMsrWrFn_Ia32MiscEnable, 408 kCpumMsrWrFn_Ia32McCtlStatusAddrMiscN, /**< Takes bank number. */ 409 kCpumMsrWrFn_Ia32McNCtl2, /**< Takes register number of start of range. 
*/
410 	kCpumMsrWrFn_Ia32DsArea,
411 	kCpumMsrWrFn_Ia32TscDeadline,
412 	kCpumMsrWrFn_Ia32X2ApicN,
413
414 	kCpumMsrWrFn_Amd64Efer,
415 	kCpumMsrWrFn_Amd64SyscallTarget,
416 	kCpumMsrWrFn_Amd64LongSyscallTarget,
417 	kCpumMsrWrFn_Amd64CompSyscallTarget,
418 	kCpumMsrWrFn_Amd64SyscallFlagMask,
419 	kCpumMsrWrFn_Amd64FsBase,
420 	kCpumMsrWrFn_Amd64GsBase,
421 	kCpumMsrWrFn_Amd64KernelGsBase,
422 	kCpumMsrWrFn_Amd64TscAux,
423 	kCpumMsrWrFn_IntelEblCrPowerOn,
424 	kCpumMsrWrFn_IntelPkgCStConfigControl,
425 	kCpumMsrWrFn_IntelPmgIoCaptureBase,
426 	kCpumMsrWrFn_IntelLastBranchFromToN,
427 	kCpumMsrWrFn_IntelLastBranchFromN,
428 	kCpumMsrWrFn_IntelLastBranchToN,
429 	kCpumMsrWrFn_IntelLastBranchTos,
430 	kCpumMsrWrFn_IntelBblCrCtl,
431 	kCpumMsrWrFn_IntelBblCrCtl3,
432 	kCpumMsrWrFn_IntelI7TemperatureTarget,
433 	kCpumMsrWrFn_IntelI7MsrOffCoreResponseN, /**< Takes register number. */
434 	kCpumMsrWrFn_IntelI7MiscPwrMgmt,
435 	kCpumMsrWrFn_IntelP6CrN,
436 	kCpumMsrWrFn_IntelCpuId1FeatureMaskEcdx,
437 	kCpumMsrWrFn_IntelCpuId1FeatureMaskEax,
438 	kCpumMsrWrFn_IntelCpuId80000001FeatureMaskEcdx,
439 	kCpumMsrWrFn_IntelI7SandyAesNiCtl,
440 	kCpumMsrWrFn_IntelI7TurboRatioLimit,
441 	kCpumMsrWrFn_IntelI7LbrSelect,
442 	kCpumMsrWrFn_IntelI7SandyErrorControl,
443 	kCpumMsrWrFn_IntelI7PowerCtl,
444 	kCpumMsrWrFn_IntelI7SandyPebsNumAlt,
445 	kCpumMsrWrFn_IntelI7PebsLdLat,
446 	kCpumMsrWrFn_IntelI7SandyVrCurrentConfig,
447 	kCpumMsrWrFn_IntelI7SandyVrMiscConfig,
448 	kCpumMsrWrFn_IntelI7SandyPkgCnIrtlN,
449 	kCpumMsrWrFn_IntelI7RaplPkgPowerLimit,
450 	kCpumMsrWrFn_IntelI7RaplDramPowerLimit,
451 	kCpumMsrWrFn_IntelI7RaplPp0PowerLimit,
452 	kCpumMsrWrFn_IntelI7RaplPp0Policy,
453 	kCpumMsrWrFn_IntelI7RaplPp1PowerLimit,
454 	kCpumMsrWrFn_IntelI7RaplPp1Policy,
455
456 	kCpumMsrWrFn_P6LastIntFromIp,
457 	kCpumMsrWrFn_P6LastIntToIp,
458
459 	kCpumMsrWrFn_AmdFam15hTscRate,
460 	kCpumMsrWrFn_AmdFam15hLwpCfg,
461 	kCpumMsrWrFn_AmdFam15hLwpCbAddr,
462 	kCpumMsrWrFn_AmdFam10hMc4MiscN,
463 	kCpumMsrWrFn_AmdK8PerfCtlN,
464 	kCpumMsrWrFn_AmdK8PerfCtrN,
465 	kCpumMsrWrFn_AmdK8SysCfg,
466 	kCpumMsrWrFn_AmdK8HwCr,
467 	kCpumMsrWrFn_AmdK8IorrBaseN,
468 	kCpumMsrWrFn_AmdK8IorrMaskN,
469 	kCpumMsrWrFn_AmdK8TopOfMemN,
470 	kCpumMsrWrFn_AmdK8NbCfg1,
471 	kCpumMsrWrFn_AmdK8McXcptRedir,
472 	kCpumMsrWrFn_AmdK8CpuNameN,
473 	kCpumMsrWrFn_AmdK8HwThermalCtrl,
474 	kCpumMsrWrFn_AmdK8SwThermalCtrl,
475 	kCpumMsrWrFn_AmdK8McCtlMaskN,
476 	kCpumMsrWrFn_AmdK8SmiOnIoTrapN,
477 	kCpumMsrWrFn_AmdK8SmiOnIoTrapCtlSts,
478 	kCpumMsrWrFn_AmdK8IntPendingMessage,
479 	kCpumMsrWrFn_AmdK8SmiTriggerIoCycle,
480 	kCpumMsrWrFn_AmdFam10hMmioCfgBaseAddr,
481 	kCpumMsrWrFn_AmdFam10hTrapCtlMaybe,
482 	kCpumMsrWrFn_AmdFam10hPStateControl,
483 	kCpumMsrWrFn_AmdFam10hPStateStatus,
484 	kCpumMsrWrFn_AmdFam10hPStateN,
485 	kCpumMsrWrFn_AmdFam10hCofVidControl,
486 	kCpumMsrWrFn_AmdFam10hCofVidStatus,
487 	kCpumMsrWrFn_AmdFam10hCStateIoBaseAddr,
488 	kCpumMsrWrFn_AmdFam10hCpuWatchdogTimer,
489 	kCpumMsrWrFn_AmdK8SmmBase,
490 	kCpumMsrWrFn_AmdK8SmmAddr,
491 	kCpumMsrWrFn_AmdK8SmmMask,
492 	kCpumMsrWrFn_AmdK8VmCr,
493 	kCpumMsrWrFn_AmdK8IgnNe,
494 	kCpumMsrWrFn_AmdK8SmmCtl,
495 	kCpumMsrWrFn_AmdK8VmHSavePa,
496 	kCpumMsrWrFn_AmdFam10hVmLockKey,
497 	kCpumMsrWrFn_AmdFam10hSmmLockKey,
498 	kCpumMsrWrFn_AmdFam10hLocalSmiStatus,
499 	kCpumMsrWrFn_AmdFam10hOsVisWrkIdLength,
500 	kCpumMsrWrFn_AmdFam10hOsVisWrkStatus,
501 	kCpumMsrWrFn_AmdFam16hL2IPerfCtlN,
502 	kCpumMsrWrFn_AmdFam16hL2IPerfCtrN,
503 	kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtlN,
504 	kCpumMsrWrFn_AmdFam15hNorthbridgePerfCtrN,
505 	kCpumMsrWrFn_AmdK7MicrocodeCtl,
506 	kCpumMsrWrFn_AmdK7ClusterIdMaybe,
507 	kCpumMsrWrFn_AmdK8CpuIdCtlStd07hEbax,
508 	kCpumMsrWrFn_AmdK8CpuIdCtlStd06hEcx,
509 	kCpumMsrWrFn_AmdK8CpuIdCtlStd01hEdcx,
510 	kCpumMsrWrFn_AmdK8CpuIdCtlExt01hEdcx,
511 	kCpumMsrWrFn_AmdK7DebugStatusMaybe,
512 	kCpumMsrWrFn_AmdK7BHTraceBaseMaybe,
513 	kCpumMsrWrFn_AmdK7BHTracePtrMaybe,
514 	kCpumMsrWrFn_AmdK7BHTraceLimitMaybe,
515 	kCpumMsrWrFn_AmdK7HardwareDebugToolCfgMaybe,
516 	kCpumMsrWrFn_AmdK7FastFlushCountMaybe,
517 	kCpumMsrWrFn_AmdK7NodeId,
518 	kCpumMsrWrFn_AmdK7DrXAddrMaskN, /**< Takes register index. */
519 	kCpumMsrWrFn_AmdK7Dr0DataMatchMaybe,
520 	kCpumMsrWrFn_AmdK7Dr0DataMaskMaybe,
521 	kCpumMsrWrFn_AmdK7LoadStoreCfg,
522 	kCpumMsrWrFn_AmdK7InstrCacheCfg,
523 	kCpumMsrWrFn_AmdK7DataCacheCfg,
524 	kCpumMsrWrFn_AmdK7BusUnitCfg,
525 	kCpumMsrWrFn_AmdK7DebugCtl2Maybe,
526 	kCpumMsrWrFn_AmdFam15hFpuCfg,
527 	kCpumMsrWrFn_AmdFam15hDecoderCfg,
528 	kCpumMsrWrFn_AmdFam10hBusUnitCfg2,
529 	kCpumMsrWrFn_AmdFam15hCombUnitCfg,
530 	kCpumMsrWrFn_AmdFam15hCombUnitCfg2,
531 	kCpumMsrWrFn_AmdFam15hCombUnitCfg3,
532 	kCpumMsrWrFn_AmdFam15hExecUnitCfg,
533 	kCpumMsrWrFn_AmdFam15hLoadStoreCfg2,
534 	kCpumMsrWrFn_AmdFam10hIbsFetchCtl,
535 	kCpumMsrWrFn_AmdFam10hIbsFetchLinAddr,
536 	kCpumMsrWrFn_AmdFam10hIbsFetchPhysAddr,
537 	kCpumMsrWrFn_AmdFam10hIbsOpExecCtl,
538 	kCpumMsrWrFn_AmdFam10hIbsOpRip,
539 	kCpumMsrWrFn_AmdFam10hIbsOpData,
540 	kCpumMsrWrFn_AmdFam10hIbsOpData2,
541 	kCpumMsrWrFn_AmdFam10hIbsOpData3,
542 	kCpumMsrWrFn_AmdFam10hIbsDcLinAddr,
543 	kCpumMsrWrFn_AmdFam10hIbsDcPhysAddr,
544 	kCpumMsrWrFn_AmdFam10hIbsCtl,
545 	kCpumMsrWrFn_AmdFam14hIbsBrTarget,
546
547 	/** End of valid MSR write function indexes. */
548 	kCpumMsrWrFn_End
549 } CPUMMSRWRFN;
550
551 /**
552  * MSR range.
553  */
554 typedef struct CPUMMSRRANGE
555 {
556 	/** The first MSR. [0] */
557 	uint32_t uFirst;
558 	/** The last MSR. [4] */
559 	uint32_t uLast;
560 	/** The read function (CPUMMSRRDFN). [8] */
561 	uint16_t enmRdFn;
562 	/** The write function (CPUMMSRWRFN). [10] */
563 	uint16_t enmWrFn;
564 	/** The offset of the 64-bit MSR value relative to the start of CPUMCPU.
565 	 * UINT16_MAX if not used by the read and write functions. [12] */
566 	uint16_t offCpumCpu;
567 	/** Reserved for future hacks. [14] */
568 	uint16_t fReserved;
569 	/** The init/read value. [16]
570 	 * When enmRdFn is kCpumMsrRdFn_INIT_VALUE, this is the value returned on RDMSR.
571 	 * offCpumCpu must be UINT16_MAX in that case, otherwise it must be a valid
572 	 * offset into CPUM. */
573 	uint64_t uInitOrReadValue;
574 	/** The bits to ignore when writing. [24] */
575 	uint64_t fWrIgnMask;
576 	/** The bits that will cause a GP(0) when writing. [32]
577 	 * This is always checked prior to calling the write function. Using
578 	 * UINT64_MAX effectively marks the MSR as read-only. */
579 	uint64_t fWrGpMask;
580 	/** The register name, if applicable. [40] */
581 	char szName[56];
582
583 #ifdef VBOX_WITH_STATISTICS
584 	/** The number of reads. */
585 	STAMCOUNTER cReads;
586 	/** The number of writes. */
587 	STAMCOUNTER cWrites;
588 	/** The number of times ignored bits were written. */
589 	STAMCOUNTER cIgnoredBits;
590 	/** The number of GPs generated. */
591 	STAMCOUNTER cGps;
592 #endif
593 } CPUMMSRRANGE;
594 #ifdef VBOX_WITH_STATISTICS
595 AssertCompileSize(CPUMMSRRANGE, 128);
596 #else
597 AssertCompileSize(CPUMMSRRANGE, 96);
598 #endif
599 /** Pointer to an MSR range. */
600 typedef CPUMMSRRANGE *PCPUMMSRRANGE;
601 /** Pointer to a const MSR range. */
602 typedef CPUMMSRRANGE const *PCCPUMMSRRANGE;
603
604
605
606
607 /**
608  * CPU features and quirks.
609  * This is mostly exploded CPUID info.
610  */
611 typedef struct CPUMFEATURES
612 {
613 	/** The CPU vendor (CPUMCPUVENDOR). */
614 	uint8_t enmCpuVendor;
615 	/** The CPU family. */
616 	uint8_t uFamily;
617 	/** The CPU model. */
618 	uint8_t uModel;
619 	/** The CPU stepping. */
620 	uint8_t uStepping;
621 	/** The microarchitecture. */
622 	CPUMMICROARCH enmMicroarch;
623 	/** The maximum physical address width of the CPU. */
624 	uint8_t cMaxPhysAddrWidth;
625 	/** Alignment padding. */
626 	uint8_t abPadding[3];
627
628 	/** Supports MSRs. */
629 	uint32_t fMsr : 1;
630 	/** Supports the page size extension (4/2 MB pages). */
631 	uint32_t fPse : 1;
632 	/** Supports 36-bit page size extension (4 MB pages can map memory above
633 	 * 4GB). */
634 	uint32_t fPse36 : 1;
635 	/** Supports physical address extension (PAE). */
636 	uint32_t fPae : 1;
637 	/** Page attribute table (PAT) support (page level cache control). */
638 	uint32_t fPat : 1;
639 	/** Supports the FXSAVE and FXRSTOR instructions. */
640 	uint32_t fFxSaveRstor : 1;
641 	/** Intel SYSENTER/SYSEXIT support */
642 	uint32_t fSysEnter : 1;
643 	/** First generation APIC. */
644 	uint32_t fApic : 1;
645 	/** Second generation APIC. */
646 	uint32_t fX2Apic : 1;
647 	/** Hypervisor present. */
648 	uint32_t fHypervisorPresent : 1;
649 	/** MWAIT & MONITOR instructions supported. */
650 	uint32_t fMonitorMWait : 1;
651
652 	/** AMD64: Supports long mode. */
653 	uint32_t fLongMode : 1;
654 	/** AMD64: SYSCALL/SYSRET support. */
655 	uint32_t fSysCall : 1;
656 	/** AMD64: No-execute page table bit. */
657 	uint32_t fNoExecute : 1;
658 	/** AMD64: Supports LAHF & SAHF instructions in 64-bit mode. */
659 	uint32_t fLahfSahf : 1;
660 	/** AMD64: Supports RDTSCP. */
661 	uint32_t fRdTscP : 1;
662
663 	/** Indicates that FPU instruction and data pointers may leak.
664 	 * This generally applies to recent AMD CPUs, where the FPU IP and DP pointer
665 	 * is only saved and restored if an exception is pending. */
666 	uint32_t fLeakyFxSR : 1;
667
668 	/** Alignment padding. */
669 	uint32_t fPadding : 9;
670
671 	uint64_t auPadding[2];
672 } CPUMFEATURES;
673 AssertCompileSize(CPUMFEATURES, 32);
674 /** Pointer to a CPU feature structure. */
675 typedef CPUMFEATURES *PCPUMFEATURES;
676 /** Pointer to a const CPU feature structure. */
677 typedef CPUMFEATURES const *PCCPUMFEATURES;
678
679
680 /**
681  * CPU info
682  */
683 typedef struct CPUMINFO
684 {
685 	/** The number of MSR ranges (CPUMMSRRANGE) in the array pointed to below. */
686 	uint32_t cMsrRanges;
687 	/** Mask applied to ECX before looking up the MSR for a RDMSR/WRMSR
688 	 * instruction. Older hardware has been observed to ignore higher bits. */
689 	uint32_t fMsrMask;
690
691 	/** The number of CPUID leaves (CPUMCPUIDLEAF) in the array pointed to below. */
692 	uint32_t cCpuIdLeaves;
693 	/** The index of the first extended CPUID leaf in the array.
694 	 * Set to cCpuIdLeaves if none present. */
695 	uint32_t iFirstExtCpuIdLeaf;
696 	/** How to handle unknown CPUID leaves. */
697 	CPUMUKNOWNCPUID enmUnknownCpuIdMethod;
698 	/** For use with CPUMUKNOWNCPUID_DEFAULTS. */
699 	CPUMCPUID DefCpuId;
700
701 	/** Alignment padding. */
702 	uint32_t uPadding;
703
704 	/** Pointer to the MSR ranges (ring-0 pointer). */
705 	R0PTRTYPE(PCPUMMSRRANGE) paMsrRangesR0;
706 	/** Pointer to the CPUID leaves (ring-0 pointer). */
707 	R0PTRTYPE(PCPUMCPUIDLEAF) paCpuIdLeavesR0;
708
709 	/** Pointer to the MSR ranges (ring-3 pointer). */
710 	R3PTRTYPE(PCPUMMSRRANGE) paMsrRangesR3;
711 	/** Pointer to the CPUID leaves (ring-3 pointer). */
712 	R3PTRTYPE(PCPUMCPUIDLEAF) paCpuIdLeavesR3;
713
714 	/** Pointer to the MSR ranges (raw-mode context pointer). */
715 	RCPTRTYPE(PCPUMMSRRANGE) paMsrRangesRC;
716 	/** Pointer to the CPUID leaves (raw-mode context pointer). */
717 	RCPTRTYPE(PCPUMCPUIDLEAF) paCpuIdLeavesRC;
718 } CPUMINFO;
719 /** Pointer to a CPU info structure. */
720 typedef CPUMINFO *PCPUMINFO;
721 /** Pointer to a const CPU info structure. */
722 typedef CPUMINFO const *CPCPUMINFO;
110 723
111 724
… …
310 923 	} CPUFeaturesExt;
311 924
312 	/** Host CPU manufacturer. */
313 	CPUMCPUVENDOR enmHostCpuVendor;
314 	/** Guest CPU manufacturer. */
315 	CPUMCPUVENDOR enmGuestCpuVendor;
316
317 925 	/** CR4 mask */
318 926 	struct
… …
322 930 	} CR4;
323 931
324 	/** Synthetic CPU type? */
325 	bool fSyntheticCpu;
326 932 	/** The (more) portable CPUID level. */
327 933 	uint8_t u8PortableCpuIdLevel;
… …
329 935 	 * This is used to verify load order dependencies (PGM). */
330 936 	bool fPendingRestore;
331 	uint8_t abPadding[HC_ARCH_BITS == 64 ? 5 : 1];
937 	uint8_t abPadding[HC_ARCH_BITS == 64 ? 6 : 2];
332 938
333 939 	/** The standard set of CpuId leaves. */
… …
345 951 	uint8_t abPadding2[4];
346 952 #endif
953
954 	/** Guest CPU info. */
955 	CPUMINFO GuestInfo;
956 	/** Guest CPU feature information. */
957 	CPUMFEATURES GuestFeatures;
958 	/** Host CPU feature information. */
959 	CPUMFEATURES HostFeatures;
960
961 	/** @name MSR statistics.
962 	 * @{ */
963 	STAMCOUNTER cMsrWrites;
964 	STAMCOUNTER cMsrWritesToIgnoredBits;
965 	STAMCOUNTER cMsrWritesRaiseGp;
966 	STAMCOUNTER cMsrWritesUnknown;
967 	STAMCOUNTER cMsrReads;
968 	STAMCOUNTER cMsrReadsRaiseGp;
969 	STAMCOUNTER cMsrReadsUnknown;
970 	/** @} */
347 971 } CPUM;
348 972 /** Pointer to the CPUM instance data residing in the shared VM structure. */
… …
430 1054 RT_C_DECLS_BEGIN
431 1055
1056 PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf);
1057
432 1058 #ifdef IN_RING3
433 1059 int cpumR3DbgInit(PVM pVM);
1060 PCPUMCPUIDLEAF cpumR3CpuIdGetLeaf(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf);
1061 bool cpumR3CpuIdGetLeafLegacy(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf,
1062 	PCPUMCPUID pLeagcy);
1063 int cpumR3CpuIdInsert(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves, PCPUMCPUIDLEAF pNewLeaf);
1064 void cpumR3CpuIdRemoveRange(PCPUMCPUIDLEAF paLeaves, uint32_t *pcLeaves, uint32_t uFirst, uint32_t uLast);
1065 int cpumR3CpuIdExplodeFeatures(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCPUMFEATURES pFeatures);
1066 int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo);
1067 int cpumR3MsrRangesInsert(PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange);
1068 int cpumR3MsrStrictInitChecks(void);
1069 int cpumR3MsrRegStats(PVM pVM);
1070 PCPUMMSRRANGE cpumLookupMsrRange(PVM pVM, uint32_t idMsr);
434 1071 #endif
435 1072
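The fWrIgnMask/fWrGpMask pair is what makes the new MSR code table-driven: fWrGpMask is checked before any write function runs (UINT64_MAX turns the MSR read-only), while fWrIgnMask bits are silently discarded, which is what the cMsrWritesToIgnoredBits counter tracks. A minimal sketch of a write path built on these fields follows, assuming the cpumLookupMsrRange() helper declared above; the function name sketchWrMsr and the exact status-code handling are illustrative, not the actual VMMAll/CPUMAllMsrs.cpp implementation:

    /* Sketch only, in CPUMInternal.h + VBox/err.h context: how one range
       entry gates a guest WRMSR before its write function is dispatched. */
    static int sketchWrMsr(PVM pVM, uint32_t idMsr, uint64_t uValue)
    {
        /* Older CPUs ignore the upper ECX bits, hence fMsrMask. */
        PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, idMsr & pVM->cpum.s.GuestInfo.fMsrMask);
        if (!pRange)
            return VERR_CPUM_RAISE_GP_0;       /* unknown MSR -> #GP(0) */

        /* fWrGpMask is always checked prior to calling the write function;
           UINT64_MAX here makes the MSR effectively read-only. */
        if (uValue & pRange->fWrGpMask)
            return VERR_CPUM_RAISE_GP_0;

        /* Bits the hardware silently ignores are stripped up front. */
        uValue &= ~pRange->fWrIgnMask;

        /* ... dispatch on pRange->enmWrFn with the stripped value ... */
        return VINF_SUCCESS;
    }

Ranges rather than single MSRs keep the table small for register files like the Intel last-branch stacks, which is also why several write-function indexes above take a register number.

-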
trunk/src/VBox/VMM/include/CPUMInternal.mac
r49019 r49893
67 67 	.CPUFeaturesExt.ecx resd 1
68 68
69 	.enmHostCpuVendor resd 1
70 	.enmGuestCpuVendor resd 1
71
72 69 	; CR4 masks
73 70 	.CR4.AndMask resd 1
74 71 	.CR4.OrMask resd 1
75 72 	; entered rawmode?
76 	.fSyntheticCpu resb 1
77 73 	.u8PortableCpuIdLevel resb 1
78 74 	.fPendingRestore resb 1
79 75 %if RTHCPTR_CB == 8
80 	.abPadding resb 5
76 	.abPadding resb 6
81 77 %else
82 	.abPadding resb 1
78 	.abPadding resb 2
83 79 %endif
84 80
… …
93 89 	.abPadding2 resb 4
94 90 %endif
91
92 	.GuestInfo resb RTHCPTR_CB*4 + RTRCPTR_CB*2 + 4*10
93 	.GuestFeatures resb 32
94 	.HostFeatures resb 32
95
96 	.cMsrWrites resq 1
97 	.cMsrWritesToIgnoredBits resq 1
98 	.cMsrWritesRaiseGp resq 1
99 	.cMsrWritesUnknown resq 1
100 	.cMsrReads resq 1
101 	.cMsrReadsRaiseGp resq 1
102 	.cMsrReadsUnknown resq 1
95 103 endstruc
96 104
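The assembly mirror has to track the C layout by hand: the .GuestInfo reservation spells CPUMINFO out as four host-context pointers, two raw-mode pointers and ten dwords of counts, config and the 16-byte DefCpuId default, while the two 32-byte feature blocks lean on the AssertCompileSize(CPUMFEATURES, 32) already present in the header. Compile-time checks in the same style could pin down the remaining offsets the .mac file hard-codes; the particular assertions below are illustrative and grounded only in the [N] offset tags of the structures above, not lifted from CPUM.cpp:

    /* Illustrative IPRT compile-time assertions; a drift between the C
       structures and the .mac reservations would otherwise only show up
       at runtime in raw-mode/ring-0 code. */
    #include <iprt/assert.h>

    AssertCompileSize(CPUMFEATURES, 32);                            /* matches 'resb 32' */
    AssertCompileMemberOffset(CPUMMSRRANGE, uInitOrReadValue, 16);  /* the [16] tag */
    AssertCompileMemberOffset(CPUMMSRRANGE, szName, 40);            /* the [40] tag */

-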
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r47844 r49893
32 32 	GEN_CHECK_OFF(CPUM, CPUFeaturesExt);
33 33 	GEN_CHECK_OFF(CPUM, CPUFeaturesExt);
34 	GEN_CHECK_OFF(CPUM, enmHostCpuVendor);
35 	GEN_CHECK_OFF(CPUM, enmGuestCpuVendor);
36 34 	GEN_CHECK_OFF(CPUM, CR4);
37 35 #ifndef VBOX_FOR_DTRACE_LIB
38 	GEN_CHECK_OFF(CPUM, fSyntheticCpu);
39 36 	GEN_CHECK_OFF(CPUM, u8PortableCpuIdLevel);
40 37 	GEN_CHECK_OFF(CPUM, fPendingRestore);
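tstVMStruct.h feeds the struct-offset testcases, so entries for the members deleted from CPUM have to go in the same commit or the generated test stops compiling. Conceptually, each surviving line boils down to something like the following; this is a simplified, hypothetical expansion, since the real macro is supplied by whichever testcase includes the list:

    /* Hypothetical GEN_CHECK_OFF stand-in: emit each member's offset so
       different builds of the same structures can be diffed. */
    #include <stdio.h>
    #include <stddef.h>

    #define GEN_CHECK_OFF(type, member) \
        printf("%-10s %-28s %#05lx\n", #type, #member, \
               (unsigned long)offsetof(type, member))

-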
trunk/src/VBox/VMM/tools/Makefile.kmk
r49282 r49893
51 51
52 52
53 #
54 # CPU report program (CPUM DB).
55 #
56 PROGRAMS += VBoxCpuReport
57 VBoxCpuReport_TEMPLATE := VBoxR3Static
58 VBoxCpuReport_DEFS = IN_VMM_R3
59 VBoxCpuReport_INCS = ../include
60 VBoxCpuReport_SOURCES = \
61 	VBoxCpuReport.cpp \
62 	../VMMR3/CPUMR3CpuId.cpp
63 VBoxCpuReport_LIBS = \
64 	$(PATH_STAGE_LIB)/SUPR3Static$(VBOX_SUFF_LIB) \
65 	$(VBOX_LIB_RUNTIME_STATIC)
66 VBoxCpuReport_LDFLAGS.darwin = \
67 	-framework IOKit -framework CoreFoundation -framework CoreServices
68
69
53 70 include $(FILE_KBUILD_SUB_FOOTER)
54 71
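Per its makefile comment, VBoxCpuReport is the ring-3 tool behind the CPUM DB entries; it compiles CPUMR3CpuId.cpp directly so it reuses the same leaf handling as the VMM, statically links the support library, and on darwin pulls in IOKit/CoreFoundation/CoreServices. The kind of raw probing such a report involves looks roughly like this; VBoxCpuReport.cpp itself is one of the four added files and its actual code is not shown in this changeset view:

    /* Illustration only: capture one raw CPUID sub-leaf with IPRT. */
    #include <iprt/asm-amd64-x86.h>
    #include <iprt/stream.h>

    static void reportCpuIdSubLeaf(uint32_t uLeaf, uint32_t uSubLeaf)
    {
        uint32_t uEax, uEbx, uEcx, uEdx;
        ASMCpuId_Idx_ECX(uLeaf, uSubLeaf, &uEax, &uEbx, &uEcx, &uEdx);
        RTPrintf("%08x/%#04x: %08x %08x %08x %08x\n",
                 uLeaf, uSubLeaf, uEax, uEbx, uEcx, uEdx);
    }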