Changeset 9354 in vbox for trunk/src/VBox

Timestamp:                    Jun 3, 2008 1:45:14 PM
svn:sync-xref-src-repo-rev:   31571
Location:                     trunk/src/VBox/VMM
Files:                        6 edited

Legend: added lines are prefixed with "+", removed lines with "-", and unchanged context lines with a space.
trunk/src/VBox/VMM/CPUM.cpp
r9216 → r9354

         return rc;

+    /* Query the CPU manufacturer. */
+    uint32_t uEAX, uEBX, uECX, uEDX;
+    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
+    if (    uEAX >= 1
+        &&  uEBX == X86_CPUID_VENDOR_AMD_EBX
+        &&  uECX == X86_CPUID_VENDOR_AMD_ECX
+        &&  uEDX == X86_CPUID_VENDOR_AMD_EDX)
+    {
+        pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_AMD;
+    }
+    else if (    uEAX >= 1
+             &&  uEBX == X86_CPUID_VENDOR_INTEL_EBX
+             &&  uECX == X86_CPUID_VENDOR_INTEL_ECX
+             &&  uEDX == X86_CPUID_VENDOR_INTEL_EDX)
+    {
+        pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_INTEL;
+    }
+    else /* @todo Via */
+        pVM->cpum.s.enmCPUVendor = CPUMCPUVENDOR_UNKNOWN;
+
     /*
      * Register info handlers.
…
     DBGFR3InfoRegisterInternal(pVM, "cpumhyper", "Displays the hypervisor cpu state.", &cpumR3InfoHyper);
     DBGFR3InfoRegisterInternal(pVM, "cpumhost", "Displays the host cpu state.", &cpumR3InfoHost);
-    DBGFR3InfoRegisterInternal(pVM, "cpuid", "Displays the guest cpuid leaves.",
+    DBGFR3InfoRegisterInternal(pVM, "cpuid", "Displays the guest cpuid leaves.", &cpumR3CpuIdInfo);

     /*
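For context, here is a minimal standalone sketch (not VirtualBox code) of the vendor detection the new CPUM.cpp block performs: CPUID leaf 0 returns the 12-byte vendor string in EBX, EDX, ECX, and the X86_CPUID_VENDOR_* constants used above are simply those register values for "GenuineIntel" and "AuthenticAMD". The sketch assumes a GCC/Clang toolchain on x86 so that <cpuid.h> and __get_cpuid() are available; the names DetectCpuVendor and cpuvendor_t are invented for the example.

/* Standalone illustration of the vendor check in the hunk above. */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

typedef enum { VENDOR_UNKNOWN, VENDOR_INTEL, VENDOR_AMD } cpuvendor_t;

static cpuvendor_t DetectCpuVendor(void)
{
    unsigned uEAX, uEBX, uECX, uEDX;
    char szVendor[13];

    if (!__get_cpuid(0, &uEAX, &uEBX, &uECX, &uEDX))
        return VENDOR_UNKNOWN;

    /* The 12-byte vendor string is returned in EBX, EDX, ECX (in that order). */
    memcpy(&szVendor[0], &uEBX, 4);
    memcpy(&szVendor[4], &uEDX, 4);
    memcpy(&szVendor[8], &uECX, 4);
    szVendor[12] = '\0';

    if (!strcmp(szVendor, "GenuineIntel"))
        return VENDOR_INTEL;
    if (!strcmp(szVendor, "AuthenticAMD"))
        return VENDOR_AMD;
    return VENDOR_UNKNOWN;  /* e.g. VIA, still marked as a @todo in the hunk above */
}

int main(void)
{
    printf("vendor=%d\n", DetectCpuVendor());
    return 0;
}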
trunk/src/VBox/VMM/CPUMInternal.h
r9212 → r9354

     } CPUFeatures;

+    /* CPU manufacturer. */
+    CPUMCPUVENDOR   enmCPUVendor;
+
     /** CR4 mask */
     struct
trunk/src/VBox/VMM/CPUMInternal.mac
r9212 → r9354

     .CPUFeatures.edx        resd    1
     .CPUFeatures.ecx        resd    1
+
+    .enmCPUVendor           resd    1
+
     ; CR4 masks
     .CR4.AndMask            resd    1
trunk/src/VBox/VMM/HWACCM.cpp
r9116 → r9354

             CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
+#ifdef VBOX_WITH_64_BITS_GUESTS
+            CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
+            CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);   /* 64 bits only on Intel CPUs */
+            CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
+            CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
+#endif
             LogRel(("HWACCM: VMX enabled!\n"));
         }
…
         hwaccmr3DisableRawMode(pVM);
         CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
+        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
+#ifdef VBOX_WITH_64_BITS_GUESTS
+        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
+        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NXE);
+        CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LAHF);
+#endif
     }
     else
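As a side note, the features these CPUMSetGuestCpuIdFeature() calls expose for 64-bit guests are all reported in CPUID leaf 0x80000001; a guest (or any user-mode program) can query them roughly as in the hedged sketch below. The bit positions (EDX.11 SYSCALL, EDX.20 NX, EDX.29 long mode, ECX.0 LAHF/SAHF in 64-bit mode) follow the published AMD/Intel CPUID definitions; the sketch again assumes GCC/Clang's <cpuid.h> and is not part of this changeset.

/* Sketch: how a guest would see the extended feature bits enabled above. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned uEAX, uEBX, uECX, uEDX;

    if (!__get_cpuid(0x80000001, &uEAX, &uEBX, &uECX, &uEDX))
        return 1;   /* no extended leaves reported */

    printf("SYSCALL/SYSRET : %s\n", (uEDX & (1u << 11)) ? "yes" : "no");
    printf("NX             : %s\n", (uEDX & (1u << 20)) ? "yes" : "no");
    printf("Long mode      : %s\n", (uEDX & (1u << 29)) ? "yes" : "no");
    printf("LAHF/SAHF (64) : %s\n", (uECX & (1u <<  0)) ? "yes" : "no");
    return 0;
}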
trunk/src/VBox/VMM/TM.cpp
r9148 → r9354

 * Internal Functions *
 *******************************************************************************/
-static bool tmR3HasFixedTSC(void);
-static uint64_t tmR3CalibrateTSC(void);
+static bool tmR3HasFixedTSC(PVM pVM);
+static uint64_t tmR3CalibrateTSC(PVM pVM);
 static DECLCALLBACK(int) tmR3Save(PVM pVM, PSSMHANDLE pSSM);
 static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
…
     {
         if (!pVM->tm.s.fTSCUseRealTSC)
-            pVM->tm.s.fMaybeUseOffsettedHostTSC = tmR3HasFixedTSC();
+            pVM->tm.s.fMaybeUseOffsettedHostTSC = tmR3HasFixedTSC(pVM);
         else
             pVM->tm.s.fMaybeUseOffsettedHostTSC = true;
…
     if (rc == VERR_CFGM_VALUE_NOT_FOUND)
     {
-        pVM->tm.s.cTSCTicksPerSecond = tmR3CalibrateTSC();
+        pVM->tm.s.cTSCTicksPerSecond = tmR3CalibrateTSC(pVM);
         if (    !pVM->tm.s.fTSCUseRealTSC
             &&  pVM->tm.s.cTSCTicksPerSecond >= _4G)
…
  * This isn't currently relevant.
  */
-static bool tmR3HasFixedTSC(void)
+static bool tmR3HasFixedTSC(PVM pVM)
 {
     if (ASMHasCpuId())
     {
         uint32_t uEAX, uEBX, uECX, uEDX;
-        ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
-        if (    uEAX >= 1
-            &&  uEBX == X86_CPUID_VENDOR_AMD_EBX
-            &&  uECX == X86_CPUID_VENDOR_AMD_ECX
-            &&  uEDX == X86_CPUID_VENDOR_AMD_EDX)
+
+        if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_AMD)
         {
             /*
…
         }
-        else if (    uEAX >= 1
-                 &&  uEBX == X86_CPUID_VENDOR_INTEL_EBX
-                 &&  uECX == X86_CPUID_VENDOR_INTEL_ECX
-                 &&  uEDX == X86_CPUID_VENDOR_INTEL_EDX)
+        else if (CPUMGetCPUVendor(pVM) == CPUMCPUVENDOR_INTEL)
         {
             /*
…
  * @returns Number of ticks per second.
  */
-static uint64_t tmR3CalibrateTSC(void)
+static uint64_t tmR3CalibrateTSC(PVM pVM)
 {
     /*
…
     else
     {
-        if (tmR3HasFixedTSC())
+        if (tmR3HasFixedTSC(pVM))
             /* Sleep a bit to get a more reliable CpuHz value. */
             RTThreadSleep(32);
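The body of tmR3CalibrateTSC() itself is not shown in these hunks. As background only, the sketch below illustrates the generic measure-over-a-known-interval way of estimating TSC ticks per second; it is not the actual VirtualBox implementation, the names are invented, and it assumes an x86 GCC/Clang toolchain (__rdtsc from <x86intrin.h>) plus a POSIX monotonic clock.

/* Generic sketch of TSC frequency estimation: read the TSC, wait a known
 * wall-clock interval, read it again and scale to ticks per second. */
#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <x86intrin.h>

static uint64_t EstimateTscHz(void)
{
    struct timespec ts0, ts1;
    struct timespec tsSleep = { 0, 100 * 1000 * 1000 };   /* ~100 ms; longer => steadier */
    uint64_t uTsc0, uTsc1;
    int64_t  cNs;

    clock_gettime(CLOCK_MONOTONIC, &ts0);
    uTsc0 = __rdtsc();

    nanosleep(&tsSleep, NULL);

    clock_gettime(CLOCK_MONOTONIC, &ts1);
    uTsc1 = __rdtsc();

    cNs = (int64_t)(ts1.tv_sec - ts0.tv_sec) * 1000000000 + (ts1.tv_nsec - ts0.tv_nsec);
    return (uTsc1 - uTsc0) * UINT64_C(1000000000) / (uint64_t)cNs;
}

int main(void)
{
    printf("~%llu TSC ticks per second\n", (unsigned long long)EstimateTscHz());
    return 0;
}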
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r9212 → r9354

             pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_APIC;
             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
-                &&  pVM->cpum.s.aGuestCpuIdExt[1].edx)
+                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_APIC;
-            Log(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
+            LogRel(("CPUMSetGuestCpuIdFeature: Enabled APIC\n"));
             break;

         /*
-         * Set the sysenter/sysexit bit in both feature masks.
+         * Set the sysenter/sysexit bit in the standard feature mask.
          * Assumes the caller knows what it's doing! (host must support these)
          */
…
             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                 pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_SEP;
-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
-                &&  pVM->cpum.s.aGuestCpuIdExt[1].edx)
-                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
-            Log(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
+            LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
+            break;
+        }
+
+        /*
+         * Set the syscall/sysret bit in the extended feature mask.
+         * Assumes the caller knows what it's doing! (host must support these)
+         */
+        case CPUMCPUIDFEATURE_SYSCALL:
+        {
+            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
+                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP))
+            {
+                LogRel(("WARNING: Can't turn on SYSCALL/SYSRET when the host doesn't support it!!\n"));
+                return;
+            }
+            /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */
+            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_SEP;
+            LogRel(("CPUMSetGuestCpuIdFeature: Enabled sysenter/exit\n"));
             break;
         }
…
             pVM->cpum.s.aGuestCpuIdStd[1].edx |= X86_CPUID_FEATURE_EDX_PAE;
             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
-                &&  pVM->cpum.s.aGuestCpuIdExt[1].edx)
+                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                 pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_PAE;
-            Log(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
+            LogRel(("CPUMSetGuestCpuIdFeature: Enabled PAE\n"));
             break;
         }
…
                 ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
             {
-                AssertMsgFailed(("ERROR: Can't turn on LONG MODE when the host doesn't support it!!\n"));
+                LogRel(("WARNING: Can't turn on LONG MODE when the host doesn't support it!!\n"));
                 return;
             }

-            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
-                &&  pVM->cpum.s.aGuestCpuIdExt[1].edx)
-                pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
-            Log(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
+            /* Valid for both Intel and AMD. */
+            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_LONG_MODE;
+            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LONG MODE\n"));
+            break;
+        }
+
+        /*
+         * Set the NXE bit in the extended feature mask.
+         * Assumes the caller knows what it's doing! (host must support these)
+         */
+        case CPUMCPUIDFEATURE_NXE:
+        {
+            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
+                ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_NX))
+            {
+                LogRel(("WARNING: Can't turn on NXE when the host doesn't support it!!\n"));
+                return;
+            }
+
+            /* Valid for both Intel and AMD. */
+            pVM->cpum.s.aGuestCpuIdExt[1].edx |= X86_CPUID_AMD_FEATURE_EDX_NX;
+            LogRel(("CPUMSetGuestCpuIdFeature: Enabled NXE\n"));
+            break;
+        }
+
+        case CPUMCPUIDFEATURE_LAHF:
+        {
+            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax < 0x80000001
+                ||  !(ASMCpuId_ECX(0x80000001) & X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF))
+            {
+                LogRel(("WARNING: Can't turn on LAHF/SAHF when the host doesn't support it!!\n"));
+                return;
+            }
+
+            pVM->cpum.s.aGuestCpuIdExt[1].ecx |= X86_CPUID_AMD_FEATURE_ECX_LAHF_SAHF;
+            LogRel(("CPUMSetGuestCpuIdFeature: Enabled LAHF/SAHF\n"));
             break;
         }
…
             if (pVM->cpum.s.aGuestCpuIdStd[0].eax >= 1)
                 pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_APIC;
-            if (pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001)
+            if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
+                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;
             Log(("CPUMSetGuestCpuIdFeature: Disabled APIC\n"));
…
             pVM->cpum.s.aGuestCpuIdStd[1].edx &= ~X86_CPUID_FEATURE_EDX_PAE;
             if (    pVM->cpum.s.aGuestCpuIdExt[0].eax >= 0x80000001
-                &&  pVM->cpum.s.aGuestCpuIdExt[1].edx)
+                &&  pVM->cpum.s.enmCPUVendor == CPUMCPUVENDOR_AMD)
                 pVM->cpum.s.aGuestCpuIdExt[1].edx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;
             LogRel(("CPUMClearGuestCpuIdFeature: Disabled PAE!\n"));
…
 }

+/**
+ * Gets the CPU vendor
+ *
+ * @returns CPU vendor
+ * @param   pVM     The VM handle.
+ */
+CPUMDECL(CPUMCPUVENDOR) CPUMGetCPUVendor(PVM pVM)
+{
+    return pVM->cpum.s.enmCPUVendor;
+}
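The recurring pattern in these hunks is to mirror a standard-leaf feature bit into leaf 0x80000001 only when the guest CPU vendor is AMD, since Intel leaves most of those extended EDX bits reserved. A hypothetical, self-contained sketch of that idea is below; the GUESTCPUID type and SetGuestApicBit() name are invented for illustration and are not VirtualBox APIs.

/* Sketch: mirror the APIC bit into the AMD extended leaf only for AMD guests. */
#include <stdint.h>

#define CPUID_FEATURE_EDX_APIC      (1u << 9)   /* leaf 1, EDX */
#define CPUID_AMD_FEATURE_EDX_APIC  (1u << 9)   /* leaf 0x80000001, EDX (AMD) */

typedef enum { VENDOR_UNKNOWN, VENDOR_INTEL, VENDOR_AMD } cpuvendor_t;

typedef struct GUESTCPUID
{
    uint32_t    uStdMax;        /* highest standard leaf (leaf 0, EAX) */
    uint32_t    uExtMax;        /* highest extended leaf (leaf 0x80000000, EAX) */
    uint32_t    uStd1Edx;       /* leaf 1, EDX */
    uint32_t    uExt1Edx;       /* leaf 0x80000001, EDX */
    cpuvendor_t enmVendor;
} GUESTCPUID;

static void SetGuestApicBit(GUESTCPUID *pCpuId)
{
    if (pCpuId->uStdMax >= 1)
        pCpuId->uStd1Edx |= CPUID_FEATURE_EDX_APIC;

    /* Only AMD defines the mirrored bit in the extended leaf. */
    if (   pCpuId->uExtMax >= 0x80000001
        && pCpuId->enmVendor == VENDOR_AMD)
        pCpuId->uExt1Edx |= CPUID_AMD_FEATURE_EDX_APIC;
}

int main(void)
{
    GUESTCPUID CpuId = { 1, 0x80000008, 0, 0, VENDOR_AMD };
    SetGuestApicBit(&CpuId);
    /* For an AMD guest the bit now appears in both leaves; for Intel only in leaf 1. */
    return (CpuId.uExt1Edx & CPUID_AMD_FEATURE_EDX_APIC) ? 0 : 1;
}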