Changeset 81605 in vbox
- Timestamp:
- Oct 31, 2019 2:29:46 PM (5 years ago)
- Location:
- trunk
- Files:
-
- 23 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/vmm/cpum.h
r81369 r81605 98 98 CPUMCPUVENDOR_CYRIX, 99 99 CPUMCPUVENDOR_SHANGHAI, 100 CPUMCPUVENDOR_HYGON, 100 101 CPUMCPUVENDOR_UNKNOWN, 101 102 /** 32bit hackishness. */ … … 275 276 kCpumMicroarch_Shanghai_Unknown, 276 277 kCpumMicroarch_Shanghai_End, 278 279 kCpumMicroarch_Hygon_First, 280 kCpumMicroarch_Hygon_Dhyana = kCpumMicroarch_Hygon_First, 281 kCpumMicroarch_Hygon_Unknown, 282 kCpumMicroarch_Hygon_End, 277 283 278 284 kCpumMicroarch_Unknown, -
trunk/include/iprt/asm-amd64-x86.h
r81076 r81605 1587 1587 1588 1588 /** 1589 * Tests if it a genuine Hygon CPU based on the ASMCpuId(0) output. 1590 * 1591 * @returns true/false. 1592 * @param uEBX EBX return from ASMCpuId(0) 1593 * @param uECX ECX return from ASMCpuId(0) 1594 * @param uEDX EDX return from ASMCpuId(0) 1595 */ 1596 DECLINLINE(bool) ASMIsHygonCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX) 1597 { 1598 return uEBX == UINT32_C(0x6f677948) 1599 && uECX == UINT32_C(0x656e6975) 1600 && uEDX == UINT32_C(0x6e65476e); 1601 } 1602 1603 1604 /** 1605 * Tests if this is a genuine Hygon CPU. 1606 * 1607 * @returns true/false. 1608 * @remarks ASSUMES that cpuid is supported by the CPU. 1609 */ 1610 DECLINLINE(bool) ASMIsHygonCpu(void) 1611 { 1612 uint32_t uEAX, uEBX, uECX, uEDX; 1613 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX); 1614 return ASMIsHygonCpuEx(uEBX, uECX, uEDX); 1615 } 1616 1617 1618 /** 1589 1619 * Checks whether ASMCpuId_EAX(0x00000000) indicates a valid range. 1590 1620 * -
trunk/include/iprt/x86.h
r81602 r81605 401 401 #define X86_CPUID_VENDOR_SHANGHAI_ECX 0x20206961 /* ai */ 402 402 #define X86_CPUID_VENDOR_SHANGHAI_EDX 0x68676e61 /* angh */ 403 404 #define X86_CPUID_VENDOR_HYGON_EBX 0x6f677948 /* Hygo */ 405 #define X86_CPUID_VENDOR_HYGON_ECX 0x656e6975 /* uine */ 406 #define X86_CPUID_VENDOR_HYGON_EDX 0x6e65476e /* nGen */ 403 407 /** @} */ 404 408 -
trunk/include/iprt/x86.mac
r76886 r81605 91 91 %define X86_CPUID_VENDOR_SHANGHAI_ECX 0x20206961 92 92 %define X86_CPUID_VENDOR_SHANGHAI_EDX 0x68676e61 93 %define X86_CPUID_VENDOR_HYGON_EBX 0x6f677948 94 %define X86_CPUID_VENDOR_HYGON_ECX 0x656e6975 95 %define X86_CPUID_VENDOR_HYGON_EDX 0x6e65476e 93 96 %define X86_CPUID_FEATURE_ECX_SSE3 RT_BIT_32(0) 94 97 %define X86_CPUID_FEATURE_ECX_PCLMUL RT_BIT_32(1) -
trunk/src/VBox/HostDrivers/Support/SUPDrv.cpp
r81304 r81605 4150 4150 4151 4151 /* Check if the vendor is AMD (or compatible). */ 4152 if (ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)) 4152 if ( ASMIsAmdCpuEx(uVendorEbx, uVendorEcx, uVendorEdx) 4153 || ASMIsHygonCpuEx(uVendorEbx, uVendorEcx, uVendorEdx)) 4153 4154 { 4154 4155 uint32_t fExtFeatEcx, uExtMaxId; … … 4512 4513 } 4513 4514 } 4514 else if (ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX)) 4515 else if ( ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX) 4516 || ASMIsHygonCpuEx(uVendorEBX, uVendorECX, uVendorEDX)) 4515 4517 { 4516 4518 /* Not well documented, but at least all AMD64 CPUs support this. */ -
trunk/src/VBox/HostDrivers/Support/SUPDrvGip.cpp
r81106 r81605 1801 1801 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX); 1802 1802 if ( ASMIsValidStdRange(uEAX) 1803 && ASMIsAmdCpuEx(uEBX, uECX, uEDX))1803 && (ASMIsAmdCpuEx(uEBX, uECX, uEDX) || ASMIsHygonCpuEx(uEBX, uECX, uEDX)) ) 1804 1804 { 1805 1805 /* Check for APM support. */ … … 4021 4021 && ASMIsValidStdRange(ASMCpuId_EAX(0)) 4022 4022 && (ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_HTT) 4023 && ( !ASMIsAmdCpu()4023 && ( (!ASMIsAmdCpu() && !ASMIsHygonCpu()) 4024 4024 || ASMGetCpuFamily(u32Tmp = ASMCpuId_EAX(1)) > 0x15 4025 4025 || ( ASMGetCpuFamily(u32Tmp) == 0x15 /* Piledriver+, not bulldozer (FX-4150 didn't like it). */ -
trunk/src/VBox/Main/src-server/HostImpl.cpp
r81305 r81605 347 347 } 348 348 /* AMD-V */ 349 else if (ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX)) 349 else if ( ASMIsAmdCpuEx(uVendorEBX, uVendorECX, uVendorEDX) 350 || ASMIsHygonCpuEx(uVendorEBX, uVendorECX, uVendorEDX)) 350 351 { 351 352 if ( (fExtFeaturesEcx & X86_CPUID_AMD_FEATURE_ECX_SVM) -
trunk/src/VBox/VMM/VMMAll/GIMAll.cpp
r80333 r81605 425 425 { 426 426 case CPUMCPUVENDOR_AMD: 427 case CPUMCPUVENDOR_HYGON: 427 428 { 428 429 if (puDisOpcode) -
trunk/src/VBox/VMM/VMMAll/GIMAllHv.cpp
r80333 r81605 1404 1404 || enmGuestCpuVendor == CPUMCPUVENDOR_SHANGHAI)) 1405 1405 || ( uDisOpcode == OP_VMMCALL 1406 && enmGuestCpuVendor == CPUMCPUVENDOR_AMD)) 1406 && ( enmGuestCpuVendor == CPUMCPUVENDOR_AMD 1407 || enmGuestCpuVendor == CPUMCPUVENDOR_HYGON)) ) 1407 1408 return gimHvHypercall(pVCpu, pCtx); 1408 1409 -
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r81002 r81605 359 359 * Evaluates to true if we're presenting an AMD CPU to the guest. 360 360 */ 361 #define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )361 #define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON ) 362 362 363 363 /** -
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r80333 r81605 723 723 || ASMIsAmdCpuEx(u32EBX, u32ECX, u32EDX) 724 724 || ASMIsViaCentaurCpuEx(u32EBX, u32ECX, u32EDX) 725 || ASMIsShanghaiCpuEx(u32EBX, u32ECX, u32EDX)) 725 || ASMIsShanghaiCpuEx(u32EBX, u32ECX, u32EDX) 726 || ASMIsHygonCpuEx(u32EBX, u32ECX, u32EDX)) 726 727 && ASMIsValidStdRange(uMaxLeaf)) 727 728 { -
trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp
r80346 r81605 1529 1529 # endif 1530 1530 # ifdef LOG_ENABLED 1531 if (enmCpuVendor != CPUMCPUVENDOR_AMD )1531 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON) 1532 1532 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl; 1533 1533 # endif … … 2106 2106 # endif 2107 2107 # ifdef LOG_ENABLED 2108 if (enmCpuVendor != CPUMCPUVENDOR_AMD )2108 if (enmCpuVendor != CPUMCPUVENDOR_AMD && enmCpuVendor != CPUMCPUVENDOR_HYGON) 2109 2109 { 2110 2110 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r81292 r81605 980 980 uint32_t const u32Family = u32CpuVersion >> 8; 981 981 if ( u32Family >= 6 /* K7 and higher */ 982 && ASMIsAmdCpu())982 && (ASMIsAmdCpu() || ASMIsHygonCpu()) ) 983 983 { 984 984 uint32_t cExt = ASMCpuId_EAX(0x80000000); -
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r81249 r81605 483 483 } 484 484 485 if (enmVendor == CPUMCPUVENDOR_HYGON) 486 { 487 switch (bFamily) 488 { 489 case 0x18: 490 return kCpumMicroarch_Hygon_Dhyana; 491 default: 492 break; 493 } 494 return kCpumMicroarch_Hygon_Unknown; 495 } 496 485 497 return kCpumMicroarch_Unknown; 486 498 } … … 624 636 CASE_RET_STR(kCpumMicroarch_NEC_V20); 625 637 CASE_RET_STR(kCpumMicroarch_NEC_V30); 638 639 CASE_RET_STR(kCpumMicroarch_Hygon_Dhyana); 640 CASE_RET_STR(kCpumMicroarch_Hygon_Unknown); 626 641 627 642 CASE_RET_STR(kCpumMicroarch_Unknown); … … 646 661 case kCpumMicroarch_NEC_End: 647 662 case kCpumMicroarch_Shanghai_End: 663 case kCpumMicroarch_Hygon_End: 648 664 case kCpumMicroarch_32BitHack: 649 665 break; … … 1314 1330 || uEbx 1315 1331 || uEdx 1316 || ASMIsAmdCpuEx((*ppaLeaves)[0].uEbx, (*ppaLeaves)[0].uEcx, (*ppaLeaves)[0].uEdx)) ) 1332 || ASMIsAmdCpuEx((*ppaLeaves)[0].uEbx, (*ppaLeaves)[0].uEcx, (*ppaLeaves)[0].uEdx) 1333 || ASMIsHygonCpuEx((*ppaLeaves)[0].uEbx, (*ppaLeaves)[0].uEcx, (*ppaLeaves)[0].uEdx)) ) 1317 1334 fFlags |= CPUMCPUIDLEAF_F_CONTAINS_APIC_ID; 1318 1335 … … 1322 1339 else if ( uLeaf == UINT32_C(0x80000001) 1323 1340 && ( (uEdx & X86_CPUID_AMD_FEATURE_EDX_APIC) 1324 || ASMIsAmdCpuEx((*ppaLeaves)[0].uEbx, (*ppaLeaves)[0].uEcx, (*ppaLeaves)[0].uEdx)) ) 1341 || ASMIsAmdCpuEx((*ppaLeaves)[0].uEbx, (*ppaLeaves)[0].uEcx, (*ppaLeaves)[0].uEdx) 1342 || ASMIsHygonCpuEx((*ppaLeaves)[0].uEbx, (*ppaLeaves)[0].uEcx, (*ppaLeaves)[0].uEdx)) ) 1325 1343 fFlags |= CPUMCPUIDLEAF_F_CONTAINS_APIC; 1326 1344 … … 1633 1651 return CPUMCPUVENDOR_CYRIX; 1634 1652 1653 if (ASMIsHygonCpuEx(uEBX, uECX, uEDX)) 1654 return CPUMCPUVENDOR_HYGON; 1655 1635 1656 /* "Geode by NSC", example: family 5, model 9. 
*/ 1636 1657 … … 1660 1681 case CPUMCPUVENDOR_CYRIX: return "CYRIX"; 1661 1682 case CPUMCPUVENDOR_SHANGHAI: return "SHANGHAI"; 1683 case CPUMCPUVENDOR_HYGON: return "HYGON"; 1662 1684 case CPUMCPUVENDOR_UNKNOWN: return "UNKNOWN"; 1663 1685 … … 1927 1949 1928 1950 if ( pExtLeaf 1929 && pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD) 1951 && ( pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD 1952 || pFeatures->enmCpuVendor == CPUMCPUVENDOR_HYGON)) 1930 1953 { 1931 1954 /* AMD features. */ … … 1970 1993 pFeatures->fLeakyFxSR = pExtLeaf 1971 1994 && (pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_FFXSR) 1972 && pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD 1995 && ( pFeatures->enmCpuVendor == CPUMCPUVENDOR_AMD 1996 || pFeatures->enmCpuVendor == CPUMCPUVENDOR_HYGON) 1973 1997 && pFeatures->uFamily >= 6 /* K7 and up */; 1974 1998 … … 2939 2963 * VME bug was fixed in AGESA 1.0.0.6, microcode patch level 8001126. 2940 2964 */ 2941 if ( (pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_AMD_Zen_Ryzen) 2965 if ( ( pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_AMD_Zen_Ryzen 2966 || pVM->cpum.s.GuestFeatures.enmMicroarch == kCpumMicroarch_Hygon_Dhyana) 2942 2967 && uMicrocodeRev < 0x8001126 2943 2968 && !pConfig->fForceVme) … … 3066 3091 #ifdef VBOX_WITH_MULTI_CORE 3067 3092 if ( pVM->cCpus > 1 3068 && pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 3093 && ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD 3094 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)) 3069 3095 pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_CMPL; /* CmpLegacy */ 3070 3096 #endif … … 3660 3686 { 3661 3687 pCurLeaf->uEax = pCurLeaf->uEbx = pCurLeaf->uEcx = 0; 3662 if (pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 3688 if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD 3689 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON) 3663 3690 { 3664 3691 /* … … 3715 3742 #ifdef VBOX_WITH_MULTI_CORE 3716 3743 if ( pVM->cCpus > 1 3717 && 
pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 3744 && ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD 3745 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)) 3718 3746 pCurLeaf->uEcx |= (pVM->cCpus - 1) & UINT32_C(0xff); 3719 3747 #endif … … 3732 3760 * EDX - SVM Feature identification. 3733 3761 */ 3734 if (pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 3762 if ( pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD 3763 || pCpum->GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON) 3735 3764 { 3736 3765 pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0); … … 3843 3872 { 3844 3873 Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_AMD); 3874 Assert(pCpum->GuestFeatures.enmCpuVendor != CPUMCPUVENDOR_HYGON); 3845 3875 pCurLeaf->uEbx = 0; /* Reserved. */ 3846 3876 pCurLeaf->uEcx = 0; /* Reserved. */ … … 4122 4152 bool fQueryNestedHwvirt = false; 4123 4153 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 4124 fQueryNestedHwvirt |= RT_BOOL(pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD); 4154 fQueryNestedHwvirt |= RT_BOOL( pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD 4155 || pVM->cpum.s.HostFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON); 4125 4156 #endif 4126 4157 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX … … 4717 4748 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001)); 4718 4749 if ( pLeaf 4719 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 4750 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD 4751 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)) 4720 4752 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE; 4721 4753 … … 4801 4833 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001)); 4802 4834 if ( pLeaf 4803 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 4835 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD 4836 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)) 
4804 4837 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT; 4805 4838 … … 4941 4974 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Speculation Control.\n")); 4942 4975 } 4943 else if (pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 4976 else if ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD 4977 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON) 4944 4978 { 4945 4979 /* The precise details of AMD's implementation are not yet clear. */ … … 5040 5074 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001)); 5041 5075 if ( pLeaf 5042 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 5076 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD 5077 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)) 5043 5078 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE; 5044 5079 … … 5054 5089 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001)); 5055 5090 if ( pLeaf 5056 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 5091 && ( pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD 5092 || pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_HYGON)) 5057 5093 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT; 5058 5094 … … 5663 5699 { 5664 5700 /** @todo deal with no 0x80000001 on the host. 
*/ 5665 bool const fHostAmd = ASMIsAmdCpuEx(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx); 5666 bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx); 5701 bool const fHostAmd = ASMIsAmdCpuEx(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx) 5702 || ASMIsHygonCpuEx(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx); 5703 bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx) 5704 || ASMIsHygonCpuEx(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx); 5667 5705 5668 5706 /* CPUID(0x80000001).ecx */ -
trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp
r80333 r81605 217 217 #include "cpus/ZHAOXIN_KaiXian_KX_U5581_1_8GHz.h" 218 218 219 #include "cpus/Hygon_C86_7185_32_core.h" 219 220 220 221 … … 318 319 #ifdef VBOX_CPUDB_NEC_V20_h 319 320 &g_Entry_NEC_V20, 321 #endif 322 323 #ifdef VBOX_CPUDB_Hygon_C86_7185_32_core_h 324 &g_Entry_Hygon_C86_7185_32_core, 320 325 #endif 321 326 }; -
trunk/src/VBox/VMM/VMMR3/NEMR3.cpp
r80333 r81605 196 196 */ 197 197 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP); 198 if (CPUMGetGuestCpuVendor(pVM) == CPUMCPUVENDOR_AMD) 198 if ( CPUMGetGuestCpuVendor(pVM) == CPUMCPUVENDOR_AMD 199 || CPUMGetGuestCpuVendor(pVM) == CPUMCPUVENDOR_HYGON) 199 200 CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); /* 64 bits only on Intel CPUs */ 200 201 if (pVM->nem.s.fAllow64BitGuests) -
trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp
r76886 r81605 525 525 /* Ditto for 0x0000002a (EBL_CR_POWERON) and 0x00000277 (MSR_IA32_CR_PAT) on Intel (Atom 330). */ 526 526 /* And more of the same for 0x280 on Intel Pentium III. */ 527 if ( ((uMsr >= 0xc0011012 && uMsr <= 0xc0011013) && g_enmVendor == CPUMCPUVENDOR_AMD)527 if ( ((uMsr >= 0xc0011012 && uMsr <= 0xc0011013) && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 528 528 || ( (uMsr == 0x2a || uMsr == 0x277) 529 529 && g_enmVendor == CPUMCPUVENDOR_INTEL … … 696 696 case 0x00000089: return "BBL_CR_D1"; 697 697 case 0x0000008a: return "BBL_CR_D2"; 698 case 0x0000008b: return g_enmVendor == CPUMCPUVENDOR_AMD? "AMD_K8_PATCH_LEVEL"698 case 0x0000008b: return (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON) ? "AMD_K8_PATCH_LEVEL" 699 699 : g_fIntelNetBurst ? "IA32_BIOS_SIGN_ID" : "BBL_CR_D3|BIOS_SIGN"; 700 700 case 0x0000008c: return "P6_UNK_0000_008c"; /* P6_M_Dothan. */ … … 1946 1946 return "IntelLastBranchFromToN"; 1947 1947 1948 case 0x0000008b: return g_enmVendor == CPUMCPUVENDOR_AMD? "AmdK8PatchLevel" : "Ia32BiosSignId";1948 case 0x0000008b: return (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON) ? "AmdK8PatchLevel" : "Ia32BiosSignId"; 1949 1949 case 0x0000009b: return "Ia32SmmMonitorCtl"; 1950 1950 … … 3566 3566 || ( g_enmVendor == CPUMCPUVENDOR_INTEL 3567 3567 ? g_enmMicroarch >= kCpumMicroarch_Intel_Core7_First 3568 : g_enmVendor == CPUMCPUVENDOR_AMD3568 : (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON) 3569 3569 ? g_enmMicroarch != kCpumMicroarch_AMD_K8_90nm_AMDV 3570 3570 : true) ) … … 4206 4206 * This shall be sorted by uMsr as much as possible. 
4207 4207 */ 4208 else if (uMsr == 0x00000000 && g_enmVendor == CPUMCPUVENDOR_AMD&& g_enmMicroarch >= kCpumMicroarch_AMD_K8_First)4208 else if (uMsr == 0x00000000 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON) && g_enmMicroarch >= kCpumMicroarch_AMD_K8_First) 4209 4209 rc = printMsrAlias(uMsr, 0x00000402, NULL); 4210 else if (uMsr == 0x00000001 && g_enmVendor == CPUMCPUVENDOR_AMD&& g_enmMicroarch >= kCpumMicroarch_AMD_K8_First)4210 else if (uMsr == 0x00000001 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON) && g_enmMicroarch >= kCpumMicroarch_AMD_K8_First) 4211 4211 rc = printMsrAlias(uMsr, 0x00000401, NULL); /** @todo not 101% correct on Fam15h and later, 0xc0010015[McstatusWrEn] effect differs. */ 4212 4212 else if (uMsr == 0x0000001b) … … 4279 4279 else if (uMsr >= 0xc0000408 && uMsr <= 0xc000040f) 4280 4280 rc = reportMsr_AmdFam10hMc4MiscN(&paMsrs[i], cMsrs - i, &i); 4281 else if (uMsr == 0xc0010000 && g_enmVendor == CPUMCPUVENDOR_AMD)4281 else if (uMsr == 0xc0010000 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4282 4282 rc = reportMsr_AmdK8PerfCtlN(&paMsrs[i], cMsrs - i, &i); 4283 else if (uMsr == 0xc0010004 && g_enmVendor == CPUMCPUVENDOR_AMD)4283 else if (uMsr == 0xc0010004 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4284 4284 rc = reportMsr_AmdK8PerfCtrN(&paMsrs[i], cMsrs - i, &i); 4285 else if (uMsr == 0xc0010010 && g_enmVendor == CPUMCPUVENDOR_AMD)4285 else if (uMsr == 0xc0010010 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4286 4286 rc = reportMsr_AmdK8SysCfg(uMsr, uValue); 4287 else if (uMsr == 0xc0010015 && g_enmVendor == CPUMCPUVENDOR_AMD)4287 else if (uMsr == 0xc0010015 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4288 4288 rc = reportMsr_AmdK8HwCr(uMsr, uValue); 4289 else if ((uMsr == 0xc0010016 || uMsr == 0xc0010018) && g_enmVendor == CPUMCPUVENDOR_AMD)4289 else if 
((uMsr == 0xc0010016 || uMsr == 0xc0010018) && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4290 4290 rc = reportMsr_AmdK8IorrBaseN(uMsr, uValue); 4291 else if ((uMsr == 0xc0010017 || uMsr == 0xc0010019) && g_enmVendor == CPUMCPUVENDOR_AMD)4291 else if ((uMsr == 0xc0010017 || uMsr == 0xc0010019) && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4292 4292 rc = reportMsr_AmdK8IorrMaskN(uMsr, uValue); 4293 else if ((uMsr == 0xc001001a || uMsr == 0xc001001d) && g_enmVendor == CPUMCPUVENDOR_AMD)4293 else if ((uMsr == 0xc001001a || uMsr == 0xc001001d) && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4294 4294 rc = reportMsr_AmdK8TopMemN(uMsr, uValue); 4295 else if (uMsr == 0xc0010030 && g_enmVendor == CPUMCPUVENDOR_AMD)4295 else if (uMsr == 0xc0010030 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4296 4296 rc = reportMsr_GenRangeFunction(&paMsrs[i], cMsrs - i, 6, "AmdK8CpuNameN", &i); 4297 else if (uMsr >= 0xc0010044 && uMsr <= 0xc001004a && g_enmVendor == CPUMCPUVENDOR_AMD)4297 else if (uMsr >= 0xc0010044 && uMsr <= 0xc001004a && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4298 4298 rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 7, "AmdK8McCtlMaskN", 0xc0010044, true /*fEarlyEndOk*/, false, 0, &i); 4299 else if (uMsr == 0xc0010050 && g_enmVendor == CPUMCPUVENDOR_AMD)4299 else if (uMsr == 0xc0010050 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4300 4300 rc = reportMsr_GenRangeFunction(&paMsrs[i], cMsrs - i, 4, "AmdK8SmiOnIoTrapN", &i); 4301 else if (uMsr == 0xc0010064 && g_enmVendor == CPUMCPUVENDOR_AMD)4301 else if (uMsr == 0xc0010064 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4302 4302 rc = reportMsr_AmdFam10hPStateN(&paMsrs[i], cMsrs - i, &i); 4303 else if (uMsr == 0xc0010070 && g_enmVendor == CPUMCPUVENDOR_AMD)4303 else if (uMsr == 0xc0010070 && 
(g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4304 4304 rc = reportMsr_AmdFam10hCofVidControl(uMsr, uValue); 4305 else if ((uMsr == 0xc0010118 || uMsr == 0xc0010119) && getMsrFnName(uMsr, NULL) && g_enmVendor == CPUMCPUVENDOR_AMD)4305 else if ((uMsr == 0xc0010118 || uMsr == 0xc0010119) && getMsrFnName(uMsr, NULL) && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4306 4306 rc = printMsrFunction(uMsr, NULL, NULL, annotateValue(uValue)); /* RAZ, write key. */ 4307 else if (uMsr == 0xc0010200 && g_enmVendor == CPUMCPUVENDOR_AMD)4307 else if (uMsr == 0xc0010200 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4308 4308 rc = reportMsr_AmdGenPerfMixedRange(&paMsrs[i], cMsrs - i, 12, &i); 4309 else if (uMsr == 0xc0010230 && g_enmVendor == CPUMCPUVENDOR_AMD)4309 else if (uMsr == 0xc0010230 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4310 4310 rc = reportMsr_AmdGenPerfMixedRange(&paMsrs[i], cMsrs - i, 8, &i); 4311 else if (uMsr == 0xc0010240 && g_enmVendor == CPUMCPUVENDOR_AMD)4311 else if (uMsr == 0xc0010240 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4312 4312 rc = reportMsr_AmdGenPerfMixedRange(&paMsrs[i], cMsrs - i, 8, &i); 4313 else if (uMsr == 0xc0011019 && g_enmMicroarch >= kCpumMicroarch_AMD_15h_Piledriver && g_enmVendor == CPUMCPUVENDOR_AMD)4313 else if (uMsr == 0xc0011019 && g_enmMicroarch >= kCpumMicroarch_AMD_15h_Piledriver && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4314 4314 rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 3, "AmdK7DrXAddrMaskN", 0xc0011019 - 1, 4315 4315 false /*fEarlyEndOk*/, false /*fNoIgnMask*/, 0, &i); 4316 else if (uMsr == 0xc0011021 && g_enmVendor == CPUMCPUVENDOR_AMD)4316 else if (uMsr == 0xc0011021 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4317 4317 rc = reportMsr_AmdK7InstrCacheCfg(uMsr, uValue); 4318 4318 else if 
(uMsr == 0xc0011023 && CPUMMICROARCH_IS_AMD_FAM_15H(g_enmMicroarch)) 4319 4319 rc = reportMsr_AmdFam15hCombUnitCfg(uMsr, uValue); 4320 else if (uMsr == 0xc0011027 && g_enmVendor == CPUMCPUVENDOR_AMD)4320 else if (uMsr == 0xc0011027 && (g_enmVendor == CPUMCPUVENDOR_AMD || g_enmVendor == CPUMCPUVENDOR_HYGON)) 4321 4321 rc = reportMsr_GenRangeFunctionEx(&paMsrs[i], cMsrs - i, 1, "AmdK7DrXAddrMaskN", 0xc0011027, 4322 4322 false /*fEarlyEndOk*/, false /*fNoIgnMask*/, 0, &i); … … 4473 4473 " * MSR ranges for %s.\n" 4474 4474 " */\n" 4475 "static CPUMMSRRANGE const g_aMsrRanges_%s[] = 4475 "static CPUMMSRRANGE const g_aMsrRanges_%s[] =\n{\n", 4476 4476 pszCpuDesc, 4477 4477 pszNameC); … … 4569 4569 case CPUMCPUVENDOR_CYRIX: return "Cyrix"; 4570 4570 case CPUMCPUVENDOR_SHANGHAI: return "Shanghai"; 4571 case CPUMCPUVENDOR_HYGON: return "Hygon"; 4571 4572 case CPUMCPUVENDOR_INVALID: 4572 4573 case CPUMCPUVENDOR_UNKNOWN: -
trunk/src/VBox/ValidationKit/bootsectors/bs3-cpu-decoding-1.c32
r76553 r81605 882 882 bool fIsAmd = false; 883 883 if (g_uBs3CpuDetected & BS3CPU_F_CPUID) 884 fIsAmd = ASMIsAmdCpu() ;884 fIsAmd = ASMIsAmdCpu() || ASMIsHygonCpu(); 885 885 Bs3TestPrintf("fIsAmd=%d\n", fIsAmd); 886 886 -
trunk/src/VBox/ValidationKit/bootsectors/bs3-cpu-generated-1-template.c
r76886 r81605 4748 4748 CASE_PRED(BS3CG1PRED_VENDOR_VIA, pThis->bCpuVendor == BS3CPUVENDOR_VIA); 4749 4749 CASE_PRED(BS3CG1PRED_VENDOR_SHANGHAI, pThis->bCpuVendor == BS3CPUVENDOR_SHANGHAI); 4750 CASE_PRED(BS3CG1PRED_VENDOR_HYGON, pThis->bCpuVendor == BS3CPUVENDOR_HYGON); 4750 4751 4751 4752 #undef CASE_PRED -
trunk/src/VBox/ValidationKit/bootsectors/bs3-cpu-generated-1.h
r76886 r81605 776 776 BS3CG1PRED_VENDOR_VIA, 777 777 BS3CG1PRED_VENDOR_SHANGHAI, 778 BS3CG1PRED_VENDOR_HYGON, 778 779 779 780 BS3CG1PRED_END -
trunk/src/VBox/ValidationKit/bootsectors/bs3-cpu-weird-1-x0.c
r76553 r81605 324 324 Bs3TrapSetJmpAndRestore(&Ctx, &TrapCtx); 325 325 Bs3RegSetDr7(0); 326 if (g_enmCpuVendor == BS3CPUVENDOR_AMD )326 if (g_enmCpuVendor == BS3CPUVENDOR_AMD || g_enmCpuVendor == BS3CPUVENDOR_HYGON) 327 327 bs3CpuWeird1_CompareDbgInhibitRingXfer(&TrapCtx, &Ctx, X86_XCPT_DB, offTestLabel, cbSpAdjust, 328 328 X86_DR6_INIT_VAL | X86_DR6_B0, cbIretFrameInt, uHandlerRspInt); … … 341 341 342 342 Bs3TrapSetJmpAndRestore(&Ctx, &TrapCtx); 343 if (g_enmCpuVendor == BS3CPUVENDOR_AMD )343 if (g_enmCpuVendor == BS3CPUVENDOR_AMD || g_enmCpuVendor == BS3CPUVENDOR_HYGON) 344 344 bs3CpuWeird1_CompareDbgInhibitRingXfer(&TrapCtx, &Ctx, X86_XCPT_DB, offTestLabel, cbSpAdjust, 345 345 X86_DR6_INIT_VAL | X86_DR6_B0, cbIretFrameInt, uHandlerRspInt); -
trunk/src/VBox/ValidationKit/bootsectors/bs3kit/bs3-cmn-GetCpuVendor.c
r76886 r81605 45 45 if (ASMIsShanghaiCpuEx(uEbx, uEcx, uEdx)) 46 46 return BS3CPUVENDOR_SHANGHAI; 47 if (ASMIsHygonCpuEx(uEbx, uEcx, uEdx)) 48 return BS3CPUVENDOR_HYGON; 47 49 return BS3CPUVENDOR_UNKNOWN; 48 50 } -
trunk/src/VBox/ValidationKit/bootsectors/bs3kit/bs3kit.h
r76886 r81605 1538 1538 BS3CPUVENDOR_CYRIX, 1539 1539 BS3CPUVENDOR_SHANGHAI, 1540 BS3CPUVENDOR_HYGON, 1540 1541 BS3CPUVENDOR_UNKNOWN, 1541 1542 BS3CPUVENDOR_END
Note:
See TracChangeset
for help on using the changeset viewer.