Changeset 54714 in vbox
- Timestamp:
- Mar 11, 2015 2:00:23 PM (10 years ago)
- svn:sync-xref-src-repo-rev:
- 98878
- Location:
- trunk
- Files:
-
- 16 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/include/VBox/vmm/cpum.h
r54673 r54714 272 272 /** 273 273 * CPUID leaf. 274 * 275 * @remarks This structure is used by the patch manager and is therefore 276 * more or less set in stone. 274 277 */ 275 278 typedef struct CPUMCPUIDLEAF … … 294 297 uint32_t fFlags; 295 298 } CPUMCPUIDLEAF; 299 AssertCompileSize(CPUMCPUIDLEAF, 32); 296 300 /** Pointer to a CPUID leaf. */ 297 301 typedef CPUMCPUIDLEAF *PCPUMCPUIDLEAF; … … 309 313 /** 310 314 * Method used to deal with unknown CPUID leafs. 315 * @remarks Used in patch code. 311 316 */ 312 317 typedef enum CPUMUKNOWNCPUID … … 1271 1276 1272 1277 # if defined(VBOX_WITH_RAW_MODE) || defined(DOXYGEN_RUNNING) 1273 /** @name APIs for Patch Manager CPUID legacy tables1278 /** @name APIs for the CPUID raw-mode patch. 1274 1279 * @{ */ 1275 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM); 1276 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM); 1277 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM); 1278 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM); 1279 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM); 1280 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM); 1281 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM); 1280 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM); 1281 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayRCPtr(PVM pVM); 1282 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(PVM pVM); 1283 VMMR3_INT_DECL(CPUMUKNOWNCPUID) CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(PVM pVM); 1284 /* Legacy: */ 1285 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM); 1286 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM); 1287 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM); 1288 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM 
pVM); 1289 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM); 1290 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM); 1282 1291 /** @} */ 1283 1292 # endif -
trunk/include/VBox/vmm/cpum.mac
r43657 r54714 4 4 5 5 ; 6 ; Copyright (C) 2006-201 2Oracle Corporation6 ; Copyright (C) 2006-2015 Oracle Corporation 7 7 ; 8 8 ; This file is part of VirtualBox Open Source Edition (OSE), as … … 26 26 %ifndef ___VBox_vmm_cpum_mac__ 27 27 %define ___VBox_vmm_cpum_mac__ 28 29 %include "iprt/asmdefs.mac" 30 31 ;; 32 ; CPUID leaf. 33 ; @remarks This structure is used by the patch manager and can only be extended 34 ; by adding to the end of it. 35 struc CPUMCPUIDLEAF 36 .uLeaf resd 1 37 .uSubLeaf resd 1 38 .fSubLeafMask resd 1 39 .uEax resd 1 40 .uEbx resd 1 41 .uEcx resd 1 42 .uEdx resd 1 43 .fFlags resd 1 44 endstruc 45 %define CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED RT_BIT_32(0) 46 47 ;; 48 ; For the default CPUID leaf value. 49 ; @remarks This is used by the patch manager and cannot be modified in any way. 50 struc CPUMCPUID 51 .uEax resd 1 52 .uEbx resd 1 53 .uEcx resd 1 54 .uEdx resd 1 55 endstruc 56 57 58 ;; @name Method used to deal with unknown CPUID leafs. 59 ;; @{ 60 %define CPUMUKNOWNCPUID_DEFAULTS 1 61 %define CPUMUKNOWNCPUID_LAST_STD_LEAF 2 62 %define CPUMUKNOWNCPUID_LAST_STD_LEAF_WITH_ECX 3 63 %define CPUMUKNOWNCPUID_PASSTHRU 4 64 ;; @} 65 28 66 29 67 ;; -
trunk/include/VBox/vmm/cpumctx.h
r48371 r54714 4 4 5 5 /* 6 * Copyright (C) 2006-201 2Oracle Corporation6 * Copyright (C) 2006-2015 Oracle Corporation 7 7 * 8 8 * This file is part of VirtualBox Open Source Edition (OSE), as … … 468 468 typedef struct CPUMCPUID 469 469 { 470 uint32_t eax;471 uint32_t ebx;472 uint32_t ecx;473 uint32_t edx;470 uint32_t uEax; 471 uint32_t uEbx; 472 uint32_t uEcx; 473 uint32_t uEdx; 474 474 } CPUMCPUID; 475 475 /** Pointer to a CPUID leaf. */ -
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
r54674 r54714 1328 1328 PVM pVM = pVCpu->CTX_SUFF(pVM); 1329 1329 uint64_t const uOldEfer = pVCpu->cpum.s.Guest.msrEFER; 1330 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0]. eax >= 0x800000011331 ? pVM->cpum.s.aGuestCpuIdPatmExt[1]. edx1330 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax >= 0x80000001 1331 ? pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx 1332 1332 : 0; 1333 1333 uint64_t fMask = 0; -
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r54674 r54714 1179 1179 if (uSubLeaf < paLeaves[i].uSubLeaf) 1180 1180 while ( i > 0 1181 && uLeaf == paLeaves[i ].uLeaf1182 && uSubLeaf < paLeaves[i].uSubLeaf)1181 && uLeaf == paLeaves[i - 1].uLeaf 1182 && uSubLeaf <= paLeaves[i - 1].uSubLeaf) 1183 1183 i--; 1184 1184 else … … 1217 1217 pCpuId = &pVM->cpum.s.aGuestCpuIdPatmExt[iLeaf - UINT32_C(0x80000000)]; 1218 1218 else if ( iLeaf - UINT32_C(0x40000000) < 0x100 /** @todo Fix this later: Hyper-V says 0x400000FF is the last valid leaf. */ 1219 && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdPatmStd[1]. ecx & X86_CPUID_FEATURE_ECX_HVP)) /* Only report if HVP bit set. */1219 && (pVCpu->CTX_SUFF(pVM)->cpum.s.aGuestCpuIdPatmStd[1].uEcx & X86_CPUID_FEATURE_ECX_HVP)) /* Only report if HVP bit set. */ 1220 1220 { 1221 1221 PCPUMCPUIDLEAF pHyperLeaf = cpumCpuIdGetLeaf(pVM, iLeaf, 0 /* uSubLeaf */); … … 1240 1240 uint32_t cCurrentCacheIndex = *pEcx; 1241 1241 1242 *pEax = pCpuId-> eax;1243 *pEbx = pCpuId-> ebx;1244 *pEcx = pCpuId-> ecx;1245 *pEdx = pCpuId-> edx;1242 *pEax = pCpuId->uEax; 1243 *pEbx = pCpuId->uEbx; 1244 *pEcx = pCpuId->uEcx; 1245 *pEdx = pCpuId->uEdx; 1246 1246 1247 1247 if ( iLeaf == 1) … … 1328 1328 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 1329 1329 if (pLeaf) 1330 pVM->cpum.s.aGuestCpuIdPatmStd[1]. edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC;1330 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_APIC; 1331 1331 1332 1332 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 1333 1333 if ( pLeaf 1334 1334 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 1335 pVM->cpum.s.aGuestCpuIdPatmExt[1]. edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC;1335 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_APIC; 1336 1336 1337 1337 pVM->cpum.s.GuestFeatures.fApic = 1; … … 1345 1345 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 1346 1346 if (pLeaf) 1347 pVM->cpum.s.aGuestCpuIdPatmStd[1]. 
ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC;1347 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_X2APIC; 1348 1348 pVM->cpum.s.GuestFeatures.fX2Apic = 1; 1349 1349 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled x2APIC\n")); … … 1363 1363 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 1364 1364 if (pLeaf) 1365 pVM->cpum.s.aGuestCpuIdPatmStd[1]. edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP;1365 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_SEP; 1366 1366 pVM->cpum.s.GuestFeatures.fSysEnter = 1; 1367 1367 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSENTER/EXIT\n")); … … 1390 1390 1391 1391 /* Valid for both Intel and AMD CPUs, although only in 64 bits mode for Intel. */ 1392 pVM->cpum.s.aGuestCpuIdPatmExt[1]. edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL;1392 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_SYSCALL; 1393 1393 pVM->cpum.s.GuestFeatures.fSysCall = 1; 1394 1394 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled SYSCALL/RET\n")); … … 1408 1408 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 1409 1409 if (pLeaf) 1410 pVM->cpum.s.aGuestCpuIdPatmStd[1]. edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE;1410 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAE; 1411 1411 1412 1412 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 1413 1413 if ( pLeaf 1414 1414 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 1415 pVM->cpum.s.aGuestCpuIdPatmExt[1]. edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE;1415 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAE; 1416 1416 1417 1417 pVM->cpum.s.GuestFeatures.fPae = 1; … … 1433 1433 1434 1434 /* Valid for both Intel and AMD. */ 1435 pVM->cpum.s.aGuestCpuIdPatmExt[1]. 
edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;1435 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; 1436 1436 pVM->cpum.s.GuestFeatures.fLongMode = 1; 1437 1437 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LONG MODE\n")); … … 1452 1452 1453 1453 /* Valid for both Intel and AMD. */ 1454 pVM->cpum.s.aGuestCpuIdPatmExt[1]. edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX;1454 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_NX; 1455 1455 pVM->cpum.s.GuestFeatures.fNoExecute = 1; 1456 1456 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled NX\n")); … … 1472 1472 1473 1473 /* Valid for both Intel and AMD. */ 1474 pVM->cpum.s.aGuestCpuIdPatmExt[1]. ecx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;1474 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx |= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; 1475 1475 pVM->cpum.s.GuestFeatures.fLahfSahf = 1; 1476 1476 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled LAHF/SAHF\n")); … … 1485 1485 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 1486 1486 if (pLeaf) 1487 pVM->cpum.s.aGuestCpuIdPatmStd[1]. edx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT;1487 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx |= X86_CPUID_FEATURE_EDX_PAT; 1488 1488 1489 1489 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 1490 1490 if ( pLeaf 1491 1491 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 1492 pVM->cpum.s.aGuestCpuIdPatmExt[1]. edx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT;1492 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_PAT; 1493 1493 1494 1494 pVM->cpum.s.GuestFeatures.fPat = 1; … … 1512 1512 1513 1513 /* Valid for both Intel and AMD. */ 1514 pVM->cpum.s.aGuestCpuIdPatmExt[1]. 
edx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP;1514 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP; 1515 1515 pVM->cpum.s.HostFeatures.fRdTscP = 1; 1516 1516 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n")); … … 1523 1523 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 1524 1524 if (pLeaf) 1525 pVM->cpum.s.aGuestCpuIdPatmStd[1]. ecx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP;1525 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx |= X86_CPUID_FEATURE_ECX_HVP; 1526 1526 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 1; 1527 1527 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled Hypervisor Present bit\n")); … … 1542 1542 1543 1543 /* Valid for both Intel and AMD. */ 1544 pVM->cpum.s.aGuestCpuIdPatmStd[5]. ecx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0;1544 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx |= X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0; 1545 1545 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 1; 1546 1546 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled MWAIT Extensions.\n")); … … 1607 1607 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 1608 1608 if (pLeaf) 1609 pVM->cpum.s.aGuestCpuIdPatmStd[1]. edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC;1609 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_APIC; 1610 1610 1611 1611 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 1612 1612 if ( pLeaf 1613 1613 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 1614 pVM->cpum.s.aGuestCpuIdPatmExt[1]. edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC;1614 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_APIC; 1615 1615 1616 1616 pVM->cpum.s.GuestFeatures.fApic = 0; … … 1621 1621 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 1622 1622 if (pLeaf) 1623 pVM->cpum.s.aGuestCpuIdPatmStd[1]. 
ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC;1623 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_X2APIC; 1624 1624 pVM->cpum.s.GuestFeatures.fX2Apic = 0; 1625 1625 Log(("CPUM: ClearGuestCpuIdFeature: Disabled x2APIC\n")); … … 1629 1629 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 1630 1630 if (pLeaf) 1631 pVM->cpum.s.aGuestCpuIdPatmStd[1]. edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE;1631 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAE; 1632 1632 1633 1633 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 1634 1634 if ( pLeaf 1635 1635 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 1636 pVM->cpum.s.aGuestCpuIdPatmExt[1]. edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE;1636 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAE; 1637 1637 1638 1638 pVM->cpum.s.GuestFeatures.fPae = 0; … … 1643 1643 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 1644 1644 if (pLeaf) 1645 pVM->cpum.s.aGuestCpuIdPatmStd[1]. edx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT;1645 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_FEATURE_EDX_PAT; 1646 1646 1647 1647 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 1648 1648 if ( pLeaf 1649 1649 && pVM->cpum.s.GuestFeatures.enmCpuVendor == CPUMCPUVENDOR_AMD) 1650 pVM->cpum.s.aGuestCpuIdPatmExt[1]. edx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT;1650 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_AMD_FEATURE_EDX_PAT; 1651 1651 1652 1652 pVM->cpum.s.GuestFeatures.fPat = 0; … … 1657 1657 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 1658 1658 if (pLeaf) 1659 pVM->cpum.s.aGuestCpuIdPatmExt[1]. 
edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE;1659 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_LONG_MODE; 1660 1660 pVM->cpum.s.GuestFeatures.fLongMode = 0; 1661 1661 break; … … 1664 1664 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 1665 1665 if (pLeaf) 1666 pVM->cpum.s.aGuestCpuIdPatmExt[1]. ecx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF;1666 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF; 1667 1667 pVM->cpum.s.GuestFeatures.fLahfSahf = 0; 1668 1668 break; … … 1671 1671 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x80000001), 0); 1672 1672 if (pLeaf) 1673 pVM->cpum.s.aGuestCpuIdPatmExt[1]. edx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP;1673 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx &= ~X86_CPUID_EXT_FEATURE_EDX_RDTSCP; 1674 1674 pVM->cpum.s.GuestFeatures.fRdTscP = 0; 1675 1675 Log(("CPUM: ClearGuestCpuIdFeature: Disabled RDTSCP!\n")); … … 1679 1679 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000001), 0); 1680 1680 if (pLeaf) 1681 pVM->cpum.s.aGuestCpuIdPatmStd[1]. ecx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP;1681 pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx = pLeaf->uEcx &= ~X86_CPUID_FEATURE_ECX_HVP; 1682 1682 pVM->cpum.s.GuestFeatures.fHypervisorPresent = 0; 1683 1683 break; … … 1686 1686 pLeaf = cpumCpuIdGetLeaf(pVM, UINT32_C(0x00000005), 0); 1687 1687 if (pLeaf) 1688 pVM->cpum.s.aGuestCpuIdPatmStd[5]. ecx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0);1688 pVM->cpum.s.aGuestCpuIdPatmStd[5].uEcx = pLeaf->uEcx &= ~(X86_CPUID_MWAIT_ECX_EXT | X86_CPUID_MWAIT_ECX_BREAKIRQIF0); 1689 1689 pVM->cpum.s.GuestFeatures.fMWaitExtensions = 0; 1690 1690 Log(("CPUM: ClearGuestCpuIdFeature: Disabled MWAIT Extensions!\n")); -
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r54674 r54714 80 80 { 81 81 uint32_t uLeaf; /**< Leaf to check. */ 82 uint32_t ecx;/**< which bits in ecx to unify between CPUs. */83 uint32_t edx;/**< which bits in edx to unify between CPUs. */82 uint32_t uEcx; /**< which bits in ecx to unify between CPUs. */ 83 uint32_t uEdx; /**< which bits in edx to unify between CPUs. */ 84 84 } 85 85 const g_aCpuidUnifyBits[] = … … 170 170 ASMCpuIdExSlow(uLeaf, 0, 0, 0, &eax, &ebx, &ecx, &edx); 171 171 172 ASMAtomicAndU32(&pLegacyLeaf-> ecx, ecx | ~g_aCpuidUnifyBits[i].ecx);173 ASMAtomicAndU32(&pLegacyLeaf-> edx, edx | ~g_aCpuidUnifyBits[i].edx);172 ASMAtomicAndU32(&pLegacyLeaf->uEcx, ecx | ~g_aCpuidUnifyBits[i].uEcx); 173 ASMAtomicAndU32(&pLegacyLeaf->uEdx, edx | ~g_aCpuidUnifyBits[i].uEdx); 174 174 } 175 175 } … … 300 300 continue; 301 301 302 pLeaf->uEcx = pLegacyLeaf-> ecx;303 pLeaf->uEdx = pLegacyLeaf-> edx;302 pLeaf->uEcx = pLegacyLeaf->uEcx; 303 pLeaf->uEdx = pLegacyLeaf->uEdx; 304 304 } 305 305 } -
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r54674 r54714 727 727 if (!fHWVirtExEnabled) 728 728 { 729 Assert( pVM->cpum.s.aGuestCpuIdPatmStd[4]. eax == 0730 || pVM->cpum.s.aGuestCpuIdPatmStd[0]. eax < 0x4);731 pVM->cpum.s.aGuestCpuIdPatmStd[4]. eax = 0;729 Assert( pVM->cpum.s.aGuestCpuIdPatmStd[4].uEax == 0 730 || pVM->cpum.s.aGuestCpuIdPatmStd[0].uEax < 0x4); 731 pVM->cpum.s.aGuestCpuIdPatmStd[4].uEax = 0; 732 732 } 733 733 } … … 1232 1232 * features in the future. 1233 1233 */ 1234 AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1]. ecx &1234 AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx & 1235 1235 ( X86_CPUID_FEATURE_ECX_DTES64 1236 1236 | X86_CPUID_FEATURE_ECX_VMX … … 1875 1875 CPUMCPUID Host; 1876 1876 CPUMCPUID Guest; 1877 unsigned cStdMax = pVM->cpum.s.aGuestCpuIdPatmStd[0]. eax;1877 unsigned cStdMax = pVM->cpum.s.aGuestCpuIdPatmStd[0].uEax; 1878 1878 1879 1879 uint32_t cStdHstMax; … … 1891 1891 { 1892 1892 Guest = pVM->cpum.s.aGuestCpuIdPatmStd[i]; 1893 ASMCpuIdExSlow(i, 0, 0, 0, &Host. eax, &Host.ebx, &Host.ecx, &Host.edx);1893 ASMCpuIdExSlow(i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx); 1894 1894 1895 1895 pHlp->pfnPrintf(pHlp, 1896 1896 "Gst: %08x %08x %08x %08x %08x%s\n" 1897 1897 "Hst: %08x %08x %08x %08x\n", 1898 i, Guest. eax, Guest.ebx, Guest.ecx, Guest.edx,1898 i, Guest.uEax, Guest.uEbx, Guest.uEcx, Guest.uEdx, 1899 1899 i <= cStdMax ? "" : "*", 1900 Host. eax, Host.ebx, Host.ecx, Host.edx);1900 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx); 1901 1901 } 1902 1902 else 1903 1903 { 1904 ASMCpuIdExSlow(i, 0, 0, 0, &Host. eax, &Host.ebx, &Host.ecx, &Host.edx);1904 ASMCpuIdExSlow(i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx); 1905 1905 1906 1906 pHlp->pfnPrintf(pHlp, 1907 1907 "Hst: %08x %08x %08x %08x %08x\n", 1908 i, Host. eax, Host.ebx, Host.ecx, Host.edx);1908 i, Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx); 1909 1909 } 1910 1910 } … … 1919 1919 "Name: %.04s%.04s%.04s\n" 1920 1920 "Supports: 0-%x\n", 1921 &Guest. 
ebx, &Guest.edx, &Guest.ecx, Guest.eax);1921 &Guest.uEbx, &Guest.uEdx, &Guest.uEcx, Guest.uEax); 1922 1922 } 1923 1923 … … 1925 1925 * Get Features. 1926 1926 */ 1927 bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdPatmStd[0]. ebx,1928 pVM->cpum.s.aGuestCpuIdPatmStd[0]. ecx,1929 pVM->cpum.s.aGuestCpuIdPatmStd[0]. edx);1927 bool const fIntel = ASMIsIntelCpuEx(pVM->cpum.s.aGuestCpuIdPatmStd[0].uEbx, 1928 pVM->cpum.s.aGuestCpuIdPatmStd[0].uEcx, 1929 pVM->cpum.s.aGuestCpuIdPatmStd[0].uEdx); 1930 1930 if (cStdMax >= 1 && iVerbosity) 1931 1931 { … … 1933 1933 1934 1934 Guest = pVM->cpum.s.aGuestCpuIdPatmStd[1]; 1935 uint32_t uEAX = Guest. eax;1935 uint32_t uEAX = Guest.uEax; 1936 1936 1937 1937 pHlp->pfnPrintf(pHlp, … … 1948 1948 ASMGetCpuStepping(uEAX), 1949 1949 (uEAX >> 12) & 3, s_apszTypes[(uEAX >> 12) & 3], 1950 (Guest. ebx >> 24) & 0xff,1951 (Guest. ebx >> 16) & 0xff,1952 (Guest. ebx >> 8) & 0xff,1953 (Guest. ebx >> 0) & 0xff);1950 (Guest.uEbx >> 24) & 0xff, 1951 (Guest.uEbx >> 16) & 0xff, 1952 (Guest.uEbx >> 8) & 0xff, 1953 (Guest.uEbx >> 0) & 0xff); 1954 1954 if (iVerbosity == 1) 1955 1955 { 1956 uint32_t uEDX = Guest. edx;1956 uint32_t uEDX = Guest.uEdx; 1957 1957 pHlp->pfnPrintf(pHlp, "Features EDX: "); 1958 1958 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU"); … … 1990 1990 pHlp->pfnPrintf(pHlp, "\n"); 1991 1991 1992 uint32_t uECX = Guest. ecx;1992 uint32_t uECX = Guest.uEcx; 1993 1993 pHlp->pfnPrintf(pHlp, "Features ECX: "); 1994 1994 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " SSE3"); … … 2028 2028 else 2029 2029 { 2030 ASMCpuIdExSlow(1, 0, 0, 0, &Host. eax, &Host.ebx, &Host.ecx, &Host.edx);2031 2032 X86CPUIDFEATEDX EdxHost = *(PX86CPUIDFEATEDX)&Host. edx;2033 X86CPUIDFEATECX EcxHost = *(PX86CPUIDFEATECX)&Host. ecx;2034 X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest. edx;2035 X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest. 
ecx;2030 ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx); 2031 2032 X86CPUIDFEATEDX EdxHost = *(PX86CPUIDFEATEDX)&Host.uEdx; 2033 X86CPUIDFEATECX EcxHost = *(PX86CPUIDFEATECX)&Host.uEcx; 2034 X86CPUIDFEATEDX EdxGuest = *(PX86CPUIDFEATEDX)&Guest.uEdx; 2035 X86CPUIDFEATECX EcxGuest = *(PX86CPUIDFEATECX)&Guest.uEcx; 2036 2036 2037 2037 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n"); … … 2112 2112 * Implemented after AMD specs. 2113 2113 */ 2114 unsigned cExtMax = pVM->cpum.s.aGuestCpuIdPatmExt[0]. eax & 0xffff;2114 unsigned cExtMax = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax & 0xffff; 2115 2115 2116 2116 pHlp->pfnPrintf(pHlp, … … 2122 2122 { 2123 2123 Guest = pVM->cpum.s.aGuestCpuIdPatmExt[i]; 2124 ASMCpuIdExSlow(0x80000000 | i, 0, 0, 0, &Host. eax, &Host.ebx, &Host.ecx, &Host.edx);2124 ASMCpuIdExSlow(0x80000000 | i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx); 2125 2125 2126 2126 if ( i == 7 2127 && (Host. edx & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR))2127 && (Host.uEdx & X86_CPUID_AMD_ADVPOWER_EDX_TSCINVAR)) 2128 2128 { 2129 2129 fSupportsInvariantTsc = true; … … 2132 2132 "Gst: %08x %08x %08x %08x %08x%s\n" 2133 2133 "Hst: %08x %08x %08x %08x\n", 2134 0x80000000 | i, Guest. eax, Guest.ebx, Guest.ecx, Guest.edx,2134 0x80000000 | i, Guest.uEax, Guest.uEbx, Guest.uEcx, Guest.uEdx, 2135 2135 i <= cExtMax ? "" : "*", 2136 Host. eax, Host.ebx, Host.ecx, Host.edx);2136 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx); 2137 2137 } 2138 2138 … … 2146 2146 "Ext Name: %.4s%.4s%.4s\n" 2147 2147 "Ext Supports: 0x80000000-%#010x\n", 2148 &Guest. ebx, &Guest.edx, &Guest.ecx, Guest.eax);2148 &Guest.uEbx, &Guest.uEdx, &Guest.uEcx, Guest.uEax); 2149 2149 } 2150 2150 … … 2152 2152 { 2153 2153 Guest = pVM->cpum.s.aGuestCpuIdPatmExt[1]; 2154 uint32_t uEAX = Guest. 
eax;2154 uint32_t uEAX = Guest.uEax; 2155 2155 pHlp->pfnPrintf(pHlp, 2156 2156 "Family: %d \tExtended: %d \tEffective: %d\n" … … 2161 2161 (uEAX >> 4) & 0xf, (uEAX >> 16) & 0x0f, ASMGetCpuModel(uEAX, fIntel), 2162 2162 ASMGetCpuStepping(uEAX), 2163 Guest. ebx & 0xfff);2163 Guest.uEbx & 0xfff); 2164 2164 2165 2165 if (iVerbosity == 1) 2166 2166 { 2167 uint32_t uEDX = Guest. edx;2167 uint32_t uEDX = Guest.uEdx; 2168 2168 pHlp->pfnPrintf(pHlp, "Features EDX: "); 2169 2169 if (uEDX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " FPU"); … … 2201 2201 pHlp->pfnPrintf(pHlp, "\n"); 2202 2202 2203 uint32_t uECX = Guest. ecx;2203 uint32_t uECX = Guest.uEcx; 2204 2204 pHlp->pfnPrintf(pHlp, "Features ECX: "); 2205 2205 if (uECX & RT_BIT(0)) pHlp->pfnPrintf(pHlp, " LAHF/SAHF"); … … 2224 2224 else 2225 2225 { 2226 ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host. eax, &Host.ebx, &Host.ecx, &Host.edx);2227 2228 uint32_t uEdxGst = Guest. edx;2229 uint32_t uEdxHst = Host. edx;2226 ASMCpuIdExSlow(0x80000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx); 2227 2228 uint32_t uEdxGst = Guest.uEdx; 2229 uint32_t uEdxHst = Host.uEdx; 2230 2230 pHlp->pfnPrintf(pHlp, "Mnemonic - Description = guest (host)\n"); 2231 2231 pHlp->pfnPrintf(pHlp, "FPU - x87 FPU on Chip = %d (%d)\n", !!(uEdxGst & RT_BIT( 0)), !!(uEdxHst & RT_BIT( 0))); … … 2262 2262 pHlp->pfnPrintf(pHlp, "31 - AMD 3DNow! = %d (%d)\n", !!(uEdxGst & RT_BIT(31)), !!(uEdxHst & RT_BIT(31))); 2263 2263 2264 uint32_t uEcxGst = Guest. ecx;2265 uint32_t uEcxHst = Host. 
ecx;2264 uint32_t uEcxGst = Guest.uEcx; 2265 uint32_t uEcxHst = Host.uEcx; 2266 2266 pHlp->pfnPrintf(pHlp, "LahfSahf - LAHF/SAHF in 64-bit mode = %d (%d)\n", !!(uEcxGst & RT_BIT( 0)), !!(uEcxHst & RT_BIT( 0))); 2267 2267 pHlp->pfnPrintf(pHlp, "CmpLegacy - Core MP legacy mode (depr) = %d (%d)\n", !!(uEcxGst & RT_BIT( 1)), !!(uEcxHst & RT_BIT( 1))); … … 2286 2286 char szString[4*4*3+1] = {0}; 2287 2287 uint32_t *pu32 = (uint32_t *)szString; 2288 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2]. eax;2289 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2]. ebx;2290 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2]. ecx;2291 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2]. edx;2288 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEax; 2289 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEbx; 2290 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEcx; 2291 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[2].uEdx; 2292 2292 if (cExtMax >= 3) 2293 2293 { 2294 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3]. eax;2295 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3]. ebx;2296 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3]. ecx;2297 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3]. edx;2294 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEax; 2295 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEbx; 2296 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEcx; 2297 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[3].uEdx; 2298 2298 } 2299 2299 if (cExtMax >= 4) 2300 2300 { 2301 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4]. eax;2302 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4]. ebx;2303 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4]. ecx;2304 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4]. 
edx;2301 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEax; 2302 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEbx; 2303 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEcx; 2304 *pu32++ = pVM->cpum.s.aGuestCpuIdPatmExt[4].uEdx; 2305 2305 } 2306 2306 pHlp->pfnPrintf(pHlp, "Full Name: %s\n", szString); … … 2309 2309 if (iVerbosity && cExtMax >= 5) 2310 2310 { 2311 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[5]. eax;2312 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[5]. ebx;2313 uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[5]. ecx;2314 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[5]. edx;2311 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEax; 2312 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEbx; 2313 uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEcx; 2314 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[5].uEdx; 2315 2315 char sz1[32]; 2316 2316 char sz2[32]; … … 2347 2347 if (iVerbosity && cExtMax >= 6) 2348 2348 { 2349 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[6]. eax;2350 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[6]. ebx;2351 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[6]. edx;2349 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[6].uEax; 2350 uint32_t uEBX = pVM->cpum.s.aGuestCpuIdPatmExt[6].uEbx; 2351 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[6].uEdx; 2352 2352 2353 2353 pHlp->pfnPrintf(pHlp, … … 2374 2374 if (iVerbosity && cExtMax >= 7) 2375 2375 { 2376 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[7]. edx;2376 uint32_t uEDX = pVM->cpum.s.aGuestCpuIdPatmExt[7].uEdx; 2377 2377 2378 2378 pHlp->pfnPrintf(pHlp, "Host Invariant-TSC support: %RTbool\n", fSupportsInvariantTsc); … … 2396 2396 if (iVerbosity && cExtMax >= 8) 2397 2397 { 2398 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[8]. eax;2399 uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[8]. 
ecx;2398 uint32_t uEAX = pVM->cpum.s.aGuestCpuIdPatmExt[8].uEax; 2399 uint32_t uECX = pVM->cpum.s.aGuestCpuIdPatmExt[8].uEcx; 2400 2400 2401 2401 pHlp->pfnPrintf(pHlp, … … 2420 2420 RT_ZERO(Host); 2421 2421 if (cStdHstMax >= 1) 2422 ASMCpuIdExSlow(1, 0, 0, 0, &Host. eax, &Host.ebx, &Host.ecx, &Host.edx);2423 bool fHostHvp = RT_BOOL(Host. ecx & X86_CPUID_FEATURE_ECX_HVP);2422 ASMCpuIdExSlow(1, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx); 2423 bool fHostHvp = RT_BOOL(Host.uEcx & X86_CPUID_FEATURE_ECX_HVP); 2424 2424 bool fGuestHvp = false; 2425 2425 if (cStdMax >= 1) 2426 2426 { 2427 2427 Guest = pVM->cpum.s.aGuestCpuIdPatmStd[1]; 2428 fGuestHvp = RT_BOOL(Guest. ecx & X86_CPUID_FEATURE_ECX_HVP);2428 fGuestHvp = RT_BOOL(Guest.uEcx & X86_CPUID_FEATURE_ECX_HVP); 2429 2429 } 2430 2430 … … 2447 2447 RT_ZERO(Host); 2448 2448 if (fHostHvp) 2449 ASMCpuIdExSlow(uHyperLeaf, 0, 0, 0, &Host. eax, &Host.ebx, &Host.ecx, &Host.edx);2449 ASMCpuIdExSlow(uHyperLeaf, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx); 2450 2450 2451 2451 CPUMCPUIDLEAF GuestLeaf; 2452 2452 uint32_t const cHyperGstMax = pHyperLeafGst ? pHyperLeafGst->uEax : 0; 2453 uint32_t const cHyperHstMax = Host. eax;2453 uint32_t const cHyperHstMax = Host.uEax; 2454 2454 uint32_t const cHyperMax = RT_MAX(cHyperHstMax, cHyperGstMax); 2455 2455 for (uint32_t i = uHyperLeaf; i <= cHyperMax; i++) … … 2458 2458 RT_ZERO(GuestLeaf); 2459 2459 if (i <= cHyperHstMax) 2460 ASMCpuIdExSlow(i, 0, 0, 0, &Host. eax, &Host.ebx, &Host.ecx, &Host.edx);2460 ASMCpuIdExSlow(i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx); 2461 2461 CPUMR3CpuIdGetLeaf(pVM, &GuestLeaf, i, 0 /* uSubLeaf */); 2462 2462 if (!fHostHvp) … … 2473 2473 i, GuestLeaf.uEax, GuestLeaf.uEbx, GuestLeaf.uEcx, GuestLeaf.uEdx, 2474 2474 i <= cHyperGstMax ? "" : "*", 2475 Host. eax, Host.ebx, Host.ecx, Host.edx, i <= cHyperHstMax ? "" : "*");2475 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx, i <= cHyperHstMax ? 
"" : "*"); 2476 2476 } 2477 2477 } … … 2481 2481 * Centaur. 2482 2482 */ 2483 unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdPatmCentaur[0]. eax & 0xffff;2483 unsigned cCentaurMax = pVM->cpum.s.aGuestCpuIdPatmCentaur[0].uEax & 0xffff; 2484 2484 2485 2485 pHlp->pfnPrintf(pHlp, … … 2490 2490 { 2491 2491 Guest = pVM->cpum.s.aGuestCpuIdPatmCentaur[i]; 2492 ASMCpuIdExSlow(0xc0000000 | i, 0, 0, 0, &Host. eax, &Host.ebx, &Host.ecx, &Host.edx);2492 ASMCpuIdExSlow(0xc0000000 | i, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx); 2493 2493 2494 2494 pHlp->pfnPrintf(pHlp, 2495 2495 "Gst: %08x %08x %08x %08x %08x%s\n" 2496 2496 "Hst: %08x %08x %08x %08x\n", 2497 0xc0000000 | i, Guest. eax, Guest.ebx, Guest.ecx, Guest.edx,2497 0xc0000000 | i, Guest.uEax, Guest.uEbx, Guest.uEcx, Guest.uEdx, 2498 2498 i <= cCentaurMax ? "" : "*", 2499 Host. eax, Host.ebx, Host.ecx, Host.edx);2499 Host.uEax, Host.uEbx, Host.uEcx, Host.uEdx); 2500 2500 } 2501 2501 … … 2508 2508 pHlp->pfnPrintf(pHlp, 2509 2509 "Centaur Supports: 0xc0000000-%#010x\n", 2510 Guest. eax);2510 Guest.uEax); 2511 2511 } 2512 2512 2513 2513 if (iVerbosity && cCentaurMax >= 1) 2514 2514 { 2515 ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host. eax, &Host.ebx, &Host.ecx, &Host.edx);2516 uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdPatmCentaur[1]. edx;2517 uint32_t uEdxHst = Host. edx;2515 ASMCpuIdExSlow(0xc0000001, 0, 0, 0, &Host.uEax, &Host.uEbx, &Host.uEcx, &Host.uEdx); 2516 uint32_t uEdxGst = pVM->cpum.s.aGuestCpuIdPatmCentaur[1].uEdx; 2517 uint32_t uEdxHst = Host.uEdx; 2518 2518 2519 2519 if (iVerbosity == 1) -
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r54674 r54714 541 541 if (pLeaf) 542 542 { 543 pLegacy-> eax = pLeaf->uEax;544 pLegacy-> ebx = pLeaf->uEbx;545 pLegacy-> ecx = pLeaf->uEcx;546 pLegacy-> edx = pLeaf->uEdx;543 pLegacy->uEax = pLeaf->uEax; 544 pLegacy->uEbx = pLeaf->uEbx; 545 pLegacy->uEcx = pLeaf->uEcx; 546 pLegacy->uEdx = pLeaf->uEdx; 547 547 return true; 548 548 } … … 1195 1195 */ 1196 1196 *penmUnknownMethod = CPUMUKNOWNCPUID_DEFAULTS; 1197 pDefUnknown-> eax = 0;1198 pDefUnknown-> ebx = 0;1199 pDefUnknown-> ecx = 0;1200 pDefUnknown-> edx = 0;1197 pDefUnknown->uEax = 0; 1198 pDefUnknown->uEbx = 0; 1199 pDefUnknown->uEcx = 0; 1200 pDefUnknown->uEdx = 0; 1201 1201 1202 1202 /* … … 1258 1258 else 1259 1259 *penmUnknownMethod = CPUMUKNOWNCPUID_LAST_STD_LEAF; 1260 pDefUnknown-> eax = auLast[0];1261 pDefUnknown-> ebx = auLast[1];1262 pDefUnknown-> ecx = auLast[2];1263 pDefUnknown-> edx = auLast[3];1260 pDefUnknown->uEax = auLast[0]; 1261 pDefUnknown->uEbx = auLast[1]; 1262 pDefUnknown->uEcx = auLast[2]; 1263 pDefUnknown->uEdx = auLast[3]; 1264 1264 return VINF_SUCCESS; 1265 1265 } … … 1691 1691 int rc = CFGMR3QueryU32(pLeafNode, "eax", &u32); 1692 1692 if (RT_SUCCESS(rc)) 1693 pLeaf-> eax = u32;1693 pLeaf->uEax = u32; 1694 1694 else 1695 1695 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc); … … 1697 1697 rc = CFGMR3QueryU32(pLeafNode, "ebx", &u32); 1698 1698 if (RT_SUCCESS(rc)) 1699 pLeaf-> ebx = u32;1699 pLeaf->uEbx = u32; 1700 1700 else 1701 1701 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc); … … 1703 1703 rc = CFGMR3QueryU32(pLeafNode, "ecx", &u32); 1704 1704 if (RT_SUCCESS(rc)) 1705 pLeaf-> ecx = u32;1705 pLeaf->uEcx = u32; 1706 1706 else 1707 1707 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc); … … 1709 1709 rc = CFGMR3QueryU32(pLeafNode, "edx", &u32); 1710 1710 if (RT_SUCCESS(rc)) 1711 pLeaf-> edx = u32;1711 pLeaf->uEdx = u32; 1712 1712 else 1713 1713 AssertReturn(rc == VERR_CFGM_VALUE_NOT_FOUND, rc); … … 1753 1753 /* Using the ECX variant for all of them can't hurt... 
*/ 1754 1754 for (uint32_t i = 0; i < cLeaves; i++) 1755 ASMCpuIdExSlow(uStart + i, 0, 0, 0, &paLeaves[i]. eax, &paLeaves[i].ebx, &paLeaves[i].ecx, &paLeaves[i].edx);1755 ASMCpuIdExSlow(uStart + i, 0, 0, 0, &paLeaves[i].uEax, &paLeaves[i].uEbx, &paLeaves[i].uEcx, &paLeaves[i].uEdx); 1756 1756 1757 1757 /* Load CPUID leaf override; we currently don't care if the user … … 1816 1816 if (pLeaf) 1817 1817 { 1818 pLegacyLeaf-> eax = pLeaf->uEax;1819 pLegacyLeaf-> ebx = pLeaf->uEbx;1820 pLegacyLeaf-> ecx = pLeaf->uEcx;1821 pLegacyLeaf-> edx = pLeaf->uEdx;1818 pLegacyLeaf->uEax = pLeaf->uEax; 1819 pLegacyLeaf->uEbx = pLeaf->uEbx; 1820 pLegacyLeaf->uEcx = pLeaf->uEcx; 1821 pLegacyLeaf->uEdx = pLeaf->uEdx; 1822 1822 } 1823 1823 else … … 2603 2603 CPUMCPUID aRawStd[16]; 2604 2604 for (unsigned i = 0; i < RT_ELEMENTS(aRawStd); i++) 2605 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i]. eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);2605 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx); 2606 2606 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawStd)); 2607 2607 SSMR3PutMem(pSSM, &aRawStd[0], sizeof(aRawStd)); … … 2609 2609 CPUMCPUID aRawExt[32]; 2610 2610 for (unsigned i = 0; i < RT_ELEMENTS(aRawExt); i++) 2611 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i]. eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);2611 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx); 2612 2612 SSMR3PutU32(pSSM, RT_ELEMENTS(aRawExt)); 2613 2613 SSMR3PutMem(pSSM, &aRawExt[0], sizeof(aRawExt)); … … 2634 2634 NewLeaf.uSubLeaf = 0; 2635 2635 NewLeaf.fSubLeafMask = 0; 2636 NewLeaf.uEax = CpuId. eax;2637 NewLeaf.uEbx = CpuId. ebx;2638 NewLeaf.uEcx = CpuId. ecx;2639 NewLeaf.uEdx = CpuId. 
edx;2636 NewLeaf.uEax = CpuId.uEax; 2637 NewLeaf.uEbx = CpuId.uEbx; 2638 NewLeaf.uEcx = CpuId.uEcx; 2639 NewLeaf.uEdx = CpuId.uEdx; 2640 2640 NewLeaf.fFlags = 0; 2641 2641 rc = cpumR3CpuIdInsert(NULL /* pVM */, ppaLeaves, pcLeaves, &NewLeaf); … … 2873 2873 AssertRCReturn(rc, rc); 2874 2874 for (uint32_t i = cRawStd; i < RT_ELEMENTS(aRawStd); i++) 2875 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i]. eax, &aRawStd[i].ebx, &aRawStd[i].ecx, &aRawStd[i].edx);2875 ASMCpuIdExSlow(i, 0, 0, 0, &aRawStd[i].uEax, &aRawStd[i].uEbx, &aRawStd[i].uEcx, &aRawStd[i].uEdx); 2876 2876 2877 2877 CPUMCPUID aRawExt[32]; … … 2883 2883 AssertRCReturn(rc, rc); 2884 2884 for (uint32_t i = cRawExt; i < RT_ELEMENTS(aRawExt); i++) 2885 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i]. eax, &aRawExt[i].ebx, &aRawExt[i].ecx, &aRawExt[i].edx);2885 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, &aRawExt[i].uEax, &aRawExt[i].uEbx, &aRawExt[i].uEcx, &aRawExt[i].uEdx); 2886 2886 2887 2887 /* … … 2890 2890 CPUMCPUID aHostRawStd[16]; 2891 2891 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawStd); i++) 2892 ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i]. eax, &aHostRawStd[i].ebx, &aHostRawStd[i].ecx, &aHostRawStd[i].edx);2892 ASMCpuIdExSlow(i, 0, 0, 0, &aHostRawStd[i].uEax, &aHostRawStd[i].uEbx, &aHostRawStd[i].uEcx, &aHostRawStd[i].uEdx); 2893 2893 2894 2894 CPUMCPUID aHostRawExt[32]; 2895 2895 for (unsigned i = 0; i < RT_ELEMENTS(aHostRawExt); i++) 2896 2896 ASMCpuIdExSlow(i | UINT32_C(0x80000000), 0, 0, 0, 2897 &aHostRawExt[i]. eax, &aHostRawExt[i].ebx, &aHostRawExt[i].ecx, &aHostRawExt[i].edx);2897 &aHostRawExt[i].uEax, &aHostRawExt[i].uEbx, &aHostRawExt[i].uEcx, &aHostRawExt[i].uEdx); 2898 2898 2899 2899 /* … … 2926 2926 { 2927 2927 /* CPUID(0) */ 2928 CPUID_CHECK_RET( aHostRawStd[0]. ebx == aRawStd[0].ebx2929 && aHostRawStd[0]. ecx == aRawStd[0].ecx2930 && aHostRawStd[0]. 
edx == aRawStd[0].edx,2928 CPUID_CHECK_RET( aHostRawStd[0].uEbx == aRawStd[0].uEbx 2929 && aHostRawStd[0].uEcx == aRawStd[0].uEcx 2930 && aHostRawStd[0].uEdx == aRawStd[0].uEdx, 2931 2931 (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"), 2932 &aHostRawStd[0]. ebx, &aHostRawStd[0].edx, &aHostRawStd[0].ecx,2933 &aRawStd[0]. ebx, &aRawStd[0].edx, &aRawStd[0].ecx));2934 CPUID_CHECK2_WRN("Std CPUID max leaf", aHostRawStd[0]. eax, aRawStd[0].eax);2935 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1]. eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3);2936 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1]. eax >> 28, aRawExt[1].eax >> 28);2937 2938 bool const fIntel = ASMIsIntelCpuEx(aRawStd[0]. ebx, aRawStd[0].ecx, aRawStd[0].edx);2932 &aHostRawStd[0].uEbx, &aHostRawStd[0].uEdx, &aHostRawStd[0].uEcx, 2933 &aRawStd[0].uEbx, &aRawStd[0].uEdx, &aRawStd[0].uEcx)); 2934 CPUID_CHECK2_WRN("Std CPUID max leaf", aHostRawStd[0].uEax, aRawStd[0].uEax); 2935 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].uEax >> 14) & 3, (aRawExt[1].uEax >> 14) & 3); 2936 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1].uEax >> 28, aRawExt[1].uEax >> 28); 2937 2938 bool const fIntel = ASMIsIntelCpuEx(aRawStd[0].uEbx, aRawStd[0].uEcx, aRawStd[0].uEdx); 2939 2939 2940 2940 /* CPUID(1).eax */ 2941 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawStd[1]. eax), ASMGetCpuFamily(aRawStd[1].eax));2942 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawStd[1]. eax, fIntel), ASMGetCpuModel(aRawStd[1].eax, fIntel));2943 CPUID_CHECK2_WRN("CPU type", (aHostRawStd[1]. 
eax >> 12) & 3, (aRawStd[1].eax >> 12) & 3 );2941 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawStd[1].uEax), ASMGetCpuFamily(aRawStd[1].uEax)); 2942 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawStd[1].uEax, fIntel), ASMGetCpuModel(aRawStd[1].uEax, fIntel)); 2943 CPUID_CHECK2_WRN("CPU type", (aHostRawStd[1].uEax >> 12) & 3, (aRawStd[1].uEax >> 12) & 3 ); 2944 2944 2945 2945 /* CPUID(1).ebx - completely ignore CPU count and APIC ID. */ 2946 CPUID_CHECK2_RET("CPU brand ID", aHostRawStd[1]. ebx & 0xff, aRawStd[1].ebx & 0xff);2947 CPUID_CHECK2_WRN("CLFLUSH chunk count", (aHostRawStd[1]. ebx >> 8) & 0xff, (aRawStd[1].ebx >> 8) & 0xff);2946 CPUID_CHECK2_RET("CPU brand ID", aHostRawStd[1].uEbx & 0xff, aRawStd[1].uEbx & 0xff); 2947 CPUID_CHECK2_WRN("CLFLUSH chunk count", (aHostRawStd[1].uEbx >> 8) & 0xff, (aRawStd[1].uEbx >> 8) & 0xff); 2948 2948 2949 2949 /* CPUID(1).ecx */ 2950 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3);2951 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL);2952 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64);2953 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);2954 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS);2955 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_VMX);2956 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_SMX);2957 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_EST);2958 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TM2);2959 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3);2960 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID);2961 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );2962 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA);2963 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16);2964 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);2965 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM);2966 CPUID_RAW_FEATURE_RET(Std, ecx, RT_BIT_32(16) 
/*reserved*/);2967 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_PCID);2968 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_DCA);2969 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1);2970 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2);2971 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);2972 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE);2973 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT);2974 CPUID_RAW_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_TSCDEADL);2975 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES);2976 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE);2977 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE);2978 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX);2979 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_F16C);2980 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_RDRAND);2981 CPUID_RAW_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_HVP);2950 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE3); 2951 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCLMUL); 2952 CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_DTES64); 2953 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MONITOR); 2954 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CPLDS); 2955 CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_VMX); 2956 CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_SMX); 2957 CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_EST); 2958 CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_TM2); 2959 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSSE3); 2960 CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_CNTXID); 2961 CPUID_RAW_FEATURE_RET(Std, uEcx, RT_BIT_32(11) /*reserved*/ ); 2962 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_FMA); 2963 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CX16); 2964 CPUID_RAW_FEATURE_IGN(Std, uEcx, 
X86_CPUID_FEATURE_ECX_TPRUPDATE); 2965 CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_PDCM); 2966 CPUID_RAW_FEATURE_RET(Std, uEcx, RT_BIT_32(16) /*reserved*/); 2967 CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_PCID); 2968 CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_DCA); 2969 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_1); 2970 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_2); 2971 CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_X2APIC); 2972 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MOVBE); 2973 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_POPCNT); 2974 CPUID_RAW_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_TSCDEADL); 2975 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES); 2976 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE); 2977 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE); 2978 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX); 2979 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C); 2980 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_RDRAND); 2981 CPUID_RAW_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_HVP); 2982 2982 2983 2983 /* CPUID(1).edx */ 2984 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);2985 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);2986 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE);2987 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);2988 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC);2989 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR);2990 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAE);2991 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);2992 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8);2993 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);2994 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);2995 CPUID_RAW_FEATURE_IGN(Std, edx, 
X86_CPUID_FEATURE_EDX_SEP);2996 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);2997 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);2998 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);2999 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV);3000 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);3001 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);3002 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);3003 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH);3004 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);3005 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_DS);3006 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_ACPI);3007 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX);3008 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR);3009 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE);3010 CPUID_RAW_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2);3011 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SS);3012 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_HTT);3013 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_TM);3014 CPUID_RAW_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/);3015 CPUID_RAW_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PBE);2984 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FPU); 2985 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_VME); 2986 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DE); 2987 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE); 2988 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TSC); 2989 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MSR); 2990 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAE); 2991 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCE); 2992 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CX8); 2993 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_APIC); 2994 
CPUID_RAW_FEATURE_RET(Std, uEdx, RT_BIT_32(10) /*reserved*/); 2995 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SEP); 2996 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MTRR); 2997 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PGE); 2998 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCA); 2999 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CMOV); 3000 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAT); 3001 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE36); 3002 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSN); 3003 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CLFSH); 3004 CPUID_RAW_FEATURE_RET(Std, uEdx, RT_BIT_32(20) /*reserved*/); 3005 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_DS); 3006 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_ACPI); 3007 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MMX); 3008 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FXSR); 3009 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE); 3010 CPUID_RAW_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE2); 3011 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SS); 3012 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_HTT); 3013 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_TM); 3014 CPUID_RAW_FEATURE_RET(Std, uEdx, RT_BIT_32(30) /*JMPE/IA64*/); 3015 CPUID_RAW_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PBE); 3016 3016 3017 3017 /* CPUID(2) - config, mostly about caches. ignore. */ … … 3027 3027 3028 3028 /* CPUID(d) - XCR0 stuff - takes ECX as input. We only warn about the main level (ECX=0) for now. */ 3029 CPUID_CHECK_WRN( aRawStd[0]. eax < UINT32_C(0x0000000d)3030 || aHostRawStd[0]. 
eax >= UINT32_C(0x0000000d),3029 CPUID_CHECK_WRN( aRawStd[0].uEax < UINT32_C(0x0000000d) 3030 || aHostRawStd[0].uEax >= UINT32_C(0x0000000d), 3031 3031 ("CPUM: Standard leaf D was present on saved state host, not present on current.\n")); 3032 if ( aRawStd[0]. eax >= UINT32_C(0x0000000d)3033 && aHostRawStd[0]. eax >= UINT32_C(0x0000000d))3032 if ( aRawStd[0].uEax >= UINT32_C(0x0000000d) 3033 && aHostRawStd[0].uEax >= UINT32_C(0x0000000d)) 3034 3034 { 3035 CPUID_CHECK2_WRN("Valid low XCR0 bits", aHostRawStd[0xd]. eax, aRawStd[0xd].eax);3036 CPUID_CHECK2_WRN("Valid high XCR0 bits", aHostRawStd[0xd]. edx, aRawStd[0xd].edx);3037 CPUID_CHECK2_WRN("Current XSAVE/XRSTOR area size", aHostRawStd[0xd]. ebx, aRawStd[0xd].ebx);3038 CPUID_CHECK2_WRN("Max XSAVE/XRSTOR area size", aHostRawStd[0xd]. ecx, aRawStd[0xd].ecx);3035 CPUID_CHECK2_WRN("Valid low XCR0 bits", aHostRawStd[0xd].uEax, aRawStd[0xd].uEax); 3036 CPUID_CHECK2_WRN("Valid high XCR0 bits", aHostRawStd[0xd].uEdx, aRawStd[0xd].uEdx); 3037 CPUID_CHECK2_WRN("Current XSAVE/XRSTOR area size", aHostRawStd[0xd].uEbx, aRawStd[0xd].uEbx); 3038 CPUID_CHECK2_WRN("Max XSAVE/XRSTOR area size", aHostRawStd[0xd].uEcx, aRawStd[0xd].uEcx); 3039 3039 } 3040 3040 … … 3042 3042 Note! Intel have/is marking many of the fields here as reserved. We 3043 3043 will verify them as if it's an AMD CPU. */ 3044 CPUID_CHECK_RET( (aHostRawExt[0]. eax >= UINT32_C(0x80000001) && aHostRawExt[0].eax <= UINT32_C(0x8000007f))3045 || !(aRawExt[0]. eax >= UINT32_C(0x80000001) && aRawExt[0].eax <= UINT32_C(0x8000007f)),3044 CPUID_CHECK_RET( (aHostRawExt[0].uEax >= UINT32_C(0x80000001) && aHostRawExt[0].uEax <= UINT32_C(0x8000007f)) 3045 || !(aRawExt[0].uEax >= UINT32_C(0x80000001) && aRawExt[0].uEax <= UINT32_C(0x8000007f)), 3046 3046 (N_("Extended leaves was present on saved state host, but is missing on the current\n"))); 3047 if (aRawExt[0]. 
eax >= UINT32_C(0x80000001) && aRawExt[0].eax <= UINT32_C(0x8000007f))3047 if (aRawExt[0].uEax >= UINT32_C(0x80000001) && aRawExt[0].uEax <= UINT32_C(0x8000007f)) 3048 3048 { 3049 CPUID_CHECK_RET( aHostRawExt[0]. ebx == aRawExt[0].ebx3050 && aHostRawExt[0]. ecx == aRawExt[0].ecx3051 && aHostRawExt[0]. edx == aRawExt[0].edx,3049 CPUID_CHECK_RET( aHostRawExt[0].uEbx == aRawExt[0].uEbx 3050 && aHostRawExt[0].uEcx == aRawExt[0].uEcx 3051 && aHostRawExt[0].uEdx == aRawExt[0].uEdx, 3052 3052 (N_("CPU vendor mismatch: host='%.4s%.4s%.4s' saved='%.4s%.4s%.4s'"), 3053 &aHostRawExt[0]. ebx, &aHostRawExt[0].edx, &aHostRawExt[0].ecx,3054 &aRawExt[0]. ebx, &aRawExt[0].edx, &aRawExt[0].ecx));3055 CPUID_CHECK2_WRN("Ext CPUID max leaf", aHostRawExt[0]. eax, aRawExt[0].eax);3053 &aHostRawExt[0].uEbx, &aHostRawExt[0].uEdx, &aHostRawExt[0].uEcx, 3054 &aRawExt[0].uEbx, &aRawExt[0].uEdx, &aRawExt[0].uEcx)); 3055 CPUID_CHECK2_WRN("Ext CPUID max leaf", aHostRawExt[0].uEax, aRawExt[0].uEax); 3056 3056 3057 3057 /* CPUID(0x80000001).eax - same as CPUID(0).eax. */ 3058 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawExt[1]. eax), ASMGetCpuFamily(aRawExt[1].eax));3059 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawExt[1]. eax, fIntel), ASMGetCpuModel(aRawExt[1].eax, fIntel));3060 CPUID_CHECK2_WRN("CPU type", (aHostRawExt[1]. eax >> 12) & 3, (aRawExt[1].eax >> 12) & 3 );3061 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1]. eax >> 14) & 3, (aRawExt[1].eax >> 14) & 3 );3062 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1]. 
eax >> 28, aRawExt[1].eax >> 28);3058 CPUID_CHECK2_RET("CPU family", ASMGetCpuFamily(aHostRawExt[1].uEax), ASMGetCpuFamily(aRawExt[1].uEax)); 3059 CPUID_CHECK2_RET("CPU model", ASMGetCpuModel(aHostRawExt[1].uEax, fIntel), ASMGetCpuModel(aRawExt[1].uEax, fIntel)); 3060 CPUID_CHECK2_WRN("CPU type", (aHostRawExt[1].uEax >> 12) & 3, (aRawExt[1].uEax >> 12) & 3 ); 3061 CPUID_CHECK2_WRN("Reserved bits 15:14", (aHostRawExt[1].uEax >> 14) & 3, (aRawExt[1].uEax >> 14) & 3 ); 3062 CPUID_CHECK2_WRN("Reserved bits 31:28", aHostRawExt[1].uEax >> 28, aRawExt[1].uEax >> 28); 3063 3063 3064 3064 /* CPUID(0x80000001).ebx - Brand ID (maybe), just warn if things differs. */ 3065 CPUID_CHECK2_WRN("CPU BrandID", aHostRawExt[1]. ebx & 0xffff, aRawExt[1].ebx & 0xffff);3066 CPUID_CHECK2_WRN("Reserved bits 16:27", (aHostRawExt[1]. ebx >> 16) & 0xfff, (aRawExt[1].ebx >> 16) & 0xfff);3067 CPUID_CHECK2_WRN("PkgType", (aHostRawExt[1]. ebx >> 28) & 0xf, (aRawExt[1].ebx >> 28) & 0xf);3065 CPUID_CHECK2_WRN("CPU BrandID", aHostRawExt[1].uEbx & 0xffff, aRawExt[1].uEbx & 0xffff); 3066 CPUID_CHECK2_WRN("Reserved bits 16:27", (aHostRawExt[1].uEbx >> 16) & 0xfff, (aRawExt[1].uEbx >> 16) & 0xfff); 3067 CPUID_CHECK2_WRN("PkgType", (aHostRawExt[1].uEbx >> 28) & 0xf, (aRawExt[1].uEbx >> 28) & 0xf); 3068 3068 3069 3069 /* CPUID(0x80000001).ecx */ 3070 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF);3071 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL);3072 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM);3073 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);3074 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L);3075 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM);3076 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A);3077 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);3078 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);3079 CPUID_RAW_FEATURE_WRN(Ext, 
ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW);3080 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS);3081 CPUID_RAW_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5);3082 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT);3083 CPUID_RAW_FEATURE_IGN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT);3084 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));3085 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));3086 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));3087 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));3088 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));3089 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));3090 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));3091 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));3092 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));3093 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));3094 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));3095 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));3096 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));3097 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));3098 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));3099 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));3100 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));3101 CPUID_RAW_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));3070 CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); 3071 CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CMPL); 3072 CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SVM); 3073 CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC); 3074 CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CR8L); 3075 CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_ABM); 3076 CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); 3077 CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE); 3078 CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF); 3079 CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_OSVW); 3080 
CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_IBS); 3081 CPUID_RAW_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE5); 3082 CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); 3083 CPUID_RAW_FEATURE_IGN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_WDT); 3084 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(14)); 3085 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(15)); 3086 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(16)); 3087 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(17)); 3088 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(18)); 3089 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(19)); 3090 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(20)); 3091 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(21)); 3092 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(22)); 3093 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(23)); 3094 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(24)); 3095 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(25)); 3096 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(26)); 3097 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(27)); 3098 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(28)); 3099 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(29)); 3100 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(30)); 3101 CPUID_RAW_FEATURE_WRN(Ext, uEcx, RT_BIT_32(31)); 3102 3102 3103 3103 /* CPUID(0x80000001).edx */ 3104 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FPU);3105 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_VME);3106 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_DE);3107 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE);3108 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_TSC);3109 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MSR);3110 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAE);3111 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCE);3112 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CX8);3113 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_APIC);3114 
CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(10) /*reserved*/);3115 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SEP);3116 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MTRR);3117 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PGE);3118 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MCA);3119 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_CMOV);3120 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PAT);3121 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_PSE36);3122 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(18) /*reserved*/);3123 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(19) /*reserved*/);3124 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);3125 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(21) /*reserved*/);3126 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);3127 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_MMX);3128 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FXSR);3129 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);3130 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);3131 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);3132 CPUID_RAW_FEATURE_IGN(Ext, edx, RT_BIT_32(28) /*reserved*/);3133 CPUID_RAW_FEATURE_IGN(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);3134 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);3135 CPUID_RAW_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);3104 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FPU); 3105 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_VME); 3106 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_DE); 3107 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE); 3108 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_TSC); 3109 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MSR); 3110 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PAE); 3111 
CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MCE); 3112 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_CX8); 3113 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_APIC); 3114 CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(10) /*reserved*/); 3115 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_SEP); 3116 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MTRR); 3117 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PGE); 3118 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MCA); 3119 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_CMOV); 3120 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PAT); 3121 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE36); 3122 CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(18) /*reserved*/); 3123 CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(19) /*reserved*/); 3124 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_NX); 3125 CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(21) /*reserved*/); 3126 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_AXMMX); 3127 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_MMX); 3128 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FXSR); 3129 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FFXSR); 3130 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB); 3131 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 3132 CPUID_RAW_FEATURE_IGN(Ext, uEdx, RT_BIT_32(28) /*reserved*/); 3133 CPUID_RAW_FEATURE_IGN(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE); 3134 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX); 3135 CPUID_RAW_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW); 3136 3136 3137 3137 /** @todo verify the rest as well. 
*/ … … 3158 3158 3159 3159 /* CPUID(1).ecx */ 3160 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU3161 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCLMUL); // -> EMU?3162 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DTES64); // -> EMU?3163 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_MONITOR);3164 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CPLDS); // -> EMU?3165 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_VMX); // -> EMU3166 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SMX); // -> EMU3167 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_EST); // -> EMU3168 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TM2); // -> EMU?3169 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSSE3); // -> EMU3170 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CNTXID); // -> EMU3171 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(11) /*reserved*/ );3172 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_FMA); // -> EMU? 
what's this?3173 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_CX16); // -> EMU?3174 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU3175 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PDCM); // -> EMU3176 CPUID_GST_FEATURE_RET(Std, ecx, RT_BIT_32(16) /*reserved*/);3177 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_PCID);3178 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_DCA); // -> EMU?3179 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_1); // -> EMU3180 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_SSE4_2); // -> EMU3181 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_X2APIC);3182 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_MOVBE); // -> EMU3183 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_POPCNT); // -> EMU3184 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_TSCDEADL);3185 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AES); // -> EMU3186 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_XSAVE); // -> EMU3187 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU3188 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_AVX); // -> EMU?3189 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_F16C);3190 CPUID_GST_FEATURE_RET(Std, ecx, X86_CPUID_FEATURE_ECX_RDRAND);3191 CPUID_GST_FEATURE_IGN(Std, ecx, X86_CPUID_FEATURE_ECX_HVP); // Normally not set by host3160 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE3); // -> EMU 3161 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCLMUL); // -> EMU? 3162 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DTES64); // -> EMU? 3163 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_MONITOR); 3164 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CPLDS); // -> EMU? 
3165 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_VMX); // -> EMU 3166 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SMX); // -> EMU 3167 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_EST); // -> EMU 3168 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TM2); // -> EMU? 3169 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSSE3); // -> EMU 3170 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CNTXID); // -> EMU 3171 CPUID_GST_FEATURE_RET(Std, uEcx, RT_BIT_32(11) /*reserved*/ ); 3172 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_FMA); // -> EMU? what's this? 3173 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_CX16); // -> EMU? 3174 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TPRUPDATE);//-> EMU 3175 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PDCM); // -> EMU 3176 CPUID_GST_FEATURE_RET(Std, uEcx, RT_BIT_32(16) /*reserved*/); 3177 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_PCID); 3178 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_DCA); // -> EMU? 3179 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_1); // -> EMU 3180 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_SSE4_2); // -> EMU 3181 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_X2APIC); 3182 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_MOVBE); // -> EMU 3183 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_POPCNT); // -> EMU 3184 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_TSCDEADL); 3185 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AES); // -> EMU 3186 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_XSAVE); // -> EMU 3187 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_OSXSAVE); // -> EMU 3188 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_AVX); // -> EMU? 
3189 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_F16C); 3190 CPUID_GST_FEATURE_RET(Std, uEcx, X86_CPUID_FEATURE_ECX_RDRAND); 3191 CPUID_GST_FEATURE_IGN(Std, uEcx, X86_CPUID_FEATURE_ECX_HVP); // Normally not set by host 3192 3192 3193 3193 /* CPUID(1).edx */ 3194 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FPU);3195 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_VME);3196 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DE); // -> EMU?3197 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE);3198 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TSC); // -> EMU3199 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MSR); // -> EMU3200 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PAE);3201 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCE);3202 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?3203 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_APIC);3204 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(10) /*reserved*/);3205 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_SEP);3206 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MTRR);3207 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PGE);3208 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_MCA);3209 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU3210 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PAT);3211 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSE36);3212 CPUID_GST_FEATURE_IGN(Std, edx, X86_CPUID_FEATURE_EDX_PSN);3213 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_CLFSH); // -> EMU3214 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(20) /*reserved*/);3215 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_DS); // -> EMU?3216 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_ACPI); // -> EMU?3217 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_MMX); // -> EMU3218 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU3219 
CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE); // -> EMU3220 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SSE2); // -> EMU3221 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_SS); // -> EMU?3222 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_HTT); // -> EMU?3223 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_TM); // -> EMU?3224 CPUID_GST_FEATURE_RET(Std, edx, RT_BIT_32(30) /*JMPE/IA64*/); // -> EMU3225 CPUID_GST_FEATURE_RET(Std, edx, X86_CPUID_FEATURE_EDX_PBE); // -> EMU?3194 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FPU); 3195 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_VME); 3196 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DE); // -> EMU? 3197 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE); 3198 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TSC); // -> EMU 3199 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MSR); // -> EMU 3200 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PAE); 3201 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCE); 3202 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CX8); // -> EMU? 
3203 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_APIC); 3204 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(10) /*reserved*/); 3205 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_SEP); 3206 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MTRR); 3207 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PGE); 3208 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_MCA); 3209 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU 3210 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PAT); 3211 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSE36); 3212 CPUID_GST_FEATURE_IGN(Std, uEdx, X86_CPUID_FEATURE_EDX_PSN); 3213 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_CLFSH); // -> EMU 3214 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(20) /*reserved*/); 3215 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_DS); // -> EMU? 3216 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_ACPI); // -> EMU? 3217 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_MMX); // -> EMU 3218 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU 3219 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE); // -> EMU 3220 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SSE2); // -> EMU 3221 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_SS); // -> EMU? 3222 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_HTT); // -> EMU? 3223 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_TM); // -> EMU? 3224 CPUID_GST_FEATURE_RET(Std, uEdx, RT_BIT_32(30) /*JMPE/IA64*/); // -> EMU 3225 CPUID_GST_FEATURE_RET(Std, uEdx, X86_CPUID_FEATURE_EDX_PBE); // -> EMU? 3226 3226 3227 3227 /* CPUID(0x80000000). */ … … 3231 3231 { 3232 3232 /** @todo deal with no 0x80000001 on the host. */ 3233 bool const fHostAmd = ASMIsAmdCpuEx(aHostRawStd[0]. ebx, aHostRawStd[0].ecx, aHostRawStd[0].edx);3234 bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0]. 
ebx, aGuestCpuIdExt[0].ecx, aGuestCpuIdExt[0].edx);3233 bool const fHostAmd = ASMIsAmdCpuEx(aHostRawStd[0].uEbx, aHostRawStd[0].uEcx, aHostRawStd[0].uEdx); 3234 bool const fGuestAmd = ASMIsAmdCpuEx(aGuestCpuIdExt[0].uEbx, aGuestCpuIdExt[0].uEcx, aGuestCpuIdExt[0].uEdx); 3235 3235 3236 3236 /* CPUID(0x80000001).ecx */ 3237 CPUID_GST_FEATURE_WRN(Ext, ecx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); // -> EMU3238 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CMPL); // -> EMU3239 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SVM); // -> EMU3240 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ???3241 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_CR8L); // -> EMU3242 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_ABM); // -> EMU3243 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); // -> EMU3244 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU3245 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU3246 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_OSVW); // -> EMU?3247 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_IBS); // -> EMU3248 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SSE5); // -> EMU3249 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); // -> EMU3250 CPUID_GST_AMD_FEATURE_RET(Ext, ecx, X86_CPUID_AMD_FEATURE_ECX_WDT); // -> EMU3251 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(14));3252 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(15));3253 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(16));3254 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(17));3255 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(18));3256 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(19));3257 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(20));3258 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(21));3259 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(22));3260 
CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(23));3261 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(24));3262 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(25));3263 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(26));3264 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(27));3265 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(28));3266 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(29));3267 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(30));3268 CPUID_GST_AMD_FEATURE_WRN(Ext, ecx, RT_BIT_32(31));3237 CPUID_GST_FEATURE_WRN(Ext, uEcx, X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF); // -> EMU 3238 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CMPL); // -> EMU 3239 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SVM); // -> EMU 3240 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_EXT_APIC);// ??? 3241 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_CR8L); // -> EMU 3242 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_ABM); // -> EMU 3243 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE4A); // -> EMU 3244 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_MISALNSSE);//-> EMU 3245 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_3DNOWPRF);// -> EMU 3246 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_OSVW); // -> EMU? 
3247 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_IBS); // -> EMU 3248 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SSE5); // -> EMU 3249 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_SKINIT); // -> EMU 3250 CPUID_GST_AMD_FEATURE_RET(Ext, uEcx, X86_CPUID_AMD_FEATURE_ECX_WDT); // -> EMU 3251 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(14)); 3252 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(15)); 3253 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(16)); 3254 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(17)); 3255 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(18)); 3256 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(19)); 3257 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(20)); 3258 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(21)); 3259 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(22)); 3260 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(23)); 3261 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(24)); 3262 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(25)); 3263 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(26)); 3264 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(27)); 3265 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(28)); 3266 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(29)); 3267 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(30)); 3268 CPUID_GST_AMD_FEATURE_WRN(Ext, uEcx, RT_BIT_32(31)); 3269 3269 3270 3270 /* CPUID(0x80000001).edx */ 3271 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_FPU, X86_CPUID_FEATURE_EDX_FPU); // -> EMU3272 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_VME, X86_CPUID_FEATURE_EDX_VME); // -> EMU3273 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_DE, X86_CPUID_FEATURE_EDX_DE); // -> EMU3274 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PSE, X86_CPUID_FEATURE_EDX_PSE);3275 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_TSC, X86_CPUID_FEATURE_EDX_TSC); // -> EMU3276 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_MSR, 
X86_CPUID_FEATURE_EDX_MSR); // -> EMU3277 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_PAE, X86_CPUID_FEATURE_EDX_PAE);3278 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MCE, X86_CPUID_FEATURE_EDX_MCE);3279 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_CX8, X86_CPUID_FEATURE_EDX_CX8); // -> EMU?3280 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_APIC, X86_CPUID_FEATURE_EDX_APIC);3281 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(10) /*reserved*/);3282 CPUID_GST_FEATURE_IGN( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL); // On Intel: long mode only.3283 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MTRR, X86_CPUID_FEATURE_EDX_MTRR);3284 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PGE, X86_CPUID_FEATURE_EDX_PGE);3285 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_MCA, X86_CPUID_FEATURE_EDX_MCA);3286 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_CMOV, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU3287 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PAT, X86_CPUID_FEATURE_EDX_PAT);3288 CPUID_GST_FEATURE2_IGN( edx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36);3289 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(18) /*reserved*/);3290 CPUID_GST_AMD_FEATURE_WRN(Ext, edx, RT_BIT_32(19) /*reserved*/);3291 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_NX);3292 CPUID_GST_FEATURE_WRN( Ext, edx, RT_BIT_32(21) /*reserved*/);3293 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_AMD_FEATURE_EDX_AXMMX);3294 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_MMX, X86_CPUID_FEATURE_EDX_MMX); // -> EMU3295 CPUID_GST_FEATURE2_RET( edx, X86_CPUID_AMD_FEATURE_EDX_FXSR, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU3296 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_FFXSR);3297 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB);3298 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP);3299 CPUID_GST_FEATURE_IGN( Ext, edx, RT_BIT_32(28) 
/*reserved*/);3300 CPUID_GST_FEATURE_RET( Ext, edx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE);3301 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX);3302 CPUID_GST_AMD_FEATURE_RET(Ext, edx, X86_CPUID_AMD_FEATURE_EDX_3DNOW);3271 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_FPU, X86_CPUID_FEATURE_EDX_FPU); // -> EMU 3272 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_VME, X86_CPUID_FEATURE_EDX_VME); // -> EMU 3273 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_DE, X86_CPUID_FEATURE_EDX_DE); // -> EMU 3274 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE, X86_CPUID_FEATURE_EDX_PSE); 3275 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_TSC, X86_CPUID_FEATURE_EDX_TSC); // -> EMU 3276 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_MSR, X86_CPUID_FEATURE_EDX_MSR); // -> EMU 3277 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_PAE, X86_CPUID_FEATURE_EDX_PAE); 3278 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MCE, X86_CPUID_FEATURE_EDX_MCE); 3279 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_CX8, X86_CPUID_FEATURE_EDX_CX8); // -> EMU? 3280 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_APIC, X86_CPUID_FEATURE_EDX_APIC); 3281 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(10) /*reserved*/); 3282 CPUID_GST_FEATURE_IGN( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_SYSCALL); // On Intel: long mode only. 
3283 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MTRR, X86_CPUID_FEATURE_EDX_MTRR); 3284 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PGE, X86_CPUID_FEATURE_EDX_PGE); 3285 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_MCA, X86_CPUID_FEATURE_EDX_MCA); 3286 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_CMOV, X86_CPUID_FEATURE_EDX_CMOV); // -> EMU 3287 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PAT, X86_CPUID_FEATURE_EDX_PAT); 3288 CPUID_GST_FEATURE2_IGN( uEdx, X86_CPUID_AMD_FEATURE_EDX_PSE36, X86_CPUID_FEATURE_EDX_PSE36); 3289 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(18) /*reserved*/); 3290 CPUID_GST_AMD_FEATURE_WRN(Ext, uEdx, RT_BIT_32(19) /*reserved*/); 3291 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_NX); 3292 CPUID_GST_FEATURE_WRN( Ext, uEdx, RT_BIT_32(21) /*reserved*/); 3293 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_AXMMX); 3294 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_MMX, X86_CPUID_FEATURE_EDX_MMX); // -> EMU 3295 CPUID_GST_FEATURE2_RET( uEdx, X86_CPUID_AMD_FEATURE_EDX_FXSR, X86_CPUID_FEATURE_EDX_FXSR); // -> EMU 3296 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_FFXSR); 3297 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_PAGE1GB); 3298 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_RDTSCP); 3299 CPUID_GST_FEATURE_IGN( Ext, uEdx, RT_BIT_32(28) /*reserved*/); 3300 CPUID_GST_FEATURE_RET( Ext, uEdx, X86_CPUID_EXT_FEATURE_EDX_LONG_MODE); 3301 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW_EX); 3302 CPUID_GST_AMD_FEATURE_RET(Ext, uEdx, X86_CPUID_AMD_FEATURE_EDX_3DNOW); 3303 3303 } 3304 3304 … … 3346 3346 3347 3347 /** 3348 * Gets a pointer to the default CPUID leaf. 3349 * 3350 * @returns Raw-mode pointer to the default CPUID leaf (read-only). 3351 * @param pVM Pointer to the VM. 3352 * @remark Intended for PATM only. 
3353 */ 3354 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM) 3355 { 3356 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdPatmDef); 3357 } 3358 3359 3360 /** 3361 * Gets a pointer to the CPUID leaf array. 3362 * 3363 * @returns Raw-mode pointer to the CPUID leaf array. 3364 * @param pVM Pointer to the VM. 3365 * @remark Intended for PATM only. 3366 */ 3367 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayRCPtr(PVM pVM) 3368 { 3369 Assert(MMHyperRCToR3(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesRC) == pVM->cpum.s.GuestInfo.paCpuIdLeavesR3); 3370 return pVM->cpum.s.GuestInfo.paCpuIdLeavesRC; 3371 } 3372 3373 3374 /** 3375 * Gets a pointer to the CPUID leaf array. 3376 * 3377 * @returns Raw-mode pointer to the end of CPUID leaf array (exclusive). 3378 * @param pVM Pointer to the VM. 3379 * @remark Intended for PATM only. 3380 */ 3381 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUIDLEAF)) CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(PVM pVM) 3382 { 3383 Assert(MMHyperRCToR3(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesRC) == pVM->cpum.s.GuestInfo.paCpuIdLeavesR3); 3384 return pVM->cpum.s.GuestInfo.paCpuIdLeavesRC 3385 + pVM->cpum.s.GuestInfo.cCpuIdLeaves * sizeof(CPUMCPUIDLEAF); 3386 } 3387 3388 3389 /** 3390 * Gets the unknown CPUID leaf method. 3391 * 3392 * @returns Unknown CPUID leaf method. 3393 * @param pVM Pointer to the VM. 3394 * @remark Intended for PATM only. 3395 */ 3396 VMMR3_INT_DECL(CPUMUKNOWNCPUID) CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(PVM pVM) 3397 { 3398 return pVM->cpum.s.GuestInfo.enmUnknownCpuIdMethod; 3399 } 3400 3401 3402 3403 /** 3348 3404 * Gets a number of standard CPUID leafs (PATM only). 3349 3405 * 3350 3406 * @returns Number of leafs. 3351 3407 * @param pVM Pointer to the VM. 3352 * @remark Intended for PATM .3408 * @remark Intended for PATM - legacy, don't use in new code. 
3353 3409 */ 3354 3410 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmStdMax(PVM pVM) … … 3363 3419 * @returns Number of leafs. 3364 3420 * @param pVM Pointer to the VM. 3365 * @remark Intended for PATM .3421 * @remark Intended for PATM - legacy, don't use in new code. 3366 3422 */ 3367 3423 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmExtMax(PVM pVM) … … 3376 3432 * @returns Number of leafs. 3377 3433 * @param pVM Pointer to the VM. 3378 * @remark Intended for PATM .3434 * @remark Intended for PATM - legacy, don't use in new code. 3379 3435 */ 3380 3436 VMMR3_INT_DECL(uint32_t) CPUMR3GetGuestCpuIdPatmCentaurMax(PVM pVM) … … 3389 3445 * CPUMR3GetGuestCpuIdStdMax() give the size of the array. 3390 3446 * 3391 * @returns Pointer to the standard CPUID leaves (read-only).3447 * @returns Raw-mode pointer to the standard CPUID leaves (read-only). 3392 3448 * @param pVM Pointer to the VM. 3393 * @remark Intended for PATM .3449 * @remark Intended for PATM - legacy, don't use in new code. 3394 3450 */ 3395 3451 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmStdRCPtr(PVM pVM) … … 3404 3460 * CPUMGetGuestCpuIdExtMax() give the size of the array. 3405 3461 * 3406 * @returns Pointer to the extended CPUID leaves (read-only).3462 * @returns Raw-mode pointer to the extended CPUID leaves (read-only). 3407 3463 * @param pVM Pointer to the VM. 3408 * @remark Intended for PATM .3464 * @remark Intended for PATM - legacy, don't use in new code. 3409 3465 */ 3410 3466 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmExtRCPtr(PVM pVM) … … 3419 3475 * CPUMGetGuestCpuIdCentaurMax() give the size of the array. 3420 3476 * 3421 * @returns Pointer to the centaur CPUID leaves (read-only).3477 * @returns Raw-mode pointer to the centaur CPUID leaves (read-only). 3422 3478 * @param pVM Pointer to the VM. 3423 * @remark Intended for PATM .3479 * @remark Intended for PATM - legacy, don't use in new code. 
3424 3480 */ 3425 3481 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmCentaurRCPtr(PVM pVM) 3426 3482 { 3427 3483 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.aGuestCpuIdPatmCentaur[0]); 3428 }3429 3430 3431 /**3432 * Gets a pointer to the default CPUID leaf.3433 *3434 * @returns Pointer to the default CPUID leaf (read-only).3435 * @param pVM Pointer to the VM.3436 * @remark Intended for PATM.3437 */3438 VMMR3_INT_DECL(RCPTRTYPE(PCCPUMCPUID)) CPUMR3GetGuestCpuIdPatmDefRCPtr(PVM pVM)3439 {3440 return (RCPTRTYPE(PCCPUMCPUID))VM_RC_ADDR(pVM, &pVM->cpum.s.GuestCpuIdPatmDef);3441 3484 } 3442 3485 -
trunk/src/VBox/VMM/VMMR3/PATM.cpp
r54687 r54714 748 748 749 749 /* 750 * Apply fixups 750 * Apply fixups. 751 751 */ 752 PRELOCREC pRec = 0; 753 AVLPVKEY key = 0; 754 755 while (true) 756 { 757 /* Get the record that's closest from above */ 758 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true); 759 if (pRec == 0) 752 AVLPVKEY key = NULL; 753 for (;;) 754 { 755 /* Get the record that's closest from above (after or equal to key). */ 756 PRELOCREC pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true); 757 if (!pRec) 760 758 break; 761 759 762 key = ( AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */760 key = (uint8_t *)pRec->Core.Key + 1; /* search for the next record during the next round. */ 763 761 764 762 switch (pRec->uType) 765 763 { 764 case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL: 765 Assert(pRec->pDest == pRec->pSource); 766 Log(("Absolute patch template fixup type %#x at %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos)); 767 *(RTRCUINTPTR *)pRec->pRelocPos += delta; 768 break; 769 766 770 case FIXUP_ABSOLUTE: 767 771 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos)); … … 2648 2652 if (fAddFixup) 2649 2653 { 2650 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS) 2654 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, 2655 pPatch->pPatchJumpDestGC) != VINF_SUCCESS) 2651 2656 { 2652 2657 Log(("Relocation failed for the jump in the guest code!!\n")); … … 2664 2669 if (fAddFixup) 2665 2670 { 2666 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS) 2671 if 
(patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, 2672 pPatch->pPatchJumpDestGC) != VINF_SUCCESS) 2667 2673 { 2668 2674 Log(("Relocation failed for the jump in the guest code!!\n")); … … 2689 2695 if (fAddFixup) 2690 2696 { 2691 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS) 2697 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, 2698 PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS) 2692 2699 { 2693 2700 Log(("Relocation failed for the jump in the guest code!!\n")); … … 2783 2790 if (fAddFixup) 2784 2791 { 2785 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS) 2792 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, 2793 pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS) 2786 2794 { 2787 2795 Log(("Relocation failed for the jump in the guest code!!\n")); -
trunk/src/VBox/VMM/VMMR3/PATMA.asm
r54692 r54714 32 32 %include "VBox/err.mac" 33 33 %include "iprt/x86.mac" 34 %include "VBox/vmm/cpum.mac" 34 35 %include "VBox/vmm/vm.mac" 35 36 %include "PATMA.mac" … … 1723 1724 ; 1724 1725 BEGIN_PATCH g_patmCpuidRecord, PATMCpuidReplacement 1726 not dword [esp-16] ; probe stack before starting, just in case. 1727 not dword [esp-16] 1725 1728 mov dword [ss:PATM_INTERRUPTFLAG], 0 1726 1729 PATCH_FIXUP PATM_INTERRUPTFLAG 1727 1730 pushf 1728 1731 1729 cmp eax, PATM_CPUID_STD_MAX 1730 PATCH_FIXUP PATM_CPUID_STD_MAX 1731 jb cpuid_std 1732 cmp eax, 0x80000000 1733 jb cpuid_def 1734 cmp eax, PATM_CPUID_EXT_MAX 1735 PATCH_FIXUP PATM_CPUID_EXT_MAX 1736 jb cpuid_ext 1737 cmp eax, 0xc0000000 1738 jb cpuid_def 1739 cmp eax, PATM_CPUID_CENTAUR_MAX 1740 PATCH_FIXUP PATM_CPUID_CENTAUR_MAX 1741 jb cpuid_centaur 1742 1743 cpuid_def: 1744 mov eax, PATM_CPUID_DEF_PTR 1732 ;; @todo We could put all this stuff in a CPUM assembly function and simply call it. 1733 1734 ; Save the registers we use for passthru and sub-leaf matching (eax is not used). 1735 push edx 1736 push ecx 1737 push ebx 1738 1739 ; 1740 ; Perform a linear search of the strictly sorted CPUID leaf array. 1741 ; 1742 ; (Was going to do a binary search, but that ended up being complicated if 1743 ; we want a flexible leaf size. Linear search is probably good enough.) 1744 ; 1745 mov ebx, PATM_CPUID_ARRAY_PTR 1746 PATCH_FIXUP PATM_CPUID_ARRAY_PTR 1747 mov edx, PATM_CPUID_ARRAY_END_PTR 1748 PATCH_FIXUP PATM_CPUID_ARRAY_END_PTR 1749 cmp ebx, edx 1750 jae cpuid_unknown 1751 1752 cpuid_lookup_leaf: 1753 cmp eax, [ss:ebx + CPUMCPUIDLEAF.uLeaf] 1754 jbe cpuid_maybe_match_eax 1755 add ebx, PATM_CPUID_ARRAY_ENTRY_SIZE 1756 PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE 1757 cmp ebx, edx 1758 jb cpuid_lookup_leaf 1759 jmp cpuid_unknown 1760 1761 cpuid_maybe_match_eax: 1762 jne cpuid_unknown 1763 1764 ; Sub-leaf match too? 
1765 mov ecx, [esp + 4] 1766 and ecx, [ss:ebx + CPUMCPUIDLEAF.fSubLeafMask] 1767 cmp ecx, [ss:ebx + CPUMCPUIDLEAF.uSubLeaf] 1768 je cpuid_fetch 1769 1770 ; Search forward until we've got a matching sub-leaf (or not). 1771 cpuid_subleaf_lookup: 1772 add ebx, PATM_CPUID_ARRAY_ENTRY_SIZE 1773 PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE 1774 cmp ebx, edx 1775 jae cpuid_subleaf_not_found_sub_ebx 1776 cmp eax, [ss:ebx + CPUMCPUIDLEAF.uLeaf] 1777 jne cpuid_subleaf_not_found_sub_ebx 1778 cmp ecx, [ss:ebx + CPUMCPUIDLEAF.uSubLeaf] 1779 ja cpuid_subleaf_lookup 1780 je cpuid_fetch 1781 cpuid_subleaf_not_found_sub_ebx: 1782 sub ebx, PATM_CPUID_ARRAY_ENTRY_SIZE 1783 PATCH_FIXUP PATM_CPUID_ARRAY_ENTRY_SIZE 1784 1785 ; 1786 ; Out of range sub-leafs aren't quite as easy and pretty as we emulate them 1787 ; here, but we do an adequate job. 1788 ; 1789 cpuid_subleaf_not_found: 1790 mov ecx, [esp + 4] 1791 test dword [ss:ebx + CPUMCPUIDLEAF.fFlags], CPUMCPUIDLEAF_F_SUBLEAVES_ECX_UNCHANGED 1792 jnz cpuid_load_zeros_except_ecx 1793 cpuid_load_zeros: 1794 xor ecx, ecx 1795 cpuid_load_zeros_except_ecx: 1796 xor edx, edx 1797 xor eax, eax 1798 xor ebx, ebx 1799 jmp cpuid_done 1800 1801 ; 1802 ; Different CPUs have different ways of dealing with unknown CPUID leaves. 1803 ; 1804 cpuid_unknown: 1805 mov edx, PATM_CPUID_UNKNOWN_METHOD 1806 PATCH_FIXUP PATM_CPUID_UNKNOWN_METHOD 1807 cmp edx, CPUMUKNOWNCPUID_PASSTHRU 1808 je cpuid_unknown_passthru 1809 ; Load the default cpuid leaf. 
1810 cpuid_unknown_def_leaf: 1811 mov ebx, PATM_CPUID_DEF_PTR 1745 1812 PATCH_FIXUP PATM_CPUID_DEF_PTR 1746 jmp cpuid_fetch 1747 1748 cpuid_std: 1749 mov edx, PATM_CPUID_STD_PTR 1750 PATCH_FIXUP PATM_CPUID_STD_PTR 1751 jmp cpuid_calc 1752 1753 cpuid_ext: 1754 and eax, 0ffh 1755 mov edx, PATM_CPUID_EXT_PTR 1756 PATCH_FIXUP PATM_CPUID_EXT_PTR 1757 jmp cpuid_calc 1758 1759 cpuid_centaur: 1760 and eax, 0ffh 1761 mov edx, PATM_CPUID_CENTAUR_PTR 1762 PATCH_FIXUP PATM_CPUID_CENTAUR_PTR 1763 1764 cpuid_calc: 1765 lea eax, [ss:eax * 4] ; 4 entries... 1766 lea eax, [ss:eax * 4] ; 4 bytes each 1767 add eax, edx 1768 1813 mov edx, [ss:ebx + CPUMCPUID.uEdx] 1814 mov ecx, [ss:ebx + CPUMCPUID.uEcx] 1815 mov eax, [ss:ebx + CPUMCPUID.uEax] 1816 mov ebx, [ss:ebx + CPUMCPUID.uEbx] 1817 jmp cpuid_done 1818 ; Pass thru the input values unmodified (eax is still virgin). 1819 cpuid_unknown_passthru: 1820 mov edx, [esp + 8] 1821 mov ecx, [esp + 4] 1822 mov ebx, [esp] 1823 jmp cpuid_done 1824 1825 ; 1826 ; Normal return. 1827 ; 1769 1828 cpuid_fetch: 1770 mov edx, [ss:eax + 12] ; CPUMCPUID layout assumptions! 1771 mov ecx, [ss:eax + 8] 1772 mov ebx, [ss:eax + 4] 1773 mov eax, [ss:eax] 1774 1829 mov edx, [ss:ebx + CPUMCPUIDLEAF.uEdx] 1830 mov ecx, [ss:ebx + CPUMCPUIDLEAF.uEcx] 1831 mov eax, [ss:ebx + CPUMCPUIDLEAF.uEax] 1832 mov ebx, [ss:ebx + CPUMCPUIDLEAF.uEbx] 1833 1834 cpuid_done: 1835 add esp, 12 1775 1836 popf 1776 1837 mov dword [ss:PATM_INTERRUPTFLAG], 1 -
trunk/src/VBox/VMM/VMMR3/PATMA.mac
r54686 r54714 20 20 21 21 ;; @name Patch Fixup Types 22 ; @remarks These fixups types are part of the saved state. 22 23 ; @{ 23 24 %define PATM_VMFLAGS 0xF1ABCD00 … … 52 53 %define PATM_CALL_RETURN_ADDR 0xF1ABCD19 53 54 %define PATM_CPUID_CENTAUR_PTR 0xF1ABCD1a 55 %define PATM_CPUID_ARRAY_PTR 0xF1ABCD1b 56 %define PATM_CPUID_ARRAY_END_PTR 0xF1ABCD1c 57 %define PATM_CPUID_ARRAY_ENTRY_SIZE 0xF1ABCD1d 58 %define PATM_CPUID_UNKNOWN_METHOD 0xF1ABCD1e 59 54 60 55 61 ;/* Anything larger doesn't require a fixup */ -
trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp
r54688 r54714 94 94 PRELOCREC pRec; 95 95 96 Assert(uType == FIXUP_ABSOLUTE || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest)); 96 Assert( uType == FIXUP_ABSOLUTE 97 || (uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL && pSource == pDest && PATM_IS_FIXUP_TYPE(pSource)) 98 || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest)); 97 99 98 100 LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%RRv source=%RRv dest=%RRv\n", uType, pRelocHC - pVM->patm.s.pPatchMemGC + pVM->patm.s.pPatchMemGC , pSource, pDest)); … … 162 164 #endif 163 165 164 /* *165 * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING A SAVED STATE WITH166 * A DIFFERENT HYPERVISOR LAYOUT.166 /* 167 * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING 168 * A SAVED STATE WITH A DIFFERENT HYPERVISOR LAYOUT. 167 169 */ 168 170 switch (pAsmRecord->aRelocs[i].uType) 169 171 { 170 case PATM_VMFLAGS: 171 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags); 172 break; 173 174 case PATM_PENDINGACTION: 175 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction); 176 break; 177 178 case PATM_FIXUP: 179 /* Offset in aRelocs[i].uInfo is from the base of the function. */ 180 dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->aRelocs[i].uInfo 181 + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC); 182 break; 172 /* 173 * PATMGCSTATE member fixups. 
174 */ 175 case PATM_VMFLAGS: 176 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags); 177 break; 178 case PATM_PENDINGACTION: 179 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction); 180 break; 181 case PATM_STACKPTR: 182 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp); 183 break; 184 case PATM_INTERRUPTFLAG: 185 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF); 186 break; 187 case PATM_INHIBITIRQADDR: 188 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts); 189 break; 190 case PATM_TEMP_EAX: 191 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX); 192 break; 193 case PATM_TEMP_ECX: 194 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX); 195 break; 196 case PATM_TEMP_EDI: 197 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI); 198 break; 199 case PATM_TEMP_EFLAGS: 200 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags); 201 break; 202 case PATM_TEMP_RESTORE_FLAGS: 203 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags); 204 break; 205 case PATM_CALL_PATCH_TARGET_ADDR: 206 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr); 207 break; 208 case PATM_CALL_RETURN_ADDR: 209 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr); 210 break; 183 211 #ifdef VBOX_WITH_STATISTICS 184 case PATM_ALLPATCHCALLS: 185 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls); 186 break; 187 188 case PATM_IRETEFLAGS: 189 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags); 190 break; 191 192 case PATM_IRETCS: 193 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS); 194 break; 195 196 case PATM_IRETEIP: 197 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP); 198 break; 199 200 case PATM_PERPATCHCALLS: 201 dest = patmPatchQueryStatAddress(pVM, pPatch); 202 break; 212 case 
PATM_ALLPATCHCALLS: 213 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls); 214 break; 215 case PATM_IRETEFLAGS: 216 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags); 217 break; 218 case PATM_IRETCS: 219 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS); 220 break; 221 case PATM_IRETEIP: 222 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP); 223 break; 203 224 #endif 204 case PATM_STACKPTR: 205 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp); 206 break; 207 208 /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd 209 * part to store the original return addresses. 210 */ 211 case PATM_STACKBASE: 212 dest = pVM->patm.s.pGCStackGC; 213 break; 214 215 case PATM_STACKBASE_GUEST: 216 dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE; 217 break; 218 219 case PATM_RETURNADDR: /* absolute guest address; no fixup required */ 220 Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP); 221 dest = pCallInfo->pReturnGC; 222 break; 223 224 case PATM_PATCHNEXTBLOCK: /* relative address of instruction following this block */ 225 Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP); 226 227 /** @note hardcoded assumption that we must return to the instruction following this block */ 228 dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->cbFunction; 229 break; 230 231 case PATM_CALLTARGET: /* relative to patch address; no fixup required */ 232 Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP); 233 234 /* Address must be filled in later. 
(see patmr3SetBranchTargets) */ 235 patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL); 236 dest = PATM_ILLEGAL_DESTINATION; 237 break; 238 239 case PATM_PATCHBASE: /* Patch GC base address */ 240 dest = pVM->patm.s.pPatchMemGC; 241 break; 242 243 case PATM_CPUID_STD_PTR: 244 dest = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM); 245 break; 246 247 case PATM_CPUID_EXT_PTR: 248 dest = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM); 249 break; 250 251 case PATM_CPUID_CENTAUR_PTR: 252 dest = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM); 253 break; 254 255 case PATM_CPUID_DEF_PTR: 256 dest = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM); 257 break; 258 259 case PATM_CPUID_STD_MAX: 260 dest = CPUMR3GetGuestCpuIdPatmStdMax(pVM); 261 break; 262 263 case PATM_CPUID_EXT_MAX: 264 dest = CPUMR3GetGuestCpuIdPatmExtMax(pVM); 265 break; 266 267 case PATM_CPUID_CENTAUR_MAX: 268 dest = CPUMR3GetGuestCpuIdPatmCentaurMax(pVM); 269 break; 270 271 case PATM_INTERRUPTFLAG: 272 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF); 273 break; 274 275 case PATM_INHIBITIRQADDR: 276 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts); 277 break; 278 279 case PATM_NEXTINSTRADDR: 280 Assert(pCallInfo); 281 /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */ 282 dest = pCallInfo->pNextInstrGC; 283 break; 284 285 case PATM_CURINSTRADDR: 286 Assert(pCallInfo); 287 dest = pCallInfo->pCurInstrGC; 288 break; 289 290 case PATM_VM_FORCEDACTIONS: 291 /* @todo dirty assumptions when correcting this fixup during saved state loading. 
*/ 292 dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions); 293 break; 294 295 case PATM_TEMP_EAX: 296 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX); 297 break; 298 case PATM_TEMP_ECX: 299 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX); 300 break; 301 case PATM_TEMP_EDI: 302 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI); 303 break; 304 case PATM_TEMP_EFLAGS: 305 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags); 306 break; 307 case PATM_TEMP_RESTORE_FLAGS: 308 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags); 309 break; 310 case PATM_CALL_PATCH_TARGET_ADDR: 311 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr); 312 break; 313 case PATM_CALL_RETURN_ADDR: 314 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr); 315 break; 316 317 /* Relative address of global patm lookup and call function. */ 318 case PATM_LOOKUP_AND_CALL_FUNCTION: 319 { 320 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC); 321 Assert(pVM->patm.s.pfnHelperCallGC); 322 Assert(sizeof(uint32_t) == sizeof(RTRCPTR)); 323 324 /* Relative value is target minus address of instruction after the actual call instruction. */ 325 dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall; 326 break; 327 } 328 329 case PATM_RETURN_FUNCTION: 330 { 331 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC); 332 Assert(pVM->patm.s.pfnHelperRetGC); 333 Assert(sizeof(uint32_t) == sizeof(RTRCPTR)); 334 335 /* Relative value is target minus address of instruction after the actual call instruction. 
*/ 336 dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall; 337 break; 338 } 339 340 case PATM_IRET_FUNCTION: 341 { 342 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC); 343 Assert(pVM->patm.s.pfnHelperIretGC); 344 Assert(sizeof(uint32_t) == sizeof(RTRCPTR)); 345 346 /* Relative value is target minus address of instruction after the actual call instruction. */ 347 dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall; 348 break; 349 } 350 351 case PATM_LOOKUP_AND_JUMP_FUNCTION: 352 { 353 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC); 354 Assert(pVM->patm.s.pfnHelperJumpGC); 355 Assert(sizeof(uint32_t) == sizeof(RTRCPTR)); 356 357 /* Relative value is target minus address of instruction after the actual call instruction. */ 358 dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall; 359 break; 360 } 361 362 default: 363 dest = PATM_ILLEGAL_DESTINATION; 364 AssertRelease(0); 365 break; 225 226 227 case PATM_FIXUP: 228 /* Offset in aRelocs[i].uInfo is from the base of the function. */ 229 dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->aRelocs[i].uInfo 230 + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC); 231 break; 232 233 #ifdef VBOX_WITH_STATISTICS 234 case PATM_PERPATCHCALLS: 235 dest = patmPatchQueryStatAddress(pVM, pPatch); 236 break; 237 #endif 238 239 /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd 240 * part to store the original return addresses. 
241 */ 242 case PATM_STACKBASE: 243 dest = pVM->patm.s.pGCStackGC; 244 break; 245 246 case PATM_STACKBASE_GUEST: 247 dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE; 248 break; 249 250 case PATM_RETURNADDR: /* absolute guest address; no fixup required */ 251 Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP); 252 dest = pCallInfo->pReturnGC; 253 break; 254 255 case PATM_PATCHNEXTBLOCK: /* relative address of instruction following this block */ 256 Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP); 257 258 /** @note hardcoded assumption that we must return to the instruction following this block */ 259 dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->cbFunction; 260 break; 261 262 case PATM_CALLTARGET: /* relative to patch address; no fixup required */ 263 Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP); 264 265 /* Address must be filled in later. (see patmr3SetBranchTargets) */ 266 patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL); 267 dest = PATM_ILLEGAL_DESTINATION; 268 break; 269 270 case PATM_PATCHBASE: /* Patch GC base address */ 271 dest = pVM->patm.s.pPatchMemGC; 272 break; 273 274 case PATM_NEXTINSTRADDR: 275 Assert(pCallInfo); 276 /* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */ 277 dest = pCallInfo->pNextInstrGC; 278 break; 279 280 case PATM_CURINSTRADDR: 281 Assert(pCallInfo); 282 dest = pCallInfo->pCurInstrGC; 283 break; 284 285 /* Relative address of global patm lookup and call function. */ 286 case PATM_LOOKUP_AND_CALL_FUNCTION: 287 { 288 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC 289 + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC); 290 Assert(pVM->patm.s.pfnHelperCallGC); 291 Assert(sizeof(uint32_t) == sizeof(RTRCPTR)); 292 293 /* Relative value is target minus address of instruction after the actual call instruction. 
*/ 294 dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall; 295 break; 296 } 297 298 case PATM_RETURN_FUNCTION: 299 { 300 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC 301 + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC); 302 Assert(pVM->patm.s.pfnHelperRetGC); 303 Assert(sizeof(uint32_t) == sizeof(RTRCPTR)); 304 305 /* Relative value is target minus address of instruction after the actual call instruction. */ 306 dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall; 307 break; 308 } 309 310 case PATM_IRET_FUNCTION: 311 { 312 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC 313 + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC); 314 Assert(pVM->patm.s.pfnHelperIretGC); 315 Assert(sizeof(uint32_t) == sizeof(RTRCPTR)); 316 317 /* Relative value is target minus address of instruction after the actual call instruction. */ 318 dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall; 319 break; 320 } 321 322 case PATM_LOOKUP_AND_JUMP_FUNCTION: 323 { 324 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC 325 + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC); 326 Assert(pVM->patm.s.pfnHelperJumpGC); 327 Assert(sizeof(uint32_t) == sizeof(RTRCPTR)); 328 329 /* Relative value is target minus address of instruction after the actual call instruction. */ 330 dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall; 331 break; 332 } 333 334 case PATM_CPUID_STD_MAX: /* saved state only */ 335 dest = CPUMR3GetGuestCpuIdPatmStdMax(pVM); 336 break; 337 case PATM_CPUID_EXT_MAX: /* saved state only */ 338 dest = CPUMR3GetGuestCpuIdPatmExtMax(pVM); 339 break; 340 case PATM_CPUID_CENTAUR_MAX: /* saved state only */ 341 dest = CPUMR3GetGuestCpuIdPatmCentaurMax(pVM); 342 break; 343 344 /* 345 * The following fixups needs to be recalculated when loading saved state 346 * Note! Earlier saved state versions had different hacks for detecting these. 
347 */ 348 case PATM_VM_FORCEDACTIONS: 349 dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions); 350 break; 351 case PATM_CPUID_DEF_PTR: 352 dest = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM); 353 break; 354 case PATM_CPUID_ARRAY_PTR: 355 dest = CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM); 356 break; 357 case PATM_CPUID_ARRAY_END_PTR: 358 dest = CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM); 359 break; 360 case PATM_CPUID_ARRAY_ENTRY_SIZE: 361 dest = sizeof(CPUMCPUIDLEAF); 362 break; 363 case PATM_CPUID_UNKNOWN_METHOD: 364 dest = CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM); 365 break; 366 367 case PATM_CPUID_STD_PTR: /* saved state only */ 368 dest = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM); 369 break; 370 case PATM_CPUID_EXT_PTR: /* saved state only */ 371 dest = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM); 372 break; 373 case PATM_CPUID_CENTAUR_PTR: /* saved state only */ 374 dest = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM); 375 break; 376 377 default: 378 dest = PATM_ILLEGAL_DESTINATION; 379 AssertReleaseFailed(); 380 break; 366 381 } 367 382 … … 369 384 if (pAsmRecord->aRelocs[i].uType < PATM_NO_FIXUP) 370 385 { 371 patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE); 386 patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL, 387 pAsmRecord->aRelocs[i].uType /*pSources*/, pAsmRecord->aRelocs[i].uType /*pDest*/); 372 388 } 373 389 break; -
trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp
r54688 r54714 561 561 rec.Core.Key = 0; 562 562 563 if (rec.uType == FIXUP_ABSOLUTE)564 {565 /* Core.Key abused to store the fixup type. */566 if (*pFixup == pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))567 rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPU_FF_ACTION;568 else if (*pFixup == CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM))569 rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_DEFAULT;570 else if (*pFixup == CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM))571 rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_STANDARD;572 else if (*pFixup == CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM))573 rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_EXTENDED;574 else if (*pFixup == CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM))575 rec.Core.Key = (AVLPVKEY)PATM_FIXUP_CPUID_CENTAUR;576 }577 563 578 564 /* Save the lookup record. */ … … 1117 1103 { 1118 1104 case FIXUP_ABSOLUTE: 1105 case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL: 1119 1106 { 1120 if (pRec->pSource && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource)) 1107 Assert( pRec->uType != PATM_SAVED_STATE_VERSION_NO_RAW_MEM 1108 || (pRec->pSource == pRec->pDest && PATM_IS_FIXUP_TYPE(pRec->pSource)) ); 1109 1110 /* bird: What is this for exactly? Only the MMIO fixups used to have pSource set. */ 1111 if ( pRec->pSource 1112 && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource) 1113 && pRec->uType != FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL) 1121 1114 break; 1122 1115 … … 1255 1248 *pFixup = (uFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC; 1256 1249 } 1257 /* Boldly ASSUMES: 1250 /* 1251 * For PATM_SAVED_STATE_VERSION_FIXUP_HACK and earlier boldly ASSUME: 1258 1252 * 1. That pCPUMCtxGC is in the VM structure and that its location is 1259 1253 * at the first page of the same 4 MB chunk. 1260 1254 * 2. That the forced actions were in the first 32 bytes of the VM 1261 1255 * structure. 1262 * 3. That the CPUM leafs are less than 8KB into the structure. */ 1256 * 3. That the CPUM leafs are less than 8KB into the structure. 
1257 */ 1263 1258 else if ( uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK 1264 1259 && uFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(32)) … … 1266 1261 LogFlow(("Changing fLocalForcedActions fixup from %RRv to %RRv\n", uFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions))); 1267 1262 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions); 1263 pRec->pSource = pRec->pDest = PATM_VM_FORCEDACTIONS; 1264 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL; 1268 1265 } 1269 1266 else if ( uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK … … 1272 1269 static int cCpuidFixup = 0; 1273 1270 1274 /* very dirty assumptions about the cpuid patch and cpuid ordering. */1271 /* Very dirty assumptions about the cpuid patch and cpuid ordering. */ 1275 1272 switch (cCpuidFixup & 3) 1276 1273 { 1277 1274 case 0: 1278 1275 *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM); 1276 pRec->pSource = pRec->pDest = PATM_CPUID_DEF_PTR; 1277 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL; 1279 1278 break; 1280 1279 case 1: 1281 1280 *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM); 1281 pRec->pSource = pRec->pDest = PATM_CPUID_STD_PTR; 1282 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL; 1282 1283 break; 1283 1284 case 2: 1284 1285 *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM); 1286 pRec->pSource = pRec->pDest = PATM_CPUID_EXT_PTR; 1287 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL; 1285 1288 break; 1286 1289 case 3: 1287 1290 *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM); 1291 pRec->pSource = pRec->pDest = PATM_CPUID_CENTAUR_PTR; 1292 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL; 1288 1293 break; 1289 1294 } … … 1291 1296 cCpuidFixup++; 1292 1297 } 1293 else if (uVersion >= PATM_SAVED_STATE_VERSION_MEM) 1298 /* 1299 * For PATM_SAVED_STATE_VERSION_MEM thru PATM_SAVED_STATE_VERSION_NO_RAW_MEM 1300 * we abused Core.Key to store the type for fixups needing correcting on load. 
1301 */ 1302 else if ( uVersion >= PATM_SAVED_STATE_VERSION_MEM 1303 && uVersion <= PATM_SAVED_STATE_VERSION_NO_RAW_MEM) 1294 1304 { 1295 1305 /* Core.Key abused to store the type of fixup. */ … … 1298 1308 case PATM_FIXUP_CPU_FF_ACTION: 1299 1309 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions); 1310 pRec->pSource = pRec->pDest = PATM_VM_FORCEDACTIONS; 1311 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL; 1300 1312 LogFlow(("Changing cpu ff action fixup from %x to %x\n", uFixup, *pFixup)); 1301 1313 break; 1302 1314 case PATM_FIXUP_CPUID_DEFAULT: 1303 1315 *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM); 1316 pRec->pSource = pRec->pDest = PATM_CPUID_DEF_PTR; 1317 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL; 1304 1318 LogFlow(("Changing cpuid def fixup from %x to %x\n", uFixup, *pFixup)); 1305 1319 break; 1306 1320 case PATM_FIXUP_CPUID_STANDARD: 1307 1321 *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM); 1322 pRec->pSource = pRec->pDest = PATM_CPUID_STD_PTR; 1323 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL; 1308 1324 LogFlow(("Changing cpuid std fixup from %x to %x\n", uFixup, *pFixup)); 1309 1325 break; 1310 1326 case PATM_FIXUP_CPUID_EXTENDED: 1311 1327 *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM); 1328 pRec->pSource = pRec->pDest = PATM_CPUID_EXT_PTR; 1329 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL; 1312 1330 LogFlow(("Changing cpuid ext fixup from %x to %x\n", uFixup, *pFixup)); 1313 1331 break; 1314 1332 case PATM_FIXUP_CPUID_CENTAUR: 1315 1333 *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM); 1334 pRec->pSource = pRec->pDest = PATM_CPUID_CENTAUR_PTR; 1335 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL; 1316 1336 LogFlow(("Changing cpuid centaur fixup from %x to %x\n", uFixup, *pFixup)); 1317 1337 break; … … 1319 1339 AssertMsgFailed(("Unexpected fixup value %p\n", (uintptr_t)pRec->Core.Key)); 1320 1340 break; 1341 } 1342 } 1343 /* 1344 * After PATM_SAVED_STATE_VERSION_NO_RAW_MEM we changed the fixup type 1345 * and instead put 
the patch fixup code in the source and target addresses. 1346 */ 1347 else if ( uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM 1348 && pRec->uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL) 1349 { 1350 Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_FIXUP_TYPE(pRec->pSource)); 1351 switch (pRec->pSource) 1352 { 1353 case PATM_VM_FORCEDACTIONS: 1354 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions); 1355 break; 1356 case PATM_CPUID_DEF_PTR: 1357 *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM); 1358 break; 1359 case PATM_CPUID_ARRAY_PTR: 1360 *pFixup = CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM); 1361 break; 1362 case PATM_CPUID_ARRAY_END_PTR: 1363 *pFixup = CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM); 1364 break; 1365 case PATM_CPUID_ARRAY_ENTRY_SIZE: 1366 *pFixup = sizeof(CPUMCPUIDLEAF); 1367 break; 1368 case PATM_CPUID_UNKNOWN_METHOD: 1369 *pFixup = CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM); 1370 break; 1371 case PATM_CPUID_STD_PTR: /* Saved again patches only. */ 1372 *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM); 1373 break; 1374 case PATM_CPUID_EXT_PTR: /* Saved again patches only. */ 1375 *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM); 1376 break; 1377 case PATM_CPUID_CENTAUR_PTR: /* Saved again patches only. */ 1378 *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM); 1379 break; 1321 1380 } 1322 1381 } -
trunk/src/VBox/VMM/include/PATMA.h
r54687 r54714 20 20 21 21 /** @name Patch Fixup Types 22 * @remarks These fixups types are part of the saved state. 22 23 * @{ */ 23 24 #define PATM_VMFLAGS 0xF1ABCD00 … … 34 35 #define PATM_FIXUP 0xF1ABCD07 35 36 #define PATM_PENDINGACTION 0xF1ABCD08 36 #define PATM_CPUID_STD_PTR 0xF1ABCD09 37 #define PATM_CPUID_EXT_PTR 0xF1ABCD0a 37 #define PATM_CPUID_STD_PTR 0xF1ABCD09 /**< Legacy, saved state only. */ 38 #define PATM_CPUID_EXT_PTR 0xF1ABCD0a /**< Legacy, saved state only. */ 38 39 #define PATM_CPUID_DEF_PTR 0xF1ABCD0b 39 40 #define PATM_STACKBASE 0xF1ABCD0c /**< Stack to store our private patch return addresses */ … … 51 52 #define PATM_CALL_PATCH_TARGET_ADDR 0xF1ABCD18 52 53 #define PATM_CALL_RETURN_ADDR 0xF1ABCD19 53 #define PATM_CPUID_CENTAUR_PTR 0xF1ABCD1a 54 #define PATM_CPUID_CENTAUR_PTR 0xF1ABCD1a /**< Legacy, saved state only. */ 55 #define PATM_CPUID_ARRAY_PTR 0xF1ABCD1b 56 #define PATM_CPUID_ARRAY_END_PTR 0xF1ABCD1c 57 #define PATM_CPUID_ARRAY_ENTRY_SIZE 0xF1ABCD1d 58 #define PATM_CPUID_UNKNOWN_METHOD 0xF1ABCD1e 54 59 55 60 /* Anything larger doesn't require a fixup */ … … 67 72 #define PATM_IRET_FUNCTION 0xF1ABCE0A /**< Relative address of global PATM iret function. */ 68 73 #define PATM_CPUID_CENTAUR_MAX 0xF1ABCE0B 74 75 /** Identifies an patch fixup type value (with reasonable accuracy). */ 76 #define PATM_IS_FIXUP_TYPE(a_uValue) \ 77 ( ((a_uValue) & UINT32_C(0xfffffC00)) == UINT32_C(0xF1ABCC00) && ((a_uValue) & UINT32_C(0xff)) < UINT32_C(0x30) ) 69 78 /** @} */ 70 79 -
trunk/src/VBox/VMM/include/PATMInternal.h
r54688 r54714 32 32 /** @name Saved state version numbers. 33 33 * @{ */ 34 /** New fixup type FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL. */ 35 #define PATM_SAVED_STATE_VERSION 57 34 36 /** Uses normal structure serialization with markers and everything. */ 35 #define PATM_SAVED_STATE_VERSION 37 #define PATM_SAVED_STATE_VERSION_NO_RAW_MEM 56 36 38 /** Last version which saves structures as raw memory. */ 37 39 #define PATM_SAVED_STATE_VERSION_MEM 55 … … 98 100 #define PATM_MAX_INVALID_WRITES 16384 99 101 102 /** @name FIXUP_XXX - RELOCREC::uType values. 103 * @{ */ 104 /** Absolute fixup. With one exception (MMIO cache), this does not take any 105 * source or destination. @sa FIXUP_ABSOLUTE_ASM. */ 100 106 #define FIXUP_ABSOLUTE 0 101 107 #define FIXUP_REL_JMPTOPATCH 1 102 108 #define FIXUP_REL_JMPTOGUEST 2 109 /** Absolute fixup in patch assembly code template. 110 * 111 * The source and desination addresses both set to the patch fixup type (see 112 * PATM_IS_FIXUP_TYPE and friends in PATMA.h). This is recent addition (CPUID 113 * subleaf code), so when loading older saved states this is usally represented 114 * as FIXUP_ABSOLUTE. */ 115 #define FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL 3 116 /** @} */ 117 103 118 104 119 #define PATM_ILLEGAL_DESTINATION 0xDEADBEEF -
trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp
r51288 r54714 4673 4673 szNameC, 4674 4674 CPUMR3CpuIdUnknownLeafMethodName(enmUnknownMethod), 4675 DefUnknown. eax,4676 DefUnknown. ebx,4677 DefUnknown. ecx,4678 DefUnknown. edx,4675 DefUnknown.uEax, 4676 DefUnknown.uEbx, 4677 DefUnknown.uEcx, 4678 DefUnknown.uEdx, 4679 4679 szMsrMask, 4680 4680 szNameC,
Note:
See TracChangeset
for help on using the changeset viewer.