Changeset 107650 in vbox

Timestamp: Jan 10, 2025 1:42:28 PM
Location:  trunk
Files:     25 edited
trunk/include/VBox/vmm/cpum-armv8.h
(r107389 → r107650)

…
-/**
- * CPU ID registers.
- */
-typedef struct CPUMIDREGS
-{
-    /** Content of the ID_AA64PFR0_EL1 register. */
-    uint64_t u64RegIdAa64Pfr0El1;
-    /** Content of the ID_AA64PFR1_EL1 register. */
-    uint64_t u64RegIdAa64Pfr1El1;
-    /** Content of the ID_AA64DFR0_EL1 register. */
-    uint64_t u64RegIdAa64Dfr0El1;
-    /** Content of the ID_AA64DFR1_EL1 register. */
-    uint64_t u64RegIdAa64Dfr1El1;
-    /** Content of the ID_AA64AFR0_EL1 register. */
-    uint64_t u64RegIdAa64Afr0El1;
-    /** Content of the ID_AA64AFR1_EL1 register. */
-    uint64_t u64RegIdAa64Afr1El1;
-    /** Content of the ID_AA64ISAR0_EL1 register. */
-    uint64_t u64RegIdAa64Isar0El1;
-    /** Content of the ID_AA64ISAR1_EL1 register. */
-    uint64_t u64RegIdAa64Isar1El1;
-    /** Content of the ID_AA64ISAR2_EL1 register. */
-    uint64_t u64RegIdAa64Isar2El1;
-    /** Content of the ID_AA64MMFR0_EL1 register. */
-    uint64_t u64RegIdAa64Mmfr0El1;
-    /** Content of the ID_AA64MMFR1_EL1 register. */
-    uint64_t u64RegIdAa64Mmfr1El1;
-    /** Content of the ID_AA64MMFR2_EL1 register. */
-    uint64_t u64RegIdAa64Mmfr2El1;
-    /** Content of the CLIDR_EL1 register. */
-    uint64_t u64RegClidrEl1;
-    /** Content of the CTR_EL0 register. */
-    uint64_t u64RegCtrEl0;
-    /** Content of the DCZID_EL0 register. */
-    uint64_t u64RegDczidEl0;
-} CPUMIDREGS;
-/** Pointer to CPU ID registers. */
-typedef CPUMIDREGS *PCPUMIDREGS;
-/** Pointer to a const CPU ID registers structure. */
-typedef CPUMIDREGS const *PCCPUMIDREGS;
-
 /** @name Changed flags.
…
 VMMR3DECL(int)      CPUMR3SysRegRangesInsert(PVM pVM, PCCPUMSYSREGRANGE pNewRange);
-VMMR3DECL(int)      CPUMR3PopulateFeaturesByIdRegisters(PVM pVM, PCCPUMIDREGS pIdRegs);
+VMMR3DECL(int)      CPUMR3PopulateFeaturesByIdRegisters(PVM pVM, PCCPUMARMV8IDREGS pIdRegs);

-VMMR3_INT_DECL(int) CPUMR3QueryGuestIdRegs(PVM pVM, PCCPUMIDREGS *ppIdRegs);
+VMMR3_INT_DECL(int) CPUMR3QueryGuestIdRegs(PVM pVM, PCCPUMARMV8IDREGS *ppIdRegs);

 /** @} */
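For orientation, here is a minimal sketch of how ring-3 code might consume the renamed type through these two entry points; the logging and error handling are illustrative, not part of the changeset:

    #include <VBox/vmm/cpum.h>
    #include <VBox/log.h>

    static int logGuestMmfr0(PVM pVM)
    {
        PCCPUMARMV8IDREGS pIdRegs = NULL;
        int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegs);   /* const view of the guest ID registers */
        if (RT_SUCCESS(rc))
            LogRel(("CPUM: guest ID_AA64MMFR0_EL1=%#RX64\n", pIdRegs->u64RegIdAa64Mmfr0El1));
        return rc;
    }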
trunk/include/VBox/vmm/cpum-x86-amd64.h
(r107389 → r107650)

 #ifndef VBOX_FOR_DTRACE_LIB

-#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
-VMMDECL(int)            CPUMCpuIdCollectLeavesX86(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
-VMMDECL(CPUMCPUVENDOR)  CPUMCpuIdDetectX86VendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
-#endif
-
 VMM_INT_DECL(bool)      CPUMAssertGuestRFlagsCookie(PVM pVM, PVMCPU pVCpu);
…
 /** @} */

-VMMDECL(bool)           CPUMSupportsXSave(PVM pVM);
 VMMDECL(bool)           CPUMIsHostUsingSysEnter(PVM pVM);
 VMMDECL(bool)           CPUMIsHostUsingSysCall(PVM pVM);
…
 VMMDECL(CPUMMICROARCH)  CPUMCpuIdDetermineX86MicroarchEx(CPUMCPUVENDOR enmVendor, uint8_t bFamily,
                                                          uint8_t bModel, uint8_t bStepping);
-VMMDECL(const char *)   CPUMMicroarchName(CPUMMICROARCH enmMicroarch);
 VMMR3DECL(int)          CPUMR3CpuIdDetectUnknownLeafMethod(PCPUMUNKNOWNCPUID penmUnknownMethod, PCPUMCPUID pDefUnknown);
 VMMR3DECL(const char *) CPUMR3CpuIdUnknownLeafMethodName(CPUMUNKNOWNCPUID enmUnknownMethod);
trunk/include/VBox/vmm/cpum.h
(r107389 → r107650)

 typedef struct CPUMFEATURESCOMMON
 {
-    /** The CPU vendor (CPUMCPUVENDOR). */
-    uint8_t enmCpuVendor;
-    /** The CPU family. */
-    uint8_t uFamily;
-    /** The CPU model. */
-    uint8_t uModel;
-    /** The CPU stepping. */
-    uint8_t uStepping;
     /** The microarchitecture. */
 #ifndef VBOX_FOR_DTRACE_LIB
     CPUMMICROARCH enmMicroarch;
 #else
     uint32_t enmMicroarch;
 #endif
+    /** The CPU vendor (CPUMCPUVENDOR). */
+    uint8_t enmCpuVendor;
     /** The maximum physical address width of the CPU. */
     uint8_t cMaxPhysAddrWidth;
…
 typedef struct CPUMFEATURESX86
 {
+    /** The microarchitecture. */
+#ifndef VBOX_FOR_DTRACE_LIB
+    CPUMMICROARCH enmMicroarch;
+#else
+    uint32_t enmMicroarch;
+#endif
     /** The CPU vendor (CPUMCPUVENDOR). */
     uint8_t enmCpuVendor;
+    /** The maximum physical address width of the CPU. */
+    uint8_t cMaxPhysAddrWidth;
+    /** The maximum linear address width of the CPU. */
+    uint8_t cMaxLinearAddrWidth;
+
     /** The CPU family. */
     uint8_t uFamily;
…
     /** The CPU stepping. */
     uint8_t uStepping;
-    /** The microarchitecture. */
-#ifndef VBOX_FOR_DTRACE_LIB
-    CPUMMICROARCH enmMicroarch;
-#else
-    uint32_t enmMicroarch;
-#endif
-    /** The maximum physical address width of the CPU. */
-    uint8_t cMaxPhysAddrWidth;
-    /** The maximum linear address width of the CPU. */
-    uint8_t cMaxLinearAddrWidth;
     /** Max size of the extended state (or FPU state if no XSAVE). */
     uint16_t cbMaxExtendedState;
…
     /** VMX: Padding / reserved for future, making it a total of 128 bits. */
     uint32_t fVmxPadding1;
+    uint32_t auPadding[4];
 } CPUMFEATURESX86;
 #ifndef VBOX_FOR_DTRACE_LIB
-AssertCompileSize(CPUMFEATURESX86, 48);
+AssertCompileSize(CPUMFEATURESX86, 64);
 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, enmCpuVendor,       CPUMFEATURESX86, enmCpuVendor);
-AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uFamily,            CPUMFEATURESX86, uFamily);
-AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uModel,             CPUMFEATURESX86, uModel);
-AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uStepping,          CPUMFEATURESX86, uStepping);
 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, enmMicroarch,       CPUMFEATURESX86, enmMicroarch);
 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, cMaxPhysAddrWidth,  CPUMFEATURESX86, cMaxPhysAddrWidth);
…
 typedef struct CPUMFEATURESARMV8
 {
-    /** The CPU vendor (CPUMCPUVENDOR). */
-    uint8_t enmCpuVendor;
-    /** The CPU family. */
-    uint8_t uFamily;
-    /** The CPU model. */
-    uint8_t uModel;
-    /** The CPU stepping. */
-    uint8_t uStepping;
     /** The microarchitecture. */
 #ifndef VBOX_FOR_DTRACE_LIB
     CPUMMICROARCH enmMicroarch;
 #else
     uint32_t enmMicroarch;
 #endif
+    /** The CPU vendor (CPUMCPUVENDOR). */
+    uint8_t enmCpuVendor;
     /** The maximum physical address width of the CPU. */
     uint8_t cMaxPhysAddrWidth;
     /** The maximum linear address width of the CPU. */
     uint8_t cMaxLinearAddrWidth;
-    uint16_t uPadding;
+
+    /** The CPU implementer value (from MIDR_EL1). */
+    uint8_t uImplementeter;
+    /** The CPU part number (from MIDR_EL1). */
+    uint16_t uPartNum;
+    /** The CPU variant (from MIDR_EL1). */
+    uint8_t uVariant;
+    /** The CPU revision (from MIDR_EL1). */
+    uint8_t uRevision;

     /** @name Granule sizes supported.
…
     /** @} */

-    /** Padding to the required size to match CPUMFEATURES for x86/amd64. */
-    uint8_t abPadding[4];
+    /** Padding to the required size to match CPUMFEATURESX86. */
+    uint32_t auPadding[5];
 } CPUMFEATURESARMV8;
 #ifndef VBOX_FOR_DTRACE_LIB
-AssertCompileSize(CPUMFEATURESARMV8, 48);
+AssertCompileSize(CPUMFEATURESARMV8, 64);
+AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, enmMicroarch,       CPUMFEATURESARMV8, enmMicroarch);
 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, enmCpuVendor,       CPUMFEATURESARMV8, enmCpuVendor);
-AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uFamily,            CPUMFEATURESARMV8, uFamily);
-AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uModel,             CPUMFEATURESARMV8, uModel);
-AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, uStepping,          CPUMFEATURESARMV8, uStepping);
-AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, enmMicroarch,       CPUMFEATURESARMV8, enmMicroarch);
 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, cMaxPhysAddrWidth,  CPUMFEATURESARMV8, cMaxPhysAddrWidth);
 AssertCompileMembersAtSameOffset(CPUMFEATURESCOMMON, cMaxLinearAddrWidth, CPUMFEATURESARMV8, cMaxLinearAddrWidth);
…
     s;
 } CPUHOSTFEATURES;
+#ifndef VBOX_FOR_DTRACE_LIB
+AssertCompileSize(CPUHOSTFEATURES, 64);
+#endif
 /** Pointer to a const host CPU feature structure. */
 typedef CPUHOSTFEATURES const *PCCPUHOSTFEATURES;
…

+
+/**
+ * ARMv8 CPU ID registers.
+ */
+typedef struct CPUMARMV8IDREGS
+{
+    /** Content of the ID_AA64PFR0_EL1 register. */
+    uint64_t u64RegIdAa64Pfr0El1;
+    /** Content of the ID_AA64PFR1_EL1 register. */
+    uint64_t u64RegIdAa64Pfr1El1;
+    /** Content of the ID_AA64DFR0_EL1 register. */
+    uint64_t u64RegIdAa64Dfr0El1;
+    /** Content of the ID_AA64DFR1_EL1 register. */
+    uint64_t u64RegIdAa64Dfr1El1;
+    /** Content of the ID_AA64AFR0_EL1 register. */
+    uint64_t u64RegIdAa64Afr0El1;
+    /** Content of the ID_AA64AFR1_EL1 register. */
+    uint64_t u64RegIdAa64Afr1El1;
+    /** Content of the ID_AA64ISAR0_EL1 register. */
+    uint64_t u64RegIdAa64Isar0El1;
+    /** Content of the ID_AA64ISAR1_EL1 register. */
+    uint64_t u64RegIdAa64Isar1El1;
+    /** Content of the ID_AA64ISAR2_EL1 register. */
+    uint64_t u64RegIdAa64Isar2El1;
+    /** Content of the ID_AA64MMFR0_EL1 register. */
+    uint64_t u64RegIdAa64Mmfr0El1;
+    /** Content of the ID_AA64MMFR1_EL1 register. */
+    uint64_t u64RegIdAa64Mmfr1El1;
+    /** Content of the ID_AA64MMFR2_EL1 register. */
+    uint64_t u64RegIdAa64Mmfr2El1;
+    /** Content of the CLIDR_EL1 register. */
+    uint64_t u64RegClidrEl1;
+    /** Content of the CTR_EL0 register. */
+    uint64_t u64RegCtrEl0;
+    /** Content of the DCZID_EL0 register. */
+    uint64_t u64RegDczidEl0;
+    /** @todo we need MIDR_EL1 here, possibly also MPIDR_EL1 and REVIDR_EL1. */
+} CPUMARMV8IDREGS;
+/** Pointer to CPU ID registers. */
+typedef CPUMARMV8IDREGS *PCPUMARMV8IDREGS;
+/** Pointer to a const CPU ID registers structure. */
+typedef CPUMARMV8IDREGS const *PCCPUMARMV8IDREGS;
+
+
 /*
  * Include the target specific header.
…
 VMMDECL(CPUMMICROARCH)      CPUMGetHostMicroarch(PCVM pVM);

+VMMDECL(const char *)       CPUMMicroarchName(CPUMMICROARCH enmMicroarch);
+VMMDECL(const char *)       CPUMCpuVendorName(CPUMCPUVENDOR enmVendor);
+
+VMMDECL(CPUMCPUVENDOR)      CPUMCpuIdDetectX86VendorEx(uint32_t uEAX, uint32_t uEBX, uint32_t uECX, uint32_t uEDX);
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+VMMDECL(int)                CPUMCpuIdCollectLeavesFromX86Host(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves);
+#endif
+#if defined(RT_ARCH_ARM64)
+VMMDECL(int)                CPUMCpuIdCollectIdRegistersFromArmV8Host(PCPUMARMV8IDREGS pIdRegs);
+#endif
+
 #ifdef IN_RING3
 /** @defgroup grp_cpum_r3    The CPUM ring-3 API
…
 VMMR3DECL(void)             CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu);
 VMMDECL(bool)               CPUMR3IsStateRestorePending(PVM pVM);
-VMMDECL(const char *)       CPUMMicroarchName(CPUMMICROARCH enmMicroarch);
-VMMR3DECL(const char *)     CPUMCpuVendorName(CPUMCPUVENDOR enmVendor);

 VMMR3DECL(uint32_t)         CPUMR3DbGetEntries(void);
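The member reordering above exists so that CPUMFEATURESCOMMON is an exact layout prefix of both target-specific structs, which lets a union expose the shared fields under either view. A simplified, stand-alone sketch of the pattern (invented names, not the real definitions):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct FEATCOMMON
    {
        uint32_t enmMicroarch;       /* shared: the microarchitecture */
        uint8_t  enmCpuVendor;       /* shared: the CPU vendor */
        uint8_t  cMaxPhysAddrWidth;  /* shared: physical address width */
    } FEATCOMMON;

    typedef struct FEATX86
    {
        uint32_t enmMicroarch;       /* must mirror FEATCOMMON exactly... */
        uint8_t  enmCpuVendor;
        uint8_t  cMaxPhysAddrWidth;
        uint8_t  uFamily;            /* ...then target-specific members follow */
    } FEATX86;

    typedef union HOSTFEATURES
    {
        FEATCOMMON Common;           /* target-independent view */
        FEATX86    s;                /* target-specific view */
    } HOSTFEATURES;

    /* Poor man's AssertCompileMembersAtSameOffset: fails to compile on a layout mismatch. */
    typedef char assertVendorOffset[offsetof(FEATCOMMON, enmCpuVendor) == offsetof(FEATX86, enmCpuVendor) ? 1 : -1];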
trunk/include/VBox/vmm/vm.h
(r107227 → r107650)

     struct
     {
-        /** Padding for hidden fields. */
-        uint8_t abHidden0[64 + 48];
         /** Guest CPU feature information. */
         CPUMFEATURES GuestFeatures;
trunk/include/iprt/armv8.h
(r106463 → r107650)

 /** CSSELR_EL1 register - RW. */
 #define ARMV8_AARCH64_SYSREG_CSSELR_EL1     ARMV8_AARCH64_SYSREG_ID_CREATE(3, 2, 0, 0, 0)
+
+/** CTR_EL0 - Cache Type Register - RO. */
+#define ARMV8_AARCH64_SYSREG_CTR_EL0        ARMV8_AARCH64_SYSREG_ID_CREATE(3, 3, 0, 0, 1)
+/** DCZID_EL0 - Data Cache Zero ID Register - RO. */
+#define ARMV8_AARCH64_SYSREG_DCZID_EL0      ARMV8_AARCH64_SYSREG_ID_CREATE(3, 3, 0, 0, 7)
+

 /** NZCV - Status Flags - ??. */
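The five arguments are the architected (op0, op1, CRn, CRm, op2) system-register encoding; CTR_EL0 is (3,3,0,0,1) and DCZID_EL0 is (3,3,0,0,7) per the ARM ARM. A sketch of how such an encoding can be packed into one integer; the shift positions here are an assumption made for illustration, not copied from iprt/armv8.h:

    #include <stdint.h>

    /* Pack op0/op1/CRn/CRm/op2 into a single 16-bit-ish identifier. */
    static inline uint32_t sysRegId(uint32_t uOp0, uint32_t uOp1, uint32_t uCrn, uint32_t uCrm, uint32_t uOp2)
    {
        return (uOp0 << 14) | (uOp1 << 11) | (uCrn << 7) | (uCrm << 3) | uOp2;
    }

    /* The two registers added above, with their architected encodings: */
    #define MY_SYSREG_CTR_EL0    sysRegId(3, 3, 0, 0, 1)   /* Cache Type Register */
    #define MY_SYSREG_DCZID_EL0  sysRegId(3, 3, 0, 0, 7)   /* Data Cache Zero ID */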
trunk/src/VBox/VMM/Makefile.kmk
(r107308 → r107650)

         VMMR3/CPUM-armv8.cpp \
         VMMR3/CPUMDbg-armv8.cpp \
+        VMMAll/CPUMAllCpuId.cpp \
         VMMR3/CPUMR3CpuId-armv8.cpp \
         VMMR3/CPUMR3Db-armv8.cpp \
…
 ##
 # Turn the header $2 into the DTrace library script $1.
+# @todo ARM: this needs adjusting for the non-native VBoxVMM variant!
 #
 define def_vmm_lib_dtrace_preprocess
…
                -D VBOX_FOR_DTRACE_LIB \
                -D VBOX_FOR_DTRACE_LIB_$(toupper $(KBUILD_TARGET_ARCH)) \
+               -D $(if-expr "$(KBUILD_TARGET_ARCH)" == "amd64",VBOX_VMM_TARGET_X86,VBOX_VMM_TARGET_ARMV8) \
                -D IN_RING0 \
                -D RT_C_DECLS_BEGIN= \
trunk/src/VBox/VMM/VMMAll/CPUMAllCpuId.cpp
(r106061 → r107650)

 #include <iprt/string.h>
 #include <iprt/x86-helpers.h>
+#if defined(RT_ARCH_ARM64) || defined(VBOX_VMM_TARGET_ARMV8)
+# include <iprt/armv8.h>
+#endif
…
 *   Global Variables                                                            *
 *********************************************************************************/
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)
 /**
  * The intel pentium family.
…
 }

+#endif /* if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */
+
+

 /**
…
 }

+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)

 /**
…
     else
     {
-#ifdef IN_VBOX_CPU_REPORT
+# ifdef IN_VBOX_CPU_REPORT
         AssertReleaseFailed();
-#else
-# ifdef IN_RING3
+# else
+#  ifdef IN_RING3
         Assert(ppaLeaves == &pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);
         Assert(*ppaLeaves == pVM->cpum.s.GuestInfo.aCpuIdLeaves);
…
         { }
         else
-# endif
+#  endif
         {
             *ppaLeaves = NULL;
             LogRel(("CPUM: cpumR3CpuIdEnsureSpace: Out of CPUID space!\n"));
         }
-#endif
+# endif
     }
     return *ppaLeaves;
 }


-#ifdef VBOX_STRICT
+# ifdef VBOX_STRICT
 /**
  * Checks that we've updated the CPUID leaves array correctly.
…
     }
 }
-#endif
+# endif
+
+#endif /* defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */

 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
…
  *          success.
  */
-VMMDECL(int) CPUMCpuIdCollectLeavesX86(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
+VMMDECL(int) CPUMCpuIdCollectLeavesFromX86Host(PCPUMCPUIDLEAF *ppaLeaves, uint32_t *pcLeaves)
 {
     *ppaLeaves = NULL;
…
     return VINF_SUCCESS;
 }
+
 #endif /* RT_ARCH_X86 || RT_ARCH_AMD64 */

+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)
 /**
  * Detect the CPU vendor given the
…
     return CPUMCPUVENDOR_UNKNOWN;
 }
+#endif /* defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */
…
 }

+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86)

 static PCCPUMCPUIDLEAF cpumCpuIdFindLeaf(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf)
…

-static void cpumExplodeVmxFeatures(PCVMXMSRS pVmxMsrs, PCPUMFEATURES pFeatures)
+static void cpumExplodeVmxFeatures(PCVMXMSRS pVmxMsrs, CPUMFEATURESX86 *pFeatures)
 {
     Assert(pVmxMsrs);
…

-int cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, PCPUMFEATURES pFeatures)
+int cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, CPUMFEATURESX86 *pFeatures)
 {
     Assert(pMsrs);
…
 }

+#endif /* defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */
+
+#if defined(RT_ARCH_ARM64)
+/**
+ * Collects the ID registers from an ARMv8 host.
+ *
+ * This isn't trivial on all hosts when running in userland and there is no
+ * support driver handy.
+ */
+VMMDECL(int) CPUMCpuIdCollectIdRegistersFromArmV8Host(PCPUMARMV8IDREGS pIdRegs)
+{
+# ifdef _MSC_VER
+#  define READ_SYS_REG(a_u64Dst, a_SysRegName) do { \
+        (a_u64Dst) = (uint64_t)_ReadStatusReg(RT_CONCAT(ARMV8_AARCH64_SYSREG_,a_SysRegName) & 0x7fff); \
+    } while (0)
+# else
+#  define READ_SYS_REG(a_u64Dst, a_SysRegName) do { \
+        __asm__ __volatile__ ("mrs %0, " #a_SysRegName : "=r" (a_u64Dst)); \
+    } while (0)
+# endif
+
+    RT_ZERO(*pIdRegs);
+
+    /*
+     * CTR_EL0 can be trapped when executed in L0 (SCTLR_EL0.UCT) and macOS
+     * & Windows does so by default.  Linux OTOH typically exposes all the
+     * feature registers to user land with some sanitizing.
+     */
+# if !defined(IN_RING3) || defined(RT_OS_LINUX)
+    READ_SYS_REG(pIdRegs->u64RegCtrEl0, CTR_EL0);
+# endif
+    READ_SYS_REG(pIdRegs->u64RegDczidEl0, DCZID_EL0);
+
+# if defined(IN_RING0) || defined(RT_OS_LINUX)
+#  ifdef IN_RING3
+    if (getauxval(AT_HWCAP) & HWCAP_CPUID)
+#  endif
+    {
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Pfr0El1,  ID_AA64PFR0_EL1);
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Pfr1El1,  ID_AA64PFR1_EL1);
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Dfr0El1,  ID_AA64DFR0_EL1);
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Dfr1El1,  ID_AA64DFR1_EL1);
+        /// @todo READ_SYS_REG(pIdRegs->u64RegIdAa64Dfr2El1,  ID_AA64DFR2_EL1);
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Afr0El1,  ID_AA64AFR0_EL1);
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Afr1El1,  ID_AA64AFR1_EL1);
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Isar0El1, ID_AA64ISAR0_EL1);
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Isar1El1, ID_AA64ISAR1_EL1);
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Isar2El1, ID_AA64ISAR2_EL1);
+        /// @todo READ_SYS_REG(pIdRegs->u64RegIdAa64Isar3El1, ID_AA64ISAR3_EL1);
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Mmfr0El1, ID_AA64MMFR0_EL1);
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Mmfr1El1, ID_AA64MMFR1_EL1);
+        READ_SYS_REG(pIdRegs->u64RegIdAa64Mmfr2El1, ID_AA64MMFR2_EL1);
+        /// @todo READ_SYS_REG(pIdRegs->u64RegIdAa64Mmfr3El1, ID_AA64MMFR3_EL1);
+        /// @todo READ_SYS_REG(pIdRegs->u64RegIdAa64Mmfr4El1, ID_AA64MMFR4_EL1);
+        READ_SYS_REG(pIdRegs->u64RegClidrEl1,       CLIDR_EL1);
+
+        /// @todo READ_SYS_REG(pIdRegs->uMainIdRegEl1, MIDR_EL1);
+        /// @todo READ_SYS_REG(pIdRegs->uMpIdRegEl1,   MPIDR_EL1);
+        /// @todo READ_SYS_REG(pIdRegs->uRevIdRegEl1,  REVIDR_EL1);
+        return VINF_SUCCESS;
+    }
+# endif
+# ifndef IN_RING0
+    /** @todo On darwin we should just cache the information (CPU DB) and figure
+     *        out which Apple Mx we're running on. */
+    /** @todo Make the info available via the support driver... */
+    return VINF_SUCCESS;
+# endif
+}
+#endif /* defined(RT_ARCH_ARM64) */
+
+#if defined(RT_ARCH_ARM64) || defined(VBOX_VMM_TARGET_ARMV8)
+/**
+ * Explode the CPU features from the given ID registers.
+ *
+ * @returns VBox status code.
+ * @param   pIdRegs     The ID registers to explode the features from.
+ * @param   pFeatures   Where to store the features to.
+ */
+int cpumCpuIdExplodeFeaturesArmV8(PCCPUMARMV8IDREGS pIdRegs, CPUMFEATURESARMV8 *pFeatures)
+{
+    uint64_t u64IdReg = pIdRegs->u64RegIdAa64Mmfr0El1;
+
+    static uint8_t s_aPaRange[] = { 32, 36, 40, 42, 44, 48, 52 };
+    AssertLogRelMsgReturn(RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE) < RT_ELEMENTS(s_aPaRange),
+                          ("CPUM: Invalid/Unsupported PARange value in ID_AA64MMFR0_EL1 register: %u\n",
+                           RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE)),
+                          VERR_CPUM_IPE_1);
+
+    pFeatures->cMaxPhysAddrWidth = s_aPaRange[RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE)];
+    pFeatures->fTGran4K  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN4)  != ARMV8_ID_AA64MMFR0_EL1_TGRAN4_NOT_IMPL;
+    pFeatures->fTGran16K = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN16) != ARMV8_ID_AA64MMFR0_EL1_TGRAN16_NOT_IMPL;
+    pFeatures->fTGran64K = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN64) != ARMV8_ID_AA64MMFR0_EL1_TGRAN64_NOT_IMPL;
+
+    /* ID_AA64ISAR0_EL1 features. */
+    u64IdReg = pIdRegs->u64RegIdAa64Isar0El1;
+    pFeatures->fAes       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_AES)    >= ARMV8_ID_AA64ISAR0_EL1_AES_SUPPORTED;
+    pFeatures->fPmull     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_AES)    >= ARMV8_ID_AA64ISAR0_EL1_AES_SUPPORTED_PMULL;
+    pFeatures->fSha1      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA1)   >= ARMV8_ID_AA64ISAR0_EL1_SHA1_SUPPORTED;
+    pFeatures->fSha256    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA2)   >= ARMV8_ID_AA64ISAR0_EL1_SHA2_SUPPORTED_SHA256;
+    pFeatures->fSha512    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA2)   >= ARMV8_ID_AA64ISAR0_EL1_SHA2_SUPPORTED_SHA256_SHA512;
+    pFeatures->fCrc32     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_CRC32)  >= ARMV8_ID_AA64ISAR0_EL1_CRC32_SUPPORTED;
+    pFeatures->fLse       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_ATOMIC) >= ARMV8_ID_AA64ISAR0_EL1_ATOMIC_SUPPORTED;
+    pFeatures->fTme       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TME)    >= ARMV8_ID_AA64ISAR0_EL1_TME_SUPPORTED;
+    pFeatures->fRdm       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_RDM)    >= ARMV8_ID_AA64ISAR0_EL1_RDM_SUPPORTED;
+    pFeatures->fSha3      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA3)   >= ARMV8_ID_AA64ISAR0_EL1_SHA3_SUPPORTED;
+    pFeatures->fSm3       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SM3)    >= ARMV8_ID_AA64ISAR0_EL1_SM3_SUPPORTED;
+    pFeatures->fSm4       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SM4)    >= ARMV8_ID_AA64ISAR0_EL1_SM4_SUPPORTED;
+    pFeatures->fDotProd   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_DP)     >= ARMV8_ID_AA64ISAR0_EL1_DP_SUPPORTED;
+    pFeatures->fFhm       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_FHM)    >= ARMV8_ID_AA64ISAR0_EL1_FHM_SUPPORTED;
+    pFeatures->fFlagM     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TS)     >= ARMV8_ID_AA64ISAR0_EL1_TS_SUPPORTED;
+    pFeatures->fFlagM2    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TS)     >= ARMV8_ID_AA64ISAR0_EL1_TS_SUPPORTED_2;
+    pFeatures->fTlbios    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TLB)    >= ARMV8_ID_AA64ISAR0_EL1_TLB_SUPPORTED;
+    pFeatures->fTlbirange = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TLB)    >= ARMV8_ID_AA64ISAR0_EL1_TLB_SUPPORTED_RANGE;
+    pFeatures->fRng       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_RNDR)   >= ARMV8_ID_AA64ISAR0_EL1_RNDR_SUPPORTED;
+
+    /* ID_AA64ISAR1_EL1 features. */
+    u64IdReg = pIdRegs->u64RegIdAa64Isar1El1;
+    pFeatures->fDpb  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DPB) >= ARMV8_ID_AA64ISAR1_EL1_DPB_SUPPORTED;
+    pFeatures->fDpb2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DPB) >= ARMV8_ID_AA64ISAR1_EL1_DPB_SUPPORTED_2;
+
+    /* PAuth using QARMA5. */
+    pFeatures->fPacQarma5 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) != ARMV8_ID_AA64ISAR1_EL1_APA_NOT_IMPL;
+    if (pFeatures->fPacQarma5)
+    {
+        pFeatures->fPAuth       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_PAUTH;
+        pFeatures->fEpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_EPAC;
+        pFeatures->fPAuth2      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_PAUTH2;
+        pFeatures->fFpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_FPAC;
+        pFeatures->fFpacCombine = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_FPACCOMBINE;
+    }
+
+    /* PAuth using implementation defined algorithm. */
+    pFeatures->fPacImp = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) != ARMV8_ID_AA64ISAR1_EL1_API_NOT_IMPL;
+    if (pFeatures->fPacQarma5)
+    {
+        pFeatures->fPAuth       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_PAUTH;
+        pFeatures->fEpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_EPAC;
+        pFeatures->fPAuth2      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_PAUTH2;
+        pFeatures->fFpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_FPAC;
+        pFeatures->fFpacCombine = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_FPACCOMBINE;
+    }
+
+    pFeatures->fJscvt       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FJCVTZS) >= ARMV8_ID_AA64ISAR1_EL1_FJCVTZS_SUPPORTED;
+    pFeatures->fFcma        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FCMA)    >= ARMV8_ID_AA64ISAR1_EL1_FCMA_SUPPORTED;
+    pFeatures->fLrcpc       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LRCPC)   >= ARMV8_ID_AA64ISAR1_EL1_LRCPC_SUPPORTED;
+    pFeatures->fLrcpc2      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LRCPC)   >= ARMV8_ID_AA64ISAR1_EL1_LRCPC_SUPPORTED_2;
+    pFeatures->fFrintts     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FRINTTS) >= ARMV8_ID_AA64ISAR1_EL1_FRINTTS_SUPPORTED;
+    pFeatures->fSb          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_SB)      >= ARMV8_ID_AA64ISAR1_EL1_SB_SUPPORTED;
+    pFeatures->fSpecres     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_SPECRES) >= ARMV8_ID_AA64ISAR1_EL1_SPECRES_SUPPORTED;
+    pFeatures->fBf16        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_BF16)    >= ARMV8_ID_AA64ISAR1_EL1_BF16_SUPPORTED_BF16;
+    pFeatures->fEbf16       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_BF16)    >= ARMV8_ID_AA64ISAR1_EL1_BF16_SUPPORTED_EBF16;
+    pFeatures->fDgh         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DGH)     >= ARMV8_ID_AA64ISAR1_EL1_DGH_SUPPORTED;
+    pFeatures->fI8mm        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_I8MM)    >= ARMV8_ID_AA64ISAR1_EL1_I8MM_SUPPORTED;
+    pFeatures->fXs          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_XS)      >= ARMV8_ID_AA64ISAR1_EL1_XS_SUPPORTED;
+    pFeatures->fLs64        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED;
+    pFeatures->fLs64V       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED_V;
+    pFeatures->fLs64Accdata = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED_ACCDATA;
+
+    /* ID_AA64ISAR2_EL1 features. */
+    u64IdReg = pIdRegs->u64RegIdAa64Isar2El1;
+    pFeatures->fWfxt  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_WFXT)  >= ARMV8_ID_AA64ISAR2_EL1_WFXT_SUPPORTED;
+    pFeatures->fRpres = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_RPRES) >= ARMV8_ID_AA64ISAR2_EL1_RPRES_SUPPORTED;
+
+    /* PAuth using QARMA3. */
+    pFeatures->fPacQarma3 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_GPA3) >= ARMV8_ID_AA64ISAR2_EL1_GPA3_SUPPORTED;
+    pFeatures->fPacQarma3 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) != ARMV8_ID_AA64ISAR2_EL1_APA3_NOT_IMPL;
+    if (pFeatures->fPacQarma5)
+    {
+        pFeatures->fPAuth       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_PAUTH;
+        pFeatures->fEpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_EPAC;
+        pFeatures->fPAuth2      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_PAUTH2;
+        pFeatures->fFpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_FPAC;
+        pFeatures->fFpacCombine = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_FPACCOMBINE;
+    }
+
+    pFeatures->fMops          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_MOPS)    >= ARMV8_ID_AA64ISAR2_EL1_MOPS_SUPPORTED;
+    pFeatures->fHbc           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_BC)      >= ARMV8_ID_AA64ISAR2_EL1_BC_SUPPORTED;
+    pFeatures->fConstPacField = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_PACFRAC) >= ARMV8_ID_AA64ISAR2_EL1_PACFRAC_TRUE;
+
+    /* ID_AA64PFR0_EL1 */
+    u64IdReg = pIdRegs->u64RegIdAa64Pfr0El1;
+    /* The FP and AdvSIMD field must have the same value. */
+    Assert(RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP) == RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD));
+    pFeatures->fFp      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP)      != ARMV8_ID_AA64PFR0_EL1_FP_NOT_IMPL;
+    pFeatures->fFp16    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP)      == ARMV8_ID_AA64PFR0_EL1_FP_IMPL_SP_DP_HP;
+    pFeatures->fAdvSimd = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD) != ARMV8_ID_AA64PFR0_EL1_ADVSIMD_NOT_IMPL;
+    pFeatures->fFp16    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD) == ARMV8_ID_AA64PFR0_EL1_ADVSIMD_IMPL_SP_DP_HP;
+    pFeatures->fRas     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RAS)     >= ARMV8_ID_AA64PFR0_EL1_RAS_SUPPORTED;
+    pFeatures->fRasV1p1 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RAS)     >= ARMV8_ID_AA64PFR0_EL1_RAS_V1P1;
+    pFeatures->fSve     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_SVE)     >= ARMV8_ID_AA64PFR0_EL1_SVE_SUPPORTED;
+    pFeatures->fSecEl2  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_SEL2)    >= ARMV8_ID_AA64PFR0_EL1_SEL2_SUPPORTED;
+    pFeatures->fAmuV1   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_AMU)     >= ARMV8_ID_AA64PFR0_EL1_AMU_V1;
+    pFeatures->fAmuV1p1 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_AMU)     >= ARMV8_ID_AA64PFR0_EL1_AMU_V1P1;
+    pFeatures->fDit     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_DIT)     >= ARMV8_ID_AA64PFR0_EL1_DIT_SUPPORTED;
+    pFeatures->fRme     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RME)     >= ARMV8_ID_AA64PFR0_EL1_RME_SUPPORTED;
+    pFeatures->fCsv2    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_CSV2)    >= ARMV8_ID_AA64PFR0_EL1_CSV2_SUPPORTED;
+    pFeatures->fCsv2v3  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_CSV2)    >= ARMV8_ID_AA64PFR0_EL1_CSV2_3_SUPPORTED;
+
+    /* ID_AA64PFR1_EL1 */
+    u64IdReg = pIdRegs->u64RegIdAa64Pfr1El1;
+    pFeatures->fBti     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_BT)       >= ARMV8_ID_AA64PFR1_EL1_BT_SUPPORTED;
+    pFeatures->fSsbs    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SSBS)     >= ARMV8_ID_AA64PFR1_EL1_SSBS_SUPPORTED;
+    pFeatures->fSsbs2   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SSBS)     >= ARMV8_ID_AA64PFR1_EL1_SSBS_SUPPORTED_MSR_MRS;
+    pFeatures->fMte     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_INSN_ONLY;
+    pFeatures->fMte2    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_FULL;
+    pFeatures->fMte3    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE)      >= ARMV8_ID_AA64PFR1_EL1_MTE_FULL_ASYM_TAG_FAULT_CHK;
+    /** @todo RAS_frac, MPAM_frac, CSV2_frac. */
+    pFeatures->fSme     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SME)      >= ARMV8_ID_AA64PFR1_EL1_SME_SUPPORTED;
+    pFeatures->fSme2    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SME)      >= ARMV8_ID_AA64PFR1_EL1_SME_SME2;
+    pFeatures->fRngTrap = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_RNDRTRAP) >= ARMV8_ID_AA64PFR1_EL1_RNDRTRAP_SUPPORTED;
+    pFeatures->fNmi     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_NMI)      >= ARMV8_ID_AA64PFR1_EL1_NMI_SUPPORTED;
+
+    /* ID_AA64MMFR0_EL1 */
+    u64IdReg = pIdRegs->u64RegIdAa64Mmfr0El1;
+    pFeatures->fExs = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_EXS) >= ARMV8_ID_AA64MMFR0_EL1_EXS_SUPPORTED;
+    pFeatures->fFgt = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_FGT) >= ARMV8_ID_AA64MMFR0_EL1_FGT_SUPPORTED;
+    pFeatures->fEcv = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_ECV) >= ARMV8_ID_AA64MMFR0_EL1_ECV_SUPPORTED;
+
+    /* ID_AA64MMFR1_EL1 */
+    u64IdReg = pIdRegs->u64RegIdAa64Mmfr1El1;
+    pFeatures->fHafdbs = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HAFDBS)   >= ARMV8_ID_AA64MMFR1_EL1_HAFDBS_SUPPORTED;
+    pFeatures->fVmid16 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_VMIDBITS) >= ARMV8_ID_AA64MMFR1_EL1_VMIDBITS_16;
+    pFeatures->fVhe    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_VHE)      >= ARMV8_ID_AA64MMFR1_EL1_VHE_SUPPORTED;
+    pFeatures->fHpds   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HPDS)     >= ARMV8_ID_AA64MMFR1_EL1_HPDS_SUPPORTED;
+    pFeatures->fHpds2  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HPDS)     >= ARMV8_ID_AA64MMFR1_EL1_HPDS_SUPPORTED_2;
+    pFeatures->fLor    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_LO)       >= ARMV8_ID_AA64MMFR1_EL1_LO_SUPPORTED;
+    pFeatures->fPan    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)      >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED;
+    pFeatures->fPan2   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)      >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED_2;
+    pFeatures->fPan3   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN)      >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED_3;
+    pFeatures->fXnx    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_XNX)      >= ARMV8_ID_AA64MMFR1_EL1_XNX_SUPPORTED;
+    pFeatures->fTwed   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_TWED)     >= ARMV8_ID_AA64MMFR1_EL1_TWED_SUPPORTED;
+    pFeatures->fEts2   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_ETS)      >= ARMV8_ID_AA64MMFR1_EL1_ETS_SUPPORTED;
+    pFeatures->fHcx    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HCX)      >= ARMV8_ID_AA64MMFR1_EL1_HCX_SUPPORTED;
+    pFeatures->fAfp    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_AFP)      >= ARMV8_ID_AA64MMFR1_EL1_AFP_SUPPORTED;
+    pFeatures->fNTlbpa = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_NTLBPA)   >= ARMV8_ID_AA64MMFR1_EL1_NTLBPA_INCLUDE_COHERENT_ONLY;
+    pFeatures->fTidcp1 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_TIDCP1)   >= ARMV8_ID_AA64MMFR1_EL1_TIDCP1_SUPPORTED;
+    pFeatures->fCmow   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_CMOW)     >= ARMV8_ID_AA64MMFR1_EL1_CMOW_SUPPORTED;
+
+    /* ID_AA64MMFR2_EL1 */
+    u64IdReg = pIdRegs->u64RegIdAa64Mmfr2El1;
+    pFeatures->fTtcnp  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_CNP)     >= ARMV8_ID_AA64MMFR2_EL1_CNP_SUPPORTED;
+    pFeatures->fUao    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_UAO)     >= ARMV8_ID_AA64MMFR2_EL1_UAO_SUPPORTED;
+    pFeatures->fLsmaoc = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_LSM)     >= ARMV8_ID_AA64MMFR2_EL1_LSM_SUPPORTED;
+    pFeatures->fIesb   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_IESB)    >= ARMV8_ID_AA64MMFR2_EL1_IESB_SUPPORTED;
+    pFeatures->fLva    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_VARANGE) >= ARMV8_ID_AA64MMFR2_EL1_VARANGE_52BITS_64KB_GRAN;
+    pFeatures->fCcidx  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_CCIDX)   >= ARMV8_ID_AA64MMFR2_EL1_CCIDX_64BIT;
+    pFeatures->fNv     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_NV)      >= ARMV8_ID_AA64MMFR2_EL1_NV_SUPPORTED;
+    pFeatures->fNv2    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_NV)      >= ARMV8_ID_AA64MMFR2_EL1_NV_SUPPORTED_2;
+    pFeatures->fTtst   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_ST)      >= ARMV8_ID_AA64MMFR2_EL1_ST_SUPPORTED;
+    pFeatures->fLse2   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_AT)      >= ARMV8_ID_AA64MMFR2_EL1_AT_SUPPORTED;
+    pFeatures->fIdst   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_IDS)     >= ARMV8_ID_AA64MMFR2_EL1_IDS_EC_18H;
+    pFeatures->fS2Fwb  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_FWB)     >= ARMV8_ID_AA64MMFR2_EL1_FWB_SUPPORTED;
+    pFeatures->fTtl    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_TTL)     >= ARMV8_ID_AA64MMFR2_EL1_TTL_SUPPORTED;
+    pFeatures->fEvt    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_EVT)     >= ARMV8_ID_AA64MMFR2_EL1_EVT_SUPPORTED;
+    pFeatures->fE0Pd   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_E0PD)    >= ARMV8_ID_AA64MMFR2_EL1_E0PD_SUPPORTED;
+
+    /* ID_AA64DFR0_EL1 */
+    u64IdReg = pIdRegs->u64RegIdAa64Dfr0El1;
+    pFeatures->fDebugV8p1  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER)    >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8_VHE;
+    pFeatures->fDebugV8p2  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER)    >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p2;
+    pFeatures->fDebugV8p4  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER)    >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p4;
+    pFeatures->fDebugV8p8  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER)    >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p8;
+    pFeatures->fPmuV3      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)      >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3;
+    pFeatures->fPmuV3p1    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)      >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P1;
+    pFeatures->fPmuV3p4    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)      >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P4;
+    pFeatures->fPmuV3p5    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)      >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P5;
+    pFeatures->fPmuV3p7    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)      >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P7;
+    pFeatures->fPmuV3p8    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER)      >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P8;
+    pFeatures->fSpe        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)      >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED;
+    pFeatures->fSpeV1p1    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)      >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P1;
+    pFeatures->fSpeV1p2    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)      >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P2;
+    pFeatures->fSpeV1p3    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER)      >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P3;
+    pFeatures->fDoubleLock = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DOUBLELOCK)  == ARMV8_ID_AA64DFR0_EL1_DOUBLELOCK_SUPPORTED;
+    pFeatures->fTrf        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_TRACEFILT)   >= ARMV8_ID_AA64DFR0_EL1_TRACEFILT_SUPPORTED;
+    pFeatures->fTrbe       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_TRACEBUFFER) >= ARMV8_ID_AA64DFR0_EL1_TRACEBUFFER_SUPPORTED;
+    pFeatures->fMtPmu      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_MTPMU)       == ARMV8_ID_AA64DFR0_EL1_MTPMU_SUPPORTED;
+    pFeatures->fBrbe       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_BRBE)        >= ARMV8_ID_AA64DFR0_EL1_BRBE_SUPPORTED;
+    pFeatures->fBrbeV1p1   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_BRBE)        >= ARMV8_ID_AA64DFR0_EL1_BRBE_SUPPORTED_V1P1;
+    pFeatures->fHpmn0      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_HPMN0)       >= ARMV8_ID_AA64DFR0_EL1_HPMN0_SUPPORTED;
+
+    return VINF_SUCCESS;
+}
+#endif /* defined(RT_ARCH_ARM64) || defined(VBOX_VMM_TARGET_ARMV8) */
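The getauxval(AT_HWCAP) & HWCAP_CPUID check above relies on a Linux/arm64 mechanism: when HWCAP_CPUID is set, the kernel traps MRS reads of the ID registers from EL0 and returns sanitized values. A self-contained userland sketch of the same idea (plain C, outside VBox):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_CPUID
    # define HWCAP_CPUID (1 << 11)   /* from the Linux arm64 UAPI <asm/hwcap.h> */
    #endif

    int main(void)
    {
        if (getauxval(AT_HWCAP) & HWCAP_CPUID)
        {
            uint64_t u64Mmfr0;
            /* The kernel emulates this MRS from user mode and sanitizes the result. */
            __asm__ __volatile__("mrs %0, ID_AA64MMFR0_EL1" : "=r" (u64Mmfr0));
            printf("ID_AA64MMFR0_EL1 = %#llx\n", (unsigned long long)u64Mmfr0);
        }
        else
            printf("MRS emulation (HWCAP_CPUID) not available on this kernel\n");
        return 0;
    }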
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs-armv8.cpp
(r107113 → r107650)

 }

+#if 0 /* unused atm */

 /**
…
 }

+#endif

 /**
…
 }

-
-/**
- * Translates a microarchitecture enum value to the corresponding string
- * constant.
- *
- * @returns Read-only string constant (omits "kCpumMicroarch_" prefix). Returns
- *          NULL if the value is invalid.
- *
- * @param   enmMicroarch    The enum value to convert.
- *
- * @todo    Doesn't really belong here but for now there is no other Armv8 CPUM source file.
- */
-VMMDECL(const char *) CPUMMicroarchName(CPUMMICROARCH enmMicroarch)
-{
-    switch (enmMicroarch)
-    {
-#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("kCpumMicroarch_") - 1)
-        CASE_RET_STR(kCpumMicroarch_Apple_M1);
-#undef CASE_RET_STR
-        default:
-            break;
-    }
-
-    return NULL;
-}
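The CASE_RET_STR trick in the removed function is worth noting: it stringizes the enumerator and then skips the "kCpumMicroarch_" prefix via pointer arithmetic on the string literal. A stand-alone illustration (simplified enum, not the real definition):

    #include <stdio.h>

    typedef enum CPUMMICROARCH { kCpumMicroarch_Invalid = 0, kCpumMicroarch_Apple_M1 } CPUMMICROARCH;

    static const char *microarchName(CPUMMICROARCH enmMicroarch)
    {
    #define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("kCpumMicroarch_") - 1)
        switch (enmMicroarch)
        {
            CASE_RET_STR(kCpumMicroarch_Apple_M1);   /* "kCpumMicroarch_Apple_M1" + 15 -> "Apple_M1" */
    #undef CASE_RET_STR
            default:
                return NULL;
        }
    }

    int main(void)
    {
        printf("%s\n", microarchName(kCpumMicroarch_Apple_M1));   /* prints: Apple_M1 */
        return 0;
    }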
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(r107113 → r107650)

 VMMDECL(CPUMCPUVENDOR) CPUMGetHostCpuVendor(PVM pVM)
 {
-    return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.enmCpuVendor;
+    return (CPUMCPUVENDOR)pVM->cpum.s.HostFeatures.Common.enmCpuVendor;
 }
…
 VMMDECL(CPUMMICROARCH) CPUMGetHostMicroarch(PCVM pVM)
 {
-    return pVM->cpum.s.HostFeatures.enmMicroarch;
+    return pVM->cpum.s.HostFeatures.Common.enmMicroarch;
 }
…
 {
     pVCpu->cpum.s.fChanged |= fChangedAdd;
 }
-
-
-/**
- * Checks if the CPU supports the XSAVE and XRSTOR instruction.
- *
- * @returns true if supported.
- * @returns false if not supported.
- * @param   pVM     The cross context VM structure.
- */
-VMMDECL(bool) CPUMSupportsXSave(PVM pVM)
-{
-    return pVM->cpum.s.HostFeatures.fXSaveRstor != 0;
-}
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
(r107389 → r107650)

     PCPUMCPUIDLEAF paLeaves;
     uint32_t cLeaves;
-    rc = CPUMCpuIdCollectLeavesX86(&paLeaves, &cLeaves);
+    rc = CPUMCpuIdCollectLeavesFromX86Host(&paLeaves, &cLeaves);
     AssertLogRelRCReturn(rc, rc);
…
     {
         /* Copy the ring-0 host feature set to the shared part so ring-3 can pick it up. */
-        pGVM->cpum.s.HostFeatures = g_CpumHostFeatures.s;
+        pGVM->cpum.s.HostFeatures.s = g_CpumHostFeatures.s;
     }
…
      * Note! we assume this happens after the CPUMR3Init is done, so CPUID bits are settled.
      */
-    pVM->cpum.s.HostFeatures.fArchRdclNo             = 0;
-    pVM->cpum.s.HostFeatures.fArchIbrsAll            = 0;
-    pVM->cpum.s.HostFeatures.fArchRsbOverride        = 0;
-    pVM->cpum.s.HostFeatures.fArchVmmNeedNotFlushL1d = 0;
-    pVM->cpum.s.HostFeatures.fArchMdsNo              = 0;
+    pVM->cpum.s.HostFeatures.s.fArchRdclNo             = 0;
+    pVM->cpum.s.HostFeatures.s.fArchIbrsAll            = 0;
+    pVM->cpum.s.HostFeatures.s.fArchRsbOverride        = 0;
+    pVM->cpum.s.HostFeatures.s.fArchVmmNeedNotFlushL1d = 0;
+    pVM->cpum.s.HostFeatures.s.fArchMdsNo              = 0;
     uint32_t const cStdRange = ASMCpuId_EAX(0);
     if (   RTX86IsValidStdRange(cStdRange)
…
         uint64_t const fHostArchVal = ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES);
         uint64_t fArchVal = fHostArchVal;
-        pVM->cpum.s.HostFeatures.fArchRdclNo             = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RDCL_NO);
-        pVM->cpum.s.HostFeatures.fArchIbrsAll            = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_IBRS_ALL);
-        pVM->cpum.s.HostFeatures.fArchRsbOverride        = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RSBO);
-        pVM->cpum.s.HostFeatures.fArchVmmNeedNotFlushL1d = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_VMM_NEED_NOT_FLUSH_L1D);
-        pVM->cpum.s.HostFeatures.fArchMdsNo              = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_MDS_NO);
+        pVM->cpum.s.HostFeatures.s.fArchRdclNo             = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RDCL_NO);
+        pVM->cpum.s.HostFeatures.s.fArchIbrsAll            = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_IBRS_ALL);
+        pVM->cpum.s.HostFeatures.s.fArchRsbOverride        = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RSBO);
+        pVM->cpum.s.HostFeatures.s.fArchVmmNeedNotFlushL1d = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_VMM_NEED_NOT_FLUSH_L1D);
+        pVM->cpum.s.HostFeatures.s.fArchMdsNo              = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_MDS_NO);

         /* guest: */
…
     else
     {
-        pVM->cpum.s.HostFeatures.fArchCap = 0;
+        pVM->cpum.s.HostFeatures.s.fArchCap = 0;
         LogRel(("CPUM: IA32_ARCH_CAPABILITIES unsupported\n"));
     }
…
 VMMR0_INT_DECL(int) CPUMR0Trap07Handler(PVMCC pVM, PVMCPUCC pVCpu)
 {
-    Assert(pVM->cpum.s.HostFeatures.fFxSaveRstor);
+    Assert(pVM->cpum.s.HostFeatures.s.fFxSaveRstor);
     Assert(ASMGetCR4() & X86_CR4_OSFXSR);
…
      *        wrt. extended state (linux). */

-    if (!pVM->cpum.s.HostFeatures.fLeakyFxSR)
+    if (!pVM->cpum.s.HostFeatures.s.fLeakyFxSR)
     {
         Assert(!(pVCpu->cpum.s.fUseFlags & CPUM_USED_MANUAL_XMM_RESTORE));
…
 {
     bool fSavedGuest;
-    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.HostFeatures.fFxSaveRstor);
+    Assert(pVCpu->CTX_SUFF(pVM)->cpum.s.HostFeatures.s.fFxSaveRstor);
     Assert(ASMGetCR4() & X86_CR4_OSFXSR);
     if (pVCpu->cpum.s.fUseFlags & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST))
…
     else
         fSavedGuest = false;
-    Assert(!(  pVCpu->cpum.s.fUseFlags
-             & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST | CPUM_USED_MANUAL_XMM_RESTORE)));
+    AssertMsg(!(  pVCpu->cpum.s.fUseFlags
+                & (CPUM_USED_FPU_GUEST | CPUM_USED_FPU_HOST | CPUM_USED_MANUAL_XMM_RESTORE)), ("%#x\n", pVCpu->cpum.s.fUseFlags));
     Assert(!pVCpu->cpum.s.Guest.fUsedFpuGuest);
     return fSavedGuest;
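The RT_BOOL() conversions above collapse individual IA32_ARCH_CAPABILITIES MSR bits into single-bit feature fields. A stand-alone equivalent using the two lowest architected bits (bit values per the Intel SDM; names invented for this sketch):

    #include <stdint.h>

    #define MY_ARCH_CAP_F_RDCL_NO   UINT64_C(0x00000001)  /* bit 0: rogue data cache load not possible */
    #define MY_ARCH_CAP_F_IBRS_ALL  UINT64_C(0x00000002)  /* bit 1: enhanced IBRS supported */
    #define MY_RT_BOOL(a)           (!!(a))               /* collapse any non-zero value to 1 */

    typedef struct MYFEATURES
    {
        uint32_t fArchRdclNo  : 1;
        uint32_t fArchIbrsAll : 1;
    } MYFEATURES;

    static void explodeArchCaps(uint64_t fArchVal, MYFEATURES *pFeatures)
    {
        pFeatures->fArchRdclNo  = MY_RT_BOOL(fArchVal & MY_ARCH_CAP_F_RDCL_NO);
        pFeatures->fArchIbrsAll = MY_RT_BOOL(fArchVal & MY_ARCH_CAP_F_IBRS_ALL);
    }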
trunk/src/VBox/VMM/VMMR3/CPUM-armv8.cpp
(r107032 → r107650)

     /* Load CPUID and explode guest features. */
-    return cpumR3LoadCpuId(pVM, pSSM, uVersion);
+    return cpumR3LoadCpuIdArmV8(pVM, pSSM, uVersion);
 }
…
 }

+#if 0 /* nobody is using these atm, they are for AMD64/darwin only */
 /**
  * Marks the guest debug state as active.
…
     ASMAtomicOrU32(&pVCpu->cpum.s.fUseFlags, CPUM_USED_DEBUG_REGS_HYPER);
 }
+#endif
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
(r107220 → r107650)

 static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
 static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+#ifdef RT_ARCH_AMD64
 static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
+#endif
…
 *   Global Variables                                                            *
 *********************************************************************************/
-#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
 /** Host CPU features. */
 DECL_HIDDEN_DATA(CPUHOSTFEATURES) g_CpumHostFeatures;
-#endif
…
     RT_NOREF(pszArgs);
-    PCCPUMFEATURES pHostFeatures  = &pVM->cpum.s.HostFeatures;
+#ifdef RT_ARCH_AMD64
+    PCCPUMFEATURES pHostFeatures  = &pVM->cpum.s.HostFeatures.s;
+#else
+    PCCPUMFEATURES pHostFeatures  = &pVM->cpum.s.GuestFeatures;
+#endif
     PCCPUMFEATURES pGuestFeatures = &pVM->cpum.s.GuestFeatures;
     if (   pHostFeatures->enmCpuVendor == CPUMCPUVENDOR_INTEL
…
         || pHostFeatures->enmCpuVendor == CPUMCPUVENDOR_SHANGHAI)
     {
-#define VMXFEATDUMP(a_szDesc, a_Var) \
+#ifdef RT_ARCH_AMD64
+# define VMXFEATDUMP(a_szDesc, a_Var) \
         pHlp->pfnPrintf(pHlp, "  %s = %u (%u)\n", a_szDesc, pGuestFeatures->a_Var, pHostFeatures->a_Var)
+#else
+# define VMXFEATDUMP(a_szDesc, a_Var) \
+        pHlp->pfnPrintf(pHlp, "  %s = %u\n", a_szDesc, pGuestFeatures->a_Var)
+#endif

         pHlp->pfnPrintf(pHlp, "Nested hardware virtualization - VMX features\n");
+#ifdef RT_ARCH_AMD64
         pHlp->pfnPrintf(pHlp, "  Mnemonic - Description = guest (host)\n");
+#else
+        pHlp->pfnPrintf(pHlp, "  Mnemonic - Description = guest\n");
+#endif
         VMXFEATDUMP("VMX - Virtual-Machine Extensions                       ", fVmx);
         /* Basic. */
…
     if (!VM_IS_HM_ENABLED(pVM) && !VM_IS_EXEC_ENGINE_IEM(pVM))
         pszWhy = "execution engine is neither HM nor IEM";
+#ifdef RT_ARCH_AMD64
     else if (VM_IS_HM_ENABLED(pVM) && !HMIsNestedPagingActive(pVM))
         pszWhy = "nested paging is not enabled for the VM or it is not supported by the host";
-    else if (VM_IS_HM_ENABLED(pVM) && !pVM->cpum.s.HostFeatures.fNoExecute)
+    else if (VM_IS_HM_ENABLED(pVM) && !pVM->cpum.s.HostFeatures.s.fNoExecute)
         pszWhy = "NX is not available on the host";
+#endif
     if (pszWhy)
     {
…
      * by the hardware, hence we merge our emulated features with the host features below.
      */
-    PCCPUMFEATURES pBaseFeat  = cpumR3IsHwAssistNstGstExecAllowed(pVM) ? &pVM->cpum.s.HostFeatures : &EmuFeat;
-    PCPUMFEATURES  pGuestFeat = &pVM->cpum.s.GuestFeatures;
+#ifdef RT_ARCH_AMD64
+    PCCPUMFEATURES const pBaseFeat  = cpumR3IsHwAssistNstGstExecAllowed(pVM) ? &pVM->cpum.s.HostFeatures.s : &EmuFeat;
+#else
+    PCCPUMFEATURES const pBaseFeat  = &EmuFeat;
+#endif
+    PCPUMFEATURES const  pGuestFeat = &pVM->cpum.s.GuestFeatures;
     Assert(pBaseFeat->fVmx);
 #define CPUMVMX_SET_GST_FEAT(a_Feat) \
…
     AssertCompileSizeAlignment(CPUMCTX, 64);
     AssertCompileSizeAlignment(CPUMCTXMSRS, 64);
+#ifdef RT_ARCH_AMD64
     AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
+#endif
     AssertCompileMemberAlignment(VM, cpum, 64);
     AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
…
     AssertLogRelRCReturn(rc, rc);

+    /* Use the host features detected by CPUMR0ModuleInit if available. */
+    if (pVM->cpum.s.HostFeatures.Common.enmCpuVendor != CPUMCPUVENDOR_INVALID)
+        g_CpumHostFeatures.s = pVM->cpum.s.HostFeatures.s;
+    else
+    {
 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
-    /* Use the host features detected by CPUMR0ModuleInit if available. */
-    if (pVM->cpum.s.HostFeatures.enmCpuVendor != CPUMCPUVENDOR_INVALID)
-        g_CpumHostFeatures.s = pVM->cpum.s.HostFeatures;
-    else
-    {
         PCPUMCPUIDLEAF paLeaves;
         uint32_t cLeaves;
-        rc = CPUMCpuIdCollectLeavesX86(&paLeaves, &cLeaves);
+        rc = CPUMCpuIdCollectLeavesFromX86Host(&paLeaves, &cLeaves);
         AssertLogRelRCReturn(rc, rc);
…
         RTMemFree(paLeaves);
         AssertLogRelRCReturn(rc, rc);
-    }
-    pVM->cpum.s.HostFeatures = g_CpumHostFeatures.s;
-    pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;

 #elif defined(RT_ARCH_ARM64)
+        CPUMARMV8IDREGS IdRegs = {0};
+        rc = CPUMCpuIdCollectIdRegistersFromArmV8Host(&IdRegs);
+        AssertLogRelRCReturn(rc, rc);
+
+        rc = cpumCpuIdExplodeFeaturesArmV8(&IdRegs, &g_CpumHostFeatures.s);
+        AssertLogRelRCReturn(rc, rc);
+
+#else
+# error port me
+#endif
+        AssertLogRelRCReturn(rc, rc);
+        pVM->cpum.s.HostFeatures.s = g_CpumHostFeatures.s;
+    }
+    pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.Common.enmCpuVendor; /* a bit bogus for mismatching host/guest */

+#if 0 /** @todo fix */
     /** @todo we shouldn't be using the x86/AMD64 CPUMFEATURES for HostFeatures,
      *        but it's too much work to fix that now.  So, instead we just set
…
 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
-    if (!pVM->cpum.s.HostFeatures.fFxSaveRstor)
+    if (!pVM->cpum.s.HostFeatures.s.fFxSaveRstor)
         return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support the FXSAVE/FXRSTOR instruction.");
-    if (!pVM->cpum.s.HostFeatures.fMmx)
+    if (!pVM->cpum.s.HostFeatures.s.fMmx)
         return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support MMX.");
-    if (!pVM->cpum.s.HostFeatures.fTsc)
+    if (!pVM->cpum.s.HostFeatures.s.fTsc)
         return VMSetError(pVM, VERR_UNSUPPORTED_CPU, RT_SRC_POS, "Host CPU does not support RDTSC.");
 #endif
…
     uint64_t fXStateHostMask = 0;
 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
-    if (   pVM->cpum.s.HostFeatures.fXSaveRstor
-        && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor)
+    if (   pVM->cpum.s.HostFeatures.s.fXSaveRstor
+        && pVM->cpum.s.HostFeatures.s.fOpSysXSaveRstor)
     {
         fXStateHostMask = fXcr0Host = ASMGetXcr0();
…
     /*
      * Initialize the host XSAVE/XRSTOR mask.
      */
-    uint32_t cbMaxXState = pVM->cpum.s.HostFeatures.cbMaxExtendedState;
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
+    uint32_t cbMaxXState = pVM->cpum.s.HostFeatures.s.cbMaxExtendedState;
     cbMaxXState = RT_ALIGN(cbMaxXState, 128);
-    AssertLogRelReturn(   pVM->cpum.s.HostFeatures.cbMaxExtendedState >= sizeof(X86FXSTATE)
-                       && pVM->cpum.s.HostFeatures.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Host.abXState)
-                       && pVM->cpum.s.HostFeatures.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Guest.abXState)
+    AssertLogRelReturn(   pVM->cpum.s.HostFeatures.s.cbMaxExtendedState >= sizeof(X86FXSTATE)
+                       && pVM->cpum.s.HostFeatures.s.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Host.abXState)
+                       && pVM->cpum.s.HostFeatures.s.cbMaxExtendedState <= sizeof(pVM->apCpusR3[0]->cpum.s.Guest.abXState)
                        , VERR_CPUM_IPE_2);
+#endif

     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = pVM->apCpusR3[i];
         RT_NOREF(pVCpu);
+
+#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
         pVCpu->cpum.s.Host.fXStateMask = fXStateHostMask;
+#endif
+#ifdef VBOX_VMM_TARGET_X86
         pVCpu->cpum.s.hNestedVmxPreemptTimer = NIL_TMTIMERHANDLE;
+#endif
     }
…
     DBGFR3InfoRegisterInternalEx(pVM, "cpumhyper", "Displays the hypervisor cpu state.",
                                  &cpumR3InfoHyper, DBGFINFO_FLAGS_ALL_EMTS);
+#ifdef RT_ARCH_AMD64
     DBGFR3InfoRegisterInternalEx(pVM, "cpumhost", "Displays the host cpu state.",
                                  &cpumR3InfoHost, DBGFINFO_FLAGS_ALL_EMTS);
+#endif
     DBGFR3InfoRegisterInternalEx(pVM, "cpumguestinstr", "Displays the current guest instruction.",
                                  &cpumR3InfoGuestInstr, DBGFINFO_FLAGS_ALL_EMTS);
…
     pCtx->aXcr[0] = XSAVE_C_X87;
-    if (pVM->cpum.s.HostFeatures.cbMaxExtendedState >= RT_UOFFSETOF(X86XSAVEAREA, Hdr))
+#ifdef RT_ARCH_AMD64 /** @todo x86-on-ARM64: recheck this! */
+    if (pVM->cpum.s.HostFeatures.s.cbMaxExtendedState >= RT_UOFFSETOF(X86XSAVEAREA, Hdr))
+#endif
     {
         /* The entire FXSAVE state needs loading when we switch to XSAVE/XRSTOR
…
     /* Load CPUID and explode guest features. */
-    rc = cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs);
+    rc = cpumR3LoadCpuIdX86(pVM, pSSM, uVersion, &GuestMsrs);
     if (fVmxGstFeat)
     {
…
     cpumR3InfoGuestHwvirt(pVM, pHlp, pszArgs);
     cpumR3InfoHyper(pVM, pHlp, pszArgs);
+#ifdef RT_ARCH_AMD64
     cpumR3InfoHost(pVM, pHlp, pszArgs);
+#endif
 }
…

+#ifdef RT_ARCH_AMD64
 /**
  * Display the host cpu state.
…
                     pCtx->FSbase, pCtx->GSbase, pCtx->efer);
 }
+#endif /* RT_ARCH_AMD64 */
+

 /**
…
     LogRel(("******************** End of CPUID dump **********************\n"));

+#ifdef RT_ARCH_AMD64
     /*
      * Log VT-x extended features.
…
      *        to do here for SVM.
      */
-    if (pVM->cpum.s.HostFeatures.fVmx)
+    if (pVM->cpum.s.HostFeatures.s.fVmx)
     {
         LogRel(("*********************** VT-x features ***********************\n"));
…
         LogRel(("******************* End of VT-x features ********************\n"));
     }
+#endif

     /*
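Condensed, the new ARMv8 initialization path above boils down to three steps; this sketch compresses them with the error paths simplified (same calls as in the diff, hypothetical wrapper function):

    #if defined(RT_ARCH_ARM64)
    static int myInitArmV8HostFeatures(PVM pVM)
    {
        /* 1. Read whatever ID registers the host exposes. */
        CPUMARMV8IDREGS IdRegs = {0};
        int rc = CPUMCpuIdCollectIdRegistersFromArmV8Host(&IdRegs);

        /* 2. Decode the register fields into the feature struct. */
        if (RT_SUCCESS(rc))
            rc = cpumCpuIdExplodeFeaturesArmV8(&IdRegs, &g_CpumHostFeatures.s);

        /* 3. Publish the result to the VM structure. */
        if (RT_SUCCESS(rc))
            pVM->cpum.s.HostFeatures.s = g_CpumHostFeatures.s;
        return rc;
    }
    #endif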
trunk/src/VBox/VMM/VMMR3/CPUMDbg.cpp
(r107113 → r107650)

  * @param   pVM     The cross context VM structure.
  */
-int cpumR3DbgInit(PVM pVM)
+DECLHIDDEN(int) cpumR3DbgInit(PVM pVM)
 {
     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId-armv8.cpp
(r106061 → r107650)

 /** Pointer to CPUID config (from CFGM). */
 typedef CPUMCPUIDCONFIG *PCPUMCPUIDCONFIG;
-
-
-/**
- * Explode the CPU features from the given ID registers.
- *
- * @returns VBox status code.
- * @param   pIdRegs     The ID registers to explode the features from.
- * @param   pFeatures   Where to store the features to.
- */
-static int cpumCpuIdExplodeFeatures(PCCPUMIDREGS pIdRegs, PCPUMFEATURES pFeatures)
-{
-    uint64_t u64IdReg = pIdRegs->u64RegIdAa64Mmfr0El1;
-
-    static uint8_t s_aPaRange[] = { 32, 36, 40, 42, 44, 48, 52 };
-    AssertLogRelMsgReturn(RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE) < RT_ELEMENTS(s_aPaRange),
-                          ("CPUM: Invalid/Unsupported PARange value in ID_AA64MMFR0_EL1 register: %u\n",
-                           RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE)),
-                          VERR_CPUM_IPE_1);
-
-    pFeatures->cMaxPhysAddrWidth = s_aPaRange[RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_PARANGE)];
-    pFeatures->fTGran4K  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN4)  != ARMV8_ID_AA64MMFR0_EL1_TGRAN4_NOT_IMPL;
-    pFeatures->fTGran16K = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN16) != ARMV8_ID_AA64MMFR0_EL1_TGRAN16_NOT_IMPL;
-    pFeatures->fTGran64K = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_TGRAN64) != ARMV8_ID_AA64MMFR0_EL1_TGRAN64_NOT_IMPL;
-
-    /* ID_AA64ISAR0_EL1 features. */
-    u64IdReg = pIdRegs->u64RegIdAa64Isar0El1;
-    pFeatures->fAes       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_AES)    >= ARMV8_ID_AA64ISAR0_EL1_AES_SUPPORTED;
-    pFeatures->fPmull     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_AES)    >= ARMV8_ID_AA64ISAR0_EL1_AES_SUPPORTED_PMULL;
-    pFeatures->fSha1      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA1)   >= ARMV8_ID_AA64ISAR0_EL1_SHA1_SUPPORTED;
-    pFeatures->fSha256    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA2)   >= ARMV8_ID_AA64ISAR0_EL1_SHA2_SUPPORTED_SHA256;
-    pFeatures->fSha512    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA2)   >= ARMV8_ID_AA64ISAR0_EL1_SHA2_SUPPORTED_SHA256_SHA512;
-    pFeatures->fCrc32     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_CRC32)  >= ARMV8_ID_AA64ISAR0_EL1_CRC32_SUPPORTED;
-    pFeatures->fLse       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_ATOMIC) >= ARMV8_ID_AA64ISAR0_EL1_ATOMIC_SUPPORTED;
-    pFeatures->fTme       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TME)    >= ARMV8_ID_AA64ISAR0_EL1_TME_SUPPORTED;
-    pFeatures->fRdm       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_RDM)    >= ARMV8_ID_AA64ISAR0_EL1_RDM_SUPPORTED;
-    pFeatures->fSha3      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SHA3)   >= ARMV8_ID_AA64ISAR0_EL1_SHA3_SUPPORTED;
-    pFeatures->fSm3       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SM3)    >= ARMV8_ID_AA64ISAR0_EL1_SM3_SUPPORTED;
-    pFeatures->fSm4       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_SM4)    >= ARMV8_ID_AA64ISAR0_EL1_SM4_SUPPORTED;
-    pFeatures->fDotProd   = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_DP)     >= ARMV8_ID_AA64ISAR0_EL1_DP_SUPPORTED;
-    pFeatures->fFhm       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_FHM)    >= ARMV8_ID_AA64ISAR0_EL1_FHM_SUPPORTED;
-    pFeatures->fFlagM     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TS)     >= ARMV8_ID_AA64ISAR0_EL1_TS_SUPPORTED;
-    pFeatures->fFlagM2    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TS)     >= ARMV8_ID_AA64ISAR0_EL1_TS_SUPPORTED_2;
-    pFeatures->fTlbios    = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TLB)    >= ARMV8_ID_AA64ISAR0_EL1_TLB_SUPPORTED;
-    pFeatures->fTlbirange = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_TLB)    >= ARMV8_ID_AA64ISAR0_EL1_TLB_SUPPORTED_RANGE;
-    pFeatures->fRng       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR0_EL1_RNDR)   >= ARMV8_ID_AA64ISAR0_EL1_RNDR_SUPPORTED;
-
-    /* ID_AA64ISAR1_EL1 features. */
-    u64IdReg = pIdRegs->u64RegIdAa64Isar1El1;
-    pFeatures->fDpb  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DPB) >= ARMV8_ID_AA64ISAR1_EL1_DPB_SUPPORTED;
-    pFeatures->fDpb2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DPB) >= ARMV8_ID_AA64ISAR1_EL1_DPB_SUPPORTED_2;
-
-    /* PAuth using QARMA5. */
-    pFeatures->fPacQarma5 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) != ARMV8_ID_AA64ISAR1_EL1_APA_NOT_IMPL;
-    if (pFeatures->fPacQarma5)
-    {
-        pFeatures->fPAuth       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_PAUTH;
-        pFeatures->fEpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_EPAC;
-        pFeatures->fPAuth2      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_PAUTH2;
-        pFeatures->fFpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_FPAC;
-        pFeatures->fFpacCombine = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_APA) >= ARMV8_ID_AA64ISAR1_EL1_APA_SUPPORTED_FPACCOMBINE;
-    }
-
-    /* PAuth using implementation defined algorithm. */
-    pFeatures->fPacImp = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) != ARMV8_ID_AA64ISAR1_EL1_API_NOT_IMPL;
-    if (pFeatures->fPacQarma5)
-    {
-        pFeatures->fPAuth       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_PAUTH;
-        pFeatures->fEpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_EPAC;
-        pFeatures->fPAuth2      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_PAUTH2;
-        pFeatures->fFpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_FPAC;
-        pFeatures->fFpacCombine = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_API) >= ARMV8_ID_AA64ISAR1_EL1_API_SUPPORTED_FPACCOMBINE;
-    }
-
-    pFeatures->fJscvt       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FJCVTZS) >= ARMV8_ID_AA64ISAR1_EL1_FJCVTZS_SUPPORTED;
-    pFeatures->fFcma        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FCMA)    >= ARMV8_ID_AA64ISAR1_EL1_FCMA_SUPPORTED;
-    pFeatures->fLrcpc       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LRCPC)   >= ARMV8_ID_AA64ISAR1_EL1_LRCPC_SUPPORTED;
-    pFeatures->fLrcpc2      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LRCPC)   >= ARMV8_ID_AA64ISAR1_EL1_LRCPC_SUPPORTED_2;
-    pFeatures->fFrintts     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_FRINTTS) >= ARMV8_ID_AA64ISAR1_EL1_FRINTTS_SUPPORTED;
-    pFeatures->fSb          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_SB)      >= ARMV8_ID_AA64ISAR1_EL1_SB_SUPPORTED;
-    pFeatures->fSpecres     = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_SPECRES) >= ARMV8_ID_AA64ISAR1_EL1_SPECRES_SUPPORTED;
-    pFeatures->fBf16        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_BF16)    >= ARMV8_ID_AA64ISAR1_EL1_BF16_SUPPORTED_BF16;
-    pFeatures->fEbf16       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_BF16)    >= ARMV8_ID_AA64ISAR1_EL1_BF16_SUPPORTED_EBF16;
-    pFeatures->fDgh         = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_DGH)     >= ARMV8_ID_AA64ISAR1_EL1_DGH_SUPPORTED;
-    pFeatures->fI8mm        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_I8MM)    >= ARMV8_ID_AA64ISAR1_EL1_I8MM_SUPPORTED;
-    pFeatures->fXs          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_XS)      >= ARMV8_ID_AA64ISAR1_EL1_XS_SUPPORTED;
-    pFeatures->fLs64        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED;
-    pFeatures->fLs64V       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED_V;
-    pFeatures->fLs64Accdata = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR1_EL1_LS64)    >= ARMV8_ID_AA64ISAR1_EL1_LS64_SUPPORTED_ACCDATA;
-
-    /* ID_AA64ISAR2_EL1 features. */
-    u64IdReg = pIdRegs->u64RegIdAa64Isar2El1;
-    pFeatures->fWfxt  = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_WFXT)  >= ARMV8_ID_AA64ISAR2_EL1_WFXT_SUPPORTED;
-    pFeatures->fRpres = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_RPRES) >= ARMV8_ID_AA64ISAR2_EL1_RPRES_SUPPORTED;
-
-    /* PAuth using QARMA3. */
-    pFeatures->fPacQarma3 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_GPA3) >= ARMV8_ID_AA64ISAR2_EL1_GPA3_SUPPORTED;
-    pFeatures->fPacQarma3 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) != ARMV8_ID_AA64ISAR2_EL1_APA3_NOT_IMPL;
-    if (pFeatures->fPacQarma5)
-    {
-        pFeatures->fPAuth       = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_PAUTH;
-        pFeatures->fEpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_EPAC;
-        pFeatures->fPAuth2      = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_PAUTH2;
-        pFeatures->fFpac        = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_FPAC;
-        pFeatures->fFpacCombine = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_APA3) >= ARMV8_ID_AA64ISAR2_EL1_APA3_SUPPORTED_FPACCOMBINE;
-    }
-
-    pFeatures->fMops          = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_MOPS)    >= ARMV8_ID_AA64ISAR2_EL1_MOPS_SUPPORTED;
-    pFeatures->fHbc           = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_BC)      >= ARMV8_ID_AA64ISAR2_EL1_BC_SUPPORTED;
-    pFeatures->fConstPacField = RT_BF_GET(u64IdReg, ARMV8_ID_AA64ISAR2_EL1_PACFRAC) >= ARMV8_ID_AA64ISAR2_EL1_PACFRAC_TRUE;
-
-    /* ID_AA64PFR0_EL1 */
-    u64IdReg = pIdRegs->u64RegIdAa64Pfr0El1;
-    /* The FP and AdvSIMD field must have the same value.
*/208 Assert(RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP) == RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD));209 pFeatures->fFp = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP) != ARMV8_ID_AA64PFR0_EL1_FP_NOT_IMPL;210 pFeatures->fFp16 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_FP) == ARMV8_ID_AA64PFR0_EL1_FP_IMPL_SP_DP_HP;211 pFeatures->fAdvSimd = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD) != ARMV8_ID_AA64PFR0_EL1_ADVSIMD_NOT_IMPL;212 pFeatures->fFp16 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_ADVSIMD) == ARMV8_ID_AA64PFR0_EL1_ADVSIMD_IMPL_SP_DP_HP;213 pFeatures->fRas = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RAS) >= ARMV8_ID_AA64PFR0_EL1_RAS_SUPPORTED;214 pFeatures->fRasV1p1 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RAS) >= ARMV8_ID_AA64PFR0_EL1_RAS_V1P1;215 pFeatures->fSve = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_SVE) >= ARMV8_ID_AA64PFR0_EL1_SVE_SUPPORTED;216 pFeatures->fSecEl2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_SEL2) >= ARMV8_ID_AA64PFR0_EL1_SEL2_SUPPORTED;217 pFeatures->fAmuV1 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_AMU) >= ARMV8_ID_AA64PFR0_EL1_AMU_V1;218 pFeatures->fAmuV1p1 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_AMU) >= ARMV8_ID_AA64PFR0_EL1_AMU_V1P1;219 pFeatures->fDit = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_DIT) >= ARMV8_ID_AA64PFR0_EL1_DIT_SUPPORTED;220 pFeatures->fRme = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_RME) >= ARMV8_ID_AA64PFR0_EL1_RME_SUPPORTED;221 pFeatures->fCsv2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_CSV2) >= ARMV8_ID_AA64PFR0_EL1_CSV2_SUPPORTED;222 pFeatures->fCsv2v3 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR0_EL1_CSV2) >= ARMV8_ID_AA64PFR0_EL1_CSV2_3_SUPPORTED;223 224 /* ID_AA64PFR1_EL1 */225 u64IdReg = pIdRegs->u64RegIdAa64Pfr1El1;226 pFeatures->fBti = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_BT) >= ARMV8_ID_AA64PFR1_EL1_BT_SUPPORTED;227 pFeatures->fSsbs = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SSBS) >= ARMV8_ID_AA64PFR1_EL1_SSBS_SUPPORTED;228 pFeatures->fSsbs2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SSBS) >= ARMV8_ID_AA64PFR1_EL1_SSBS_SUPPORTED_MSR_MRS;229 pFeatures->fMte = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE) >= ARMV8_ID_AA64PFR1_EL1_MTE_INSN_ONLY;230 pFeatures->fMte2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE) >= ARMV8_ID_AA64PFR1_EL1_MTE_FULL;231 pFeatures->fMte3 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_MTE) >= ARMV8_ID_AA64PFR1_EL1_MTE_FULL_ASYM_TAG_FAULT_CHK;232 /** @todo RAS_frac, MPAM_frac, CSV2_frac. 
*/233 pFeatures->fSme = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SME) >= ARMV8_ID_AA64PFR1_EL1_SME_SUPPORTED;234 pFeatures->fSme2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_SME) >= ARMV8_ID_AA64PFR1_EL1_SME_SME2;235 pFeatures->fRngTrap = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_RNDRTRAP) >= ARMV8_ID_AA64PFR1_EL1_RNDRTRAP_SUPPORTED;236 pFeatures->fNmi = RT_BF_GET(u64IdReg, ARMV8_ID_AA64PFR1_EL1_NMI) >= ARMV8_ID_AA64PFR1_EL1_NMI_SUPPORTED;237 238 /* ID_AA64MMFR0_EL1 */239 u64IdReg = pIdRegs->u64RegIdAa64Mmfr0El1;240 pFeatures->fExs = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_EXS) >= ARMV8_ID_AA64MMFR0_EL1_EXS_SUPPORTED;241 pFeatures->fFgt = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_FGT) >= ARMV8_ID_AA64MMFR0_EL1_FGT_SUPPORTED;242 pFeatures->fEcv = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR0_EL1_ECV) >= ARMV8_ID_AA64MMFR0_EL1_ECV_SUPPORTED;243 244 /* ID_AA64MMFR1_EL1 */245 u64IdReg = pIdRegs->u64RegIdAa64Mmfr1El1;246 pFeatures->fHafdbs = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HAFDBS) >= ARMV8_ID_AA64MMFR1_EL1_HAFDBS_SUPPORTED;247 pFeatures->fVmid16 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_VMIDBITS) >= ARMV8_ID_AA64MMFR1_EL1_VMIDBITS_16;248 pFeatures->fVhe = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_VHE) >= ARMV8_ID_AA64MMFR1_EL1_VHE_SUPPORTED;249 pFeatures->fHpds = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HPDS) >= ARMV8_ID_AA64MMFR1_EL1_HPDS_SUPPORTED;250 pFeatures->fHpds2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HPDS) >= ARMV8_ID_AA64MMFR1_EL1_HPDS_SUPPORTED_2;251 pFeatures->fLor = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_LO) >= ARMV8_ID_AA64MMFR1_EL1_LO_SUPPORTED;252 pFeatures->fPan = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN) >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED;253 pFeatures->fPan2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN) >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED_2;254 pFeatures->fPan3 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_PAN) >= ARMV8_ID_AA64MMFR1_EL1_PAN_SUPPORTED_3;255 pFeatures->fXnx = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_XNX) >= ARMV8_ID_AA64MMFR1_EL1_XNX_SUPPORTED;256 pFeatures->fTwed = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_TWED) >= ARMV8_ID_AA64MMFR1_EL1_TWED_SUPPORTED;257 pFeatures->fEts2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_ETS) >= ARMV8_ID_AA64MMFR1_EL1_ETS_SUPPORTED;258 pFeatures->fHcx = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_HCX) >= ARMV8_ID_AA64MMFR1_EL1_HCX_SUPPORTED;259 pFeatures->fAfp = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_AFP) >= ARMV8_ID_AA64MMFR1_EL1_AFP_SUPPORTED;260 pFeatures->fNTlbpa = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_NTLBPA) >= ARMV8_ID_AA64MMFR1_EL1_NTLBPA_INCLUDE_COHERENT_ONLY;261 pFeatures->fTidcp1 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_TIDCP1) >= ARMV8_ID_AA64MMFR1_EL1_TIDCP1_SUPPORTED;262 pFeatures->fCmow = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR1_EL1_CMOW) >= ARMV8_ID_AA64MMFR1_EL1_CMOW_SUPPORTED;263 264 /* ID_AA64MMFR2_EL1 */265 u64IdReg = pIdRegs->u64RegIdAa64Mmfr2El1;266 pFeatures->fTtcnp = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_CNP) >= ARMV8_ID_AA64MMFR2_EL1_CNP_SUPPORTED;267 pFeatures->fUao = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_UAO) >= ARMV8_ID_AA64MMFR2_EL1_UAO_SUPPORTED;268 pFeatures->fLsmaoc = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_LSM) >= ARMV8_ID_AA64MMFR2_EL1_LSM_SUPPORTED;269 pFeatures->fIesb = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_IESB) >= ARMV8_ID_AA64MMFR2_EL1_IESB_SUPPORTED;270 pFeatures->fLva = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_VARANGE) >= ARMV8_ID_AA64MMFR2_EL1_VARANGE_52BITS_64KB_GRAN;271 pFeatures->fCcidx = RT_BF_GET(u64IdReg, 
ARMV8_ID_AA64MMFR2_EL1_CCIDX) >= ARMV8_ID_AA64MMFR2_EL1_CCIDX_64BIT;272 pFeatures->fNv = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_NV) >= ARMV8_ID_AA64MMFR2_EL1_NV_SUPPORTED;273 pFeatures->fNv2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_NV) >= ARMV8_ID_AA64MMFR2_EL1_NV_SUPPORTED_2;274 pFeatures->fTtst = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_ST) >= ARMV8_ID_AA64MMFR2_EL1_ST_SUPPORTED;275 pFeatures->fLse2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_AT) >= ARMV8_ID_AA64MMFR2_EL1_AT_SUPPORTED;276 pFeatures->fIdst = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_IDS) >= ARMV8_ID_AA64MMFR2_EL1_IDS_EC_18H;277 pFeatures->fS2Fwb = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_FWB) >= ARMV8_ID_AA64MMFR2_EL1_FWB_SUPPORTED;278 pFeatures->fTtl = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_TTL) >= ARMV8_ID_AA64MMFR2_EL1_TTL_SUPPORTED;279 pFeatures->fEvt = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_EVT) >= ARMV8_ID_AA64MMFR2_EL1_EVT_SUPPORTED;280 pFeatures->fE0Pd = RT_BF_GET(u64IdReg, ARMV8_ID_AA64MMFR2_EL1_E0PD) >= ARMV8_ID_AA64MMFR2_EL1_E0PD_SUPPORTED;281 282 /* ID_AA64DFR0_EL1 */283 u64IdReg = pIdRegs->u64RegIdAa64Dfr0El1;284 pFeatures->fDebugV8p1 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8_VHE;285 pFeatures->fDebugV8p2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p2;286 pFeatures->fDebugV8p4 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p4;287 pFeatures->fDebugV8p8 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DEBUGVER) >= ARMV8_ID_AA64DFR0_EL1_DEBUGVER_ARMV8p8;288 pFeatures->fPmuV3 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER) >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3;289 pFeatures->fPmuV3p1 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER) >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P1;290 pFeatures->fPmuV3p4 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER) >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P4;291 pFeatures->fPmuV3p5 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER) >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P5;292 pFeatures->fPmuV3p7 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER) >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P7;293 pFeatures->fPmuV3p8 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMUVER) >= ARMV8_ID_AA64DFR0_EL1_PMUVER_SUPPORTED_V3P8;294 pFeatures->fSpe = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER) >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED;295 pFeatures->fSpeV1p1 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER) >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P1;296 pFeatures->fSpeV1p2 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER) >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P2;297 pFeatures->fSpeV1p3 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_PMSVER) >= ARMV8_ID_AA64DFR0_EL1_PMSVER_SUPPORTED_V1P3;298 pFeatures->fDoubleLock = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_DOUBLELOCK) == ARMV8_ID_AA64DFR0_EL1_DOUBLELOCK_SUPPORTED;299 pFeatures->fTrf = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_TRACEFILT) >= ARMV8_ID_AA64DFR0_EL1_TRACEFILT_SUPPORTED;300 pFeatures->fTrbe = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_TRACEBUFFER) >= ARMV8_ID_AA64DFR0_EL1_TRACEBUFFER_SUPPORTED;301 pFeatures->fMtPmu = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_MTPMU) == ARMV8_ID_AA64DFR0_EL1_MTPMU_SUPPORTED;302 pFeatures->fBrbe = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_BRBE) >= ARMV8_ID_AA64DFR0_EL1_BRBE_SUPPORTED;303 pFeatures->fBrbeV1p1 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_BRBE) >= 
ARMV8_ID_AA64DFR0_EL1_BRBE_SUPPORTED_V1P1;304 pFeatures->fHpmn0 = RT_BF_GET(u64IdReg, ARMV8_ID_AA64DFR0_EL1_HPMN0) >= ARMV8_ID_AA64DFR0_EL1_HPMN0_SUPPORTED;305 306 return VINF_SUCCESS;307 }308 95 309 96 … … 348 135 /* The CPUID entries we start with here isn't necessarily the ones of the host, so we 349 136 must consult HostFeatures when processing CPUMISAEXTCFG variables. */ 350 PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures ;137 PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures.s; 351 138 #define PASSTHRU_FEATURE(a_IdReg, enmConfig, fHostFeature, a_IdRegNm, a_IdRegValSup, a_IdRegValNotSup) \ 352 139 (a_IdReg) = ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) \ … … 588 375 * on the VM config. 589 376 */ 590 VMMR3DECL(int) CPUMR3PopulateFeaturesByIdRegisters(PVM pVM, PCCPUM IDREGS pIdRegs)377 VMMR3DECL(int) CPUMR3PopulateFeaturesByIdRegisters(PVM pVM, PCCPUMARMV8IDREGS pIdRegs) 591 378 { 592 379 /* Set the host features from the given ID registers. */ 593 int rc = cpumCpuIdExplodeFeatures (pIdRegs, &g_CpumHostFeatures.s);380 int rc = cpumCpuIdExplodeFeaturesArmV8(pIdRegs, &g_CpumHostFeatures.s); 594 381 AssertRCReturn(rc, rc); 595 382 596 pVM->cpum.s.HostFeatures 597 pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures. enmCpuVendor;383 pVM->cpum.s.HostFeatures.s = g_CpumHostFeatures.s; 384 pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.Common.enmCpuVendor; 598 385 pVM->cpum.s.HostIdRegs = *pIdRegs; 599 386 pVM->cpum.s.GuestIdRegs = *pIdRegs; … … 631 418 */ 632 419 if (RT_SUCCESS(rc)) 633 rc = cpumCpuIdExplodeFeatures (pIdRegs, &pCpum->GuestFeatures);420 rc = cpumCpuIdExplodeFeaturesArmV8(pIdRegs, &pCpum->GuestFeatures); 634 421 635 422 /* … … 650 437 * @param ppIdRegs Where to store the pointer to the guest ID register struct. 651 438 */ 652 VMMR3_INT_DECL(int) CPUMR3QueryGuestIdRegs(PVM pVM, PCCPUM IDREGS *ppIdRegs)439 VMMR3_INT_DECL(int) CPUMR3QueryGuestIdRegs(PVM pVM, PCCPUMARMV8IDREGS *ppIdRegs) 653 440 { 654 441 AssertPtrReturn(ppIdRegs, VERR_INVALID_POINTER); … … 668 455 * 669 456 */ 670 /** Saved state field descriptors for CPUM IDREGS. */671 static const SSMFIELD g_aCpum IdRegsFields[] =672 { 673 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Pfr0El1),674 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Pfr1El1),675 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Dfr0El1),676 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Dfr1El1),677 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Afr0El1),678 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Afr1El1),679 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Isar0El1),680 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Isar1El1),681 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Isar2El1),682 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Mmfr0El1),683 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Mmfr1El1),684 SSMFIELD_ENTRY(CPUM IDREGS, u64RegIdAa64Mmfr2El1),685 SSMFIELD_ENTRY(CPUM IDREGS, u64RegClidrEl1),686 SSMFIELD_ENTRY(CPUM IDREGS, u64RegCtrEl0),687 SSMFIELD_ENTRY(CPUM IDREGS, u64RegDczidEl0),457 /** Saved state field descriptors for CPUMARMV8IDREGS. 
*/ 458 static const SSMFIELD g_aCpumArmV8IdRegsFields[] = 459 { 460 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Pfr0El1), 461 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Pfr1El1), 462 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Dfr0El1), 463 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Dfr1El1), 464 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Afr0El1), 465 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Afr1El1), 466 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Isar0El1), 467 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Isar1El1), 468 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Isar2El1), 469 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Mmfr0El1), 470 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Mmfr1El1), 471 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegIdAa64Mmfr2El1), 472 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegClidrEl1), 473 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegCtrEl0), 474 SSMFIELD_ENTRY(CPUMARMV8IDREGS, u64RegDczidEl0), 688 475 SSMFIELD_ENTRY_TERM() 689 476 }; … … 701 488 * Save all the CPU ID leaves. 702 489 */ 703 SSMR3PutStructEx(pSSM, &pVM->cpum.s.GuestIdRegs, sizeof(pVM->cpum.s.GuestIdRegs), 0, g_aCpum IdRegsFields, NULL);490 SSMR3PutStructEx(pSSM, &pVM->cpum.s.GuestIdRegs, sizeof(pVM->cpum.s.GuestIdRegs), 0, g_aCpumArmV8IdRegsFields, NULL); 704 491 } 705 492 … … 714 501 * @param pGuestIdRegs The guest ID register as loaded from the saved state. 715 502 */ 716 static int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUM IDREGS pGuestIdRegs)503 static int cpumR3LoadCpuIdInner(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMARMV8IDREGS pGuestIdRegs) 717 504 { 718 505 /* … … 926 713 927 714 /** 928 * Loads the CPU ID leaves saved by pass 0 .715 * Loads the CPU ID leaves saved by pass 0, ARMv8 targets. 929 716 * 930 717 * @returns VBox status code. … … 933 720 * @param uVersion The format version. 934 721 */ 935 int cpumR3LoadCpuId (PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)936 { 937 CPUM IDREGS GuestIdRegs;938 int rc = SSMR3GetStructEx(pSSM, &GuestIdRegs, sizeof(GuestIdRegs), 0, g_aCpum IdRegsFields, NULL);722 int cpumR3LoadCpuIdArmV8(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion) 723 { 724 CPUMARMV8IDREGS GuestIdRegs; 725 int rc = SSMR3GetStructEx(pSSM, &GuestIdRegs, sizeof(GuestIdRegs), 0, g_aCpumArmV8IdRegsFields, NULL); 939 726 AssertRCReturn(rc, rc); 940 727 … … 1434 1221 do { \ 1435 1222 if (fVerbose) \ 1436 pHlp->pfnPrintf(pHlp, " %*s = %u (%u)\n", 41, #a_FeatNm, pVM->cpum.s.GuestFeatures.a_Flag, pVM->cpum.s.HostFeatures. a_Flag); \1223 pHlp->pfnPrintf(pHlp, " %*s = %u (%u)\n", 41, #a_FeatNm, pVM->cpum.s.GuestFeatures.a_Flag, pVM->cpum.s.HostFeatures.s.a_Flag); \ 1437 1224 else \ 1438 1225 pHlp->pfnPrintf(pHlp, " %*s = %u\n", 41, #a_FeatNm, pVM->cpum.s.GuestFeatures.a_Flag); \ -
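
The explode routine above reduces each ID register to masked 4-bit fields compared against minimum values. A minimal standalone sketch of the PARange mapping, assuming plain shift/mask arithmetic in place of VBox's RT_BF_GET machinery (the helper name is invented for illustration; the field position follows the ARM architecture manual):

    #include <stdint.h>

    /* ID_AA64MMFR0_EL1.PARange sits in bits [3:0]; the table mirrors the
     * s_aPaRange lookup in the hunk above. */
    static uint8_t paRangeToPhysAddrWidth(uint64_t u64IdAa64Mmfr0El1)
    {
        static const uint8_t s_aPaRange[] = { 32, 36, 40, 42, 44, 48, 52 };
        uint8_t const bPaRange = (uint8_t)(u64IdAa64Mmfr0El1 & 0xf);
        return bPaRange < sizeof(s_aPaRange) ? s_aPaRange[bPaRange] : 0; /* 0 = unknown encoding */
    }

-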
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r107570 r107650 1335 1335 /* The CPUID entries we start with here isn't necessarily the ones of the host, so we 1336 1336 must consult HostFeatures when processing CPUMISAEXTCFG variables. */ 1337 PCCPUMFEATURES pHstFeat = &pCpum->HostFeatures; 1337 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) 1338 PCCPUMFEATURES const pHstFeat = &pCpum->HostFeatures.s; 1339 #else 1340 PCCPUMFEATURES const pHstFeat = &pCpum->GuestFeatures; 1341 #endif 1338 1342 #define PASSTHRU_FEATURE(enmConfig, fHostFeature, fConst) \ 1339 1343 ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) ? (fConst) : 0) 1340 1344 #define PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, fAndExpr, fConst) \ 1341 1345 ((enmConfig) && ((enmConfig) == CPUMISAEXTCFG_ENABLED_ALWAYS || (fHostFeature)) && (fAndExpr) ? (fConst) : 0) 1346 #define PASSTHRU_FEATURE_NOT_IEM(enmConfig, fHostFeature, fConst) \ 1347 PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, !VM_IS_EXEC_ENGINE_IEM(pVM), fConst) 1342 1348 #define PASSTHRU_FEATURE_TODO(enmConfig, fConst) ((enmConfig) ? (fConst) : 0) 1343 1349 … … 1406 1412 //| X86_CPUID_FEATURE_ECX_TPRUPDATE 1407 1413 //| X86_CPUID_FEATURE_ECX_PDCM - not implemented yet. 1408 | PASSTHRU_FEATURE (pConfig->enmPcid, pHstFeat->fPcid, X86_CPUID_FEATURE_ECX_PCID)1414 | PASSTHRU_FEATURE_NOT_IEM(pConfig->enmPcid, pHstFeat->fPcid, X86_CPUID_FEATURE_ECX_PCID) 1409 1415 //| X86_CPUID_FEATURE_ECX_DCA - not implemented yet. 1410 1416 | PASSTHRU_FEATURE(pConfig->enmSse41, pHstFeat->fSse41, X86_CPUID_FEATURE_ECX_SSE4_1) … … 1866 1872 | X86_CPUID_STEXT_FEATURE_EBX_BMI2 1867 1873 //| X86_CPUID_STEXT_FEATURE_EBX_ERMS RT_BIT(9) 1868 | PASSTHRU_FEATURE (pConfig->enmInvpcid, pHstFeat->fInvpcid, X86_CPUID_STEXT_FEATURE_EBX_INVPCID)1874 | PASSTHRU_FEATURE_NOT_IEM(pConfig->enmInvpcid, pHstFeat->fInvpcid, X86_CPUID_STEXT_FEATURE_EBX_INVPCID) 1869 1875 //| X86_CPUID_STEXT_FEATURE_EBX_RTM RT_BIT(11) 1870 1876 //| X86_CPUID_STEXT_FEATURE_EBX_PQM RT_BIT(12) … … 2791 2797 AssertLogRelRCReturn(rc, rc); 2792 2798 2799 #ifdef RT_ARCH_AMD64 /** @todo next VT-x/AMD-V on non-AMD64 hosts */ 2793 2800 bool fQueryNestedHwvirt = false 2794 2801 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2795 || pVM->cpum.s.HostFeatures. enmCpuVendor == CPUMCPUVENDOR_AMD2796 || pVM->cpum.s.HostFeatures. enmCpuVendor == CPUMCPUVENDOR_HYGON2802 || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_AMD 2803 || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_HYGON 2797 2804 #endif 2798 2805 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 2799 || pVM->cpum.s.HostFeatures. enmCpuVendor == CPUMCPUVENDOR_INTEL2800 || pVM->cpum.s.HostFeatures. enmCpuVendor == CPUMCPUVENDOR_VIA2806 || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_INTEL 2807 || pVM->cpum.s.HostFeatures.s.enmCpuVendor == CPUMCPUVENDOR_VIA 2801 2808 #endif 2802 2809 ; … … 2823 2830 } 2824 2831 } 2832 #endif /** @todo */ 2825 2833 2826 2834 /* … … 2898 2906 AssertLogRelRCReturn(rc, rc); 2899 2907 2900 bool const fMayHaveXSave = pVM->cpum.s.HostFeatures.fXSaveRstor 2901 && pVM->cpum.s.HostFeatures.fOpSysXSaveRstor 2908 #ifdef RT_ARCH_AMD64 2909 bool const fMayHaveXSave = pVM->cpum.s.HostFeatures.s.fXSaveRstor 2910 && pVM->cpum.s.HostFeatures.s.fOpSysXSaveRstor 2902 2911 && ( VM_IS_NEM_ENABLED(pVM) 2903 2912 ? 
NEMHCGetFeatures(pVM) & NEM_FEAT_F_XSAVE_XRSTOR … … 2906 2915 : fNestedPagingAndFullGuestExec); 2907 2916 uint64_t const fXStateHostMask = pVM->cpum.s.fXStateHostMask; 2917 #else 2918 bool const fMayHaveXSave = true; 2919 uint64_t const fXStateHostMask = XSAVE_C_YMM | XSAVE_C_SSE | XSAVE_C_X87; 2920 #endif 2908 2921 2909 2922 /** @cfgm{/CPUM/IsaExts/XSAVE, boolean, depends} … … 3289 3302 { 3290 3303 #ifdef RT_ARCH_AMD64 3291 Assert(pVM->cpum.s.HostFeatures. fMtrr);3304 Assert(pVM->cpum.s.HostFeatures.s.fMtrr); 3292 3305 #endif 3293 3306 … … 3695 3708 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) 3696 3709 # define CHECK_X86_HOST_FEATURE_RET(a_fFeature, a_szFeature) \ 3697 if (!pVM->cpum.s.HostFeatures. a_fFeature) \3710 if (!pVM->cpum.s.HostFeatures.s. a_fFeature) \ 3698 3711 { \ 3699 3712 LogRel(("CPUM: WARNING! Can't turn on " a_szFeature " when the host doesn't support it!\n")); \ … … 3875 3888 /* Valid for both Intel and AMD. */ 3876 3889 pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx = pLeaf->uEdx |= X86_CPUID_EXT_FEATURE_EDX_RDTSCP; 3877 pVM->cpum.s. HostFeatures.fRdTscP = 1;3890 pVM->cpum.s.GuestFeatures.fRdTscP = 1; 3878 3891 LogRel(("CPUM: SetGuestCpuIdFeature: Enabled RDTSCP.\n")); 3879 3892 break; … … 3900 3913 #ifdef RT_ARCH_AMD64 3901 3914 if ( !pLeaf 3902 || !(pVM->cpum.s.HostFeatures. fIbpb || pVM->cpum.s.HostFeatures.fIbrs))3915 || !(pVM->cpum.s.HostFeatures.s.fIbpb || pVM->cpum.s.HostFeatures.s.fIbrs)) 3903 3916 { 3904 3917 LogRel(("CPUM: WARNING! Can't turn on Speculation Control when the host doesn't support it!\n")); … … 3918 3931 #ifdef RT_ARCH_AMD64 3919 3932 /* We will only expose STIBP if IBRS is present to keep things simpler (simple is not an option). */ 3920 if (pVM->cpum.s.HostFeatures. fIbrs)3933 if (pVM->cpum.s.HostFeatures.s.fIbrs) 3921 3934 #endif 3922 3935 { … … 3924 3937 pVM->cpum.s.GuestFeatures.fIbrs = 1; 3925 3938 #ifdef RT_ARCH_AMD64 3926 if (pVM->cpum.s.HostFeatures. fStibp)3939 if (pVM->cpum.s.HostFeatures.s.fStibp) 3927 3940 #endif 3928 3941 { … … 3965 3978 3966 3979 #ifdef RT_ARCH_AMD64 3967 if (pVM->cpum.s.HostFeatures. fArchCap)3980 if (pVM->cpum.s.HostFeatures.s.fArchCap) 3968 3981 #endif 3969 3982 { … … 3984 3997 3985 3998 /* Advertise IBRS_ALL if present at this point... */ 3986 if (pVM->cpum.s.HostFeatures.fArchCap & MSR_IA32_ARCH_CAP_F_IBRS_ALL) 3999 #ifdef RT_ARCH_AMD64 4000 if (pVM->cpum.s.HostFeatures.s.fArchCap & MSR_IA32_ARCH_CAP_F_IBRS_ALL) 4001 #endif 3987 4002 VMCC_FOR_EACH_VMCPU_STMT(pVM, pVCpu->cpum.s.GuestMsrs.msr.ArchCaps |= MSR_IA32_ARCH_CAP_F_IBRS_ALL); 3988 4003 } … … 4956 4971 4957 4972 /** 4958 * Loads the CPU ID leaves saved by pass 0 .4973 * Loads the CPU ID leaves saved by pass 0, x86 targets. 4959 4974 * 4960 4975 * @returns VBox status code. … … 4964 4979 * @param pMsrs The guest MSRs. 4965 4980 */ 4966 int cpumR3LoadCpuId (PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs)4981 int cpumR3LoadCpuIdX86(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pMsrs) 4967 4982 { 4968 4983 AssertMsgReturn(uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2, ("%u\n", uVersion), VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION); -
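
The new PASSTHRU_FEATURE_NOT_IEM variant above only layers an execution-engine test on the existing gating: a CPUID bit survives when it is configured, is either forced or actually backed by the host, and (for the new variant) the VM is not running on IEM. A toy expansion, with invented enum values standing in for the real CPUMISAEXTCFG constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins; the real CPUMISAEXTCFG values differ. */
    enum { CFG_DISABLED = 0, CFG_ENABLED_SUPPORTED = 1, CFG_ENABLED_ALWAYS = 9 };

    #define PASSTHRU_FEATURE_EX(enmConfig, fHostFeature, fAndExpr, fConst) \
        ((enmConfig) && ((enmConfig) == CFG_ENABLED_ALWAYS || (fHostFeature)) && (fAndExpr) ? (fConst) : 0)

    int main(void)
    {
        int const fExecEngineIem = 1;   /* pretend the VM runs on IEM */
        uint32_t  fEcx = 0;
        fEcx |= PASSTHRU_FEATURE_EX(CFG_ENABLED_SUPPORTED, 1 /*host has PCID*/, !fExecEngineIem,
                                    UINT32_C(1) << 17 /*ECX.PCID*/);
        printf("guest ECX = %#x\n", (unsigned)fEcx);   /* prints 0: PCID masked out on IEM */
        return 0;
    }

-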
trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp
r106630 r107650
901 901         if (RT_FAILURE(rc))
902 902             return rc;
903             rc = CPUMCpuIdCollectLeavesX86(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
    903         rc = CPUMCpuIdCollectLeavesFromX86Host(&pInfo->paCpuIdLeavesR3, &pInfo->cCpuIdLeaves);
904 904         if (RT_FAILURE(rc))
905 905             return rc;
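
For callers only the name changes; the signature stays as in both hunks. A hedged usage fragment, assuming the usual VBox runtime headers are in scope (how the leaf array must be released is an assumption here, check the allocator the collector actually uses):

    PCPUMCPUIDLEAF paLeaves = NULL;
    uint32_t       cLeaves  = 0;
    int rc = CPUMCpuIdCollectLeavesFromX86Host(&paLeaves, &cLeaves);
    if (RT_SUCCESS(rc))
    {
        for (uint32_t i = 0; i < cLeaves; i++)
            RTPrintf("%08x/%03x\n", paLeaves[i].uLeaf, paLeaves[i].uSubLeaf);
        RTMemFree(paLeaves);   /* assumption, not verified against the implementation */
    }

-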
trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp
r107316 r107650 572 572 } s_aIdRegs[] = 573 573 { 574 { HV_FEATURE_REG_ID_AA64DFR0_EL1, RT_UOFFSETOF(CPUM IDREGS, u64RegIdAa64Dfr0El1) },575 { HV_FEATURE_REG_ID_AA64DFR1_EL1, RT_UOFFSETOF(CPUM IDREGS, u64RegIdAa64Dfr1El1) },576 { HV_FEATURE_REG_ID_AA64ISAR0_EL1, RT_UOFFSETOF(CPUM IDREGS, u64RegIdAa64Isar0El1) },577 { HV_FEATURE_REG_ID_AA64ISAR1_EL1, RT_UOFFSETOF(CPUM IDREGS, u64RegIdAa64Isar1El1) },578 { HV_FEATURE_REG_ID_AA64MMFR0_EL1, RT_UOFFSETOF(CPUM IDREGS, u64RegIdAa64Mmfr0El1) },579 { HV_FEATURE_REG_ID_AA64MMFR1_EL1, RT_UOFFSETOF(CPUM IDREGS, u64RegIdAa64Mmfr1El1) },580 { HV_FEATURE_REG_ID_AA64MMFR2_EL1, RT_UOFFSETOF(CPUM IDREGS, u64RegIdAa64Mmfr2El1) },581 { HV_FEATURE_REG_ID_AA64PFR0_EL1, RT_UOFFSETOF(CPUM IDREGS, u64RegIdAa64Pfr0El1) },582 { HV_FEATURE_REG_ID_AA64PFR1_EL1, RT_UOFFSETOF(CPUM IDREGS, u64RegIdAa64Pfr1El1) },583 { HV_FEATURE_REG_CLIDR_EL1, RT_UOFFSETOF(CPUM IDREGS, u64RegClidrEl1) },584 { HV_FEATURE_REG_CTR_EL0, RT_UOFFSETOF(CPUM IDREGS, u64RegCtrEl0) },585 { HV_FEATURE_REG_DCZID_EL0, RT_UOFFSETOF(CPUM IDREGS, u64RegDczidEl0) }574 { HV_FEATURE_REG_ID_AA64DFR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr0El1) }, 575 { HV_FEATURE_REG_ID_AA64DFR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr1El1) }, 576 { HV_FEATURE_REG_ID_AA64ISAR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar0El1) }, 577 { HV_FEATURE_REG_ID_AA64ISAR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar1El1) }, 578 { HV_FEATURE_REG_ID_AA64MMFR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr0El1) }, 579 { HV_FEATURE_REG_ID_AA64MMFR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr1El1) }, 580 { HV_FEATURE_REG_ID_AA64MMFR2_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr2El1) }, 581 { HV_FEATURE_REG_ID_AA64PFR0_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr0El1) }, 582 { HV_FEATURE_REG_ID_AA64PFR1_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr1El1) }, 583 { HV_FEATURE_REG_CLIDR_EL1, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegClidrEl1) }, 584 { HV_FEATURE_REG_CTR_EL0, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegCtrEl0) }, 585 { HV_FEATURE_REG_DCZID_EL0, RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegDczidEl0) } 586 586 }; 587 587 … … 1450 1450 1451 1451 /* Query ID registers and hand them to CPUM. */ 1452 CPUM IDREGS IdRegs; RT_ZERO(IdRegs);1452 CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs); 1453 1453 for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++) 1454 1454 { … … 2540 2540 } s_aSysIdRegs[] = 2541 2541 { 2542 #define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg, RT_UOFFSETOF(CPUMIDREGS, a_CpumIdReg) }2542 #define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg, RT_UOFFSETOF(CPUMARMV8IDREGS, a_CpumIdReg) } 2543 2543 ID_SYS_REG_CREATE(ID_AA64DFR0_EL1, u64RegIdAa64Dfr0El1), 2544 2544 ID_SYS_REG_CREATE(ID_AA64DFR1_EL1, u64RegIdAa64Dfr1El1), … … 2553 2553 }; 2554 2554 2555 PCCPUM IDREGS pIdRegsGst = NULL;2555 PCCPUMARMV8IDREGS pIdRegsGst = NULL; 2556 2556 int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst); 2557 2557 AssertRCReturn(rc, rc); -
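
The rename leaves the underlying pattern untouched: a table pairs each Hypervisor.framework feature register with the byte offset of the matching CPUMARMV8IDREGS field, so one loop can fill the structure through pointer arithmetic. A reduced sketch of that pattern with a trimmed-down struct (the hv_vcpu_config_get_feature_reg call reflects my reading of the macOS 12+ API and should be double-checked):

    #include <stdint.h>
    #include <stddef.h>
    #include <Hypervisor/Hypervisor.h>   /* macOS arm64 hosts only */

    typedef struct MYIDREGS { uint64_t u64Pfr0, u64Pfr1; } MYIDREGS;

    static int queryIdRegs(hv_vcpu_config_t hCfg, MYIDREGS *pIdRegs)
    {
        static const struct { hv_feature_reg_t enmReg; size_t off; } s_aMap[] =
        {
            { HV_FEATURE_REG_ID_AA64PFR0_EL1, offsetof(MYIDREGS, u64Pfr0) },
            { HV_FEATURE_REG_ID_AA64PFR1_EL1, offsetof(MYIDREGS, u64Pfr1) },
        };
        for (size_t i = 0; i < sizeof(s_aMap) / sizeof(s_aMap[0]); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)pIdRegs + s_aMap[i].off);
            if (hv_vcpu_config_get_feature_reg(hCfg, s_aMap[i].enmReg, pu64) != HV_SUCCESS)
                return -1;
        }
        return 0;
    }

-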
trunk/src/VBox/VMM/VMMR3/NEMR3Native-linux-armv8.cpp
r107308 r107650 292 292 } s_aIdRegs[] = 293 293 { 294 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1) },295 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1) },296 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },297 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },298 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },299 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },300 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR2_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },301 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR0_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1) },302 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR1_EL1), RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1) }294 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR0_EL1), RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr0El1) }, 295 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64DFR1_EL1), RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Dfr1El1) }, 296 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR0_EL1), RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar0El1) }, 297 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64ISAR1_EL1), RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Isar1El1) }, 298 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR0_EL1), RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr0El1) }, 299 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR1_EL1), RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr1El1) }, 300 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64MMFR2_EL1), RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Mmfr2El1) }, 301 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR0_EL1), RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr0El1) }, 302 { KVM_ARM64_REG_SYS_CREATE(ARMV8_AARCH64_SYSREG_ID_AA64PFR1_EL1), RT_UOFFSETOF(CPUMARMV8IDREGS, u64RegIdAa64Pfr1El1) } 303 303 }; 304 304 … … 480 480 481 481 /* Need to query the ID registers and populate CPUM. */ 482 CPUM IDREGS IdRegs; RT_ZERO(IdRegs);482 CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs); 483 483 for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++) 484 484 { -
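
On Linux the same table-driven fill goes through KVM's one-register interface instead. A minimal sketch of reading a single system register from a vCPU file descriptor (the register id is whatever the KVM_ARM64_REG_SYS_CREATE() encodings in the table above produce; the ioctl usage itself is the standard KVM API):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int kvmReadSysReg(int fdVCpu, uint64_t idKvmReg, uint64_t *pu64Val)
    {
        struct kvm_one_reg Reg;
        memset(&Reg, 0, sizeof(Reg));
        Reg.id   = idKvmReg;             /* e.g. a KVM_ARM64_REG_SYS_CREATE() value */
        Reg.addr = (uintptr_t)pu64Val;   /* KVM writes the 64-bit value here */
        return ioctl(fdVCpu, KVM_GET_ONE_REG, &Reg);
    }

-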
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win-armv8.cpp
r107194 r107650 831 831 * these are partition wide registers and need to be queried/set with WHV_ANY_VP. 832 832 */ 833 CPUM IDREGS IdRegs; RT_ZERO(IdRegs);833 CPUMARMV8IDREGS IdRegs; RT_ZERO(IdRegs); 834 834 835 835 WHV_REGISTER_NAME aenmNames[10]; … … 870 870 871 871 /* Apply any overrides to the partition. */ 872 PCCPUM IDREGS pIdRegsGst = NULL;872 PCCPUMARMV8IDREGS pIdRegsGst = NULL; 873 873 rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst); 874 874 AssertRCReturn(rc, rc); -
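
The Windows backend keeps the same idea, but as the comment in the hunk notes, these are partition-wide registers queried with WHV_ANY_VP rather than a concrete vCPU index. A sketch of that call shape (WHvGetVirtualProcessorRegisters is the documented WHP entry point; the ARM64 register-name constant is an assumed spelling):

    #include <stdint.h>
    #include <WinHvPlatform.h>

    static HRESULT queryIdAa64Pfr0(WHV_PARTITION_HANDLE hPartition, uint64_t *pu64Pfr0)
    {
        WHV_REGISTER_NAME  enmName = WHvArm64RegisterIdAa64Pfr0El1;  /* assumed name */
        WHV_REGISTER_VALUE Value;
        HRESULT hrc = WHvGetVirtualProcessorRegisters(hPartition, WHV_ANY_VP /*partition wide*/,
                                                      &enmName, 1, &Value);
        if (SUCCEEDED(hrc))
            *pu64Pfr0 = Value.Reg64;
        return hrc;
    }

-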
trunk/src/VBox/VMM/include/CPUMInternal-armv8.h
r106061 r107650 1 1 /* $Id$ */ 2 2 /** @file 3 * CPUM - Internal header file, ARMv8 variant.3 * CPUM - Internal header file, obsolete. 4 4 */ 5 5 … … 32 32 #endif 33 33 34 #ifndef VBOX_FOR_DTRACE_LIB 35 # include <VBox/cdefs.h> 36 # include <VBox/types.h> 37 # include <VBox/vmm/stam.h> 38 #else 39 # pragma D depends_on library cpumctx.d 40 # pragma D depends_on library cpum.d 41 42 /* Some fudging. */ 43 typedef uint64_t STAMCOUNTER; 44 #endif 45 46 47 48 49 /** @defgroup grp_cpum_int Internals 50 * @ingroup grp_cpum 51 * @internal 52 * @{ 53 */ 54 55 /** Use flags (CPUM::fUseFlags). 56 * @{ */ 57 /** Set to indicate that we should save host DR0-7 and load the hypervisor debug 58 * registers in the raw-mode world switchers. (See CPUMRecalcHyperDRx.) */ 59 #define CPUM_USE_DEBUG_REGS_HYPER RT_BIT(0) 60 /** Used in ring-0 to indicate that we have loaded the hypervisor debug 61 * registers. */ 62 #define CPUM_USED_DEBUG_REGS_HYPER RT_BIT(1) 63 /** Used in ring-0 to indicate that we have loaded the guest debug 64 * registers (DR0-3 and maybe DR6) for direct use by the guest. 65 * DR7 (and AMD-V DR6) are handled via the VMCB. */ 66 #define CPUM_USED_DEBUG_REGS_GUEST RT_BIT(2) 67 /** @} */ 68 69 70 /** @name CPUM Saved State Version. 71 * @{ */ 72 /** The current saved state version. */ 73 #define CPUM_SAVED_STATE_VERSION 1 74 /** @} */ 75 76 77 /** 78 * CPU info 79 */ 80 typedef struct CPUMINFO 81 { 82 /** The number of system register ranges (CPUMSSREGRANGE) in the array pointed to below. */ 83 uint32_t cSysRegRanges; 84 85 /** Pointer to the sysrem register ranges. */ 86 R3PTRTYPE(PCPUMSYSREGRANGE) paSysRegRangesR3; 87 88 /** System register ranges. */ 89 CPUMSYSREGRANGE aSysRegRanges[128]; 90 } CPUMINFO; 91 /** Pointer to a CPU info structure. */ 92 typedef CPUMINFO *PCPUMINFO; 93 /** Pointer to a const CPU info structure. */ 94 typedef CPUMINFO const *CPCPUMINFO; 95 96 97 /** 98 * CPUM Data (part of VM) 99 */ 100 typedef struct CPUM 101 { 102 /** The (more) portable CPUID level. */ 103 uint8_t u8PortableCpuIdLevel; 104 /** Indicates that a state restore is pending. 105 * This is used to verify load order dependencies (PGM). */ 106 bool fPendingRestore; 107 /** The initial exception level (EL) to start the CPU after a reset, 108 * should be either ARMV8_AARCH64_EL_1 or ARMV8_AARCH64_EL_2 for nested virtualization. */ 109 uint8_t bResetEl; 110 111 uint8_t abPadding0[5]; 112 113 /** The reset value of the program counter. */ 114 uint64_t u64ResetPc; 115 116 /** Align to 64-byte boundary. */ 117 uint8_t abPadding1[48]; 118 119 /** Host CPU feature information. 120 * Externaly visible via the VM structure, aligned on 64-byte boundrary. */ 121 CPUMFEATURES HostFeatures; 122 /** Guest CPU feature information. 123 * Externaly visible via that VM structure, aligned with HostFeatures. */ 124 CPUMFEATURES GuestFeatures; 125 /** Guest CPU info. */ 126 CPUMINFO GuestInfo; 127 /** Host CPU ID registers. */ 128 CPUMIDREGS HostIdRegs; 129 /** Guest CPU ID registers. */ 130 CPUMIDREGS GuestIdRegs; 131 132 /** @name System register statistics. 
133 * @{ */ 134 STAMCOUNTER cSysRegWrites; 135 STAMCOUNTER cSysRegWritesToIgnoredBits; 136 STAMCOUNTER cSysRegWritesRaiseExcp; 137 STAMCOUNTER cSysRegWritesUnknown; 138 STAMCOUNTER cSysRegReads; 139 STAMCOUNTER cSysRegReadsRaiseExcp; 140 STAMCOUNTER cSysRegReadsUnknown; 141 /** @} */ 142 } CPUM; 143 #ifndef VBOX_FOR_DTRACE_LIB 144 AssertCompileMemberOffset(CPUM, HostFeatures, 64); 145 AssertCompileMemberOffset(CPUM, GuestFeatures, 112); 146 #endif 147 /** Pointer to the CPUM instance data residing in the shared VM structure. */ 148 typedef CPUM *PCPUM; 149 150 /** 151 * CPUM Data (part of VMCPU) 152 */ 153 typedef struct CPUMCPU 154 { 155 /** Guest context. 156 * Aligned on a 64-byte boundary. */ 157 CPUMCTX Guest; 158 159 /** Use flags. 160 * These flags indicates both what is to be used and what has been used. */ 161 uint32_t fUseFlags; 162 163 /** Changed flags. 164 * These flags indicates to REM (and others) which important guest 165 * registers which has been changed since last time the flags were cleared. 166 * See the CPUM_CHANGED_* defines for what we keep track of. 167 * 168 * @todo Obsolete, but will probably be refactored so keep it for reference. */ 169 uint32_t fChanged; 170 } CPUMCPU; 171 #ifndef VBOX_FOR_DTRACE_LIB 172 /** @todo Compile time size/alignment assertions. */ 173 #endif 174 /** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. */ 175 typedef CPUMCPU *PCPUMCPU; 176 177 #ifndef VBOX_FOR_DTRACE_LIB 178 RT_C_DECLS_BEGIN 179 180 # ifdef IN_RING3 181 DECLHIDDEN(int) cpumR3DbgInit(PVM pVM); 182 DECLHIDDEN(int) cpumR3SysRegStrictInitChecks(void); 183 184 void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM); 185 int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion); 186 187 DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 188 DECLCALLBACK(void) cpumR3CpuFeatInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 189 190 # endif 191 192 RT_C_DECLS_END 193 #endif /* !VBOX_FOR_DTRACE_LIB */ 194 195 /** @} */ 34 #include "CPUMInternal.h" 196 35 197 36 #endif /* !VMM_INCLUDED_SRC_include_CPUMInternal_armv8_h */ -
trunk/src/VBox/VMM/include/CPUMInternal.h
r107570 r107650 60 60 * @note Was part of saved state (6.1 and earlier). 61 61 * @{ */ 62 #if defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC) 63 62 64 /** Indicates that we've saved the host FPU, SSE, whatever state and that it 63 65 * needs to be restored. */ … … 100 102 /** Set if the VM supports long-mode. */ 101 103 #define CPUM_USE_SUPPORTS_LONGMODE RT_BIT(20) 104 105 #endif 102 106 /** @} */ 103 107 … … 105 109 /** @name CPUM Saved State Version. 106 110 * @{ */ 111 107 112 /** The current saved state version. 108 * @todo When bumping to next version, add CPUMCTX::enmHwVirt and113 * @todo AMD64:When bumping to next version, add CPUMCTX::enmHwVirt and 109 114 * uMicrocodeRevision to the saved state. */ 110 #define CPUM_SAVED_STATE_VERSION CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_4 115 #if defined(VBOX_VMM_TARGET_X86) 116 # define CPUM_SAVED_STATE_VERSION CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_4 117 #elif defined(VBOX_VMM_TARGET_ARMV8) 118 # define CPUM_SAVED_STATE_VERSION CPUM_SAVED_STATE_VERSION_ARMV8_V1 119 #endif 120 121 #if defined(VBOX_VMM_TARGET_X86) 111 122 /** The saved state version with u32RestoreProcCtls2 for Nested Microsoft 112 123 * Hyper-V. */ 113 # define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_423124 # define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_4 23 114 125 /** The saved state version with more virtual VMCS fields (HLAT prefix size, 115 126 * PCONFIG-exiting bitmap, HLAT ptr, VM-exit ctls2) and a CPUMCTX field (VM-exit 116 127 * ctls2 MSR). */ 117 # define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_322128 # define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_3 22 118 129 /** The saved state version with PAE PDPEs added. */ 119 # define CPUM_SAVED_STATE_VERSION_PAE_PDPES21130 # define CPUM_SAVED_STATE_VERSION_PAE_PDPES 21 120 131 /** The saved state version with more virtual VMCS fields and CPUMCTX VMX fields. */ 121 # define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_220132 # define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_2 20 122 133 /** The saved state version including VMX hardware virtualization state. */ 123 # define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX19134 # define CPUM_SAVED_STATE_VERSION_HWVIRT_VMX 19 124 135 /** The saved state version including SVM hardware virtualization state. */ 125 # define CPUM_SAVED_STATE_VERSION_HWVIRT_SVM18136 # define CPUM_SAVED_STATE_VERSION_HWVIRT_SVM 18 126 137 /** The saved state version including XSAVE state. */ 127 # define CPUM_SAVED_STATE_VERSION_XSAVE17138 # define CPUM_SAVED_STATE_VERSION_XSAVE 17 128 139 /** The saved state version with good CPUID leaf count. */ 129 # define CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT 16140 # define CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT 16 130 141 /** CPUID changes with explode forgetting to update the leaf count on 131 142 * restore, resulting in garbage being saved restoring+saving old states). */ 132 # define CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT 15143 # define CPUM_SAVED_STATE_VERSION_BAD_CPUID_COUNT 15 133 144 /** The saved state version before the CPUIDs changes. */ 134 # define CPUM_SAVED_STATE_VERSION_PUT_STRUCT14145 # define CPUM_SAVED_STATE_VERSION_PUT_STRUCT 14 135 146 /** The saved state version before using SSMR3PutStruct. */ 136 # define CPUM_SAVED_STATE_VERSION_MEM13147 # define CPUM_SAVED_STATE_VERSION_MEM 13 137 148 /** The saved state version before introducing the MSR size field. 
*/ 138 # define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE12149 # define CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE 12 139 150 /** The saved state version of 3.2, 3.1 and 3.3 trunk before the hidden 140 151 * selector register change (CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID). */ 141 # define CPUM_SAVED_STATE_VERSION_VER3_211152 # define CPUM_SAVED_STATE_VERSION_VER3_2 11 142 153 /** The saved state version of 3.0 and 3.1 trunk before the teleportation 143 154 * changes. */ 144 # define CPUM_SAVED_STATE_VERSION_VER3_010155 # define CPUM_SAVED_STATE_VERSION_VER3_0 10 145 156 /** The saved state version for the 2.1 trunk before the MSR changes. */ 146 # define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR9157 # define CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR 9 147 158 /** The saved state version of 2.0, used for backwards compatibility. */ 148 # define CPUM_SAVED_STATE_VERSION_VER2_08159 # define CPUM_SAVED_STATE_VERSION_VER2_0 8 149 160 /** The saved state version of 1.6, used for backwards compatibility. */ 150 #define CPUM_SAVED_STATE_VERSION_VER1_6 6 161 # define CPUM_SAVED_STATE_VERSION_VER1_6 6 162 #endif 163 164 #if defined(VBOX_VMM_TARGET_ARMV8) 165 /** The initial ARMv8 saved state. */ 166 # define CPUM_SAVED_STATE_VERSION_ARMV8_V1 1 167 #endif 151 168 /** @} */ 152 169 153 170 154 /** @name XSAVE limits. 171 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) 172 /** @name AMD64: XSAVE limits. 155 173 * @{ */ 156 174 /** Max size we accept for the XSAVE area. … … 160 178 #define CPUM_MIN_XSAVE_AREA_SIZE 0x240 161 179 /** @} */ 162 180 #endif 163 181 164 182 /** … … 167 185 typedef struct CPUMINFO 168 186 { 187 #if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC) 169 188 /** The number of MSR ranges (CPUMMSRRANGE) in the array pointed to below. */ 170 189 uint32_t cMsrRanges; … … 207 226 * allocation. The insanity is mainly for more recent AMD CPUs. */ 208 227 CPUMMSRRANGE aMsrRanges[8192]; 228 229 #elif defined(VBOX_VMM_TARGET_ARMV8) 230 /** The number of system register ranges (CPUMSSREGRANGE) in the array pointed to below. */ 231 uint32_t cSysRegRanges; 232 uint32_t uPadding; 233 234 /** Pointer to the sysrem register ranges. */ 235 R3PTRTYPE(PCPUMSYSREGRANGE) paSysRegRangesR3; 236 237 /** System register ranges. */ 238 CPUMSYSREGRANGE aSysRegRanges[128]; 239 #else 240 # error "port me" 241 #endif 209 242 } CPUMINFO; 210 243 /** Pointer to a CPU info structure. */ … … 214 247 215 248 249 #ifdef RT_ARCH_AMD64 216 250 /** 217 251 * The saved host CPU state. … … 317 351 /* padding to get 64byte aligned size */ 318 352 uint8_t auPadding[24]; 319 # if HC_ARCH_BITS != 64320 # error HC_ARCH_BITS not defined or unsupported321 # endif353 # if HC_ARCH_BITS != 64 354 # error HC_ARCH_BITS not defined or unsupported 355 # endif 322 356 } CPUMHOSTCTX; 323 # ifndef VBOX_FOR_DTRACE_LIB357 # ifndef VBOX_FOR_DTRACE_LIB 324 358 AssertCompileSizeAlignment(CPUMHOSTCTX, 64); 325 # endif359 # endif 326 360 /** Pointer to the saved host CPU state. */ 327 361 typedef CPUMHOSTCTX *PCPUMHOSTCTX; 328 329 362 #endif /* RT_ARCH_AMD64 */ 363 364 365 #if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC) 330 366 /** 331 367 * The hypervisor context CPU state (just DRx left now). 
… … 344 380 uint64_t au64Padding[7]; 345 381 } CPUMHYPERCTX; 346 # ifndef VBOX_FOR_DTRACE_LIB382 # ifndef VBOX_FOR_DTRACE_LIB 347 383 AssertCompileSizeAlignment(CPUMHYPERCTX, 64); 348 # endif384 # endif 349 385 /** Pointer to the hypervisor context CPU state. */ 350 386 typedef CPUMHYPERCTX *PCPUMHYPERCTX; 387 #endif 351 388 352 389 … … 356 393 typedef struct CPUM 357 394 { 358 /** Use flags. 359 * These flags indicates which CPU features the host uses. 360 */ 361 uint32_t fHostUseFlags; 395 /** Guest CPU feature information. 396 * Externaly visible via that VM structure, aligned with HostFeatures. */ 397 CPUMFEATURES GuestFeatures; 398 /** Host CPU feature information. 399 * Externaly visible via the VM structure, aligned on 64-byte boundrary. */ 400 CPUHOSTFEATURES HostFeatures; 362 401 363 402 /** The (more) portable CPUID level. */ … … 366 405 * This is used to verify load order dependencies (PGM). */ 367 406 bool fPendingRestore; 407 408 #if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC) 368 409 /** Whether MTRR reads report valid memory types for memory regions. */ 369 410 bool fMtrrRead; 370 411 /** Whether the guest's writes to MTRRs are implemented. */ 371 412 bool fMtrrWrite; 413 /** Use flags. 414 * These flags indicates which CPU features the host uses. 415 */ 416 uint32_t fHostUseFlags; 372 417 373 418 /** XSAVE/XRTOR components we can expose to the guest mask. */ … … 377 422 uint64_t fXStateHostMask; 378 423 379 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)380 /** The host MXCSR mask (determined at init). */381 uint32_t fHostMxCsrMask;382 #else383 uint32_t u32UnusedOnNonX86;384 #endif385 uint8_t abPadding1[4];386 387 424 /** Random value we store in the reserved RFLAGS bits we don't use ourselves so 388 425 * we can detect corruption. */ 389 426 uint64_t fReservedRFlagsCookie; 390 427 391 /** Align to 64-byte boundary. */392 uint8_t abPadding2[16+8];393 394 /** Host CPU feature information.395 * Externaly visible via the VM structure, aligned on 64-byte boundrary. */396 CPUMFEATURES HostFeatures;397 /** Guest CPU feature information.398 * Externaly visible via that VM structure, aligned with HostFeatures. */399 CPUMFEATURES GuestFeatures;400 428 /** Guest CPU info. */ 401 429 CPUMINFO GuestInfo; … … 418 446 STAMCOUNTER cMsrReadsUnknown; 419 447 /** @} */ 448 449 #elif defined(VBOX_VMM_TARGET_ARMV8) 450 /** The initial exception level (EL) to start the CPU after a reset, 451 * should be either ARMV8_AARCH64_EL_1 or ARMV8_AARCH64_EL_2 for nested virtualization. */ 452 uint8_t bResetEl; 453 uint8_t abPadding0[5]; 454 455 /** The reset value of the program counter. */ 456 uint64_t u64ResetPc; 457 458 /** Guest CPU info. */ 459 CPUMINFO GuestInfo; 460 /** Guest CPU ID registers. */ 461 CPUMARMV8IDREGS GuestIdRegs; 462 463 /** @name System register statistics. 464 * @{ */ 465 STAMCOUNTER cSysRegWrites; 466 STAMCOUNTER cSysRegWritesToIgnoredBits; 467 STAMCOUNTER cSysRegWritesRaiseExcp; 468 STAMCOUNTER cSysRegWritesUnknown; 469 STAMCOUNTER cSysRegReads; 470 STAMCOUNTER cSysRegReadsRaiseExcp; 471 STAMCOUNTER cSysRegReadsUnknown; 472 /** @} */ 473 #endif 474 475 #ifdef RT_ARCH_ARM64 476 /** Host CPU ID registers. */ 477 CPUMARMV8IDREGS HostIdRegs; 478 479 #elif defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) 480 /** The host MXCSR mask (determined at init). 
*/ 481 uint32_t fHostMxCsrMask; 482 #endif 420 483 } CPUM; 421 484 #ifndef VBOX_FOR_DTRACE_LIB 485 AssertCompileMemberOffset(CPUM, GuestFeatures, 0); 422 486 AssertCompileMemberOffset(CPUM, HostFeatures, 64); 423 AssertCompileMemberOffset(CPUM, GuestFeatures, 112);487 AssertCompileMemberOffset(CPUM, u8PortableCpuIdLevel, 128); 424 488 #endif 425 489 /** Pointer to the CPUM instance data residing in the shared VM structure. */ … … 434 498 * Aligned on a 64-byte boundary. */ 435 499 CPUMCTX Guest; 500 #if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC) 436 501 /** Guest context - misc MSRs 437 502 * Aligned on a 64-byte boundary. */ 438 503 CPUMCTXMSRS GuestMsrs; 439 440 /** Nested VMX: VMX-preemption timer. */ 441 TMTIMERHANDLE hNestedVmxPreemptTimer; 504 #endif 505 #ifdef RT_ARCH_AMD64 506 /** Saved host context. Only valid while inside RC or HM contexts. 507 * Must be aligned on a 64-byte boundary. */ 508 CPUMHOSTCTX Host; 509 #endif 442 510 443 511 /** Use flags. … … 453 521 uint32_t fChanged; 454 522 455 /** Temporary storage for the return code of the function called in the 456 * 32-64 switcher. */ 457 uint32_t u32RetCode; 458 523 #if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC) 524 /** Nested VMX: VMX-preemption timer. */ 525 TMTIMERHANDLE hNestedVmxPreemptTimer; 459 526 /** Whether the X86_CPUID_FEATURE_EDX_APIC and X86_CPUID_AMD_FEATURE_EDX_APIC 460 527 * (?) bits are visible or not. (The APIC is responsible for setting this 461 528 * when loading state, so we won't save it.) */ 462 529 bool fCpuIdApicFeatureVisible; 463 464 /** Align the next member on a 64-byte boundary. */ 465 uint8_t abPadding2[64 - 8 - 4*3 - 1]; 466 467 /** Saved host context. Only valid while inside RC or HM contexts. 468 * Must be aligned on a 64-byte boundary. */ 469 CPUMHOSTCTX Host; 530 uint8_t abPadding[7]; 531 470 532 /** Old hypervisor context, only used for combined DRx values now. 471 533 * Must be aligned on a 64-byte boundary. */ 472 534 CPUMHYPERCTX Hyper; 535 #endif 473 536 474 537 #ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 478 541 } CPUMCPU; 479 542 #ifndef VBOX_FOR_DTRACE_LIB 543 # ifdef RT_ARCH_AMD64 480 544 AssertCompileMemberAlignment(CPUMCPU, Host, 64); 545 # endif 481 546 #endif 482 547 /** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. 
*/ … … 486 551 RT_C_DECLS_BEGIN 487 552 553 # if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC) 488 554 PCPUMCPUIDLEAF cpumCpuIdGetLeaf(PVM pVM, uint32_t uLeaf); 489 555 PCPUMCPUIDLEAF cpumCpuIdGetLeafEx(PVM pVM, uint32_t uLeaf, uint32_t uSubLeaf, bool *pfExactSubLeafHit); 556 # endif 557 # if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) 490 558 PCPUMCPUIDLEAF cpumCpuIdGetLeafInt(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, uint32_t uLeaf, uint32_t uSubLeaf); 491 559 PCPUMCPUIDLEAF cpumCpuIdEnsureSpace(PVM pVM, PCPUMCPUIDLEAF *ppaLeaves, uint32_t cLeaves); 492 # ifdef VBOX_STRICT560 # ifdef VBOX_STRICT 493 561 void cpumCpuIdAssertOrder(PCPUMCPUIDLEAF paLeaves, uint32_t cLeaves); 494 # endif562 # endif 495 563 int cpumCpuIdExplodeFeaturesX86(PCCPUMCPUIDLEAF paLeaves, uint32_t cLeaves, PCCPUMMSRS pMsrs, 496 PCPUMFEATURES pFeatures); 564 CPUMFEATURESX86 *pFeatures); 565 # endif /* defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) || defined(VBOX_VMM_TARGET_X86) */ 566 # if defined(RT_ARCH_ARM64) || defined(VBOX_VMM_TARGET_ARMV8) 567 int cpumCpuIdExplodeFeaturesArmV8(PCCPUMARMV8IDREGS pIdRegs, CPUMFEATURESARMV8 *pFeatures); 568 # endif 497 569 498 570 # ifdef IN_RING3 499 int cpumR3DbgInit(PVM pVM); 571 DECLHIDDEN(int) cpumR3DbgInit(PVM pVM); 572 # if defined(VBOX_VMM_TARGET_ARMV8) 573 DECLHIDDEN(int) cpumR3SysRegStrictInitChecks(void); 574 # elif defined(VBOX_VMM_TARGET_X86) 500 575 int cpumR3InitCpuIdAndMsrs(PVM pVM, PCCPUMMSRS pHostMsrs); 501 576 void cpumR3InitVmxGuestFeaturesAndMsrs(PVM pVM, PCFGMNODE pCpumCfg, PCVMXMSRS pHostVmxMsrs, 502 577 PVMXMSRS pGuestVmxMsrs); 503 578 void cpumR3CpuIdRing3InitDone(PVM pVM); 579 # endif 504 580 void cpumR3SaveCpuId(PVM pVM, PSSMHANDLE pSSM); 505 int cpumR3LoadCpuId(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pGuestMsrs); 581 # ifdef VBOX_VMM_TARGET_X86 582 int cpumR3LoadCpuIdX86(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, PCCPUMMSRS pGuestMsrs); 506 583 int cpumR3LoadCpuIdPre32(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion); 584 # elif defined(VBOX_VMM_TARGET_ARMV8) 585 int cpumR3LoadCpuIdArmV8(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion); 586 DECLCALLBACK(void) cpumR3CpuFeatInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 587 # endif 507 588 DECLCALLBACK(void) cpumR3CpuIdInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 508 589 509 590 int cpumR3DbGetCpuInfo(const char *pszName, PCPUMINFO pInfo); 591 # ifdef VBOX_VMM_TARGET_X86 510 592 int cpumR3MsrRangesInsert(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange); 511 593 int cpumR3MsrReconcileWithCpuId(PVM pVM); … … 514 596 int cpumR3MsrStrictInitChecks(void); 515 597 PCPUMMSRRANGE cpumLookupMsrRange(PVM pVM, uint32_t idMsr); 516 # endif 517 518 # ifdef IN_RC 519 DECLASM(int) cpumHandleLazyFPUAsm(PCPUMCPU pCPUM); 520 # endif 598 # endif 599 # endif /* IN_RING3 */ 521 600 522 601 # ifdef IN_RING0 602 # if defined(VBOX_VMM_TARGET_X86) /** @todo temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC) 523 603 DECLASM(int) cpumR0SaveHostRestoreGuestFPUState(PCPUMCPU pCPUM); 524 604 DECLASM(void) cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM); 525 # if ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)605 # if ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 526 606 DECLASM(void) cpumR0RestoreHostFPUState(PCPUMCPU pCPUM); 607 # endif 527 608 # endif 528 609 # endif 529 610 530 611 # if defined(IN_RC) || defined(IN_RING0) 612 # if defined(VBOX_VMM_TARGET_X86) /** @todo 
temporary: */ || defined(VBOX_VMM_TARGET_AGNOSTIC) 531 613 DECLASM(int) cpumRZSaveHostFPUState(PCPUMCPU pCPUM); 532 614 DECLASM(void) cpumRZSaveGuestFpuState(PCPUMCPU pCPUM, bool fLeaveFpuAccessible); 533 615 DECLASM(void) cpumRZSaveGuestSseRegisters(PCPUMCPU pCPUM); 534 616 DECLASM(void) cpumRZSaveGuestAvxRegisters(PCPUMCPU pCPUM); 617 # endif 535 618 # endif 536 619 -
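
With the feature blocks and the portable CPUID level now pinned at offsets 0, 64 and 128 by the AssertCompileMemberOffset lines above, the layout contract amounts to three offsetof checks. A standalone C11 sketch of the same invariants, with a dummy struct standing in for the real CPUM:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct SAMPLECPUM
    {
        uint8_t GuestFeatures[64];       /* CPUMFEATURES block                */
        uint8_t HostFeatures[64];        /* CPUHOSTFEATURES, 64-byte aligned  */
        uint8_t u8PortableCpuIdLevel;    /* first member after the two blocks */
    } SAMPLECPUM;

    _Static_assert(offsetof(SAMPLECPUM, GuestFeatures)        ==   0, "layout");
    _Static_assert(offsetof(SAMPLECPUM, HostFeatures)         ==  64, "layout");
    _Static_assert(offsetof(SAMPLECPUM, u8PortableCpuIdLevel) == 128, "layout");

-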
trunk/src/VBox/VMM/include/CPUMInternal.mac
r107639 r107650 80 80 81 81 struc CPUM 82 ;...83 . fHostUseFlags resd 182 .GuestFeatures resb 64 83 .HostFeatures resb 64 84 84 85 85 .u8PortableCpuIdLevel resb 1 … … 88 88 .fMtrrWrite resb 1 89 89 90 .fHostUseFlags resd 1 91 90 92 alignb 8 91 93 .fXStateGuestMask resq 1 92 94 .fXStateHostMask resq 1 93 94 alignb 64 95 .HostFeatures resb 48 96 .GuestFeatures resb 48 95 .fReservedRFlagsCookie resq 1 96 97 alignb 8 97 98 .GuestInfo resb CPUMINFO_size 98 99 … … 110 111 .cMsrReadsRaiseGp resq 1 111 112 .cMsrReadsUnknown resq 1 113 114 .fHostMxCsrMask resd 1 115 alignb 8 112 116 endstruc 113 117 … … 289 293 290 294 ; 291 ; Other stuff.292 ;293 .hNestedVmxPreemptTimer resq 1294 295 .fUseFlags resd 1296 .fChanged resd 1297 .u32RetCode resd 1298 .fCpuIdApicFeatureVisible resb 1299 300 ;301 295 ; Host context state 302 296 ; … … 368 362 .Host.xcr0 resq 1 369 363 .Host.fXStateMask resq 1 364 alignb 64 365 366 ; 367 ; Other stuff. 368 ; 369 .fUseFlags resd 1 370 .fChanged resd 1 371 alignb 8 372 .hNestedVmxPreemptTimer resq 1 373 .fCpuIdApicFeatureVisible resb 1 370 374 371 375 ; 372 376 ; Hypervisor Context. 373 377 ; 374 alignb 64378 alignb 8 375 379 .Hyper resq 0 376 380 .Hyper.dr resq 8 377 381 .Hyper.cr3 resq 1 378 alignb 64382 .Hyper.au64Padding resq 7 379 383 380 384 %ifdef VBOX_WITH_CRASHDUMP_MAGIC -
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r107220 r107650
57 57       GEN_CHECK_OFF(CPUMCPU, fUseFlags);
58 58       GEN_CHECK_OFF(CPUMCPU, fChanged);
59          GEN_CHECK_OFF(CPUMCPU, u32RetCode);
60 59       GEN_CHECK_OFF(CPUMCPU, fCpuIdApicFeatureVisible);
61 60
-
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r107178 r107650
283 283     #endif
284 284         CHECK_SIZE_ALIGNMENT(CPUMCTX, 64);
    285     #ifdef RT_ARCH_AMD64
285 286         CHECK_SIZE_ALIGNMENT(CPUMHOSTCTX, 64);
    287     #endif
286 288         CHECK_SIZE_ALIGNMENT(CPUMCTXMSRS, 64);
287 289
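
Fencing CPUMHOSTCTX off with RT_ARCH_AMD64 matches the header change above, where the type is now only defined on AMD64 hosts; the check itself merely verifies that the size is a multiple of the requested alignment. A compile-time rendering of the same predicate (the testcase evaluates it at run time, so the macro below is illustrative, not the actual CHECK_SIZE_ALIGNMENT):

    #define MY_CHECK_SIZE_ALIGNMENT(a_Type, a_cbAlign) \
        _Static_assert((sizeof(a_Type) & ((a_cbAlign) - 1)) == 0, \
                       #a_Type ": size is not a multiple of " #a_cbAlign)

    typedef struct DEMO { unsigned long long au64[8]; } DEMO;  /* 64 bytes */
    MY_CHECK_SIZE_ALIGNMENT(DEMO, 64);

-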
trunk/src/VBox/VMM/tools/VBoxCpuReport.cpp
r106061 r107650
4533 4533       PCPUMCPUIDLEAF paLeaves;
4534 4534       uint32_t       cLeaves;
4535            int rc = CPUMCpuIdCollectLeavesX86(&paLeaves, &cLeaves);
     4535       int rc = CPUMCpuIdCollectLeavesFromX86Host(&paLeaves, &cLeaves);
4536 4536       if (RT_FAILURE(rc))
4537 4537           return RTMsgErrorRc(rc, "CPUMR3CollectCpuIdInfo failed: %Rrc\n", rc);
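
VBoxCpuReport only needed its call site touched by the rename. The job itself, walking the host's CPUID leaves, can be approximated with compiler intrinsics alone; a self-contained sketch for GCC/Clang on x86 hosts (this is not the VBox collector: it skips sub-leaf enumeration and the 0x80000000 extended range):

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned uMax, uEbx, uEcx, uEdx;
        if (!__get_cpuid(0, &uMax, &uEbx, &uEcx, &uEdx))
            return 1;
        for (unsigned uLeaf = 0; uLeaf <= uMax; uLeaf++)
        {
            unsigned a, b, c, d;
            __cpuid_count(uLeaf, 0, a, b, c, d);
            printf("%08x: %08x %08x %08x %08x\n", uLeaf, a, b, c, d);
        }
        return 0;
    }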