Changeset 98827 in vbox
- Timestamp: Mar 3, 2023 12:01:42 PM (2 years ago)
- svn:sync-xref-src-repo-rev: 156149
- Location: trunk
- Files: 7 edited
Legend:
- Unmodified lines: no prefix
- Added lines: "+"
- Removed lines: "-"
trunk/include/VBox/vmm/cpum.h (r98703 → r98827)

      /** Support SHA instructions. */
      uint32_t fSha : 1;
  +   /** Support ADX instructions. */
  +   uint32_t fAdx : 1;

      /** Supports AMD 3DNow instructions. */
  …
      /** Alignment padding / reserved for future use (96 bits total, plus 12 bytes
       * prior to the bit fields -> total of 24 bytes) */
  -   uint32_t fPadding0 : 25;
  +   uint32_t fPadding0 : 24;
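The new fAdx bit displaces one bit of the trailing padding so the feature structure's total size stays fixed. A minimal sketch of that invariant; the struct and field set here are illustrative, not the real CPUMFEATURES layout:

    #include <stdint.h>

    /* Hypothetical mirror of the feature bit-field block: adding the 1-bit
     * fAdx flag narrows the trailing padding from 25 to 24 bits, so the
     * block still totals the documented 96 bits and the structure's size
     * and layout are unchanged. */
    typedef struct DEMOFEATURES
    {
        uint32_t fSha      : 1;   /* existing feature bit */
        uint32_t fAdx      : 1;   /* newly added feature bit */
        /* ... the other feature bits elided ... */
        uint32_t fPadding0 : 24;  /* was 25 before fAdx existed */
    } DEMOFEATURES;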
trunk/src/VBox/VMM/VMMAll/CPUMAllCpuId.cpp (r98703 → r98827)

      pFeatures->fRtm = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_RTM);
      pFeatures->fSha = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_SHA);
  +   pFeatures->fAdx = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_ADX);

      pFeatures->fIbpb = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB);
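This wires CPUID leaf 7 (structured extended features), EBX bit 19, into the fAdx feature flag; X86_CPUID_STEXT_FEATURE_EBX_ADX is VirtualBox's name for RT_BIT(19) in that leaf. For reference, a self-contained sketch of the same probe using GCC/Clang's <cpuid.h> on x86; the helper name is ours, not a VirtualBox API:

    #include <cpuid.h>
    #include <stdbool.h>

    /* Probe CPUID.(EAX=7,ECX=0):EBX bit 19, the ADX feature bit. */
    static bool host_has_adx(void)
    {
        unsigned uEax, uEbx, uEcx, uEdx;
        if (!__get_cpuid_count(7, 0, &uEax, &uEbx, &uEcx, &uEdx))
            return false;            /* CPU does not report leaf 7 */
        return (uEbx >> 19) & 1;     /* EBX bit 19 = ADX */
    }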
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm (r98821 → r98827)

          popf                        ; load the mixed flags.
   ;%endif
  + %endmacro
  +
  + ;;
  + ; Load the relevant flags from [%1].
  + ;
  + ; @remarks Clobbers T0, stack. Changes EFLAGS.
  + ; @param A2 The register pointing to the flags.
  + ; @param 1 The parameter (A0..A3) pointing to the eflags.
  + ; @param 2 The set of flags to load.
  + ; @param 3 The set of undefined flags.
  + ;
  + %macro IEM_LOAD_FLAGS 3
  +     pushf                       ; store current flags
  +     mov T0_32, [%1]             ; load the guest flags
  +     and dword [xSP], ~(%2 | %3) ; mask out the modified and undefined flags
  +     and T0_32, (%2 | %3)        ; select the modified and undefined flags.
  +     or [xSP], T0                ; merge guest flags with host flags.
  +     popf                        ; load the mixed flags.
    %endmacro
  …
          EPILOGUE_3_ARGS
    ENDPROC iemAImpl_sha256rnds2_u128
  +
  +
  + ;
  + ; 32-bit forms of ADCX and ADOX
  + ;
  + ; @param A0 Pointer to the destination operand (input/output).
  + ; @param A1 Pointer to the EFLAGS value (input/output).
  + ; @param A2 32-bit source operand 1 (input).
  + ;
  + %macro IEMIMPL_ADX_32 2
  + BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 8
  +     PROLOGUE_4_ARGS
  +
  +     IEM_LOAD_FLAGS A1, %2, 0
  +     %1 A2_32, [A0]
  +     mov [A0], A2_32
  +     IEM_SAVE_FLAGS A1, %2, 0
  +
  +     EPILOGUE_4_ARGS
  + ENDPROC iemAImpl_ %+ %1 %+ _u32
  + %endmacro
  +
  + ;
  + ; 64-bit forms of ADCX and ADOX
  + ;
  + ; @param A0 Pointer to the destination operand (input/output).
  + ; @param A1 Pointer to the EFLAGS value (input/output).
  + ; @param A2 64-bit source operand 1 (input).
  + ;
  + %macro IEMIMPL_ADX_64 2
  + BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 8
  +     PROLOGUE_4_ARGS
  +
  +     IEM_LOAD_FLAGS A1, %2, 0
  +     %1 A2, [A0]
  +     mov [A0], A2
  +     IEM_SAVE_FLAGS A1, %2, 0
  +
  +     EPILOGUE_4_ARGS
  + ENDPROC iemAImpl_ %+ %1 %+ _u64
  + %endmacro
  +
  + IEMIMPL_ADX_32 adcx, X86_EFL_CF
  + IEMIMPL_ADX_64 adcx, X86_EFL_CF
  +
  + IEMIMPL_ADX_32 adox, X86_EFL_OF
  + IEMIMPL_ADX_64 adox, X86_EFL_OF
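The new IEM_LOAD_FLAGS macro merges selected guest EFLAGS bits into the host's live flags (via pushf/popf) before the host executes the real instruction, and the ADX wrappers use it to feed CF or OF in. A hedged C rendering of that merge step, with illustrative names:

    #include <stdint.h>

    /* C model of the pushf / and / or / popf sequence in IEM_LOAD_FLAGS:
     * keep the host flag bits the instruction will not touch, and
     * substitute the guest's values for the modified and undefined bits. */
    static uint32_t demo_mix_flags(uint32_t fHostEfl, uint32_t fGuestEfl,
                                   uint32_t fModified, uint32_t fUndefined)
    {
        uint32_t fSelect = fModified | fUndefined;  /* (%2 | %3) */
        return (fHostEfl  & ~fSelect)     /* and dword [xSP], ~(%2 | %3) */
             | (fGuestEfl &  fSelect);    /* and T0_32, (%2 | %3); or [xSP], T0 */
    }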
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp (r98821 → r98827)

      puDst->au32[0] = au32F[2];
  }
  +
  +
  + /**
  +  * ADCX
  +  */
  + #define ADX_EMIT(a_Flag, a_Type, a_Max) \
  +     do \
  +     { \
  +         bool f = RT_BOOL(*pfEFlags & (a_Flag)); \
  +         a_Type uTmp = *puDst + uSrc; \
  +         if (uTmp < uSrc) \
  +             *pfEFlags |= (a_Flag); \
  +         else \
  +             *pfEFlags &= ~(a_Flag); \
  +         if (   uTmp == a_Max \
  +             && f) \
  +             *pfEFlags |= (a_Flag); \
  +         if (f) \
  +             uTmp++; \
  +         *puDst = uTmp; \
  +     } \
  +     while (0)
  +
  + IEM_DECL_IMPL_DEF(void, iemAImpl_adcx_u32_fallback,(uint32_t *puDst, uint32_t *pfEFlags, uint32_t uSrc))
  + {
  +     ADX_EMIT(X86_EFL_CF, uint32_t, UINT32_MAX);
  + }
  +
  + IEM_DECL_IMPL_DEF(void, iemAImpl_adcx_u64_fallback,(uint64_t *puDst, uint32_t *pfEFlags, uint64_t uSrc))
  + {
  +     ADX_EMIT(X86_EFL_CF, uint64_t, UINT64_MAX);
  + }
  +
  +
  + /**
  +  * ADOX
  +  */
  + IEM_DECL_IMPL_DEF(void, iemAImpl_adox_u32_fallback,(uint32_t *puDst, uint32_t *pfEFlags, uint32_t uSrc))
  + {
  +     ADX_EMIT(X86_EFL_OF, uint32_t, UINT32_MAX);
  + }
  +
  + IEM_DECL_IMPL_DEF(void, iemAImpl_adox_u64_fallback,(uint64_t *puDst, uint32_t *pfEFlags, uint64_t uSrc))
  + {
  +     ADX_EMIT(X86_EFL_OF, uint64_t, UINT64_MAX);
  + }
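ADX_EMIT computes *puDst = *puDst + uSrc + flag, with the chosen flag (CF for ADCX, OF for ADOX) serving as both carry-in and carry-out while every other EFLAGS bit is left alone. A self-contained C sketch of the same arithmetic plus a small 128-bit demo; the names and the DEMO_CF constant are ours:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_CF 0x01u   /* stand-in for X86_EFL_CF */

    /* Same logic as ADX_EMIT, written out for one 64-bit step. */
    static void adx_step(uint64_t *puDst, uint32_t *pfEFlags, uint64_t uSrc)
    {
        int      fIn  = (*pfEFlags & DEMO_CF) != 0;
        uint64_t uTmp = *puDst + uSrc;
        int      fOut = uTmp < uSrc                   /* the add wrapped */
                     || (uTmp == UINT64_MAX && fIn);  /* the +1 will wrap */
        *pfEFlags = fOut ? (*pfEFlags | DEMO_CF) : (*pfEFlags & ~DEMO_CF);
        *puDst    = uTmp + fIn;
    }

    int main(void)
    {
        uint64_t uLo = UINT64_MAX, uHi = 0;  /* the 128-bit value 2^64 - 1 */
        uint32_t fEfl = 0;                   /* carry chain starts clear */
        adx_step(&uLo, &fEfl, 1);            /* low limb wraps, carry set */
        adx_step(&uHi, &fEfl, 0);            /* high limb absorbs the carry */
        printf("hi=%llu lo=%llu\n",          /* prints: hi=1 lo=0 */
               (unsigned long long)uHi, (unsigned long long)uLo);
        return 0;
    }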
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h (r98703 → r98827)

  /* Opcode 0x0f 0x38 0xf6 - invalid. */
  +
  + #define ADX_EMIT(a_Variant) \
  +     do \
  +     { \
  +         if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAdx) \
  +             return iemOp_InvalidNeedRM(pVCpu); \
  +         uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \
  +         if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) \
  +         { \
  +             if (IEM_IS_MODRM_REG_MODE(bRm)) \
  +             { \
  +                 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
  +                 IEM_MC_BEGIN(3, 0); \
  +                 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
  +                 IEM_MC_ARG(uint32_t *, pEFlags, 1); \
  +                 IEM_MC_ARG(uint64_t, u64Src, 2); \
  +                 IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
  +                 IEM_MC_FETCH_GREG_U64(u64Src, IEM_GET_MODRM_RM(pVCpu, bRm)); \
  +                 IEM_MC_REF_EFLAGS(pEFlags); \
  +                 IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(fAdx, iemAImpl_## a_Variant ##_u64, iemAImpl_## a_Variant ##_u64_fallback), \
  +                                          pu64Dst, pEFlags, u64Src); \
  +                 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
  +                 IEM_MC_END(); \
  +             } \
  +             else \
  +             { \
  +                 IEM_MC_BEGIN(3, 1); \
  +                 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \
  +                 IEM_MC_ARG(uint32_t *, pEFlags, 1); \
  +                 IEM_MC_ARG(uint64_t, u64Src, 2); \
  +                 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
  +                 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1); \
  +                 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
  +                 IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \
  +                 IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
  +                 IEM_MC_REF_EFLAGS(pEFlags); \
  +                 IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(fAdx, iemAImpl_## a_Variant ##_u64, iemAImpl_## a_Variant ##_u64_fallback), \
  +                                          pu64Dst, pEFlags, u64Src); \
  +                 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
  +                 IEM_MC_END(); \
  +             } \
  +         } \
  +         else \
  +         { \
  +             if (IEM_IS_MODRM_REG_MODE(bRm)) \
  +             { \
  +                 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
  +                 IEM_MC_BEGIN(3, 0); \
  +                 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
  +                 IEM_MC_ARG(uint32_t *, pEFlags, 1); \
  +                 IEM_MC_ARG(uint32_t, u32Src, 2); \
  +                 IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
  +                 IEM_MC_FETCH_GREG_U32(u32Src, IEM_GET_MODRM_RM(pVCpu, bRm)); \
  +                 IEM_MC_REF_EFLAGS(pEFlags); \
  +                 IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(fAdx, iemAImpl_## a_Variant ##_u32, iemAImpl_## a_Variant ##_u32_fallback), \
  +                                          pu32Dst, pEFlags, u32Src); \
  +                 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
  +                 IEM_MC_END(); \
  +             } \
  +             else \
  +             { \
  +                 IEM_MC_BEGIN(3, 1); \
  +                 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \
  +                 IEM_MC_ARG(uint32_t *, pEFlags, 1); \
  +                 IEM_MC_ARG(uint32_t, u32Src, 2); \
  +                 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
  +                 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1); \
  +                 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \
  +                 IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \
  +                 IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_REG(pVCpu, bRm)); \
  +                 IEM_MC_REF_EFLAGS(pEFlags); \
  +                 IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(fAdx, iemAImpl_## a_Variant ##_u32, iemAImpl_## a_Variant ##_u32_fallback), \
  +                                          pu32Dst, pEFlags, u32Src); \
  +                 IEM_MC_ADVANCE_RIP_AND_FINISH(); \
  +                 IEM_MC_END(); \
  +             } \
  +         } \
  +     } while(0)
  +
  /** Opcode 0x66 0x0f 0x38 0xf6. */
  - FNIEMOP_STUB(iemOp_adcx_Gy_Ey);
  + FNIEMOP_DEF(iemOp_adcx_Gy_Ey)
  + {
  +     IEMOP_MNEMONIC2(RM, ADCX, adcx, Gy, Ey, DISOPTYPE_HARMLESS, 0);
  +     ADX_EMIT(adcx);
  + }
  +
  +
  /** Opcode 0xf3 0x0f 0x38 0xf6. */
  - FNIEMOP_STUB(iemOp_adox_Gy_Ey);
  + FNIEMOP_DEF(iemOp_adox_Gy_Ey)
  + {
  +     IEMOP_MNEMONIC2(RM, ADOX, adox, Gy, Ey, DISOPTYPE_HARMLESS, 0);
  +     ADX_EMIT(adox);
  + }
  +
  +
  /* Opcode 0xf2 0x0f 0x38 0xf6 - invalid (vex only). */
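adcx and adox share this decode path and differ only in which flag carries (CF vs. OF). Two architecturally separate carry bits exist so that two independent carry chains can be interleaved in one loop, the pattern multi-precision arithmetic uses. A hedged C model, with explicit carry variables standing in for CF and OF and all names ours:

    #include <stdint.h>

    /* One add-with-carry step; *pfCarry is carry-in and carry-out. */
    static uint64_t demo_addc(uint64_t uA, uint64_t uB, unsigned *pfCarry)
    {
        uint64_t uSum = uA + uB;
        unsigned fOut = uSum < uA;
        uSum += *pfCarry;
        fOut |= uSum < (uint64_t)*pfCarry;  /* wraps only if uSum was max */
        *pfCarry = fOut;
        return uSum;
    }

    /* Dst += A + B, keeping A's chain (fCf, adcx-like) and B's chain
     * (fOf, adox-like) independent across the whole loop. */
    static void demo_add_two_chains(uint64_t *pauDst, const uint64_t *pauA,
                                    const uint64_t *pauB, unsigned cLimbs)
    {
        unsigned fCf = 0, fOf = 0;
        for (unsigned i = 0; i < cLimbs; i++)
        {
            pauDst[i] = demo_addc(pauDst[i], pauA[i], &fCf);
            pauDst[i] = demo_addc(pauDst[i], pauB[i], &fOf);
        }
    }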
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp (r98703 → r98827)

      CPUMISAEXTCFG enmRdSeed;
      CPUMISAEXTCFG enmSha;
  +   CPUMISAEXTCFG enmAdx;
      CPUMISAEXTCFG enmCLFlushOpt;
      CPUMISAEXTCFG enmFsGsBase;
  …
      //| RT_BIT(17) - reserved
      | PASSTHRU_FEATURE_TODO(pConfig->enmRdSeed, X86_CPUID_STEXT_FEATURE_EBX_RDSEED)
  -   //| X86_CPUID_STEXT_FEATURE_EBX_ADX RT_BIT(19)
  +   | PASSTHRU_FEATURE(pConfig->enmAdx, pHstFeat->fAdx, X86_CPUID_STEXT_FEATURE_EBX_ADX)
      //| X86_CPUID_STEXT_FEATURE_EBX_SMAP RT_BIT(20)
      //| RT_BIT(21) - reserved
  …
      PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, AVX512F,    X86_CPUID_STEXT_FEATURE_EBX_AVX512F);
      PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, RDSEED,     X86_CPUID_STEXT_FEATURE_EBX_RDSEED, pConfig->enmRdSeed);
  +   PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, ADX,        X86_CPUID_STEXT_FEATURE_EBX_ADX, pConfig->enmAdx);
      PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, CLFLUSHOPT, X86_CPUID_STEXT_FEATURE_EBX_RDSEED, pConfig->enmCLFlushOpt);
      PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, AVX512PF,   X86_CPUID_STEXT_FEATURE_EBX_AVX512PF);
  …
      if (pConfig->enmRdSeed == CPUMISAEXTCFG_ENABLED_ALWAYS)
          pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_RDSEED;
  +   if (pConfig->enmAdx == CPUMISAEXTCFG_ENABLED_ALWAYS)
  +       pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_ADX;
      if (pConfig->enmCLFlushOpt == CPUMISAEXTCFG_ENABLED_ALWAYS)
          pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT;
  …
      "|RDRAND"
      "|RDSEED"
  +   "|ADX"
      "|CLFLUSHOPT"
      "|SHA"
  …
       */
      rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "RDSEED", &pConfig->enmRdSeed, fNestedPagingAndFullGuestExec);
  +   AssertLogRelRCReturn(rc, rc);
  +
  +   /** @cfgm{/CPUM/IsaExts/ADX, isaextcfg, depends}
  +    * Whether to expose the ADX instructions to the guest. For the time being
  +    * the default is to only do this for VMs with nested paging and AMD-V or
  +    * unrestricted guest mode.
  +    */
  +   rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ADX", &pConfig->enmAdx, fNestedPagingAndFullGuestExec);
      AssertLogRelRCReturn(rc, rc);
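The PASSTHRU_FEATURE machinery turns each CPUMISAEXTCFG policy into a CPUID bit for the guest: pass the host's bit through, force it on, or hide it. A hedged sketch of that three-way decision; the enum and function names here are ours, not the real CPUM definitions:

    #include <stdint.h>
    #include <stdbool.h>

    typedef enum DEMOISAEXTCFG
    {
        DEMOISAEXTCFG_DISABLED,          /* never expose the feature */
        DEMOISAEXTCFG_ENABLED_SUPPORTED, /* expose it only if the host has it */
        DEMOISAEXTCFG_ENABLED_ALWAYS     /* expose it even without host support */
    } DEMOISAEXTCFG;

    /* Returns fBit if the policy says the guest should see it, else 0. */
    static uint32_t demoPassthruFeature(DEMOISAEXTCFG enmCfg, bool fHostHasIt, uint32_t fBit)
    {
        if (   enmCfg == DEMOISAEXTCFG_ENABLED_ALWAYS
            || (enmCfg == DEMOISAEXTCFG_ENABLED_SUPPORTED && fHostHasIt))
            return fBit;
        return 0;
    }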
trunk/src/VBox/VMM/include/IEMInternal.h (r98821 → r98827)

  FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdseed_u32, iemAImpl_rdseed_u32_fallback;
  FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdseed_u64, iemAImpl_rdseed_u64_fallback;
  + /** @} */
  +
  + /** @name ADOX and ADCX
  +  * @{ */
  + typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLADXU32,(uint32_t *puDst, uint32_t *pfEFlags, uint32_t uSrc));
  + typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLADXU64,(uint64_t *puDst, uint32_t *pfEFlags, uint64_t uSrc));
  + typedef FNIEMAIMPLADXU32 *PFNIEMAIMPLADXU32;
  + typedef FNIEMAIMPLADXU64 *PFNIEMAIMPLADXU64;
  +
  + FNIEMAIMPLADXU32 iemAImpl_adcx_u32, iemAImpl_adcx_u32_fallback;
  + FNIEMAIMPLADXU64 iemAImpl_adcx_u64, iemAImpl_adcx_u64_fallback;
  + FNIEMAIMPLADXU32 iemAImpl_adox_u32, iemAImpl_adox_u32_fallback;
  + FNIEMAIMPLADXU64 iemAImpl_adox_u64, iemAImpl_adox_u64_fallback;
  /** @} */
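The native and fallback workers deliberately share one function-pointer type, so the decoder's IEM_SELECT_HOST_OR_FALLBACK choice can bind either without changing call sites. A small illustration of the idea; only the general shape comes from the changeset, and the selector below is ours:

    #include <stdint.h>
    #include <stdbool.h>

    /* Same shape as FNIEMAIMPLADXU64 above. */
    typedef void (*DEMOPFNADXU64)(uint64_t *puDst, uint32_t *pfEFlags, uint64_t uSrc);

    /* Pick the native worker when the host CPU has ADX, else the portable
     * C fallback; both are callable through the same pointer afterwards. */
    static DEMOPFNADXU64 demoSelectAdcxU64(bool fHostHasAdx,
                                           DEMOPFNADXU64 pfnNative,
                                           DEMOPFNADXU64 pfnFallback)
    {
        return fHostHasAdx ? pfnNative : pfnFallback;
    }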