Changeset 96652 in vbox
- Timestamp:
  Sep 8, 2022 8:49:40 AM
- Location:
  trunk
- Files:
  9 edited
Legend:
- Unmodified context lines are shown with a leading space
- Added lines with a leading '+'
- Removed lines with a leading '-'
trunk/include/VBox/vmm/cpum.h
(r96407 → r96652)

     /** Support RDSEED instruction. */
     uint32_t fRdSeed : 1;
+    /** Support PCLMULQDQ instruction. */
+    uint32_t fPclMul : 1;

     /** Supports AMD 3DNow instructions. */
…
     /** Alignment padding / reserved for future use (96 bits total, plus 12 bytes
      * prior to the bit fields -> total of 24 bytes) */
-    uint32_t fPadding0;
+    uint32_t fPadding0 : 31;
trunk/src/VBox/VMM/VMMAll/CPUMAllCpuId.cpp
(r96407 → r96652)

     pFeatures->fRdRand = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_RDRAND);
     pFeatures->fVmx    = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_VMX);
+    pFeatures->fPclMul = RT_BOOL(pStd1Leaf->uEcx & X86_CPUID_FEATURE_ECX_PCLMUL);
     if (pFeatures->fVmx)
         cpumExplodeVmxFeatures(&pMsrs->hwvirt.vmx, pFeatures);
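Guest code sees the effect of this new feature flag through CPUID leaf 1, where PCLMULQDQ support is reported in ECX bit 1. Not part of the changeset, but as a minimal illustration of how a guest might probe for it (assuming GCC/Clang's <cpuid.h>, which defines bit_PCLMUL as bit 1):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned uEax, uEbx, uEcx, uEdx;
        /* CPUID leaf 1 returns the feature bits; ECX bit 1 (bit_PCLMUL) is PCLMULQDQ. */
        if (__get_cpuid(1, &uEax, &uEbx, &uEcx, &uEdx) && (uEcx & bit_PCLMUL))
            puts("PCLMULQDQ supported");
        else
            puts("PCLMULQDQ not supported");
        return 0;
    }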
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm
(r96624 → r96652)

 IEMIMPL_MEDIA_SSE_INSN_IMM8_6 pblendw
 IEMIMPL_MEDIA_SSE_INSN_IMM8_6 palignr
+IEMIMPL_MEDIA_SSE_INSN_IMM8_6 pclmulqdq

…
 ;
 ; @param    1       The instruction name.
+; @param    2       Whether the instruction has a 256-bit variant (1) or not (0).
 ;
 ; @param    A0      Pointer to the destination media register size operand (output).
…
 ; @param    A3      The 8-bit immediate
 ;
-%macro IEMIMPL_MEDIA_AVX_INSN_IMM8_6 1
+%macro IEMIMPL_MEDIA_AVX_INSN_IMM8_6 2
 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u128, 16
         PROLOGUE_4_ARGS
…
 ENDPROC iemAImpl_ %+ %1 %+ _u128

+%if %2 == 1
 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u256, 16
         PROLOGUE_4_ARGS
…
         dw 0x107ff - (.immEnd - .imm0) ; will cause warning if entries are too small.
 ENDPROC iemAImpl_ %+ %1 %+ _u256
+%endif
 %endmacro

-IEMIMPL_MEDIA_AVX_INSN_IMM8_6 vblendps
-IEMIMPL_MEDIA_AVX_INSN_IMM8_6 vblendpd
-IEMIMPL_MEDIA_AVX_INSN_IMM8_6 vpblendw
-IEMIMPL_MEDIA_AVX_INSN_IMM8_6 vpalignr
+IEMIMPL_MEDIA_AVX_INSN_IMM8_6 vblendps, 1
+IEMIMPL_MEDIA_AVX_INSN_IMM8_6 vblendpd, 1
+IEMIMPL_MEDIA_AVX_INSN_IMM8_6 vpblendw, 1
+IEMIMPL_MEDIA_AVX_INSN_IMM8_6 vpalignr, 1
+IEMIMPL_MEDIA_AVX_INSN_IMM8_6 vpclmulqdq, 0
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp
(r96624 → r96652)

         AssertReleaseFailed();
     }
+
+
+/*
+ * [V]PCLMULQDQ
+ */
+IEM_DECL_IMPL_DEF(void, iemAImpl_pclmulqdq_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil))
+{
+    iemAImpl_vpclmulqdq_u128_fallback(puDst, puDst, puSrc, bEvil);
+}
+
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vpclmulqdq_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil))
+{
+    uint64_t uSrc1 = puSrc1->au64[bEvil & 0x1];
+    uint64_t uSrc2 = puSrc2->au64[(bEvil >> 4) & 0x1];
+
+    puDst->au64[0] = 0;
+    puDst->au64[1] = 0;
+
+    /*
+     * See https://en.wikipedia.org/wiki/Carry-less_product#Example (as of 2022-09-08) for the algorithm.
+     * Do the first round outside the loop to avoid ASAN complaining about shift exponent being too large (64)
+     * and squeeze out some optimizations.
+     */
+    if (uSrc1 & 0x1)
+        puDst->au64[0] = uSrc2;
+
+    uSrc1 >>= 1;
+
+    uint8_t iDigit = 1;
+    while (uSrc1)
+    {
+        if (uSrc1 & 0x1)
+        {
+            puDst->au64[0] ^= (uSrc2 << iDigit);
+            puDst->au64[1] ^= uSrc2 >> (64 - iDigit);
+        }
+
+        uSrc1 >>= 1;
+        iDigit++;
+    }
+}
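The fallback implements carry-less multiplication as binary long multiplication over GF(2): each set bit of uSrc1 contributes uSrc2 shifted to that bit position, and the partial products are combined with XOR instead of addition, the 128-bit result being split across au64[0] and au64[1]. A minimal self-contained sketch of the same shift-and-XOR scheme on narrow operands (illustration only, not part of the changeset):

    #include <assert.h>
    #include <stdint.h>

    /* Carry-less multiply of two 32-bit values; same scheme as the
       fallback above, minus the split across two 64-bit halves. */
    static uint64_t clmul32(uint32_t u1, uint32_t u2)
    {
        uint64_t uResult = 0;
        for (unsigned iDigit = 0; iDigit < 32; iDigit++)
            if (u1 & (UINT32_C(1) << iDigit))
                uResult ^= (uint64_t)u2 << iDigit;
        return uResult;
    }

    int main(void)
    {
        /* 3 clmul 3 = (11b << 1) ^ 11b = 101b = 5, whereas 3 * 3 = 9: XOR drops the carry. */
        assert(clmul32(3, 3) == 5);
        /* 5 clmul 3 = (11b << 2) ^ 11b = 1111b = 15; no overlapping partial products, so it matches 5 * 3. */
        assert(clmul32(5, 3) == 15);
        return 0;
    }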
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f3a.cpp.h
(r96625 → r96652)

 FNIEMOP_STUB(iemOp_mpsadbw_Vx_Wx_Ib);
 /*  Opcode 0x66 0x0f 0x43 - invalid */
+
+
 /** Opcode 0x66 0x0f 0x44. */
-FNIEMOP_STUB(iemOp_pclmulqdq_Vdq_Wdq_Ib);
+FNIEMOP_DEF(iemOp_pclmulqdq_Vdq_Wdq_Ib)
+{
+    IEMOP_MNEMONIC3(RMI, PCLMULQDQ, pclmulqdq, Vdq, Wdq, Ib, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, 0);
+
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+    {
+        /*
+         * Register, register.
+         */
+        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(3, 0);
+        IEM_MC_ARG(PRTUINT128U,  puDst, 0);
+        IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
+        IEM_MC_ARG_CONST(uint8_t, bImmArg, /*=*/ bImm, 2);
+        IEM_MC_MAYBE_RAISE_PCLMUL_RELATED_XCPT();
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puSrc, IEM_GET_MODRM_RM(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(fPclMul,
+                                                             iemAImpl_pclmulqdq_u128,
+                                                             iemAImpl_pclmulqdq_u128_fallback),
+                                 puDst, puSrc, bImmArg);
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(3, 2);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_LOCAL(RTUINT128U, uSrc);
+        IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
+        IEM_MC_ARG_CONST(uint8_t, bImmArg, /*=*/ bImm, 2);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_PCLMUL_RELATED_XCPT();
+        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(fPclMul,
+                                                             iemAImpl_pclmulqdq_u128,
+                                                             iemAImpl_pclmulqdq_u128_fallback),
+                                 puDst, puSrc, bImmArg);
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
 /* Opcode 0x66 0x0f 0x45 - invalid */
 /* Opcode 0x66 0x0f 0x46 - invalid (vex only) */
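On real hardware the instruction decoded here is exposed to C code as the _mm_clmulepi64_si128 intrinsic; imm8 bit 0 selects the qword of the first source and bit 4 the qword of the second, matching the bEvil handling in the fallback above. A host-side sketch (assumes <wmmintrin.h> and compiling with -mpclmul; not part of the changeset):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <wmmintrin.h>      /* _mm_clmulepi64_si128 */

    int main(void)
    {
        __m128i uSrc1 = _mm_set_epi64x(0 /* high qword */, 3 /* low qword */);
        __m128i uSrc2 = _mm_set_epi64x(0, 3);
        __m128i uDst  = _mm_clmulepi64_si128(uSrc1, uSrc2, 0x00); /* low x low */

        uint64_t au64[2];
        memcpy(au64, &uDst, sizeof(au64));
        printf("3 clmul 3 = %llu\n", (unsigned long long)au64[0]); /* prints 5, matching the fallback */
        return 0;
    }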
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsVexMap3.cpp.h
(r96537 → r96652)

 FNIEMOP_STUB(iemOp_vmpsadbw_Vx_Hx_Wx_Ib);
 /*  Opcode VEX.66.0F3A 0x43 - invalid */
+
+
 /** Opcode VEX.66.0F3A 0x44. */
-FNIEMOP_STUB(iemOp_vpclmulqdq_Vdq_Hdq_Wdq_Ib);
+FNIEMOP_DEF(iemOp_vpclmulqdq_Vdq_Hdq_Wdq_Ib)
+{
+    //IEMOP_MNEMONIC3(VEX_RVM, VPCLMULQDQ, vpclmulqdq, Vdq, Hdq, Wdq, DISOPTYPE_HARMLESS, 0); /* @todo */
+
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+    {
+        /*
+         * Register, register.
+         */
+        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
+        IEMOP_HLP_DONE_VEX_DECODING_L0_EX(fPclMul);
+        IEM_MC_BEGIN(4, 0);
+        IEM_MC_ARG(PRTUINT128U,  puDst,  0);
+        IEM_MC_ARG(PCRTUINT128U, puSrc1, 1);
+        IEM_MC_ARG(PCRTUINT128U, puSrc2, 2);
+        IEM_MC_ARG_CONST(uint8_t, bImmArg, /*=*/ bImm, 3);
+        IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+        IEM_MC_PREPARE_AVX_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puSrc1, IEM_GET_EFFECTIVE_VVVV(pVCpu));
+        IEM_MC_REF_XREG_U128_CONST(puSrc2, IEM_GET_MODRM_RM(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_4(IEM_SELECT_HOST_OR_FALLBACK(fPclMul, iemAImpl_vpclmulqdq_u128, iemAImpl_vpclmulqdq_u128_fallback),
+                                 puDst, puSrc1, puSrc2, bImmArg);
+        IEM_MC_CLEAR_YREG_128_UP(IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(4, 2);
+        IEM_MC_LOCAL(RTUINT128U, uSrc2);
+        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+        IEM_MC_ARG(PRTUINT128U,  puDst,  0);
+        IEM_MC_ARG(PCRTUINT128U, puSrc1, 1);
+        IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc2, uSrc2, 2);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
+        IEM_MC_ARG_CONST(uint8_t, bImmArg, /*=*/ bImm, 3);
+        IEMOP_HLP_DONE_VEX_DECODING_L0_EX(fPclMul);
+        IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+        IEM_MC_PREPARE_AVX_USAGE();
+
+        IEM_MC_FETCH_MEM_U128_NO_AC(uSrc2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puSrc1, IEM_GET_EFFECTIVE_VVVV(pVCpu));
+        IEM_MC_CALL_VOID_AIMPL_4(IEM_SELECT_HOST_OR_FALLBACK(fPclMul, iemAImpl_vpclmulqdq_u128, iemAImpl_vpclmulqdq_u128_fallback),
+                                 puDst, puSrc1, puSrc2, bImmArg);
+        IEM_MC_CLEAR_YREG_128_UP(IEM_GET_MODRM_REG(pVCpu, bRm));
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
 /* Opcode VEX.66.0F3A 0x45 - invalid */
 /** Opcode VEX.66.0F3A 0x46 (vex only) */
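The four imm8 selector values (0x00, 0x01, 0x10, 0x11) are what lets callers such as GHASH build a full 128x128 -> 256-bit carry-less product from four qword multiplies. A sketch of that standard composition using the intrinsic (illustration only, not part of the changeset; compiling with -mavx makes the compiler emit the VEX-encoded vpclmulqdq handled above):

    #include <wmmintrin.h>      /* compile with -mpclmul */

    /* Schoolbook 128x128 -> 256-bit carry-less multiply: *pLo receives
       bits 0..127 of the product, *pHi bits 128..255. */
    static void Clmul128(__m128i uA, __m128i uB, __m128i *pLo, __m128i *pHi)
    {
        __m128i uLoLo = _mm_clmulepi64_si128(uA, uB, 0x00); /* A.lo x B.lo */
        __m128i uHiLo = _mm_clmulepi64_si128(uA, uB, 0x01); /* A.hi x B.lo */
        __m128i uLoHi = _mm_clmulepi64_si128(uA, uB, 0x10); /* A.lo x B.hi */
        __m128i uHiHi = _mm_clmulepi64_si128(uA, uB, 0x11); /* A.hi x B.hi */
        __m128i uMid  = _mm_xor_si128(uHiLo, uLoHi);        /* cross terms, weight 2^64 */
        *pLo = _mm_xor_si128(uLoLo, _mm_slli_si128(uMid, 8));
        *pHi = _mm_xor_si128(uHiHi, _mm_srli_si128(uMid, 8));
    }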
trunk/src/VBox/VMM/include/IEMInternal.h
(r96636 → r96652)

 IEM_DECL_IMPL_DEF(void, iemAImpl_pcmpistri_u128_fallback,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPISTRISRC pSrc, uint8_t bEvil));

+FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pclmulqdq_u128, iemAImpl_pclmulqdq_u128_fallback;
+FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpclmulqdq_u128, iemAImpl_vpclmulqdq_u128_fallback;
 /** @} */
trunk/src/VBox/VMM/include/IEMMc.h
(r96407 → r96652)

             return iemRaiseUndefinedOpcode(pVCpu); \
         } \
     } while (0)
+#define IEM_MC_MAYBE_RAISE_PCLMUL_RELATED_XCPT() \
+    do { \
+        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
+            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fPclMul) \
+            return iemRaiseUndefinedOpcode(pVCpu); \
+        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+            return iemRaiseDeviceNotAvailable(pVCpu); \
+    } while (0)
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
(r96440 → r96652)

 #define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) do { (void)fMcBegin; } while (0)
 #define IEM_MC_MAYBE_RAISE_SSE_AVX_SIMD_FP_OR_UD_XCPT()      do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_PCLMUL_RELATED_XCPT()             do { (void)fMcBegin; } while (0)

 #define IEM_MC_LOCAL(a_Type, a_Name) (void)fMcBegin; \