Changeset 98703 in vbox
- Timestamp: Feb 23, 2023 3:10:16 PM
- svn:sync-xref-src-repo-rev: 156003
- Location: trunk
- Files: 10 edited
Legend:
- Unmodified: plain (space-prefixed) lines
- Added: lines prefixed with +
- Removed: lines prefixed with -
trunk/include/VBox/vmm/cpum.h
r98103 → r98703:

     /** Support MOVBE instruction. */
     uint32_t fMovBe : 1;
+    /** Support SHA instructions. */
+    uint32_t fSha : 1;

     /** Supports AMD 3DNow instructions. */
…
     /** Alignment padding / reserved for future use (96 bits total, plus 12 bytes
      * prior to the bit fields -> total of 24 bytes) */
-    uint32_t fPadding0 : 26;
+    uint32_t fPadding0 : 25;
trunk/src/VBox/VMM/VMMAll/CPUMAllCpuId.cpp
r98103 → r98703:

     pFeatures->fHle  = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_HLE);
     pFeatures->fRtm  = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_RTM);
+    pFeatures->fSha  = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_SHA);

     pFeatures->fIbpb = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB);
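With fSha populated from CPUID leaf 7, sub-leaf 0, EBX bit 29 (X86_CPUID_STEXT_FEATURE_EBX_SHA, see the CPUMR3CpuId.cpp hunk below), a guest can detect the feature the usual way. A minimal guest-side sketch, assuming a GCC/Clang toolchain with <cpuid.h> (not part of the changeset):

    /* Guest-side check for the SHA extensions bit this change starts
     * reporting: CPUID leaf 7, sub-leaf 0, EBX bit 29. */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned uEax = 0, uEbx = 0, uEcx = 0, uEdx = 0;
        if (__get_cpuid_count(7, 0, &uEax, &uEbx, &uEcx, &uEdx) && (uEbx & (1u << 29)))
            puts("SHA instructions supported");
        else
            puts("SHA instructions not supported");
        return 0;
    }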
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm
r98103 → r98703:

 IEMIMPL_MEDIA_OPT_F2 aesenclast, 0
 IEMIMPL_MEDIA_OPT_F2 aesdeclast, 0
+IEMIMPL_MEDIA_OPT_F2 sha1nexte, 0
+IEMIMPL_MEDIA_OPT_F2 sha1msg1, 0
+IEMIMPL_MEDIA_OPT_F2 sha1msg2, 0
+IEMIMPL_MEDIA_OPT_F2 sha256msg1, 0
+IEMIMPL_MEDIA_OPT_F2 sha256msg2, 0

 ;;
…
 IEMIMPL_RDRAND_RDSEED rdseed, rax, 64

+
+;;
+; sha1rnds4 xmm1, xmm2, imm8.
+;
+; @param    1      The instruction name.
+;
+; @param    A0     Pointer to the first media register size operand (input/output).
+; @param    A1     Pointer to the second source media register size operand (input).
+; @param    A2     The 8-bit immediate
+;
+BEGINPROC_FASTCALL iemAImpl_sha1rnds4_u128, 16
+        PROLOGUE_3_ARGS
+        IEMIMPL_SSE_PROLOGUE
+
+        movdqu  xmm0, [A0]
+        movdqu  xmm1, [A1]
+        lea     T1, [.imm0 xWrtRIP]
+        lea     T0, [A2 + A2*2]         ; sizeof(insnX+ret) == 6: (A2 * 3) * 2
+        lea     T1, [T1 + T0*2]
+        call    T1
+        movdqu  [A0], xmm0
+
+        IEMIMPL_SSE_EPILOGUE
+        EPILOGUE_3_ARGS
+ %assign bImm 0
+ %rep 256
+.imm %+ bImm:
+        sha1rnds4 xmm0, xmm1, bImm
+        ret
+ %assign bImm bImm + 1
+ %endrep
+.immEnd:                                ; 256*6 == 0x600
+        dw 0xf9ff  + (.immEnd - .imm0)  ; will cause warning if entries are too big.
+        dw 0x105ff - (.immEnd - .imm0)  ; will cause warning if entries are too small.
+ENDPROC iemAImpl_sha1rnds4_u128
+
+
+;;
+; sha256rnds2 xmm1, xmm2, <XMM0>.
+;
+; @param    1      The instruction name.
+;
+; @param    A0     Pointer to the first media register size operand (input/output).
+; @param    A1     Pointer to the second source media register size operand (input).
+; @param    A2     Pointer to the implicit XMM0 constants (input).
+;
+BEGINPROC_FASTCALL iemAImpl_sha256rnds2_u128, 16
+        PROLOGUE_3_ARGS
+        IEMIMPL_SSE_PROLOGUE
+
+        movdqu  xmm0, [A2]
+        movdqu  xmm1, [A0]
+        movdqu  xmm2, [A1]
+        sha256rnds2 xmm1, xmm2
+        movdqu  [A0], xmm1
+
+        IEMIMPL_SSE_EPILOGUE
+        EPILOGUE_3_ARGS
+ENDPROC iemAImpl_sha256rnds2_u128
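The sha1rnds4 helper above cannot forward a runtime imm8 to the instruction, so it emits 256 six-byte stubs (a 5-byte sha1rnds4 plus a 1-byte ret) and jumps to the one matching A2; the two lea instructions build the offset as (A2 * 3) * 2 = A2 * 6. The same arithmetic as a self-checking C sketch (illustration only, not part of the changeset):

    /* The dispatch offset computed by the two LEAs: 6 bytes per stub,
     * so the stub for immediate bImm lives at offset bImm * 6. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (uint32_t bImm = 0; bImm < 256; bImm++)
        {
            uint32_t offT0 = bImm + bImm * 2;   /* lea T0, [A2 + A2*2] -> 3*bImm */
            uint32_t off   = offT0 * 2;         /* lea T1, [T1 + T0*2] -> 6*bImm */
            assert(off == bImm * 6);
        }
        assert(256 * 6 == 0x600);               /* matches the .immEnd comment   */
        return 0;
    }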
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp
r98492 → r98703:

 }

+
+/**
+ * SHA1NEXTE
+ */
+IEM_DECL_IMPL_DEF(void, iemAImpl_sha1nexte_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc))
+{
+    uint32_t u32Tmp = ASMRotateLeftU32(puDst->au32[3], 30);
+
+    puDst->au32[0] = puSrc->au32[0];
+    puDst->au32[1] = puSrc->au32[1];
+    puDst->au32[2] = puSrc->au32[2];
+    puDst->au32[3] = puSrc->au32[3] + u32Tmp;
+}
+
+
+/**
+ * SHA1MSG1
+ */
+IEM_DECL_IMPL_DEF(void, iemAImpl_sha1msg1_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc))
+{
+    uint32_t u32W0 = puDst->au32[3];
+    uint32_t u32W1 = puDst->au32[2];
+    uint32_t u32W2 = puDst->au32[1];
+    uint32_t u32W3 = puDst->au32[0];
+    uint32_t u32W4 = puSrc->au32[3];
+    uint32_t u32W5 = puSrc->au32[2];
+
+    puDst->au32[3] = u32W2 ^ u32W0;
+    puDst->au32[2] = u32W3 ^ u32W1;
+    puDst->au32[1] = u32W4 ^ u32W2;
+    puDst->au32[0] = u32W5 ^ u32W3;
+}
+
+
+/**
+ * SHA1MSG2
+ */
+IEM_DECL_IMPL_DEF(void, iemAImpl_sha1msg2_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc))
+{
+    uint32_t u32W13 = puSrc->au32[2];
+    uint32_t u32W14 = puSrc->au32[1];
+    uint32_t u32W15 = puSrc->au32[0];
+    uint32_t u32W16 = ASMRotateLeftU32(puDst->au32[3] ^ u32W13, 1);
+    uint32_t u32W17 = ASMRotateLeftU32(puDst->au32[2] ^ u32W14, 1);
+    uint32_t u32W18 = ASMRotateLeftU32(puDst->au32[1] ^ u32W15, 1);
+    uint32_t u32W19 = ASMRotateLeftU32(puDst->au32[0] ^ u32W16, 1);
+
+    puDst->au32[3] = u32W16;
+    puDst->au32[2] = u32W17;
+    puDst->au32[1] = u32W18;
+    puDst->au32[0] = u32W19;
+}
+
+
+/**
+ * SHA1RNDS4
+ */
+typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHA1RNDS4FN, (uint32_t u32B, uint32_t u32C, uint32_t u32D));
+typedef FNIEMAIMPLSHA1RNDS4FN *PFNIEMAIMPLSHA1RNDS4FN;
+
+static DECLCALLBACK(uint32_t) iemAImpl_sha1rnds4_f0(uint32_t u32B, uint32_t u32C, uint32_t u32D)
+{
+    return (u32B & u32C) ^ (~u32B & u32D);
+}
+
+static DECLCALLBACK(uint32_t) iemAImpl_sha1rnds4_f1(uint32_t u32B, uint32_t u32C, uint32_t u32D)
+{
+    return u32B ^ u32C ^ u32D;
+}
+
+static DECLCALLBACK(uint32_t) iemAImpl_sha1rnds4_f2(uint32_t u32B, uint32_t u32C, uint32_t u32D)
+{
+    return (u32B & u32C) ^ (u32B & u32D) ^ (u32C & u32D);
+}
+
+static DECLCALLBACK(uint32_t) iemAImpl_sha1rnds4_f3(uint32_t u32B, uint32_t u32C, uint32_t u32D)
+{
+    return u32B ^ u32C ^ u32D;
+}
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_sha1rnds4_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil))
+{
+    static uint32_t s_au32K[] = { UINT32_C(0x5a827999), UINT32_C(0x6ed9eba1), UINT32_C(0x8f1bbcdc), UINT32_C(0xca62c1d6) };
+    static PFNIEMAIMPLSHA1RNDS4FN s_apfnFn[] = { iemAImpl_sha1rnds4_f0, iemAImpl_sha1rnds4_f1, iemAImpl_sha1rnds4_f2, iemAImpl_sha1rnds4_f3 };
+
+    uint32_t au32A[5];
+    uint32_t au32B[5];
+    uint32_t au32C[5];
+    uint32_t au32D[5];
+    uint32_t au32E[5];
+    uint32_t au32W[4];
+    PFNIEMAIMPLSHA1RNDS4FN pfnFn = s_apfnFn[bEvil & 0x3];
+    uint32_t u32K = s_au32K[bEvil & 0x3];
+
+    au32A[0] = puDst->au32[3];
+    au32B[0] = puDst->au32[2];
+    au32C[0] = puDst->au32[1];
+    au32D[0] = puDst->au32[0];
+    for (uint32_t i = 0; i < RT_ELEMENTS(au32W); i++)
+        au32W[i] = puSrc->au32[3 - i];
+
+    /* Round 0 is a bit different than the other rounds. */
+    au32A[1] = pfnFn(au32B[0], au32C[0], au32D[0]) + ASMRotateLeftU32(au32A[0], 5) + au32W[0] + u32K;
+    au32B[1] = au32A[0];
+    au32C[1] = ASMRotateLeftU32(au32B[0], 30);
+    au32D[1] = au32C[0];
+    au32E[1] = au32D[0];
+
+    for (uint32_t i = 1; i <= 3; i++)
+    {
+        au32A[i + 1] = pfnFn(au32B[i], au32C[i], au32D[i]) + ASMRotateLeftU32(au32A[i], 5) + au32W[i] + au32E[i] + u32K;
+        au32B[i + 1] = au32A[i];
+        au32C[i + 1] = ASMRotateLeftU32(au32B[i], 30);
+        au32D[i + 1] = au32C[i];
+        au32E[i + 1] = au32D[i];
+    }
+
+    puDst->au32[3] = au32A[4];
+    puDst->au32[2] = au32B[4];
+    puDst->au32[1] = au32C[4];
+    puDst->au32[0] = au32D[4];
+}
+
+
+/**
+ * SHA256MSG1
+ */
+DECLINLINE(uint32_t) iemAImpl_sha256_lower_sigma0(uint32_t u32Val)
+{
+    return ASMRotateRightU32(u32Val, 7) ^ ASMRotateRightU32(u32Val, 18) ^ (u32Val >> 3);
+}
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_sha256msg1_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc))
+{
+    uint32_t u32W4 = puSrc->au32[0];
+    uint32_t u32W3 = puDst->au32[3];
+    uint32_t u32W2 = puDst->au32[2];
+    uint32_t u32W1 = puDst->au32[1];
+    uint32_t u32W0 = puDst->au32[0];
+
+    puDst->au32[3] = u32W3 + iemAImpl_sha256_lower_sigma0(u32W4);
+    puDst->au32[2] = u32W2 + iemAImpl_sha256_lower_sigma0(u32W3);
+    puDst->au32[1] = u32W1 + iemAImpl_sha256_lower_sigma0(u32W2);
+    puDst->au32[0] = u32W0 + iemAImpl_sha256_lower_sigma0(u32W1);
+}
+
+
+/**
+ * SHA256MSG2
+ */
+DECLINLINE(uint32_t) iemAImpl_sha256_lower_sigma1(uint32_t u32Val)
+{
+    return ASMRotateRightU32(u32Val, 17) ^ ASMRotateRightU32(u32Val, 19) ^ (u32Val >> 10);
+}
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_sha256msg2_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc))
+{
+    uint32_t u32W14 = puSrc->au32[2];
+    uint32_t u32W15 = puSrc->au32[3];
+    uint32_t u32W16 = puDst->au32[0] + iemAImpl_sha256_lower_sigma1(u32W14);
+    uint32_t u32W17 = puDst->au32[1] + iemAImpl_sha256_lower_sigma1(u32W15);
+    uint32_t u32W18 = puDst->au32[2] + iemAImpl_sha256_lower_sigma1(u32W16);
+    uint32_t u32W19 = puDst->au32[3] + iemAImpl_sha256_lower_sigma1(u32W17);
+
+    puDst->au32[3] = u32W19;
+    puDst->au32[2] = u32W18;
+    puDst->au32[1] = u32W17;
+    puDst->au32[0] = u32W16;
+}
+
+
+/**
+ * SHA256RNDS2
+ */
+DECLINLINE(uint32_t) iemAImpl_sha256_ch(uint32_t u32X, uint32_t u32Y, uint32_t u32Z)
+{
+    return (u32X & u32Y) ^ (~u32X & u32Z);
+}
+
+DECLINLINE(uint32_t) iemAImpl_sha256_maj(uint32_t u32X, uint32_t u32Y, uint32_t u32Z)
+{
+    return (u32X & u32Y) ^ (u32X & u32Z) ^ (u32Y & u32Z);
+}
+
+DECLINLINE(uint32_t) iemAImpl_sha256_upper_sigma0(uint32_t u32Val)
+{
+    return ASMRotateRightU32(u32Val, 2) ^ ASMRotateRightU32(u32Val, 13) ^ ASMRotateRightU32(u32Val, 22);
+}
+
+DECLINLINE(uint32_t) iemAImpl_sha256_upper_sigma1(uint32_t u32Val)
+{
+    return ASMRotateRightU32(u32Val, 6) ^ ASMRotateRightU32(u32Val, 11) ^ ASMRotateRightU32(u32Val, 25);
+}
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants))
+{
+    uint32_t au32A[3];
+    uint32_t au32B[3];
+    uint32_t au32C[3];
+    uint32_t au32D[3];
+    uint32_t au32E[3];
+    uint32_t au32F[3];
+    uint32_t au32G[3];
+    uint32_t au32H[3];
+    uint32_t au32WK[2];
+
+    au32A[0] = puSrc->au32[3];
+    au32B[0] = puSrc->au32[2];
+    au32C[0] = puDst->au32[3];
+    au32D[0] = puDst->au32[2];
+    au32E[0] = puSrc->au32[1];
+    au32F[0] = puSrc->au32[0];
+    au32G[0] = puDst->au32[1];
+    au32H[0] = puDst->au32[0];
+
+    au32WK[0] = puXmm0Constants->au32[0];
+    au32WK[1] = puXmm0Constants->au32[1];
+
+    for (uint32_t i = 0; i < 2; i++)
+    {
+        au32A[i + 1] = iemAImpl_sha256_ch(au32E[i], au32F[i], au32G[i])
+                     + iemAImpl_sha256_upper_sigma1(au32E[i])
+                     + au32WK[i]
+                     + au32H[i]
+                     + iemAImpl_sha256_maj(au32A[i], au32B[i], au32C[i])
+                     + iemAImpl_sha256_upper_sigma0(au32A[i]);
+        au32B[i + 1] = au32A[i];
+        au32C[i + 1] = au32B[i];
+        au32D[i + 1] = au32C[i];
+        au32E[i + 1] = iemAImpl_sha256_ch(au32E[i], au32F[i], au32G[i])
+                     + iemAImpl_sha256_upper_sigma1(au32E[i])
+                     + au32WK[i]
+                     + au32H[i]
+                     + au32D[i];
+        au32F[i + 1] = au32E[i];
+        au32G[i + 1] = au32F[i];
+        au32H[i + 1] = au32G[i];
+    }
+
+    puDst->au32[3] = au32A[2];
+    puDst->au32[2] = au32B[2];
+    puDst->au32[1] = au32E[2];
+    puDst->au32[0] = au32F[2];
+}
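For context: these fallbacks split the FIPS 180-4 SHA-1 message schedule, W[i] = ROL1(W[i-16] ^ W[i-14] ^ W[i-8] ^ W[i-3]), between SHA1MSG1 (the W[i-16] ^ W[i-14] terms) and SHA1MSG2 (the W[i-3] term and the rotate); in typical usage a plain pxor supplies the W[i-8] term in between. A standalone C model mirroring the SHA1MSG1 fallback above (the U128 type and the test values are illustrative, not from the changeset):

    /* Standalone model of the SHA1MSG1 fallback: dwords 3..0 of the
     * destination hold W0..W3, the source's top two dwords hold W4,W5. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t au32[4]; } U128;

    static void sha1msg1_model(U128 *pDst, const U128 *pSrc)
    {
        uint32_t W0 = pDst->au32[3], W1 = pDst->au32[2];
        uint32_t W2 = pDst->au32[1], W3 = pDst->au32[0];
        uint32_t W4 = pSrc->au32[3], W5 = pSrc->au32[2];

        pDst->au32[3] = W2 ^ W0;    /* feeds W16: W[16-16] ^ W[16-14] */
        pDst->au32[2] = W3 ^ W1;    /* feeds W17 */
        pDst->au32[1] = W4 ^ W2;    /* feeds W18 */
        pDst->au32[0] = W5 ^ W3;    /* feeds W19 */
    }

    int main(void)
    {
        U128 uDst = {{ 0x33333333, 0x22222222, 0x11111111, 0x00000000 }}; /* W3..W0 */
        U128 uSrc = {{ 0x00000000, 0x00000000, 0x55555555, 0x44444444 }}; /* -,-,W5,W4 */
        sha1msg1_model(&uDst, &uSrc);
        printf("%08x %08x %08x %08x\n",
               uDst.au32[3], uDst.au32[2], uDst.au32[1], uDst.au32[0]);
        return 0;
    }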
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f38.cpp.h
r98103 → r98703:

  *
  * @sa iemOpCommonSse2_FullFull_To_Full, iemOpCommonSsse3_FullFull_To_Full,
- *     iemOpCommonSse41_FullFull_To_Full, iemOpCommonSse42_FullFull_To_Full
+ *     iemOpCommonSse41_FullFull_To_Full, iemOpCommonSse42_FullFull_To_Full,
+ *     iemOpCommonSha_FullFull_To_Full
  */
 FNIEMOP_DEF_1(iemOpCommonAesNi_FullFull_To_Full, PFNIEMAIMPLMEDIAOPTF2U128, pfnU128)
…
         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
         IEM_MC_MAYBE_RAISE_AESNI_RELATED_XCPT();
+        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU128, puDst, puSrc);
+
+        IEM_MC_ADVANCE_RIP_AND_FINISH();
+        IEM_MC_END();
+    }
+}
+
+
+/**
+ * Common worker for SSE-style SHA instructions of the form:
+ *     shaxxx    xmm1, xmm2/mem128
+ *
+ * Proper alignment of the 128-bit operand is enforced.
+ * Exceptions type 4. SHA cpuid checks.
+ *
+ * Unlike iemOpCommonSse41_FullFull_To_Full, the @a pfnU128 worker function
+ * takes no FXSAVE state, just the operands.
+ *
+ * @sa iemOpCommonSse2_FullFull_To_Full, iemOpCommonSsse3_FullFull_To_Full,
+ *     iemOpCommonSse41_FullFull_To_Full, iemOpCommonSse42_FullFull_To_Full,
+ *     iemOpCommonAesNi_FullFull_To_Full
+ */
+FNIEMOP_DEF_1(iemOpCommonSha_FullFull_To_Full, PFNIEMAIMPLMEDIAOPTF2U128, pfnU128)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
+        IEM_MC_MAYBE_RAISE_SHA_RELATED_XCPT();
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puSrc, IEM_GET_MODRM_RM(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU128, puDst, puSrc);
+        IEM_MC_ADVANCE_RIP_AND_FINISH();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(2, 2);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_LOCAL(RTUINT128U, uSrc);
+        IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_SHA_RELATED_XCPT();
         IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
…
 /*  Opcode      0x0f 0x38 0xc7 - invalid. */
 /*  Opcode 0x66 0x0f 0x38 0xc7 - invalid. */
+
+
 /** Opcode      0x0f 0x38 0xc8. */
-FNIEMOP_STUB(iemOp_sha1nexte_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_sha1nexte_Vdq_Wdq)
+{
+    IEMOP_MNEMONIC2(RM, SHA1NEXTE, sha1nexte, Vdq, Wdq, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, IEMOPHINT_IGNORES_OP_SIZES);
+    return FNIEMOP_CALL_1(iemOpCommonSha_FullFull_To_Full,
+                          IEM_SELECT_HOST_OR_FALLBACK(fSha, iemAImpl_sha1nexte_u128, iemAImpl_sha1nexte_u128_fallback));
+}
+
+
 /*  Opcode 0x66 0x0f 0x38 0xc8 - invalid. */
+
+
 /** Opcode      0x0f 0x38 0xc9. */
-FNIEMOP_STUB(iemOp_sha1msg1_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_sha1msg1_Vdq_Wdq)
+{
+    IEMOP_MNEMONIC2(RM, SHA1MSG1, sha1msg1, Vdq, Wdq, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, IEMOPHINT_IGNORES_OP_SIZES);
+    return FNIEMOP_CALL_1(iemOpCommonSha_FullFull_To_Full,
+                          IEM_SELECT_HOST_OR_FALLBACK(fSha, iemAImpl_sha1msg1_u128, iemAImpl_sha1msg1_u128_fallback));
+}
+
+
 /*  Opcode 0x66 0x0f 0x38 0xc9 - invalid. */
+
+
 /** Opcode      0x0f 0x38 0xca. */
-FNIEMOP_STUB(iemOp_sha1msg2_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_sha1msg2_Vdq_Wdq)
+{
+    IEMOP_MNEMONIC2(RM, SHA1MSG2, sha1msg2, Vdq, Wdq, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, IEMOPHINT_IGNORES_OP_SIZES);
+    return FNIEMOP_CALL_1(iemOpCommonSha_FullFull_To_Full,
+                          IEM_SELECT_HOST_OR_FALLBACK(fSha, iemAImpl_sha1msg2_u128, iemAImpl_sha1msg2_u128_fallback));
+}
+
+
 /*  Opcode 0x66 0x0f 0x38 0xca - invalid. */
+
+
 /** Opcode      0x0f 0x38 0xcb. */
-FNIEMOP_STUB(iemOp_sha256rnds2_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_sha256rnds2_Vdq_Wdq)
+{
+    IEMOP_MNEMONIC2(RM, SHA256RNDS2, sha256rnds2, Vdq, Wdq, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, IEMOPHINT_IGNORES_OP_SIZES); /** @todo Actually RMI with implicit XMM0 */
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(3, 0);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
+        IEM_MC_ARG(PCRTUINT128U, puXmm0, 2);
+        IEM_MC_MAYBE_RAISE_SHA_RELATED_XCPT();
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puSrc, IEM_GET_MODRM_RM(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puXmm0, 0);
+        IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(fSha, iemAImpl_sha256rnds2_u128, iemAImpl_sha256rnds2_u128_fallback),
+                                 puDst, puSrc, puXmm0);
+        IEM_MC_ADVANCE_RIP_AND_FINISH();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(3, 2);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_LOCAL(RTUINT128U, uSrc);
+        IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
+        IEM_MC_ARG(PCRTUINT128U, puXmm0, 2);
+        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_SHA_RELATED_XCPT();
+        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puXmm0, 0);
+        IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(fSha, iemAImpl_sha256rnds2_u128, iemAImpl_sha256rnds2_u128_fallback),
+                                 puDst, puSrc, puXmm0);
+        IEM_MC_ADVANCE_RIP_AND_FINISH();
+        IEM_MC_END();
+    }
+}
+
+
 /*  Opcode 0x66 0x0f 0x38 0xcb - invalid. */
+
+
 /** Opcode      0x0f 0x38 0xcc. */
-FNIEMOP_STUB(iemOp_sha256msg1_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_sha256msg1_Vdq_Wdq)
+{
+    IEMOP_MNEMONIC2(RM, SHA256MSG1, sha256msg1, Vdq, Wdq, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, IEMOPHINT_IGNORES_OP_SIZES);
+    return FNIEMOP_CALL_1(iemOpCommonSha_FullFull_To_Full,
+                          IEM_SELECT_HOST_OR_FALLBACK(fSha, iemAImpl_sha256msg1_u128, iemAImpl_sha256msg1_u128_fallback));
+}
+
+
 /*  Opcode 0x66 0x0f 0x38 0xcc - invalid. */
+
+
 /** Opcode      0x0f 0x38 0xcd. */
-FNIEMOP_STUB(iemOp_sha256msg2_Vdq_Wdq);
+FNIEMOP_DEF(iemOp_sha256msg2_Vdq_Wdq)
+{
+    IEMOP_MNEMONIC2(RM, SHA256MSG2, sha256msg2, Vdq, Wdq, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, IEMOPHINT_IGNORES_OP_SIZES);
+    return FNIEMOP_CALL_1(iemOpCommonSha_FullFull_To_Full,
+                          IEM_SELECT_HOST_OR_FALLBACK(fSha, iemAImpl_sha256msg2_u128, iemAImpl_sha256msg2_u128_fallback));
+}
+
+
 /*  Opcode 0x66 0x0f 0x38 0xcd - invalid. */
 /*  Opcode      0x0f 0x38 0xce - invalid. */
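Once the fSha CPUID bit is exposed, guest code built with a SHA-NI-capable compiler will reach these decoder paths instead of raising #UD. A hypothetical guest-side snippet (assumes GCC/Clang with -msha; not from the changeset):

    /* Touches three of the opcodes this hunk decodes: sha1nexte (0f 38 c8),
     * sha1msg1 (0f 38 c9) and sha256msg1 (0f 38 cc). Not a real hash round,
     * just enough to exercise the decoders. Build with: gcc -msha -c */
    #include <immintrin.h>

    __m128i touch_sha_ops(__m128i a, __m128i b)
    {
        a = _mm_sha1nexte_epu32(a, b);
        a = _mm_sha1msg1_epu32(a, b);
        return _mm_sha256msg1_epu32(a, b);
    }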
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThree0f3a.cpp.h
r98103 → r98703:

 /* Opcode 0x0f 0xca - invalid */
 /* Opcode 0x0f 0xcb - invalid */
+
+
 /* Opcode 0x0f 0xcc */
-FNIEMOP_STUB(iemOp_sha1rnds4_Vdq_Wdq_Ib);
+FNIEMOP_DEF(iemOp_sha1rnds4_Vdq_Wdq_Ib)
+{
+    IEMOP_MNEMONIC3(RMI, SHA1RNDS4, sha1rnds4, Vdq, Wdq, Ib, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, 0);
+
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+    {
+        /*
+         * XMM, XMM, imm8
+         */
+        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(3, 0);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
+        IEM_MC_ARG_CONST(uint8_t, bImmArg, /*=*/ bImm, 2);
+        IEM_MC_MAYBE_RAISE_SHA_RELATED_XCPT();
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puSrc, IEM_GET_MODRM_RM(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(fSha,
+                                                             iemAImpl_sha1rnds4_u128,
+                                                             iemAImpl_sha1rnds4_u128_fallback),
+                                 puDst, puSrc, bImmArg);
+        IEM_MC_ADVANCE_RIP_AND_FINISH();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * XMM, [mem128], imm8.
+         */
+        IEM_MC_BEGIN(3, 2);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_LOCAL(RTUINT128U, uSrc);
+        IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
+        uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
+        IEM_MC_ARG_CONST(uint8_t, bImmArg, /*=*/ bImm, 2);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_SHA_RELATED_XCPT();
+        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(fSha,
+                                                             iemAImpl_sha1rnds4_u128,
+                                                             iemAImpl_sha1rnds4_u128_fallback),
+                                 puDst, puSrc, bImmArg);
+        IEM_MC_ADVANCE_RIP_AND_FINISH();
+        IEM_MC_END();
+    }
+}
+
+
 /* Opcode 0x0f 0xcd - invalid */
 /* Opcode 0x0f 0xce - invalid */
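The immediate (bEvil in the C fallback) selects the SHA-1 round group through its low two bits, indexing the s_apfnFn and s_au32K tables shown in the IEMAllAImplC.cpp hunk above. The same mapping written out in plain C for reference (sketch, not from the changeset):

    /* imm8 & 3 -> SHA-1 logical function and round constant, as used by
     * the sha1rnds4 fallback's s_apfnFn / s_au32K tables. */
    #include <stdint.h>

    uint32_t sha1_f(uint8_t bImm, uint32_t b, uint32_t c, uint32_t d)
    {
        switch (bImm & 3)
        {
            case 0:  return (b & c) ^ (~b & d);          /* Ch,     K = 0x5a827999 */
            case 1:  return b ^ c ^ d;                   /* Parity, K = 0x6ed9eba1 */
            case 2:  return (b & c) ^ (b & d) ^ (c & d); /* Maj,    K = 0x8f1bbcdc */
            default: return b ^ c ^ d;                   /* Parity, K = 0xca62c1d6 */
        }
    }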
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r98354 → r98703:

     CPUMISAEXTCFG   enmRdRand;
     CPUMISAEXTCFG   enmRdSeed;
+    CPUMISAEXTCFG   enmSha;
     CPUMISAEXTCFG   enmCLFlushOpt;
     CPUMISAEXTCFG   enmFsGsBase;
…
            //| X86_CPUID_STEXT_FEATURE_EBX_AVX512ER   RT_BIT(27)
            //| X86_CPUID_STEXT_FEATURE_EBX_AVX512CD   RT_BIT(28)
-           //| X86_CPUID_STEXT_FEATURE_EBX_SHA        RT_BIT(29)
+           | PASSTHRU_FEATURE(pConfig->enmSha, pHstFeat->fSha, X86_CPUID_STEXT_FEATURE_EBX_SHA)
            //| RT_BIT(30) - reserved
            //| RT_BIT(31) - reserved
…
             PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, AVX512CD, X86_CPUID_STEXT_FEATURE_EBX_AVX512CD);
             PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, SMAP, X86_CPUID_STEXT_FEATURE_EBX_SMAP);
-            PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEbx, SHA, X86_CPUID_STEXT_FEATURE_EBX_SHA);
+            PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pCurLeaf->uEbx, SHA, X86_CPUID_STEXT_FEATURE_EBX_SHA, pConfig->enmSha);
             PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEcx, PREFETCHWT1, X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1);
             PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, FLUSH_CMD, X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD, pConfig->enmFlushCmdMsr);
…
         if (pConfig->enmCLFlushOpt == CPUMISAEXTCFG_ENABLED_ALWAYS)
             pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT;
+        if (pConfig->enmSha == CPUMISAEXTCFG_ENABLED_ALWAYS)
+            pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_SHA;
         if (pConfig->enmInvpcid == CPUMISAEXTCFG_ENABLED_ALWAYS)
             pCurLeaf->uEbx |= X86_CPUID_STEXT_FEATURE_EBX_INVPCID;
…
                                  "|RDSEED"
                                  "|CLFLUSHOPT"
+                                 "|SHA"
                                  "|FSGSBASE"
                                  "|PCID"
…
      */
     rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "CLFLUSHOPT", &pConfig->enmCLFlushOpt, fNestedPagingAndFullGuestExec);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/CPUM/IsaExts/SHA, isaextcfg, depends}
+     * Whether to expose the SHA instructions to the guest. For the time being
+     * the default is to only do this for VMs with nested paging and AMD-V or
+     * unrestricted guest mode.
+     */
+    rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SHA", &pConfig->enmSha, fNestedPagingAndFullGuestExec);
     AssertLogRelRCReturn(rc, rc);
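Since /CPUM/IsaExts/SHA is an ordinary CFGM key, it can presumably be overridden per VM through the standard VBoxInternal extradata bridge, something like:

    VBoxManage setextradata "myvm" "VBoxInternal/CPUM/IsaExts/SHA" "1"

(The VBoxInternal prefix is the usual way of injecting CFGM values and is not part of this change; the exact accepted value strings follow the isaextcfg convention and are not spelled out in this changeset.)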
trunk/src/VBox/VMM/include/IEMInternal.h
r98103 → r98703:

 FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vaeskeygenassist_u128, iemAImpl_vaeskeygenassist_u128_fallback;

+FNIEMAIMPLMEDIAOPTF2U128     iemAImpl_sha1nexte_u128, iemAImpl_sha1nexte_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128     iemAImpl_sha1msg1_u128, iemAImpl_sha1msg1_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128     iemAImpl_sha1msg2_u128, iemAImpl_sha1msg2_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128     iemAImpl_sha256msg1_u128, iemAImpl_sha256msg1_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128     iemAImpl_sha256msg2_u128, iemAImpl_sha256msg2_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_sha1rnds4_u128, iemAImpl_sha1rnds4_u128_fallback;
+IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
+IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));

 typedef struct IEMPCMPISTRISRC
trunk/src/VBox/VMM/include/IEMMc.h
r98103 → r98703:

             || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
             || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAesNi) \
+            return iemRaiseUndefinedOpcode(pVCpu); \
+        if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
+            return iemRaiseDeviceNotAvailable(pVCpu); \
+    } while (0)
+#define IEM_MC_MAYBE_RAISE_SHA_RELATED_XCPT() \
+    do { \
+        if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
+            || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
+            || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSha) \
             return iemRaiseUndefinedOpcode(pVCpu); \
         if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
r98103 → r98703:

 #define IEM_MC_MAYBE_RAISE_SSE42_RELATED_XCPT()         do { (void)fMcBegin; } while (0)
 #define IEM_MC_MAYBE_RAISE_AESNI_RELATED_XCPT()         do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_SHA_RELATED_XCPT()           do { (void)fMcBegin; } while (0)
 #define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT()           do { (void)fMcBegin; } while (0)
 #define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT()          do { (void)fMcBegin; } while (0)