Changeset 96109 in vbox
Timestamp:
    Aug 8, 2022 11:41:33 AM (2 years ago)
Location:
    trunk/src/VBox/VMM
Files:
    6 edited
Legend:
    ' ' unmodified    '+' added    '-' removed
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm (r96104 → r96109)

 IEMIMPL_MEDIA_OPT_F2 psadbw, 1
 IEMIMPL_MEDIA_OPT_F2 pmuldq, 0
 IEMIMPL_MEDIA_OPT_F2 unpcklps, 0
+IEMIMPL_MEDIA_OPT_F2 unpcklpd, 0
+IEMIMPL_MEDIA_OPT_F2 unpckhps, 0
+IEMIMPL_MEDIA_OPT_F2 unpckhpd, 0

 ;;
…
 IEMIMPL_MEDIA_OPT_F3 vpmuldq
 IEMIMPL_MEDIA_OPT_F3 vpmuludq
 IEMIMPL_MEDIA_OPT_F3 vunpcklps
+IEMIMPL_MEDIA_OPT_F3 vunpcklpd
+IEMIMPL_MEDIA_OPT_F3 vunpckhps
+IEMIMPL_MEDIA_OPT_F3 vunpckhpd

 ;;
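The new IEMIMPL_MEDIA_OPT_F2 instantiations (and their IEMIMPL_MEDIA_OPT_F3 VEX counterparts) generate the assembly-backed implementations of the unpack instructions; the C fallbacks in the next file define the exact element mapping. For readers without the Intel SDM at hand, here is a minimal standalone C sketch of the low unpack (UNPCKLPS) these implement — the names are illustrative only, not VirtualBox APIs:

#include <stdint.h>
#include <stdio.h>

/* UNPCKLPS semantics: interleave the two low 32-bit elements of dst and src. */
static void unpcklps_ref(uint32_t dst[4], const uint32_t src[4])
{
    uint32_t d0 = dst[0], d1 = dst[1];   /* save before overwriting */
    dst[0] = d0;
    dst[1] = src[0];
    dst[2] = d1;
    dst[3] = src[1];
}

int main(void)
{
    uint32_t a[4] = { 0, 1, 2, 3 };
    uint32_t b[4] = { 4, 5, 6, 7 };
    unpcklps_ref(a, b);
    printf("%u %u %u %u\n", a[0], a[1], a[2], a[3]); /* prints: 0 4 1 5 */
    return 0;
}

The pd variants do the same with 64-bit elements, and the hps/hpd variants draw from the high halves of the sources instead.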
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp (r96104 → r96109)

 /*
+ * UNPCKLPS / VUNPCKLPS
+ */
+#ifdef IEM_WITHOUT_ASSEMBLY
+IEM_DECL_IMPL_DEF(void, iemAImpl_unpcklps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc))
+{
+    RTUINT128U uSrc1 = *puDst;
+    RTUINT128U uSrc2 = *puSrc;
+    ASMCompilerBarrier();
+    puDst->au32[0] = uSrc1.au32[0];
+    puDst->au32[1] = uSrc2.au32[0];
+    puDst->au32[2] = uSrc1.au32[1];
+    puDst->au32[3] = uSrc2.au32[1];
+}
+
+#endif
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vunpcklps_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2))
+{
+    RTUINT128U uSrc1 = *puSrc1; /* Could overlap with puDst */
+    RTUINT128U uSrc2 = *puSrc2; /* Could overlap with puDst */
+    ASMCompilerBarrier();
+    puDst->au32[0] = uSrc1.au32[0];
+    puDst->au32[1] = uSrc2.au32[0];
+    puDst->au32[2] = uSrc1.au32[1];
+    puDst->au32[3] = uSrc2.au32[1];
+}
+
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vunpcklps_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2))
+{
+    RTUINT256U uSrc1 = *puSrc1; /* Could overlap with puDst */
+    RTUINT256U uSrc2 = *puSrc2; /* Could overlap with puDst */
+    ASMCompilerBarrier();
+    puDst->au32[0] = uSrc1.au32[0];
+    puDst->au32[1] = uSrc2.au32[0];
+    puDst->au32[2] = uSrc1.au32[1];
+    puDst->au32[3] = uSrc2.au32[1];
+
+    puDst->au32[4] = uSrc1.au32[4];
+    puDst->au32[5] = uSrc2.au32[4];
+    puDst->au32[6] = uSrc1.au32[5];
+    puDst->au32[7] = uSrc2.au32[5];
+}
+
+
+/*
+ * UNPCKLPD / VUNPCKLPD
+ */
+#ifdef IEM_WITHOUT_ASSEMBLY
+IEM_DECL_IMPL_DEF(void, iemAImpl_unpcklpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc))
+{
+    RTUINT128U uSrc1 = *puDst;
+    RTUINT128U uSrc2 = *puSrc;
+    ASMCompilerBarrier();
+    puDst->au64[0] = uSrc1.au64[0];
+    puDst->au64[1] = uSrc2.au64[0];
+}
+
+#endif
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vunpcklpd_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2))
+{
+    RTUINT128U uSrc1 = *puSrc1; /* Could overlap with puDst */
+    RTUINT128U uSrc2 = *puSrc2; /* Could overlap with puDst */
+    ASMCompilerBarrier();
+    puDst->au64[0] = uSrc1.au64[0];
+    puDst->au64[1] = uSrc2.au64[0];
+}
+
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vunpcklpd_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2))
+{
+    RTUINT256U uSrc1 = *puSrc1; /* Could overlap with puDst */
+    RTUINT256U uSrc2 = *puSrc2; /* Could overlap with puDst */
+    ASMCompilerBarrier();
+    puDst->au64[0] = uSrc1.au64[0];
+    puDst->au64[1] = uSrc2.au64[0];
+    puDst->au64[2] = uSrc1.au64[2];
+    puDst->au64[3] = uSrc2.au64[2];
+}
+
+
+/*
+ * UNPCKHPS / VUNPCKHPS
+ */
+#ifdef IEM_WITHOUT_ASSEMBLY
+IEM_DECL_IMPL_DEF(void, iemAImpl_unpckhps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc))
+{
+    RTUINT128U uSrc1 = *puDst;
+    RTUINT128U uSrc2 = *puSrc;
+    ASMCompilerBarrier();
+    puDst->au32[0] = uSrc1.au32[2];
+    puDst->au32[1] = uSrc2.au32[2];
+    puDst->au32[2] = uSrc1.au32[3];
+    puDst->au32[3] = uSrc2.au32[3];
+}
+
+#endif
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vunpckhps_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2))
+{
+    RTUINT128U uSrc1 = *puSrc1; /* Could overlap with puDst */
+    RTUINT128U uSrc2 = *puSrc2; /* Could overlap with puDst */
+    ASMCompilerBarrier();
+    puDst->au32[0] = uSrc1.au32[2];
+    puDst->au32[1] = uSrc2.au32[2];
+    puDst->au32[2] = uSrc1.au32[3];
+    puDst->au32[3] = uSrc2.au32[3];
+}
+
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vunpckhps_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2))
+{
+    RTUINT256U uSrc1 = *puSrc1; /* Could overlap with puDst */
+    RTUINT256U uSrc2 = *puSrc2; /* Could overlap with puDst */
+    ASMCompilerBarrier();
+    puDst->au32[0] = uSrc1.au32[2];
+    puDst->au32[1] = uSrc2.au32[2];
+    puDst->au32[2] = uSrc1.au32[3];
+    puDst->au32[3] = uSrc2.au32[3];
+
+    puDst->au32[4] = uSrc1.au32[6];
+    puDst->au32[5] = uSrc2.au32[6];
+    puDst->au32[6] = uSrc1.au32[7];
+    puDst->au32[7] = uSrc2.au32[7];
+}
+
+
+/*
+ * UNPCKHPD / VUNPCKHPD
+ */
+#ifdef IEM_WITHOUT_ASSEMBLY
+IEM_DECL_IMPL_DEF(void, iemAImpl_unpckhpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc))
+{
+    RTUINT128U uSrc1 = *puDst;
+    RTUINT128U uSrc2 = *puSrc;
+    ASMCompilerBarrier();
+    puDst->au64[0] = uSrc1.au64[1];
+    puDst->au64[1] = uSrc2.au64[1];
+}
+
+#endif
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vunpckhpd_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2))
+{
+    RTUINT128U uSrc1 = *puSrc1; /* Could overlap with puDst */
+    RTUINT128U uSrc2 = *puSrc2; /* Could overlap with puDst */
+    ASMCompilerBarrier();
+    puDst->au64[0] = uSrc1.au64[1];
+    puDst->au64[1] = uSrc2.au64[1];
+}
+
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vunpckhpd_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2))
+{
+    RTUINT256U uSrc1 = *puSrc1; /* Could overlap with puDst */
+    RTUINT256U uSrc2 = *puSrc2; /* Could overlap with puDst */
+    ASMCompilerBarrier();
+    puDst->au64[0] = uSrc1.au64[1];
+    puDst->au64[1] = uSrc2.au64[1];
+    puDst->au64[2] = uSrc1.au64[3];
+    puDst->au64[3] = uSrc2.au64[3];
+}
+
+
+/*
  * CRC32 (SEE 4.2).
  */
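Worth noting in the fallbacks above: both sources are copied into locals before the first store to puDst, because the three-operand VEX forms allow the destination register to alias either source (the comments call this out), and ASMCompilerBarrier() keeps the compiler from sinking the loads below the stores. A minimal standalone sketch of the same copy-first pattern, using plain uint64_t arrays instead of RTUINT128U and omitting the barrier (names are illustrative, not VirtualBox APIs):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* vunpcklpd-style low unpack that tolerates dst aliasing either source,
 * mirroring the copy-first pattern of the IEM fallbacks. */
static void vunpcklpd_ref(uint64_t dst[2], const uint64_t s1[2], const uint64_t s2[2])
{
    uint64_t a[2], b[2];
    memcpy(a, s1, sizeof(a)); /* copy before writing: dst may be s1 or s2 */
    memcpy(b, s2, sizeof(b));
    dst[0] = a[0];
    dst[1] = b[0];
}

int main(void)
{
    uint64_t x[2] = { 1, 2 };
    uint64_t y[2] = { 3, 4 };
    vunpcklpd_ref(x, x, y); /* destination aliases the first source */
    printf("%llu %llu\n", (unsigned long long)x[0], (unsigned long long)x[1]); /* prints: 1 3 */
    return 0;
}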
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h (r96104 → r96109)

+/**
+ * Common worker for MMX instructions on the forms:
+ *     pxxxx mm1, mm2/mem32
+ *
+ * The 2nd operand is the first half of a register, which in the memory case
+ * means a 32-bit memory access.
+ */
+FNIEMOP_DEF_1(iemOpCommonMmx_LowLow_To_Full, FNIEMAIMPLMEDIAOPTF2U64, pfnU64)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(uint64_t *, puDst, 0);
+        IEM_MC_ARG(uint64_t const *, puSrc, 1);
+        IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+        IEM_MC_PREPARE_FPU_USAGE();
+        IEM_MC_REF_MREG_U64(puDst, IEM_GET_MODRM_REG_8(bRm));
+        IEM_MC_REF_MREG_U64_CONST(puSrc, IEM_GET_MODRM_RM_8(bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU64, puDst, puSrc);
+        IEM_MC_MODIFIED_MREG_BY_REF(puDst);
+        IEM_MC_FPU_TO_MMX_MODE();
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(2, 2);
+        IEM_MC_ARG(uint64_t *, puDst, 0);
+        IEM_MC_LOCAL(uint64_t, uSrc);
+        IEM_MC_ARG_LOCAL_REF(uint64_t const *, puSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+        IEM_MC_FETCH_MEM_U32_ZX_U64(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_PREPARE_FPU_USAGE();
+        IEM_MC_REF_MREG_U64(puDst, IEM_GET_MODRM_REG_8(bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU64, puDst, puSrc);
+        IEM_MC_MODIFIED_MREG_BY_REF(puDst);
+        IEM_MC_FPU_TO_MMX_MODE();
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Common worker for SSE instructions on the forms:
+ *     pxxxx xmm1, xmm2/mem128
+ *
+ * The 2nd operand is the first half of a register, which in the memory case
+ * 128-bit aligned 64-bit or 128-bit memory accessed for SSE.
+ *
+ * Exceptions type 4.
+ */
+FNIEMOP_DEF_1(iemOpCommonSse_LowLow_To_Full, PFNIEMAIMPLMEDIAOPTF2U128, pfnU128)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
+        IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
+        IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puSrc, IEM_GET_MODRM_RM(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU128, puDst, puSrc);
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(2, 2);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_LOCAL(RTUINT128U, uSrc);
+        IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
+        /** @todo Most CPUs probably only read the low qword. We read everything to
+         *        make sure we apply segmentation and alignment checks correctly.
+         *        When we have time, it would be interesting to explore what real
+         *        CPUs actually does and whether it will do a TLB load for the high
+         *        part or skip any associated \#PF. Ditto for segmentation \#GPs. */
+        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU128, puDst, puSrc);
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Common worker for SSE2 instructions on the forms:
+ *     pxxxx xmm1, xmm2/mem128
+ *
+ * The 2nd operand is the first half of a register, which in the memory case
+ * 128-bit aligned 64-bit or 128-bit memory accessed for SSE.
+ *
+ * Exceptions type 4.
+ */
+FNIEMOP_DEF_1(iemOpCommonSse2_LowLow_To_Full, PFNIEMAIMPLMEDIAOPTF2U128, pfnU128)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
+        IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+        IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puSrc, IEM_GET_MODRM_RM(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU128, puDst, puSrc);
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(2, 2);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_LOCAL(RTUINT128U, uSrc);
+        IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+        /** @todo Most CPUs probably only read the low qword. We read everything to
+         *        make sure we apply segmentation and alignment checks correctly.
+         *        When we have time, it would be interesting to explore what real
+         *        CPUs actually does and whether it will do a TLB load for the high
+         *        part or skip any associated \#PF. Ditto for segmentation \#GPs. */
+        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU128, puDst, puSrc);
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Common worker for MMX instructions on the form:
+ *     pxxxx mm1, mm2/mem64
+ *
+ * The 2nd operand is the second half of a register, which in the memory case
+ * means a 64-bit memory access for MMX.
+ */
+FNIEMOP_DEF_1(iemOpCommonMmx_HighHigh_To_Full, PFNIEMAIMPLMEDIAOPTF2U64, pfnU64)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+    {
+        /*
+         * Register, register.
+         */
+        /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
+        /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(uint64_t *, puDst, 0);
+        IEM_MC_ARG(uint64_t const *, puSrc, 1);
+        IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+        IEM_MC_PREPARE_FPU_USAGE();
+        IEM_MC_REF_MREG_U64(puDst, IEM_GET_MODRM_REG_8(bRm));
+        IEM_MC_REF_MREG_U64_CONST(puSrc, IEM_GET_MODRM_RM_8(bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU64, puDst, puSrc);
+        IEM_MC_MODIFIED_MREG_BY_REF(puDst);
+        IEM_MC_FPU_TO_MMX_MODE();
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(2, 2);
+        IEM_MC_ARG(uint64_t *, puDst, 0);
+        IEM_MC_LOCAL(uint64_t, uSrc);
+        IEM_MC_ARG_LOCAL_REF(uint64_t const *, puSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+        IEM_MC_FETCH_MEM_U64(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); /* intel docs this to be full 64-bit read */
+
+        IEM_MC_PREPARE_FPU_USAGE();
+        IEM_MC_REF_MREG_U64(puDst, IEM_GET_MODRM_REG_8(bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU64, puDst, puSrc);
+        IEM_MC_MODIFIED_MREG_BY_REF(puDst);
+        IEM_MC_FPU_TO_MMX_MODE();
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Common worker for SSE instructions on the form:
+ *     pxxxx xmm1, xmm2/mem128
+ *
+ * The 2nd operand is the second half of a register, which for SSE a 128-bit
+ * aligned access where it may read the full 128 bits or only the upper 64 bits.
+ *
+ * Exceptions type 4.
+ */
+FNIEMOP_DEF_1(iemOpCommonSse_HighHigh_To_Full, PFNIEMAIMPLMEDIAOPTF2U128, pfnU128)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
+        IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puSrc, IEM_GET_MODRM_RM(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU128, puDst, puSrc);
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(2, 2);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_LOCAL(RTUINT128U, uSrc);
+        IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
+        /** @todo Most CPUs probably only read the high qword. We read everything to
+         *        make sure we apply segmentation and alignment checks correctly.
+         *        When we have time, it would be interesting to explore what real
+         *        CPUs actually does and whether it will do a TLB load for the lower
+         *        part or skip any associated \#PF. */
+        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU128, puDst, puSrc);
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Common worker for SSE2 instructions on the form:
+ *     pxxxx xmm1, xmm2/mem128
+ *
+ * The 2nd operand is the second half of a register, which for SSE a 128-bit
+ * aligned access where it may read the full 128 bits or only the upper 64 bits.
+ *
+ * Exceptions type 4.
+ */
+FNIEMOP_DEF_1(iemOpCommonSse2_HighHigh_To_Full, PFNIEMAIMPLMEDIAOPTF2U128, pfnU128)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (IEM_IS_MODRM_REG_MODE(bRm))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
+        IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_REF_XREG_U128_CONST(puSrc, IEM_GET_MODRM_RM(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU128, puDst, puSrc);
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(2, 2);
+        IEM_MC_ARG(PRTUINT128U, puDst, 0);
+        IEM_MC_LOCAL(RTUINT128U, uSrc);
+        IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+        /** @todo Most CPUs probably only read the high qword. We read everything to
+         *        make sure we apply segmentation and alignment checks correctly.
+         *        When we have time, it would be interesting to explore what real
+         *        CPUs actually does and whether it will do a TLB load for the lower
+         *        part or skip any associated \#PF. */
+        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(puDst, IEM_GET_MODRM_REG(pVCpu, bRm));
+        IEM_MC_CALL_VOID_AIMPL_2(pfnU128, puDst, puSrc);
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
 /** Opcode 0x0f 0x00 /0. */
 FNIEMOPRM_DEF(iemOp_Grp6_sldt)
…
 /** Opcode 0x0f 0x14 - unpcklps Vx, Wx*/
-FNIEMOP_STUB(iemOp_unpcklps_Vx_Wx);
+FNIEMOP_DEF(iemOp_unpcklps_Vx_Wx)
+{
+    IEMOP_MNEMONIC2(RM, UNPCKLPS, unpcklps, Vx, Wx, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, 0);
+    return FNIEMOP_CALL_1(iemOpCommonSse_LowLow_To_Full, iemAImpl_unpcklps_u128);
+}
+
+
 /** Opcode 0x66 0x0f 0x14 - unpcklpd Vx, Wx */
-FNIEMOP_STUB(iemOp_unpcklpd_Vx_Wx);
+FNIEMOP_DEF(iemOp_unpcklpd_Vx_Wx)
+{
+    IEMOP_MNEMONIC2(RM, UNPCKLPD, unpcklpd, Vx, Wx, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, 0);
+    return FNIEMOP_CALL_1(iemOpCommonSse2_LowLow_To_Full, iemAImpl_unpcklpd_u128);
+}
+
…
 /** Opcode 0x0f 0x15 - unpckhps Vx, Wx */
-FNIEMOP_STUB(iemOp_unpckhps_Vx_Wx);
+FNIEMOP_DEF(iemOp_unpckhps_Vx_Wx)
+{
+    IEMOP_MNEMONIC2(RM, UNPCKHPS, unpckhps, Vx, Wx, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, 0);
+    return FNIEMOP_CALL_1(iemOpCommonSse_HighHigh_To_Full, iemAImpl_unpckhps_u128);
+}
+
+
 /** Opcode 0x66 0x0f 0x15 - unpckhpd Vx, Wx */
-FNIEMOP_STUB(iemOp_unpckhpd_Vx_Wx);
+FNIEMOP_DEF(iemOp_unpckhpd_Vx_Wx)
+{
+    IEMOP_MNEMONIC2(RM, UNPCKHPD, unpckhpd, Vx, Wx, DISOPTYPE_HARMLESS | DISOPTYPE_SSE, 0);
+    return FNIEMOP_CALL_1(iemOpCommonSse2_HighHigh_To_Full, iemAImpl_unpckhpd_u128);
+}
+
+
 /* Opcode 0xf3 0x0f 0x15 - invalid */
 /* Opcode 0xf2 0x0f 0x15 - invalid */
…
 FNIEMOP_STUB(iemOp_maxsd_Vsd_Wsd);

-[the old copies of iemOpCommonMmx_LowLow_To_Full and
- iemOpCommonSse2_LowLow_To_Full were deleted here; both moved verbatim to
- the top of the file, as shown above]

 /** Opcode 0x0f 0x60 - punpcklbw Pq, Qd */
…
 /* Opcode 0xf3 0x0f 0x67 - invalid */

-[the old copies of iemOpCommonMmx_HighHigh_To_Full and
- iemOpCommonSse2_HighHigh_To_Full were deleted here; both moved verbatim to
- the top of the file, as shown above]
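The LowLow/HighHigh naming of the workers mirrors which source halves feed the result: for the 0x15 opcodes the output is built entirely from the upper halves of both operands. A standalone sketch of the UNPCKHPS element mapping that the new iemOp_unpckhps_Vx_Wx handler routes to iemAImpl_unpckhps_u128 (illustrative names only, not VirtualBox APIs):

#include <stdint.h>
#include <stdio.h>

/* UNPCKHPS semantics: interleave the two high 32-bit elements of dst and src. */
static void unpckhps_ref(uint32_t dst[4], const uint32_t src[4])
{
    uint32_t d2 = dst[2], d3 = dst[3];   /* save before overwriting */
    dst[0] = d2;
    dst[1] = src[2];
    dst[2] = d3;
    dst[3] = src[3];
}

int main(void)
{
    uint32_t a[4] = { 0, 1, 2, 3 };
    uint32_t b[4] = { 4, 5, 6, 7 };
    unpckhps_ref(a, b);
    printf("%u %u %u %u\n", a[0], a[1], a[2], a[3]); /* prints: 2 6 3 7 */
    return 0;
}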
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsVexMap1.cpp.h (r96104 → r96109)

 /** Opcode VEX.0F 0x14 - vunpcklps Vx, Hx, Wx*/
-FNIEMOP_STUB(iemOp_vunpcklps_Vx_Hx_Wx);
+FNIEMOP_DEF(iemOp_vunpcklps_Vx_Hx_Wx)
+{
+    IEMOP_MNEMONIC3(VEX_RVM, VUNPCKLPS, vunpcklps, Vx, Hx, Wx, DISOPTYPE_HARMLESS | DISOPTYPE_AVX, 0);
+    IEMOPMEDIAOPTF3_INIT_VARS( vunpcklps);
+    return FNIEMOP_CALL_1(iemOpCommonAvxAvx2_Vx_Hx_Wx_LowSrc, IEM_SELECT_HOST_OR_FALLBACK(fAvx2, &s_Host, &s_Fallback));
+}
+
+
 /** Opcode VEX.66.0F 0x14 - vunpcklpd Vx,Hx,Wx */
-FNIEMOP_STUB(iemOp_vunpcklpd_Vx_Hx_Wx);
+FNIEMOP_DEF(iemOp_vunpcklpd_Vx_Hx_Wx)
+{
+    IEMOP_MNEMONIC3(VEX_RVM, VUNPCKLPD, vunpcklpd, Vx, Hx, Wx, DISOPTYPE_HARMLESS | DISOPTYPE_AVX, 0);
+    IEMOPMEDIAOPTF3_INIT_VARS( vunpcklpd);
+    return FNIEMOP_CALL_1(iemOpCommonAvxAvx2_Vx_Hx_Wx_LowSrc, IEM_SELECT_HOST_OR_FALLBACK(fAvx2, &s_Host, &s_Fallback));
+}
+
+
 /* Opcode VEX.F3.0F 0x14 - invalid */
 /* Opcode VEX.F2.0F 0x14 - invalid */
+
+
 /** Opcode VEX.0F 0x15 - vunpckhps Vx, Hx, Wx */
-FNIEMOP_STUB(iemOp_vunpckhps_Vx_Hx_Wx);
+FNIEMOP_DEF(iemOp_vunpckhps_Vx_Hx_Wx)
+{
+    IEMOP_MNEMONIC3(VEX_RVM, VUNPCKHPS, vunpckhps, Vx, Hx, Wx, DISOPTYPE_HARMLESS | DISOPTYPE_AVX, 0);
+    IEMOPMEDIAOPTF3_INIT_VARS( vunpckhps);
+    return FNIEMOP_CALL_1(iemOpCommonAvxAvx2_Vx_Hx_Wx_LowSrc, IEM_SELECT_HOST_OR_FALLBACK(fAvx2, &s_Host, &s_Fallback));
+}
+
+
 /** Opcode VEX.66.0F 0x15 - vunpckhpd Vx,Hx,Wx */
-FNIEMOP_STUB(iemOp_vunpckhpd_Vx_Hx_Wx);
+FNIEMOP_DEF(iemOp_vunpckhpd_Vx_Hx_Wx)
+{
+    IEMOP_MNEMONIC3(VEX_RVM, VUNPCKHPD, vunpckhpd, Vx, Hx, Wx, DISOPTYPE_HARMLESS | DISOPTYPE_AVX, 0);
+    IEMOPMEDIAOPTF3_INIT_VARS( vunpckhpd);
+    return FNIEMOP_CALL_1(iemOpCommonAvxAvx2_Vx_Hx_Wx_LowSrc, IEM_SELECT_HOST_OR_FALLBACK(fAvx2, &s_Host, &s_Fallback));
+}
+
+
 /* Opcode VEX.F3.0F 0x15 - invalid */
 /* Opcode VEX.F2.0F 0x15 - invalid */
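All four VEX handlers dispatch through iemOpCommonAvxAvx2_Vx_Hx_Wx_LowSrc with a host/fallback implementation pair. As the vunpcklps u256 fallback earlier in this changeset shows (it pulls elements 0/1 and 4/5 from each source), the 256-bit forms repeat the unpack independently in each 128-bit lane rather than across the whole register. A standalone sketch of that per-lane behaviour (illustrative names; here dst must not alias the sources, whereas the IEM fallback copies its sources up front precisely so the destination register may overlap them):

#include <stdint.h>
#include <stdio.h>

/* VUNPCKLPS ymm sketch: the low unpack runs separately in each 128-bit lane. */
static void vunpcklps256_ref(uint32_t dst[8], const uint32_t s1[8], const uint32_t s2[8])
{
    for (int lane = 0; lane < 8; lane += 4)
    {
        dst[lane + 0] = s1[lane + 0];
        dst[lane + 1] = s2[lane + 0];
        dst[lane + 2] = s1[lane + 1];
        dst[lane + 3] = s2[lane + 1];
    }
}

int main(void)
{
    uint32_t a[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
    uint32_t b[8] = { 8, 9, 10, 11, 12, 13, 14, 15 };
    uint32_t r[8];
    vunpcklps256_ref(r, a, b);
    for (int i = 0; i < 8; i++)
        printf("%u ", r[i]);   /* prints: 0 8 1 9 4 12 5 13 */
    printf("\n");
    return 0;
}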
trunk/src/VBox/VMM/include/IEMInternal.h (r96104 → r96109)

 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psadbw_u128;
 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuldq_u128, iemAImpl_pmuldq_u128_fallback;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpcklps_u128, iemAImpl_unpcklpd_u128;
+FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpckhps_u128, iemAImpl_unpckhpd_u128;

 FNIEMAIMPLMEDIAF3U128 iemAImpl_vpshufb_u128, iemAImpl_vpshufb_u128_fallback;
…
                          iemAImpl_vpunpcklwd_u128, iemAImpl_vpunpcklwd_u128_fallback,
                          iemAImpl_vpunpckldq_u128, iemAImpl_vpunpckldq_u128_fallback,
-                         iemAImpl_vpunpcklqdq_u128, iemAImpl_vpunpcklqdq_u128_fallback;
+                         iemAImpl_vpunpcklqdq_u128, iemAImpl_vpunpcklqdq_u128_fallback,
+                         iemAImpl_vunpcklps_u128, iemAImpl_vunpcklps_u128_fallback,
+                         iemAImpl_vunpcklpd_u128, iemAImpl_vunpcklpd_u128_fallback,
+                         iemAImpl_vunpckhps_u128, iemAImpl_vunpckhps_u128_fallback,
+                         iemAImpl_vunpckhpd_u128, iemAImpl_vunpckhpd_u128_fallback;
+
 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpcklbw_u256, iemAImpl_vpunpcklbw_u256_fallback,
                          iemAImpl_vpunpcklwd_u256, iemAImpl_vpunpcklwd_u256_fallback,
                          iemAImpl_vpunpckldq_u256, iemAImpl_vpunpckldq_u256_fallback,
-                         iemAImpl_vpunpcklqdq_u256, iemAImpl_vpunpcklqdq_u256_fallback;
+                         iemAImpl_vpunpcklqdq_u256, iemAImpl_vpunpcklqdq_u256_fallback,
+                         iemAImpl_vunpcklps_u256, iemAImpl_vunpcklps_u256_fallback,
+                         iemAImpl_vunpcklpd_u256, iemAImpl_vunpcklpd_u256_fallback,
+                         iemAImpl_vunpckhps_u256, iemAImpl_vunpckhps_u256_fallback,
+                         iemAImpl_vunpckhpd_u256, iemAImpl_vunpckhpd_u256_fallback;
 /** @} */
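FNIEMAIMPLMEDIAOPTF2U128 and friends are function types, so each comma-separated name above declares one concrete implementation, and pairing every vunpck* with a _fallback twin lets the interpreter use the host's SSE/AVX instruction when available and portable C otherwise. Reduced to standard C, the shape of these declarations is roughly the following sketch (illustrative stand-ins; the real code uses the RTUINT128U/RTUINT256U unions and a calling-convention macro):

#include <stdint.h>

/* Simplified stand-in for RTUINT128U (the real union has more views). */
typedef union { uint32_t au32[4]; uint64_t au64[2]; } U128;

/* Two-operand 128-bit media op, e.g. iemAImpl_unpcklps_u128. */
typedef void FNMEDIAOPTF2U128(U128 *puDst, const U128 *puSrc);

/* Three-operand form used by the VEX variants, e.g. iemAImpl_vunpcklps_u128
 * and iemAImpl_vunpcklps_u128_fallback, which share this one type. */
typedef void FNMEDIAOPTF3U128(U128 *puDst, const U128 *puSrc1, const U128 *puSrc2);

/* Declaring an implementation through the function type, as the header does
 * (hypothetical name, for illustration only): */
FNMEDIAOPTF2U128 iemAImpl_unpcklps_u128_sketch;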
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp (r96104 → r96109)

 #define iemAImpl_psadbw_u128 NULL
 #define iemAImpl_pmuludq_u128 NULL
+#define iemAImpl_unpcklps_u128 NULL
+#define iemAImpl_unpcklpd_u128 NULL
+#define iemAImpl_unpckhps_u128 NULL
+#define iemAImpl_unpckhpd_u128 NULL

 /** @} */