Changeset 66950 in vbox for trunk/src/VBox/VMM/VMMAll
Timestamp:
    May 18, 2017 2:24:43 PM (8 years ago)
Location:
    trunk/src/VBox/VMM/VMMAll
Files:
    4 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r66932 → r66950

@@ -11527 +11527 @@
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
@@ -11536 +11536 @@
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
@@ -11545 +11545 @@
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
@@ -11554 +11554 @@
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
-        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
+
+#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
+    (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
+#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
+    (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
+#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
+    (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
+#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
+    do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
+        uintptr_t const iYRegTmp = (a_iYReg); \
+        pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
+        pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
+        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
+    } while (0)
@@ -11565 +11579 @@
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
-        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
@@ -11575 +11589 @@
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
@@ -11588 +11602 @@
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
@@ -11599 +11613 @@
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
@@ -11610 +11624 @@
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
 #define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
@@ -11620 +11634 @@
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
         pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
-        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
+        IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
     } while (0)
@@ -11727 +11741 @@
 # define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
-# define IEM_MC_FETCH_MEM_U256_ALIGN_SSE(a_u256Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
 #else
 # define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
         iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
-# define IEM_MC_FETCH_MEM_U256_ALIGN_SSE(a_u256Dst, a_iSeg, a_GCPtrMem) \
+# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
         iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
 #endif
@@ -12296 +12310 @@
  * Calls a SSE assembly implementation taking two visible arguments.
  *
- * @param   a_pfnAImpl      Pointer to the assembly MMX routine.
+ * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
  * @param   a0              The first extra argument.
  * @param   a1              The second extra argument.
@@ -12309 +12323 @@
  * Calls a SSE assembly implementation taking three visible arguments.
  *
- * @param   a_pfnAImpl      Pointer to the assembly MMX routine.
+ * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
  * @param   a0              The first extra argument.
  * @param   a1              The second extra argument.
@@ -12318 +12332 @@
         IEM_MC_PREPARE_SSE_USAGE(); \
         a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
     } while (0)
+
+
+/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
+ *  IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
+#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
+    IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
+
+/**
+ * Calls a AVX assembly implementation taking two visible arguments.
+ *
+ * There is one implicit zero'th argument, a pointer to the extended state.
+ *
+ * @param   a_pfnAImpl      Pointer to the assembly AVX routine.
+ * @param   a1              The first extra argument.
+ * @param   a2              The second extra argument.
+ */
+#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
+    do { \
+        IEM_MC_PREPARE_AVX_USAGE(); \
+        a_pfnAImpl(pXState, (a1), (a2)); \
+    } while (0)
+
+/**
+ * Calls a AVX assembly implementation taking three visible arguments.
+ *
+ * There is one implicit zero'th argument, a pointer to the extended state.
+ *
+ * @param   a_pfnAImpl      Pointer to the assembly AVX routine.
+ * @param   a1              The first extra argument.
+ * @param   a2              The second extra argument.
+ * @param   a3              The third extra argument.
+ */
+#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
+    do { \
+        IEM_MC_PREPARE_AVX_USAGE(); \
+        a_pfnAImpl(pXState, (a1), (a2), (a3)); \
+    } while (0)
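Most of the IEMAll.cpp hunks above make the same one-line fix: the final IEM_MC_INT_CLEAR_ZMM_256_UP step now uses the already-cached local iYRegDstTmp instead of re-expanding the raw macro parameter a_iYRegDst, so the argument expression is evaluated exactly once per invocation. A minimal standalone C sketch of that hazard class (illustrative only; all names here are invented, this is not VirtualBox code):

    #include <stdio.h>

    static unsigned reg[4], hi[4];

    /* Bad: (a_i) is expanded twice, so an argument with side effects
     * (or an expensive expression) is evaluated twice. */
    #define CLEAR_BAD(a_i) \
        do { reg[(a_i)] = 0; hi[(a_i)] = 0; } while (0)

    /* Good: cache the argument in a local, evaluate it once. */
    #define CLEAR_GOOD(a_i) \
        do { unsigned const iTmp = (a_i); reg[iTmp] = 0; hi[iTmp] = 0; } while (0)

    int main(void)
    {
        unsigned i = 0;
        CLEAR_GOOD(i++);        /* clears reg[0] and hi[0]; i == 1 */
        CLEAR_BAD(i++);         /* clears reg[1] but hi[2]; i == 3 */
        printf("i = %u\n", i);  /* prints "i = 3", not "i = 2" */
        return 0;
    }

In IEM the register index is usually a side-effect-free expression, but routing it once through the cached local is both cheaper and immune to this class of bug.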
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp
r66789 → r66950

@@ -1382 +1382 @@
 
 
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc))
+{
+    pXState->x87.aXMM[iYRegDst].au32[0] = pXState->x87.aXMM[iYRegSrc].au32[0];
+    pXState->x87.aXMM[iYRegDst].au32[1] = pXState->x87.aXMM[iYRegSrc].au32[0];
+    pXState->x87.aXMM[iYRegDst].au32[2] = pXState->x87.aXMM[iYRegSrc].au32[2];
+    pXState->x87.aXMM[iYRegDst].au32[3] = pXState->x87.aXMM[iYRegSrc].au32[2];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[0] = pXState->u.YmmHi.aYmmHi[iYRegSrc].au32[0];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[1] = pXState->u.YmmHi.aYmmHi[iYRegSrc].au32[0];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[2] = pXState->u.YmmHi.aYmmHi[iYRegSrc].au32[2];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[3] = pXState->u.YmmHi.aYmmHi[iYRegSrc].au32[2];
+}
+
+
+IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc))
+{
+    pXState->x87.aXMM[iYRegDst].au32[0] = pSrc->au32[0];
+    pXState->x87.aXMM[iYRegDst].au32[1] = pSrc->au32[0];
+    pXState->x87.aXMM[iYRegDst].au32[2] = pSrc->au32[2];
+    pXState->x87.aXMM[iYRegDst].au32[3] = pSrc->au32[2];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[0] = pSrc->au32[4];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[1] = pSrc->au32[4];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[2] = pSrc->au32[6];
+    pXState->u.YmmHi.aYmmHi[iYRegDst].au32[3] = pSrc->au32[6];
+}
+
+
 IEM_DECL_IMPL_DEF(void, iemAImpl_movshdup,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc))
 {
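For reference: vmovsldup duplicates each even-indexed 32-bit lane of the source into the odd lane above it, which is exactly the assignment pattern of the two new helpers. A self-contained sketch of the shuffle on a plain array standing in for a 256-bit register (illustrative only, not IEM code):

    #include <stdint.h>
    #include <stdio.h>

    /* Duplicate even dwords into the odd slots, as vmovsldup does. */
    static void movsldup256(uint32_t aDst[8], const uint32_t aSrc[8])
    {
        for (unsigned i = 0; i < 8; i += 2)
        {
            aDst[i]     = aSrc[i];   /* even lane is copied ... */
            aDst[i + 1] = aSrc[i];   /* ... and duplicated into the odd lane */
        }
    }

    int main(void)
    {
        /* Little-endian dword view of the @optest input
         * 0xbbbbbbbb00000004cccccccc00000003dddddddd00000002eeeeeeee00000001. */
        const uint32_t aSrc[8] = { 1, 0xeeeeeeee, 2, 0xdddddddd, 3, 0xcccccccc, 4, 0xbbbbbbbb };
        uint32_t aDst[8];
        movsldup256(aDst, aSrc);
        for (unsigned i = 0; i < 8; i++)
            printf("%08x%c", aDst[7 - i], i == 7 ? '\n' : ' ');
        /* Prints, high dword first:
         * 00000004 00000004 00000003 00000003 00000002 00000002 00000001 00000001
         * matching the expected op1 value in the test annotation below. */
        return 0;
    }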
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py
r66937 → r66950

@@ -228 +228 @@
     'Wq':           ( 'IDX_UseModRM', 'rm',  '%Wq',  'Wq', ),
     'WqZxReg_WO':   ( 'IDX_UseModRM', 'rm',  '%Wq',  'Wq', ),
+    'Wx':           ( 'IDX_UseModRM', 'rm',  '%Wx',  'Wx', ),
 
     # ModR/M.rm - register only.
@@ -271 +272 @@
     'VqHi_WO':      ( 'IDX_UseModRM', 'reg', '%Vdq', 'VdqHi', ),
     'VqZx_WO':      ( 'IDX_UseModRM', 'reg', '%Vq',  'VqZx', ),
+    'Vx_WO':        ( 'IDX_UseModRM', 'reg', '%Vx',  'Vx', ),
 
     # VEX.vvvv
@@ -1145 +1147 @@
         'o64':  'size_o64',
     },
+    # VEX.L value.
+    'vex.l': {
+        '0':    'vexl_0',
+        '1':    'vexl_1',
+    },
     # Execution ring.
     'ring': {
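These decoder-table additions back the new operand forms (Wx, Vx_WO) and the vex.l test qualifier used by the rewritten vmovsldup specification in IEMAllInstructionsVexMap1.cpp.h below, e.g.:

    @optest      vex.l==0 / op1=-1 op2=0xdddddddd00000002eeeeeeee00000001
                 -> op1=0x00000002000000020000000100000001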
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsVexMap1.cpp.h
r66937 → r66950

@@ -740 +740 @@
 
 
-//FNIEMOP_DEF(iemOp_vmovlpd_Vq_Hq_Mq)
-//{
-//    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
-//    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
-//    {
-//        IEMOP_MNEMONIC2(RM_MEM, VMOVLPD, vmovlpd, Vq, Mq, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
-//
-//        IEM_MC_BEGIN(0, 2);
-//        IEM_MC_LOCAL(uint64_t, uSrc);
-//        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
-//
-//        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
-//        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-//        IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
-//        IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
-//
-//        IEM_MC_FETCH_MEM_U64(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
-//        IEM_MC_STORE_XREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, uSrc);
-//
-//        IEM_MC_ADVANCE_RIP();
-//        IEM_MC_END();
-//        return VINF_SUCCESS;
-//    }
-//
-//    /**
-//     * @ opdone
-//     * @ opmnemonic ud660f12m3
-//     * @ opcode    0x12
-//     * @ opcodesub 11 mr/reg
-//     * @ oppfx     0x66
-//     * @ opunused  immediate
-//     * @ opcpuid   sse
-//     * @ optest    ->
-//     */
-//    return IEMOP_RAISE_INVALID_OPCODE();
-//}
-
-
 /**
- * @ opcode    0x12
- * @ oppfx     0xf3
- * @ opcpuid   sse3
- * @ opgroup   og_sse3_pcksclr_datamove
- * @ opxcpttype 4
- * @ optest    op1=-1 op2=0xdddddddd00000002eeeeeeee00000001 ->
- *             op1=0x00000002000000020000000100000001
+ * @opcode      0x12
+ * @oppfx       0xf3
+ * @opcpuid     avx
+ * @opgroup     og_avx_pcksclr_datamove
+ * @opxcpttype  4
+ * @optest      vex.l==0 / op1=-1 op2=0xdddddddd00000002eeeeeeee00000001
+ *              -> op1=0x00000002000000020000000100000001
+ * @optest      vex.l==1 /
+ *              op2=0xbbbbbbbb00000004cccccccc00000003dddddddd00000002eeeeeeee00000001
+ *              -> op1=0x0000000400000004000000030000000300000002000000020000000100000001
+ * @oponly
  */
-FNIEMOP_STUB(iemOp_vmovsldup_Vx_Wx);
-//FNIEMOP_DEF(iemOp_vmovsldup_Vx_Wx)
-//{
-//    IEMOP_MNEMONIC2(RM, VMOVSLDUP, vmovsldup, Vdq, Wdq, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
-//    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
-//    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
-//    {
-//        /*
-//         * Register, register.
-//         */
-//        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-//        IEM_MC_BEGIN(2, 0);
-//        IEM_MC_ARG(PRTUINT128U, puDst, 0);
-//        IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
-//
-//        IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT();
-//        IEM_MC_PREPARE_SSE_USAGE();
-//
-//        IEM_MC_REF_XREG_U128_CONST(puSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
-//        IEM_MC_REF_XREG_U128(puDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
-//        IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_movsldup, puDst, puSrc);
-//
-//        IEM_MC_ADVANCE_RIP();
-//        IEM_MC_END();
-//    }
-//    else
-//    {
-//        /*
-//         * Register, memory.
-//         */
-//        IEM_MC_BEGIN(2, 2);
-//        IEM_MC_LOCAL(RTUINT128U, uSrc);
-//        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
-//        IEM_MC_ARG(PRTUINT128U, puDst, 0);
-//        IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
-//
-//        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
-//        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-//        IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT();
-//        IEM_MC_PREPARE_SSE_USAGE();
-//
-//        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
-//        IEM_MC_REF_XREG_U128(puDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
-//        IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_movsldup, puDst, puSrc);
-//
-//        IEM_MC_ADVANCE_RIP();
-//        IEM_MC_END();
-//    }
-//    return VINF_SUCCESS;
-//}
+FNIEMOP_DEF(iemOp_vmovsldup_Vx_Wx)
+{
+    IEMOP_MNEMONIC2(VEX_RM, VMOVSLDUP, vmovsldup, Vx_WO, Wx, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
+    Assert(pVCpu->iem.s.uVexLength <= 1);
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV();
+        if (pVCpu->iem.s.uVexLength == 0)
+        {
+            IEM_MC_BEGIN(2, 0);
+            IEM_MC_ARG(PRTUINT128U, puDst, 0);
+            IEM_MC_ARG(PCRTUINT128U, puSrc, 1);
+
+            IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+            IEM_MC_PREPARE_AVX_USAGE();
+
+            IEM_MC_REF_XREG_U128_CONST(puSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
+            IEM_MC_REF_XREG_U128(puDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_movsldup, puDst, puSrc);
+            IEM_MC_CLEAR_YREG_128_UP(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+
+            IEM_MC_ADVANCE_RIP();
+            IEM_MC_END();
+        }
+        else
+        {
+            IEM_MC_BEGIN(3, 0);
+            IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
+            IEM_MC_ARG_CONST(uint8_t, iYRegDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, 1);
+            IEM_MC_ARG_CONST(uint8_t, iYRegSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 2);
+
+            IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+            IEM_MC_PREPARE_AVX_USAGE();
+            IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vmovsldup_256_rr, iYRegDst, iYRegSrc);
+
+            IEM_MC_ADVANCE_RIP();
+            IEM_MC_END();
+        }
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        if (pVCpu->iem.s.uVexLength == 0)
+        {
+            IEM_MC_BEGIN(2, 2);
+            IEM_MC_LOCAL(RTUINT128U, uSrc);
+            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+            IEM_MC_ARG(PRTUINT128U, puDst, 0);
+            IEM_MC_ARG_LOCAL_REF(PCRTUINT128U, puSrc, uSrc, 1);
+
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+            IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV();
+            IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+            IEM_MC_PREPARE_AVX_USAGE();
+
+            IEM_MC_FETCH_MEM_U128(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+            IEM_MC_REF_XREG_U128(puDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+            IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_movsldup, puDst, puSrc);
+            IEM_MC_CLEAR_YREG_128_UP(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+
+            IEM_MC_ADVANCE_RIP();
+            IEM_MC_END();
+        }
+        else
+        {
+            IEM_MC_BEGIN(3, 2);
+            IEM_MC_LOCAL(RTUINT256U, uSrc);
+            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
+            IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
+            IEM_MC_ARG_CONST(uint8_t, iYRegDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, 1);
+            IEM_MC_ARG_LOCAL_REF(PCRTUINT256U, puSrc, uSrc, 2);
+
+            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+            IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV();
+            IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
+            IEM_MC_PREPARE_AVX_USAGE();
+
+            IEM_MC_FETCH_MEM_U256(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+            IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vmovsldup_256_rm, iYRegDst, puSrc);
+
+            IEM_MC_ADVANCE_RIP();
+            IEM_MC_END();
+        }
+    }
+    return VINF_SUCCESS;
+}
 
 
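One detail worth noting in the two VEX.128 paths above: they reuse the legacy SSE helper iemAImpl_movsldup and then call the new IEM_MC_CLEAR_YREG_128_UP, because a VEX-encoded 128-bit operation zeroes bits 255:128 of the destination YMM register, whereas the legacy SSE encoding leaves them untouched. A small standalone C model of that difference (illustrative only; YMMREG and both functions are invented here, not IEM code):

    #include <stdint.h>
    #include <string.h>
    #include <assert.h>

    typedef struct { uint64_t au64[4]; } YMMREG;   /* stand-in for a 256-bit YMM register */

    /* Legacy SSE encoding: writes the low 128 bits, preserves the high half. */
    static void sseOp128(YMMREG *pDst, const YMMREG *pSrc)
    {
        memcpy(pDst->au64, pSrc->au64, 16);
    }

    /* VEX.128 encoding: writes the low 128 bits, zeroes the high half
     * (the IEM_MC_CLEAR_YREG_128_UP step). */
    static void vexOp128(YMMREG *pDst, const YMMREG *pSrc)
    {
        memcpy(pDst->au64, pSrc->au64, 16);
        pDst->au64[2] = 0;
        pDst->au64[3] = 0;
    }

    int main(void)
    {
        YMMREG Src = {{ 1, 2, 3, 4 }};
        YMMREG Dst = {{ ~0ull, ~0ull, ~0ull, ~0ull }};
        sseOp128(&Dst, &Src);
        assert(Dst.au64[0] == 1 && Dst.au64[2] == ~0ull);   /* high half preserved */
        vexOp128(&Dst, &Src);
        assert(Dst.au64[2] == 0 && Dst.au64[3] == 0);       /* high half zeroed */
        return 0;
    }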