Timestamp:
    Feb 13, 2017 8:13:06 AM (8 years ago)
File:
    1 edited
Legend:
    Unmodified lines carry a leading space, added lines a leading "+", removed lines a leading "-". Runs of unmodified lines elided by the viewer are marked with "…".
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
--- r65650
+++ r65748
 
 /**
- * Common worker for SSE2 and MMX instructions on the forms:
- *      pxxxx xmm1, xmm2/mem128
+ * Common worker for MMX instructions on the form:
  *      pxxxx mm1, mm2/mem64
  *
  * The 2nd operand is the second half of a register, which in the memory case
- * means a 64-bit memory access for MMX, and for MMX a 128-bit aligned access
+ * means a 64-bit memory access for MMX, and for SSE a 128-bit aligned access
  * where it may read the full 128 bits or only the upper 64 bits.
  *
  * Exceptions type 4.
  */
-FNIEMOP_DEF_1(iemOpCommonMmxSse_HighHigh_To_Full, PCIEMOPMEDIAF1H1, pImpl)
+FNIEMOP_DEF_1(iemOpCommonMmx_HighHigh_To_Full, PCIEMOPMEDIAF1H1, pImpl)
 {
     uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
-    switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
-    {
-        case IEM_OP_PRF_SIZE_OP: /* SSE */
-            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
-            {
-                /*
-                 * Register, register.
-                 */
-                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-                IEM_MC_BEGIN(2, 0);
-                IEM_MC_ARG(uint128_t *,       pDst, 0);
-                IEM_MC_ARG(uint128_t const *, pSrc, 1);
-                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
-                IEM_MC_PREPARE_SSE_USAGE();
-                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
-                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
-                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
-                IEM_MC_ADVANCE_RIP();
-                IEM_MC_END();
-            }
-            else
-            {
-                /*
-                 * Register, memory.
-                 */
-                IEM_MC_BEGIN(2, 2);
-                IEM_MC_ARG(uint128_t *,                 pDst,       0);
-                IEM_MC_LOCAL(uint128_t,                 uSrc);
-                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
-                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);
-
-                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
-                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
-                IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); /* Most CPUs probably only right high qword */
-
-                IEM_MC_PREPARE_SSE_USAGE();
-                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
-                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
-
-                IEM_MC_ADVANCE_RIP();
-                IEM_MC_END();
-            }
-            return VINF_SUCCESS;
-
-        case 0: /* MMX */
-            if (!pImpl->pfnU64)
-                return IEMOP_RAISE_INVALID_OPCODE();
-            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
-            {
-                /*
-                 * Register, register.
-                 */
-                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
-                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
-                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-                IEM_MC_BEGIN(2, 0);
-                IEM_MC_ARG(uint64_t *,       pDst, 0);
-                IEM_MC_ARG(uint64_t const *, pSrc, 1);
-                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
-                IEM_MC_PREPARE_FPU_USAGE();
-                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
-                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
-                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
-                IEM_MC_ADVANCE_RIP();
-                IEM_MC_END();
-            }
-            else
-            {
-                /*
-                 * Register, memory.
-                 */
-                IEM_MC_BEGIN(2, 2);
-                IEM_MC_ARG(uint64_t *,                 pDst,       0);
-                IEM_MC_LOCAL(uint64_t,                 uSrc);
-                IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
-                IEM_MC_LOCAL(RTGCPTR,                  GCPtrEffSrc);
-
-                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
-                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
-                IEM_MC_FETCH_MEM_U64(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
-
-                IEM_MC_PREPARE_FPU_USAGE();
-                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
-                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
-
-                IEM_MC_ADVANCE_RIP();
-                IEM_MC_END();
-            }
-            return VINF_SUCCESS;
-
-        default:
-            return IEMOP_RAISE_INVALID_OPCODE();
-    }
-}
-
-
-/** Opcode 0x0f 0x68. */
-FNIEMOP_DEF(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq)
-{
-    IEMOP_MNEMONIC(punpckhbw, "punpckhbw");
-    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhbw);
-}
-
-
-/** Opcode 0x0f 0x69. */
-FNIEMOP_DEF(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq)
-{
-    IEMOP_MNEMONIC(punpckhwd, "punpckhwd");
-    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhwd);
-}
-
-
-/** Opcode 0x0f 0x6a. */
-FNIEMOP_DEF(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq)
-{
-    IEMOP_MNEMONIC(punpckhdq, "punpckhdq");
-    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhdq);
-}
+    AssertReturn(pImpl->pfnU64, IEMOP_RAISE_INVALID_OPCODE());
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /*
+         * Register, register.
+         */
+        /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
+        /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(uint64_t *,       pDst, 0);
+        IEM_MC_ARG(uint64_t const *, pSrc, 1);
+        IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+        IEM_MC_PREPARE_FPU_USAGE();
+        IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+        IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
+        IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(2, 2);
+        IEM_MC_ARG(uint64_t *,                 pDst,       0);
+        IEM_MC_LOCAL(uint64_t,                 uSrc);
+        IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR,                  GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+        IEM_MC_FETCH_MEM_U64(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_PREPARE_FPU_USAGE();
+        IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+        IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Common worker for SSE2 instructions on the form:
+ *      pxxxx xmm1, xmm2/mem128
+ *
+ * The 2nd operand is the second half of a register, which in the memory case
+ * means a 64-bit memory access for MMX, and for SSE a 128-bit aligned access
+ * where it may read the full 128 bits or only the upper 64 bits.
+ *
+ * Exceptions type 4.
+ */
+FNIEMOP_DEF_1(iemOpCommonSse_HighHigh_To_Full, PCIEMOPMEDIAF1H1, pImpl)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(uint128_t *,       pDst, 0);
+        IEM_MC_ARG(uint128_t const *, pSrc, 1);
+        IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+        IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
+        IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(2, 2);
+        IEM_MC_ARG(uint128_t *,                 pDst,       0);
+        IEM_MC_LOCAL(uint128_t,                 uSrc);
+        IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); /* Most CPUs probably only right high qword */
+
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+        IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
+/** Opcode 0x0f 0x68 - punpckhbw Pq, Qd */
+FNIEMOP_DEF(iemOp_punpckhbw_Pq_Qd)
+{
+    IEMOP_MNEMONIC(punpckhbw, "punpckhbw Pq, Qd");
+    return FNIEMOP_CALL_1(iemOpCommonMmx_HighHigh_To_Full, &g_iemAImpl_punpckhbw);
+}
+
+/** Opcode 0x66 0x0f 0x68 - vpunpckhbw Vx, Hx, Wx */
+FNIEMOP_DEF(iemOp_vpunpckhbw_Vx_Hx_Wx)
+{
+    IEMOP_MNEMONIC(vpunpckhbw, "vpunpckhbw Vx, Hx, Wx");
+    return FNIEMOP_CALL_1(iemOpCommonSse_HighHigh_To_Full, &g_iemAImpl_punpckhbw);
+}
+/*  Opcode 0xf3 0x0f 0x68 - invalid */
+
+
+/** Opcode 0x0f 0x69 - punpckhwd Pq, Qd */
+FNIEMOP_DEF(iemOp_punpckhwd_Pq_Qd)
+{
+    IEMOP_MNEMONIC(punpckhwd, "punpckhwd Pq, Qd");
+    return FNIEMOP_CALL_1(iemOpCommonMmx_HighHigh_To_Full, &g_iemAImpl_punpckhwd);
+}
+
+/** Opcode 0x66 0x0f 0x69 - vpunpckhwd Vx, Hx, Wx */
+FNIEMOP_DEF(iemOp_vpunpckhwd_Vx_Hx_Wx)
+{
+    IEMOP_MNEMONIC(vpunpckhwd, "vpunpckhwd Vx, Hx, Wx");
+    return FNIEMOP_CALL_1(iemOpCommonSse_HighHigh_To_Full, &g_iemAImpl_punpckhwd);
+
+}
+/*  Opcode 0xf3 0x0f 0x69 - invalid */
+
+
+/** Opcode 0x0f 0x6a - punpckhdq Pq, Qd */
+FNIEMOP_DEF(iemOp_punpckhdq_Pq_Qd)
+{
+    IEMOP_MNEMONIC(punpckhdq, "punpckhdq Pq, Qd");
+    return FNIEMOP_CALL_1(iemOpCommonMmx_HighHigh_To_Full, &g_iemAImpl_punpckhdq);
+}
+
+/** Opcode 0x66 0x0f 0x6a - vpunpckhdq Vx, Hx, W */
+FNIEMOP_DEF(iemOp_vpunpckhdq_Vx_Hx_W)
+{
+    IEMOP_MNEMONIC(vpunpckhdq, "vpunpckhdq Vx, Hx, W");
+    return FNIEMOP_CALL_1(iemOpCommonSse_HighHigh_To_Full, &g_iemAImpl_punpckhdq);
+}
+/*  Opcode 0xf3 0x0f 0x6a - invalid */
+
 
 /** Opcode 0x0f 0x6b. */
…
 
 
-/** Opcode 0x0f 0x6d. */
-FNIEMOP_DEF(iemOp_punpckhqdq_Vdq_Wdq)
+/*  Opcode 0x0f 0x6d - invalid */
+
+/** Opcode 0x66 0x0f 0x6d - vpunpckhqdq Vx, Hx, W */
+FNIEMOP_DEF(iemOp_vpunpckhqdq_Vx_Hx_W)
 {
     IEMOP_MNEMONIC(punpckhqdq, "punpckhqdq");
-    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhqdq);
-}
+    return FNIEMOP_CALL_1(iemOpCommonSse_HighHigh_To_Full, &g_iemAImpl_punpckhqdq);
+}
+
+/*  Opcode 0xf3 0x0f 0x6d - invalid */
+
 
 
…
     /* 0x66 */  iemOp_pcmpgtd_Pq_Qq,         iemOp_vpcmpgtd_Vx_Hx_Wx,    iemOp_InvalidNeedRM,  iemOp_InvalidNeedRM,
     /* 0x67 */  iemOp_packuswb_Pq_Qq,        iemOp_vpackuswb_Vx_Hx_W,    iemOp_InvalidNeedRM,  iemOp_InvalidNeedRM,
-    /* 0x68 */  IEMOP_X4(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq),
-    /* 0x69 */  IEMOP_X4(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq),
-    /* 0x6a */  IEMOP_X4(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq),
+    /* 0x68 */  iemOp_punpckhbw_Pq_Qd,       iemOp_vpunpckhbw_Vx_Hx_Wx,  iemOp_InvalidNeedRM,  iemOp_InvalidNeedRM,
+    /* 0x69 */  iemOp_punpckhwd_Pq_Qd,       iemOp_vpunpckhwd_Vx_Hx_Wx,  iemOp_InvalidNeedRM,  iemOp_InvalidNeedRM,
+    /* 0x6a */  iemOp_punpckhdq_Pq_Qd,       iemOp_vpunpckhdq_Vx_Hx_W,   iemOp_InvalidNeedRM,  iemOp_InvalidNeedRM,
     /* 0x6b */  IEMOP_X4(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq),
     /* 0x6c */  iemOp_InvalidNeedRM,         iemOp_vpunpcklqdq_Vx_Hx_Wx, iemOp_InvalidNeedRM,  iemOp_InvalidNeedRM,
-    /* 0x6d */  IEMOP_X4(iemOp_punpckhqdq_Vdq_Wdq),
+    /* 0x6d */  iemOp_InvalidNeedRM,         iemOp_vpunpckhqdq_Vx_Hx_W,  iemOp_InvalidNeedRM,  iemOp_InvalidNeedRM,
     /* 0x6e */  IEMOP_X4(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey),
     /* 0x6f */  IEMOP_X4(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq),
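For readers unfamiliar with the punpckhXX family being re-plumbed here: each instruction interleaves the upper half of the destination with the upper half of the source. Below is a minimal standalone C sketch of the 64-bit (MMX) punpckhbw behaviour, for illustration only; the real g_iemAImpl_punpckhbw worker referenced in the diff is supplied elsewhere in IEM and is not part of this changeset.

#include <stdint.h>

/* Illustration: punpckhbw on 64-bit operands interleaves the high four
 * bytes of each operand, low to high in the result:
 * dst[4], src[4], dst[5], src[5], dst[6], src[6], dst[7], src[7]. */
static uint64_t punpckhbw_u64(uint64_t uDst, uint64_t uSrc)
{
    uint64_t uResult = 0;
    for (unsigned i = 0; i < 4; i++)
    {
        uResult |= ((uDst >> (32 + 8 * i)) & 0xff) <<  (16 * i);      /* even result bytes from dst */
        uResult |= ((uSrc >> (32 + 8 * i)) & 0xff) << ((16 * i) + 8); /* odd result bytes from src  */
    }
    return uResult;
}

For example, with uDst = 0x8877665544332211 and uSrc = 0xffeeddccbbaa9988 the sketch yields 0xff88ee77dd66cc55.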
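The table rows at the bottom of the diff are the reason for the worker split: the two-byte opcode map carries four handler pointers per opcode, one per mandatory-prefix column (none, 0x66, 0xF3, 0xF2), and IEMOP_X4 merely repeats a single combined handler across all four columns. The following is a simplified sketch of that layout idea; the names and the trivial dispatcher are hypothetical and are not IEM's actual decoder.

typedef int FNOP(void);

static int opPunpckhbwMmx(void) { return 0; }  /* column 0, no prefix: MMX form           */
static int opPunpckhbwSse(void) { return 0; }  /* column 1, 0x66:      SSE2 form          */
static int opInvalid(void)      { return -1; } /* columns 2 and 3, 0xF3/0xF2: raise #UD   */

/* One row of the map for opcode 0x0f 0x68.  Before this changeset the row
 * held the same combined MMX+SSE handler four times (the IEMOP_X4 pattern);
 * after it, each prefix column gets its own handler. */
static FNOP * const g_apfnRow0x68[4] =
{
    opPunpckhbwMmx, opPunpckhbwSse, opInvalid, opInvalid
};

static int dispatch0x68(unsigned idxPrefix) /* 0 = none, 1 = 0x66, 2 = 0xF3, 3 = 0xF2 */
{
    return g_apfnRow0x68[idxPrefix & 3]();
}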