Changeset 47385 in vbox for trunk/src/VBox/VMM
- Timestamp: Jul 25, 2013 11:06:30 AM
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r47383 → r47385)

    iemAImpl_shrd_u64,
};


/** Function table for the PUNPCKLBW instruction. */
static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw  = { iemAImpl_punpcklbw_u64,  iemAImpl_punpcklbw_u128 };
/** Function table for the PUNPCKLWD instruction. */
static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd  = { iemAImpl_punpcklwd_u64,  iemAImpl_punpcklwd_u128 };
/** Function table for the PUNPCKLDQ instruction. */
static const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq  = { iemAImpl_punpckldq_u64,  iemAImpl_punpckldq_u128 };
/** Function table for the PUNPCKLQDQ instruction (no MMX form). */
static const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };

/** Function table for the PUNPCKHBW instruction. */
static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw  = { iemAImpl_punpckhbw_u64,  iemAImpl_punpckhbw_u128 };
/** Function table for the PUNPCKHWD instruction. */
static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd  = { iemAImpl_punpckhwd_u64,  iemAImpl_punpckhwd_u128 };
/** Function table for the PUNPCKHDQ instruction. */
static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq  = { iemAImpl_punpckhdq_u64,  iemAImpl_punpckhdq_u128 };
/** Function table for the PUNPCKHQDQ instruction (no MMX form). */
static const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };

/** Function table for the PXOR instruction. */
static const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };

…

//# error "Implement me"
#endif
}


/**
 * Hook for preparing to use the host FPU for SSE.
 *
 * This is necessary in ring-0 and raw-mode context.
 *
 * @param   pIemCpu     The IEM per CPU data.
 */
DECLINLINE(void) iemFpuPrepareUsageSse(PIEMCPU pIemCpu)
{
    iemFpuPrepareUsage(pIemCpu);
}

…

/**
 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
 *
 * @returns Strict VBox status code.
 * @param   pIemCpu     The IEM per CPU data.
 * @param   pu64Dst     Where to return the qword.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
static VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* The lazy approach for now... */
    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
    if (RT_UNLIKELY(GCPtrMem & 15))
        return iemRaiseGeneralProtectionFault0(pIemCpu);

    uint64_t const *pu64Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    if (rc == VINF_SUCCESS)
    {
        *pu64Dst = *pu64Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
    }
    return rc;
}


/**
 * Fetches a data tword.
 *
 …
    return rc;
}


/**
 * Fetches a data dqword (double qword), generally SSE related.
 *
 * @returns Strict VBox status code.
 * @param   pIemCpu     The IEM per CPU data.
 * @param   pu128Dst    Where to return the dqword.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
static VBOXSTRICTRC iemMemFetchDataU128(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* The lazy approach for now... */
    uint128_t const *pu128Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    if (rc == VINF_SUCCESS)
    {
        *pu128Dst = *pu128Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
    }
    return rc;
}


/**
 * Fetches a data dqword (double qword) at an aligned address, generally SSE
 * related.
 *
 * Raises \#GP(0) if not aligned.
 *
 * @returns Strict VBox status code.
 * @param   pIemCpu     The IEM per CPU data.
 * @param   pu128Dst    Where to return the dqword.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 */
static VBOXSTRICTRC iemMemFetchDataU128Aligned(PIEMCPU pIemCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    /* The lazy approach for now... */
    /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
    if (RT_UNLIKELY(GCPtrMem & 15))
        return iemRaiseGeneralProtectionFault0(pIemCpu);

    uint128_t const *pu128Src;
    VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    if (rc == VINF_SUCCESS)
    {
        *pu128Dst = *pu128Src;
        rc = iemMemCommitAndUnmap(pIemCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
    }
    return rc;
}

…

#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
    do { pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg)         \
        (a_pu64Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg)   \
        (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)
#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg)   \
        (a_pu32Dst) = ((uint32_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aRegs[(a_iMReg)].mmx)

#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
 …
        pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[1] = 0; \
    } while (0)
#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg)       \
        (a_pu128Dst) = (&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
        (a_pu128Dst) = ((uint128_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].xmm)
#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg)   \
        (a_pu64Dst) = ((uint64_t const *)&pIemCpu->CTX_SUFF(pCtx)->fpu.aXMM[(a_iXReg)].au64[0])

…

#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))

#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
 …
#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pIemCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))

#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
#define IEM_MC_FETCH_MEM_U128_ALIGN(a_u128Dst, a_iSeg, a_GCPtrMem) \
    IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128Aligned(pIemCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))

…

#define IEM_MC_USED_FPU() \
    CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM)

/**
 * Calls a MMX assembly implementation taking two visible arguments.
 *
 * @param   a_pfnAImpl      Pointer to the assembly MMX routine.
 * @param   a0              The first extra argument.
 * @param   a1              The second extra argument.
 */
#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
    do { \
        iemFpuPrepareUsage(pIemCpu); \
        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
    } while (0)


/**
 * Calls a SSE assembly implementation taking two visible arguments.
 *
 * @param   a_pfnAImpl      Pointer to the assembly SSE routine.
 * @param   a0              The first extra argument.
 * @param   a1              The second extra argument.
 */
#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
    do { \
        iemFpuPrepareUsageSse(pIemCpu); \
        a_pfnAImpl(&pIemCpu->CTX_SUFF(pCtx)->fpu, (a0), (a1)); \
    } while (0)

/** @note Not for IOPL or IF testing. */
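The new fetch helpers above all follow the same map/copy/unmap pattern, with the aligned variants rejecting any address that is not on a 16-byte boundary before touching guest memory. The following self-contained C sketch models only that control flow; the guest-RAM array, guestMemMap() and the integer status codes are hypothetical stand-ins for illustration, not part of the changeset or the IEM API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical 64-byte guest RAM and a trivial mapper standing in for
   iemMemMap(); both exist only for this illustration. */
static uint8_t g_abGuestRam[64];

static int guestMemMap(uint64_t GCPtr, size_t cb, const void **ppvSrc)
{
    if (GCPtr > sizeof(g_abGuestRam) || cb > sizeof(g_abGuestRam) - GCPtr)
        return -2;                              /* out of range */
    *ppvSrc = &g_abGuestRam[GCPtr];
    return 0;
}

/* Mirrors the shape of iemMemFetchDataU128Aligned: a misaligned address
   "raises #GP(0)" (modelled here as -1) before any memory is accessed;
   otherwise map, copy the 16 bytes out, done. */
static int fetchU128Aligned(uint64_t GCPtr, uint8_t abDst[16])
{
    if (GCPtr & 15)                             /* not 16-byte aligned */
        return -1;
    const void *pvSrc;
    int rc = guestMemMap(GCPtr, 16, &pvSrc);
    if (rc == 0)
        memcpy(abDst, pvSrc, 16);               /* copy while mapped */
    return rc;
}

int main(void)
{
    uint8_t abVal[16];
    printf("fetch at 0x10: rc=%d\n", fetchU128Aligned(0x10, abVal));  /* 0: aligned  */
    printf("fetch at 0x18: rc=%d\n", fetchU128Aligned(0x18, abVal));  /* -1: "#GP(0)" */
    return 0;
}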
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm (r47319 → r47385)

IEMIMPL_FPU_R80_R80 fsincos



;---------------------- SSE and MMX Operations ----------------------

;; @todo what do we need to do for MMX?
%macro IEMIMPL_MMX_PROLOGUE 0
%endmacro
%macro IEMIMPL_MMX_EPILOGUE 0
%endmacro

;; @todo what do we need to do for SSE?
%macro IEMIMPL_SSE_PROLOGUE 0
%endmacro
%macro IEMIMPL_SSE_EPILOGUE 0
%endmacro


;;
; Media instruction working on two full sized registers.
;
; @param    1       The instruction
;
; @param    A0      FPU context (fxsave).
; @param    A1      Pointer to the first media register size operand (input/output).
; @param    A2      Pointer to the second media register size operand (input).
;
%macro IEMIMPL_MEDIA_F2 1
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12
        PROLOGUE_3_ARGS
        IEMIMPL_MMX_PROLOGUE

        movq    mm0, [A1]
        movq    mm1, [A2]
        %1      mm0, mm1
        movq    [A1], mm0

        IEMIMPL_MMX_EPILOGUE
        EPILOGUE_3_ARGS
ENDPROC iemAImpl_ %+ %1 %+ _u64

BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u128, 12
        PROLOGUE_3_ARGS
        IEMIMPL_SSE_PROLOGUE

        movdqu  xmm0, [A1]
        movdqu  xmm1, [A2]
        %1      xmm0, xmm1
        movdqu  [A1], xmm0

        IEMIMPL_SSE_EPILOGUE
        EPILOGUE_3_ARGS
ENDPROC iemAImpl_ %+ %1 %+ _u128
%endmacro

IEMIMPL_MEDIA_F2 pxor


;;
; Media instruction working on one full sized and one half sized register (lower half).
;
; @param    1       The instruction
; @param    2       1 if MMX is included, 0 if not.
;
; @param    A0      FPU context (fxsave).
; @param    A1      Pointer to the first full sized media register operand (input/output).
; @param    A2      Pointer to the second half sized media register operand (input).
;
%macro IEMIMPL_MEDIA_F1L1 2
%if %2 != 0
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12
        PROLOGUE_3_ARGS
        IEMIMPL_MMX_PROLOGUE

        movq    mm0, [A1]
        movd    mm1, [A2]
        %1      mm0, mm1
        movq    [A1], mm0

        IEMIMPL_MMX_EPILOGUE
        EPILOGUE_3_ARGS
ENDPROC iemAImpl_ %+ %1 %+ _u64
%endif

BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u128, 12
        PROLOGUE_3_ARGS
        IEMIMPL_SSE_PROLOGUE

        movdqu  xmm0, [A1]
        movq    xmm1, [A2]
        %1      xmm0, xmm1
        movdqu  [A1], xmm0

        IEMIMPL_SSE_EPILOGUE
        EPILOGUE_3_ARGS
ENDPROC iemAImpl_ %+ %1 %+ _u128
%endmacro

IEMIMPL_MEDIA_F1L1 punpcklbw,  1
IEMIMPL_MEDIA_F1L1 punpcklwd,  1
IEMIMPL_MEDIA_F1L1 punpckldq,  1
IEMIMPL_MEDIA_F1L1 punpcklqdq, 0


;;
; Media instruction working on one full sized and one half sized register (high half).
;
; @param    1       The instruction
; @param    2       1 if MMX is included, 0 if not.
;
; @param    A0      FPU context (fxsave).
; @param    A1      Pointer to the first full sized media register operand (input/output).
; @param    A2      Pointer to the second full sized media register operand, where we
;                   will only use the upper half (input).
;
%macro IEMIMPL_MEDIA_F1H1 2
%if %2 != 0
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12
        PROLOGUE_3_ARGS
        IEMIMPL_MMX_PROLOGUE

        movq    mm0, [A1]
        movq    mm1, [A2]
        %1      mm0, mm1
        movq    [A1], mm0

        IEMIMPL_MMX_EPILOGUE
        EPILOGUE_3_ARGS
ENDPROC iemAImpl_ %+ %1 %+ _u64
%endif

BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u128, 12
        PROLOGUE_3_ARGS
        IEMIMPL_SSE_PROLOGUE

        movdqu  xmm0, [A1]
        movdqu  xmm1, [A2]
        %1      xmm0, xmm1
        movdqu  [A1], xmm0

        IEMIMPL_SSE_EPILOGUE
        EPILOGUE_3_ARGS
ENDPROC iemAImpl_ %+ %1 %+ _u128
%endmacro

IEMIMPL_MEDIA_F1H1 punpckhbw,  1
IEMIMPL_MEDIA_F1H1 punpckhwd,  1
IEMIMPL_MEDIA_F1H1 punpckhdq,  1
IEMIMPL_MEDIA_F1H1 punpckhqdq, 0
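The NASM macros above only load the operands, execute the wrapped host instruction, and store the result, so the actual semantics come from the CPU itself. As a worked example of what the low-half unpack computes, here is a portable C reference model of punpcklbw on 64-bit (MMX-sized) operands; this is an illustration only, IEM dispatches to the assembly routines above, not to a function like this.

#include <stdint.h>
#include <stdio.h>

/* Reference model of MMX punpcklbw: interleave the low four bytes of dst
   and src, with the dst bytes landing in the even result positions. */
static uint64_t punpcklbw_u64(uint64_t uDst, uint64_t uSrc)
{
    uint64_t uResult = 0;
    for (unsigned i = 0; i < 4; i++)
    {
        uint64_t bDst = (uDst >> (i * 8)) & 0xff;
        uint64_t bSrc = (uSrc >> (i * 8)) & 0xff;
        uResult |= bDst << (i * 16);        /* even byte: from dst */
        uResult |= bSrc << (i * 16 + 8);    /* odd byte:  from src */
    }
    return uResult;
}

int main(void)
{
    /* 0x0807060504030201 unpacked with 0x1817161514131211 yields
       0x1404130312021101: the low bytes of the two sources interleaved. */
    printf("%#018llx\n",
           (unsigned long long)punpcklbw_u64(0x0807060504030201ULL,
                                             0x1817161514131211ULL));
    return 0;
}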
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h (r47382 → r47385)

/** Opcode 0x0f 0x5f. */
FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);


/**
 * Common worker for SSE2 and MMX instructions on the forms:
 *      pxxxx xmm1, xmm2/mem128
 *      pxxxx mm1, mm2/mem32
 *
 * The 2nd operand is the first half of a register, which in the memory case
 * means a 32-bit memory access for MMX and a 128-bit aligned 64-bit or 128-bit
 * memory access for SSE.
 *
 * Exceptions type 4.
 */
FNIEMOP_DEF_1(iemOpCommonMmxSse_LowLow_To_Full, PCIEMOPMEDIAF1L1, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *,         pDst, 0);
                IEM_MC_ARG(uint64_t const *,    pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U64_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *,                 pDst,       0);
                IEM_MC_LOCAL(uint64_t,                  uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *,  pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64_ALIGN_U128(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if (!pImpl->pfnU64)
                return IEMOP_RAISE_INVALID_OPCODE();
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *,          pDst, 0);
                IEM_MC_ARG(uint32_t const *,    pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U32_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *,                  pDst,       0);
                IEM_MC_LOCAL(uint32_t,                  uSrc);
                IEM_MC_ARG_LOCAL_REF(uint32_t const *,  pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U32(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}


/** Opcode 0x0f 0x60. */
FNIEMOP_DEF(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklbw");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklbw);
}


/** Opcode 0x0f 0x61. */
FNIEMOP_DEF(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklwd");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklwd);
}


/** Opcode 0x0f 0x62. */
FNIEMOP_DEF(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckldq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpckldq);
}


/** Opcode 0x0f 0x63. */
FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
…
/** Opcode 0x0f 0x67. */
FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);


/**
 * Common worker for SSE2 and MMX instructions on the forms:
 *      pxxxx xmm1, xmm2/mem128
 *      pxxxx mm1, mm2/mem64
 *
 * The 2nd operand is the second half of a register, which in the memory case
 * means a 64-bit memory access for MMX, and for SSE a 128-bit aligned access
 * where it may read the full 128 bits or only the upper 64 bits.
 *
 * Exceptions type 4.
 */
FNIEMOP_DEF_1(iemOpCommonMmxSse_HighHigh_To_Full, PCIEMOPMEDIAF1H1, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *,         pDst, 0);
                IEM_MC_ARG(uint128_t const *,   pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *,                 pDst,       0);
                IEM_MC_LOCAL(uint128_t,                 uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U128_ALIGN(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); /* Most CPUs probably only read the high qword. */

                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if (!pImpl->pfnU64)
                return IEMOP_RAISE_INVALID_OPCODE();
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *,          pDst, 0);
                IEM_MC_ARG(uint64_t const *,    pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *,                  pDst,       0);
                IEM_MC_LOCAL(uint64_t,                  uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *,  pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}


/** Opcode 0x0f 0x68. */
FNIEMOP_DEF(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhbw");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhbw);
}


/** Opcode 0x0f 0x69. */
FNIEMOP_DEF(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhwd");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhwd);
}


/** Opcode 0x0f 0x6a. */
FNIEMOP_DEF(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhdq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhdq);
}


/** Opcode 0x0f 0x6b. */
FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);


/** Opcode 0x0f 0x6c. */
FNIEMOP_DEF(iemOp_punpcklqdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklqdq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklqdq);
}


/** Opcode 0x0f 0x6d. */
FNIEMOP_DEF(iemOp_punpckhqdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhqdq");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhqdq);
}

…

{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case 0: /* MMX */
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
…
            else
                IEM_MC_FETCH_GREG_U32_ZX_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
        }
        IEM_MC_ADVANCE_RIP();
…
            IEM_MC_LOCAL(uint64_t, u64Tmp);
            IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
        }
        else
        {
            IEM_MC_LOCAL(uint32_t, u32Tmp);
            IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_STORE_MREG_U32_ZX_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u32Tmp);
        }
    }
…

/** Opcode 0x0f 0xee. */
FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);


/**
 * Common worker for SSE2 and MMX instructions on the form:
 *      pxxxx [x]mmreg, [x]mmreg/mem[128|64]
 *
 * The 128-bit accesses must be aligned, i.e. exceptions type 4.
 */
FNIEMOP_DEF_1(iemOpCommonMmxSse_PqVdq_QqWdq, PCIEMOPMEDIAF2, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *,         pDst, 0);
                IEM_MC_ARG(uint128_t const *,   pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *,                 pDst,       0);
                IEM_MC_LOCAL(uint128_t,                 uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U128_ALIGN(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *,          pDst, 0);
                IEM_MC_ARG(uint64_t const *,    pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *,                  pDst,       0);
                IEM_MC_LOCAL(uint64_t,                  uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *,  pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}


/** Opcode 0x0f 0xef. */
FNIEMOP_DEF(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pxor");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_PqVdq_QqWdq, &g_iemAImpl_pxor);
}


/** Opcode 0x0f 0xf0. */
FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
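All three common workers decode the same way: the mandatory-prefix bits in fPrefixes select between the MMX form (no prefix) and the SSE form (66h, IEM_OP_PRF_SIZE_OP), and the ModR/M byte then selects register or memory operands. The following small C sketch shows the ModR/M field extraction using the same shift/mask layout as the X86_MODRM_* constants; the macro names here are local to the example.

#include <stdint.h>
#include <stdio.h>

/* ModR/M byte layout: mod[7:6] reg[5:3] rm[2:0]. */
#define MODRM_MOD(bRm)   (((bRm) >> 6) & 0x3)
#define MODRM_REG(bRm)   (((bRm) >> 3) & 0x7)
#define MODRM_RM(bRm)    ((bRm) & 0x7)

int main(void)
{
    uint8_t bRm = 0xd1;                   /* 11 010 001b */
    if (MODRM_MOD(bRm) == 3)              /* mod == 3: both operands are registers */
        printf("register,register: reg=%u rm=%u\n",
               (unsigned)MODRM_REG(bRm), (unsigned)MODRM_RM(bRm));
    else                                  /* otherwise rm (+ SIB/disp) encodes memory */
        printf("register,memory: reg=%u\n", (unsigned)MODRM_REG(bRm));
    return 0;
}

In long mode the REX bits extend these 3-bit fields to 4 bits, which is what the | pIemCpu->uRexReg and | pIemCpu->uRexB terms do in the SSE paths above; the MMX paths deliberately leave them off (see the REX testcase todos).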
trunk/src/VBox/VMM/include/IEMInternal.h (r47382 → r47385)

IEM_DECL_IMPL_DEF(void, iemAImpl_fistt_r80_to_i64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
                                                   int64_t *pi32Val, PCRTFLOAT80U pr80Val));
/** @} */


/** Temporary type representing a 256-bit vector register. */
typedef struct { uint64_t au64[4]; } IEMVMM256;
/** Temporary type pointing to a 256-bit vector register. */
typedef IEMVMM256 *PIEMVMM256;
/** Temporary type pointing to a const 256-bit vector register. */
typedef IEMVMM256 const *PCIEMVMM256;


/** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
typedef FNIEMAIMPLMEDIAF2U64   *PFNIEMAIMPLMEDIAF2U64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U128,(PCX86FXSTATE pFpuState, uint128_t *pu128Dst, uint128_t const *pu128Src));
typedef FNIEMAIMPLMEDIAF2U128  *PFNIEMAIMPLMEDIAF2U128;
FNIEMAIMPLMEDIAF2U64  iemAImpl_pxor_u64;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pxor_u128;
/** @} */

/** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf1 -> full1.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1L1U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint32_t const *pu32Src));
typedef FNIEMAIMPLMEDIAF1L1U64  *PFNIEMAIMPLMEDIAF1L1U64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1L1U128,(PCX86FXSTATE pFpuState, uint128_t *pu128Dst, uint64_t const *pu64Src));
typedef FNIEMAIMPLMEDIAF1L1U128 *PFNIEMAIMPLMEDIAF1L1U128;
FNIEMAIMPLMEDIAF1L1U64  iemAImpl_punpcklbw_u64,  iemAImpl_punpcklwd_u64,  iemAImpl_punpckldq_u64;
FNIEMAIMPLMEDIAF1L1U128 iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
/** @} */

/** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1H1U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
typedef FNIEMAIMPLMEDIAF1H1U64  *PFNIEMAIMPLMEDIAF1H1U64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1H1U128,(PCX86FXSTATE pFpuState, uint128_t *pu128Dst, uint128_t const *pu128Src));
typedef FNIEMAIMPLMEDIAF1H1U128 *PFNIEMAIMPLMEDIAF1H1U128;
FNIEMAIMPLMEDIAF1H1U64  iemAImpl_punpckhbw_u64,  iemAImpl_punpckhwd_u64,  iemAImpl_punpckhdq_u64;
FNIEMAIMPLMEDIAF1H1U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
/** @} */

…

/** Pointer to a double precision shift function table. */
typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;


/**
 * Function table for media instructions taking two full sized media registers,
 * optionally the 2nd being a memory reference (only modifying the first op.)
 */
typedef struct IEMOPMEDIAF2
{
    PFNIEMAIMPLMEDIAF2U64  pfnU64;
    PFNIEMAIMPLMEDIAF2U128 pfnU128;
} IEMOPMEDIAF2;
/** Pointer to a media operation function table for full sized ops. */
typedef IEMOPMEDIAF2 const *PCIEMOPMEDIAF2;

/**
 * Function table for media instructions taking one full sized and one lower
 * half media register.
 */
typedef struct IEMOPMEDIAF1L1
{
    PFNIEMAIMPLMEDIAF1L1U64  pfnU64;
    PFNIEMAIMPLMEDIAF1L1U128 pfnU128;
} IEMOPMEDIAF1L1;
/** Pointer to a media operation function table for lowhalf+lowhalf -> full. */
typedef IEMOPMEDIAF1L1 const *PCIEMOPMEDIAF1L1;

/**
 * Function table for media instructions taking one full sized and one high
 * half media register.
 */
typedef struct IEMOPMEDIAF1H1
{
    PFNIEMAIMPLMEDIAF1H1U64  pfnU64;
    PFNIEMAIMPLMEDIAF1H1U128 pfnU128;
} IEMOPMEDIAF1H1;
/** Pointer to a media operation function table for hihalf+hihalf -> full. */
typedef IEMOPMEDIAF1H1 const *PCIEMOPMEDIAF1H1;
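These function tables exist so that one decoder worker can serve a whole instruction family: the opcode function picks a table, the worker picks the pfnU64 or pfnU128 entry based on the operand-size prefix, and a NULL entry means the form does not exist (punpcklqdq/punpckhqdq have no MMX variant and raise #UD). A stripped-down C sketch of that dispatch pattern follows; the types and names are simplified stand-ins (no uint128_t, no FPU-state argument), not the IEM definitions above.

#include <stdint.h>
#include <stdio.h>

/* Simplified shape of an IEMOPMEDIAF2-style table: one slot per operand
   width; a NULL slot would make the decoder raise an invalid-opcode error. */
typedef void (*PFNMEDIAU64)(uint64_t *puDst, uint64_t const *puSrc);

typedef struct MEDIATABLE
{
    PFNMEDIAU64 pfnU64;   /* MMX form (the SSE pfnU128 slot is omitted here) */
} MEDIATABLE;

/* C stand-in for the assembly iemAImpl_pxor_u64. */
static void pxor_u64(uint64_t *puDst, uint64_t const *puSrc)
{
    *puDst ^= *puSrc;
}

static const MEDIATABLE g_PxorTable = { pxor_u64 };

int main(void)
{
    uint64_t uDst = 0xff00ff00ff00ff00ULL;
    uint64_t uSrc = 0x0ff00ff00ff00ff0ULL;
    if (g_PxorTable.pfnU64)               /* NULL would mean: no such form */
        g_PxorTable.pfnU64(&uDst, &uSrc);
    printf("%#018llx\n", (unsigned long long)uDst);  /* 0xf0f0f0f0f0f0f0f0 */
    return 0;
}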
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp (r47382 → r47385)

IEMOPSHIFTDBLSIZES  g_iemAImpl_shld;
IEMOPSHIFTDBLSIZES  g_iemAImpl_shrd;
IEMOPMEDIAF1L1      g_iemAImpl_punpcklbw;
IEMOPMEDIAF1L1      g_iemAImpl_punpcklwd;
IEMOPMEDIAF1L1      g_iemAImpl_punpckldq;
IEMOPMEDIAF1L1      g_iemAImpl_punpcklqdq;
IEMOPMEDIAF1H1      g_iemAImpl_punpckhbw;
IEMOPMEDIAF1H1      g_iemAImpl_punpckhwd;
IEMOPMEDIAF1H1      g_iemAImpl_punpckhdq;
IEMOPMEDIAF1H1      g_iemAImpl_punpckhqdq;
IEMOPMEDIAF2        g_iemAImpl_pxor;

…

#define IEM_MC_CLEAR_FSW_EX()                                   do { } while (0)

#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value)              do { CHK_TYPE(uint64_t, a_u64Value); } while (0)
#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value)       do { CHK_TYPE(uint32_t, a_u32Value); } while (0)
#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg)                 do { (a_pu64Dst) = (uint64_t *)((uintptr_t)0); CHK_PTYPE(uint64_t *, a_pu64Dst); } while (0)
#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg)           do { (a_pu64Dst) = (uint64_t const *)((uintptr_t)0); CHK_PTYPE(uint64_t const *, a_pu64Dst); } while (0)
#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg)           do { (a_pu32Dst) = (uint32_t const *)((uintptr_t)0); CHK_PTYPE(uint32_t const *, a_pu32Dst); } while (0)

#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value)      do { CHK_TYPE(uint64_t, a_u64Value); } while (0)
#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value)      do { CHK_TYPE(uint32_t, a_u32Value); } while (0)
#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg)               do { (a_pu128Dst) = (uint128_t *)((uintptr_t)0); CHK_PTYPE(uint128_t *, a_pu128Dst); } while (0)
#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg)         do { (a_pu128Dst) = (uint128_t const *)((uintptr_t)0); CHK_PTYPE(uint128_t const *, a_pu128Dst); } while (0)
#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg)           do { (a_pu64Dst) = (uint64_t const *)((uintptr_t)0); CHK_PTYPE(uint64_t const *, a_pu64Dst); } while (0)

#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem)        do { CHK_GCPTR(a_GCPtrMem); } while (0)
…
#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem)       do { CHK_GCPTR(a_GCPtrMem); } while (0)
#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem)              do { CHK_GCPTR(a_GCPtrMem); } while (0)
#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem)   do { CHK_GCPTR(a_GCPtrMem); } while (0)

#define IEM_MC_FETCH_MEM_U8_DISP(a_u8Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
…
#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem)      do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTFLOAT64U, a_r64Dst); } while (0)
#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem)      do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTFLOAT80U, a_r80Dst); } while (0)

#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem)        do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(uint128_t, a_u128Dst); } while (0)
#define IEM_MC_FETCH_MEM_U128_ALIGN(a_u128Dst, a_iSeg, a_GCPtrMem)  do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(uint128_t, a_u128Dst); } while (0)

#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem)    do { CHK_GCPTR(a_GCPtrMem); } while (0)
…
#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW)                do { } while (0)
#define IEM_MC_USED_FPU()                                       do { } while (0)

#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1)             do { } while (0)
#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1)             do { } while (0)

#define IEM_MC_IF_EFL_BIT_SET(a_fBit)                           if (g_fRandom) {
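tstIEMCheckMc compiles the instruction decoders against stub IEM_MC_* macros whose only job is to type-check operands at compile time; nothing is executed. A minimal sketch of the underlying trick follows; CHK_TYPE here is a simplified reimplementation of the idea, not the testcase's exact macro.

#include <stdint.h>

/* Taking the operand's address and assigning it to a pointer of the expected
   type makes the compiler flag any type mismatch; no code runs at execution
   time (the testcase builds as C++, where the mismatch is a hard error). */
#define CHK_TYPE(a_ExpectedType, a_Param) \
    do { a_ExpectedType const *pCheck = &(a_Param); (void)pCheck; } while (0)

/* Stubbed-out MC macro in the style of tstIEMCheckMc (illustrative name). */
#define IEM_MC_STORE_MREG_U64_STUB(a_iMReg, a_u64Value) \
    do { CHK_TYPE(uint64_t, a_u64Value); } while (0)

int main(void)
{
    uint64_t u64 = 42;
    IEM_MC_STORE_MREG_U64_STUB(0, u64);   /* types match: compiles cleanly */
    /* uint32_t u32 = 42;
       IEM_MC_STORE_MREG_U64_STUB(0, u32);   flagged: uint32_t* does not
                                             convert to uint64_t const*   */
    return 0;
}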