Changeset 103739 in vbox for trunk/src/VBox/VMM
- Timestamp: Mar 9, 2024 12:03:05 AM (12 months ago)
- svn:sync-xref-src-repo-rev: 162108
- Location: trunk/src/VBox/VMM
- Files: 8 edited
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
r103731 r103739 823 823 * a byte immediate. 824 824 */ 825 #define IEMOP_BODY_BINARY_AL_Ib(a_fnNormalU8) \ 825 #define IEMOP_BODY_BINARY_AL_Ib(a_InsNm, a_fNativeArchs) \ 826 IEM_MC_BEGIN(3, 3, 0, 0); \ 826 827 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); \ 827 \828 IEM_MC_BEGIN(3, 0, 0, 0); \829 828 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 830 IEM_MC_ARG(uint8_t *, pu8Dst, 0); \ 831 IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/ u8Imm, 1); \ 832 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 833 \ 834 IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX); \ 835 IEM_MC_REF_EFLAGS(pEFlags); \ 836 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU8, pu8Dst, u8Src, pEFlags); \ 837 \ 829 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 830 IEM_MC_LOCAL(uint8_t, u8Dst); \ 831 IEM_MC_FETCH_GREG_U8(u8Dst, X86_GREG_xAX); \ 832 IEM_MC_LOCAL(uint32_t, uEFlags); \ 833 IEM_MC_FETCH_EFLAGS(uEFlags); \ 834 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u8Dst, u8Imm, uEFlags, 8, 8); \ 835 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 836 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Dst); \ 837 } IEM_MC_NATIVE_ELSE() { \ 838 IEM_MC_ARG_CONST(uint8_t, u8Src,/*=*/ u8Imm, 1); \ 839 IEM_MC_ARG(uint8_t *, pu8Dst, 0); \ 840 IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX); \ 841 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 842 IEM_MC_REF_EFLAGS(pEFlags); \ 843 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u8), pu8Dst, u8Src, pEFlags); \ 844 } IEM_MC_NATIVE_ENDIF(); \ 838 845 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 839 846 IEM_MC_END() … … 843 850 * AX/EAX/RAX with a word/dword immediate. 844 851 */ 845 #define IEMOP_BODY_BINARY_rAX_Iz (a_fnNormalU16, a_fnNormalU32, a_fnNormalU64, a_fModifiesDstReg) \852 #define IEMOP_BODY_BINARY_rAX_Iz_RW(a_InsNm, a_fNativeArchs) \ 846 853 switch (pVCpu->iem.s.enmEffOpSize) \ 847 854 { \ 848 855 case IEMMODE_16BIT: \ 849 856 { \ 857 IEM_MC_BEGIN(3, 2, 0, 0); \ 850 858 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); \ 851 \852 IEM_MC_BEGIN(3, 0, 0, 0); \853 859 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 854 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \ 855 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm, 1); \ 856 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 857 \ 858 IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX); \ 859 IEM_MC_REF_EFLAGS(pEFlags); \ 860 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \ 861 \ 860 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 861 IEM_MC_LOCAL(uint16_t, u16Dst); \ 862 IEM_MC_FETCH_GREG_U16(u16Dst, X86_GREG_xAX); \ 863 IEM_MC_LOCAL(uint32_t, uEFlags); \ 864 IEM_MC_FETCH_EFLAGS(uEFlags); \ 865 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, u16Imm, uEFlags, 16, 16); \ 866 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 867 IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Dst); \ 868 } IEM_MC_NATIVE_ELSE() { \ 869 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm, 1); \ 870 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \ 871 IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX); \ 872 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 873 IEM_MC_REF_EFLAGS(pEFlags); \ 874 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u16), pu16Dst, u16Src, pEFlags); \ 875 } IEM_MC_NATIVE_ENDIF(); \ 862 876 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 863 877 IEM_MC_END(); \ … … 866 880 case IEMMODE_32BIT: \ 867 881 { \ 882 IEM_MC_BEGIN(3, 2, IEM_MC_F_MIN_386, 0); \ 868 883 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); \ 869 \870 IEM_MC_BEGIN(3, 0, IEM_MC_F_MIN_386, 0); \871 884 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 872 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \ 873 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm, 1); \ 874 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 875 \ 
876 IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX); \ 877 IEM_MC_REF_EFLAGS(pEFlags); \ 878 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \ 879 \ 880 if (a_fModifiesDstReg) \ 885 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 886 IEM_MC_LOCAL(uint32_t, u32Dst); \ 887 IEM_MC_FETCH_GREG_U32(u32Dst, X86_GREG_xAX); \ 888 IEM_MC_LOCAL(uint32_t, uEFlags); \ 889 IEM_MC_FETCH_EFLAGS(uEFlags); \ 890 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, u32Imm, uEFlags, 32, 32); \ 891 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 892 IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Dst); \ 893 } IEM_MC_NATIVE_ELSE() { \ 894 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm, 1); \ 895 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \ 896 IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX); \ 897 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 898 IEM_MC_REF_EFLAGS(pEFlags); \ 899 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u32), pu32Dst, u32Src, pEFlags); \ 881 900 IEM_MC_CLEAR_HIGH_GREG_U64(X86_GREG_xAX); \ 901 } IEM_MC_NATIVE_ENDIF(); \ 882 902 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 883 903 IEM_MC_END(); \ … … 886 906 case IEMMODE_64BIT: \ 887 907 { \ 908 IEM_MC_BEGIN(3, 2, IEM_MC_F_64BIT, 0); \ 888 909 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); \ 889 \890 IEM_MC_BEGIN(3, 0, IEM_MC_F_64BIT, 0); \891 910 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 892 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \ 893 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm, 1); \ 894 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 895 \ 896 IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX); \ 897 IEM_MC_REF_EFLAGS(pEFlags); \ 898 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \ 899 \ 911 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 912 IEM_MC_LOCAL(uint64_t, u64Dst); \ 913 IEM_MC_FETCH_GREG_U64(u64Dst, X86_GREG_xAX); \ 914 IEM_MC_LOCAL(uint32_t, uEFlags); \ 915 IEM_MC_FETCH_EFLAGS(uEFlags); \ 916 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, u64Imm, uEFlags, 64, 32); \ 917 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 918 IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Dst); \ 919 } IEM_MC_NATIVE_ELSE() { \ 920 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm, 1); \ 921 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \ 922 IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX); \ 923 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 924 IEM_MC_REF_EFLAGS(pEFlags); \ 925 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u64), pu64Dst, u64Src, pEFlags); \ 926 } IEM_MC_NATIVE_ENDIF(); \ 927 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 928 IEM_MC_END(); \ 929 } \ 930 \ 931 IEM_NOT_REACHED_DEFAULT_CASE_RET(); \ 932 } \ 933 (void)0 934 935 /** 936 * Body for the instructions CMP and TEST working on AX/EAX/RAX with a 937 * word/dword immediate. 
938 */ 939 #define IEMOP_BODY_BINARY_rAX_Iz_RO(a_InsNm, a_fNativeArchs) \ 940 switch (pVCpu->iem.s.enmEffOpSize) \ 941 { \ 942 case IEMMODE_16BIT: \ 943 { \ 944 IEM_MC_BEGIN(3, 2, 0, 0); \ 945 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); \ 946 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 947 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 948 IEM_MC_LOCAL(uint16_t, u16Dst); \ 949 IEM_MC_FETCH_GREG_U16(u16Dst, X86_GREG_xAX); \ 950 IEM_MC_LOCAL(uint32_t, uEFlags); \ 951 IEM_MC_FETCH_EFLAGS(uEFlags); \ 952 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, u16Imm, uEFlags, 16, 16); \ 953 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 954 } IEM_MC_NATIVE_ELSE() { \ 955 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm, 1); \ 956 IEM_MC_ARG(uint16_t const *,pu16Dst, 0); \ 957 IEM_MC_REF_GREG_U16_CONST(pu16Dst, X86_GREG_xAX); \ 958 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 959 IEM_MC_REF_EFLAGS(pEFlags); \ 960 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u16), pu16Dst, u16Src, pEFlags); \ 961 } IEM_MC_NATIVE_ENDIF(); \ 962 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 963 IEM_MC_END(); \ 964 } \ 965 \ 966 case IEMMODE_32BIT: \ 967 { \ 968 IEM_MC_BEGIN(3, 2, IEM_MC_F_MIN_386, 0); \ 969 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); \ 970 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 971 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 972 IEM_MC_LOCAL(uint32_t, u32Dst); \ 973 IEM_MC_FETCH_GREG_U32(u32Dst, X86_GREG_xAX); \ 974 IEM_MC_LOCAL(uint32_t, uEFlags); \ 975 IEM_MC_FETCH_EFLAGS(uEFlags); \ 976 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, u32Imm, uEFlags, 32, 32); \ 977 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 978 } IEM_MC_NATIVE_ELSE() { \ 979 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm, 1); \ 980 IEM_MC_ARG(uint32_t const *,pu32Dst, 0); \ 981 IEM_MC_REF_GREG_U32_CONST(pu32Dst, X86_GREG_xAX); \ 982 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 983 IEM_MC_REF_EFLAGS(pEFlags); \ 984 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u32), pu32Dst, u32Src, pEFlags); \ 985 } IEM_MC_NATIVE_ENDIF(); \ 986 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 987 IEM_MC_END(); \ 988 } \ 989 \ 990 case IEMMODE_64BIT: \ 991 { \ 992 IEM_MC_BEGIN(3, 2, IEM_MC_F_64BIT, 0); \ 993 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); \ 994 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 995 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 996 IEM_MC_LOCAL(uint64_t, u64Dst); \ 997 IEM_MC_FETCH_GREG_U64(u64Dst, X86_GREG_xAX); \ 998 IEM_MC_LOCAL(uint32_t, uEFlags); \ 999 IEM_MC_FETCH_EFLAGS(uEFlags); \ 1000 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, u64Imm, uEFlags, 64, 32); \ 1001 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 1002 } IEM_MC_NATIVE_ELSE() { \ 1003 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm, 1); \ 1004 IEM_MC_ARG(uint64_t const *,pu64Dst, 0); \ 1005 IEM_MC_REF_GREG_U64_CONST(pu64Dst, X86_GREG_xAX); \ 1006 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 1007 IEM_MC_REF_EFLAGS(pEFlags); \ 1008 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u64), pu64Dst, u64Src, pEFlags); \ 1009 } IEM_MC_NATIVE_ENDIF(); \ 900 1010 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 901 1011 IEM_MC_END(); \ … … 989 1099 { 990 1100 IEMOP_MNEMONIC2(FIXED, ADD, add, AL, Ib, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES); 991 IEMOP_BODY_BINARY_AL_Ib( iemAImpl_add_u8);1101 IEMOP_BODY_BINARY_AL_Ib(add, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64); 992 1102 } 993 1103 … … 1005 1115 { 1006 1116 IEMOP_MNEMONIC2(FIXED, ADD, add, rAX, Iz, DISOPTYPE_HARMLESS, 0); 1007 IEMOP_BODY_BINARY_rAX_Iz (iemAImpl_add_u16, iemAImpl_add_u32, iemAImpl_add_u64, 
1);1117 IEMOP_BODY_BINARY_rAX_Iz_RW(add, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64); 1008 1118 } 1009 1119 … … 1120 1230 IEMOP_MNEMONIC2(FIXED, OR, or, AL, Ib, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES); 1121 1231 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 1122 IEMOP_BODY_BINARY_AL_Ib( iemAImpl_or_u8);1232 IEMOP_BODY_BINARY_AL_Ib(or, 0); 1123 1233 } 1124 1234 … … 1140 1250 IEMOP_MNEMONIC2(FIXED, OR, or, rAX, Iz, DISOPTYPE_HARMLESS, 0); 1141 1251 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 1142 IEMOP_BODY_BINARY_rAX_Iz (iemAImpl_or_u16, iemAImpl_or_u32, iemAImpl_or_u64, 1);1252 IEMOP_BODY_BINARY_rAX_Iz_RW(or, 0); 1143 1253 } 1144 1254 … … 1276 1386 { 1277 1387 IEMOP_MNEMONIC2(FIXED, ADC, adc, AL, Ib, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES); 1278 IEMOP_BODY_BINARY_AL_Ib( iemAImpl_adc_u8);1388 IEMOP_BODY_BINARY_AL_Ib(adc, 0); 1279 1389 } 1280 1390 … … 1289 1399 { 1290 1400 IEMOP_MNEMONIC2(FIXED, ADC, adc, rAX, Iz, DISOPTYPE_HARMLESS, 0); 1291 IEMOP_BODY_BINARY_rAX_Iz (iemAImpl_adc_u16, iemAImpl_adc_u32, iemAImpl_adc_u64, 1);1401 IEMOP_BODY_BINARY_rAX_Iz_RW(adc, 0); 1292 1402 } 1293 1403 … … 1383 1493 { 1384 1494 IEMOP_MNEMONIC2(FIXED, SBB, sbb, AL, Ib, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES); 1385 IEMOP_BODY_BINARY_AL_Ib( iemAImpl_sbb_u8);1495 IEMOP_BODY_BINARY_AL_Ib(sbb, 0); 1386 1496 } 1387 1497 … … 1395 1505 { 1396 1506 IEMOP_MNEMONIC2(FIXED, SBB, sbb, rAX, Iz, DISOPTYPE_HARMLESS, 0); 1397 IEMOP_BODY_BINARY_rAX_Iz (iemAImpl_sbb_u16, iemAImpl_sbb_u32, iemAImpl_sbb_u64, 1);1507 IEMOP_BODY_BINARY_rAX_Iz_RW(sbb, 0); 1398 1508 } 1399 1509 … … 1496 1606 IEMOP_MNEMONIC2(FIXED, AND, and, AL, Ib, DISOPTYPE_HARMLESS, 0); 1497 1607 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 1498 IEMOP_BODY_BINARY_AL_Ib( iemAImpl_and_u8);1608 IEMOP_BODY_BINARY_AL_Ib(and, 0); 1499 1609 } 1500 1610 … … 1509 1619 IEMOP_MNEMONIC2(FIXED, AND, and, rAX, Iz, DISOPTYPE_HARMLESS, 0); 1510 1620 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 1511 IEMOP_BODY_BINARY_rAX_Iz (iemAImpl_and_u16, iemAImpl_and_u32, iemAImpl_and_u64, 1);1621 IEMOP_BODY_BINARY_rAX_Iz_RW(and, 0); 1512 1622 } 1513 1623 … … 1614 1724 { 1615 1725 IEMOP_MNEMONIC2(FIXED, SUB, sub, AL, Ib, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES); 1616 IEMOP_BODY_BINARY_AL_Ib( iemAImpl_sub_u8);1726 IEMOP_BODY_BINARY_AL_Ib(sub, 0); 1617 1727 } 1618 1728 … … 1626 1736 { 1627 1737 IEMOP_MNEMONIC2(FIXED, SUB, sub, rAX, Iz, DISOPTYPE_HARMLESS, 0); 1628 IEMOP_BODY_BINARY_rAX_Iz (iemAImpl_sub_u16, iemAImpl_sub_u32, iemAImpl_sub_u64, 1);1738 IEMOP_BODY_BINARY_rAX_Iz_RW(sub, 0); 1629 1739 } 1630 1740 … … 1736 1846 IEMOP_MNEMONIC2(FIXED, XOR, xor, AL, Ib, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES); 1737 1847 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 1738 IEMOP_BODY_BINARY_AL_Ib( iemAImpl_xor_u8);1848 IEMOP_BODY_BINARY_AL_Ib(xor, 0); 1739 1849 } 1740 1850 … … 1749 1859 IEMOP_MNEMONIC2(FIXED, XOR, xor, rAX, Iz, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES); 1750 1860 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 1751 IEMOP_BODY_BINARY_rAX_Iz (iemAImpl_xor_u16, iemAImpl_xor_u32, iemAImpl_xor_u64, 1);1861 IEMOP_BODY_BINARY_rAX_Iz_RW(xor, 0); 1752 1862 } 1753 1863 … … 1877 1987 { 1878 1988 IEMOP_MNEMONIC(cmp_al_Ib, "cmp al,Ib"); 1879 IEMOP_BODY_BINARY_AL_Ib( iemAImpl_cmp_u8);1989 IEMOP_BODY_BINARY_AL_Ib(cmp, 0); 1880 1990 } 1881 1991 … … 1888 1998 { 1889 1999 IEMOP_MNEMONIC(cmp_rAX_Iz, "cmp rAX,Iz"); 1890 IEMOP_BODY_BINARY_rAX_Iz (iemAImpl_cmp_u16, iemAImpl_cmp_u32, iemAImpl_cmp_u64, 0);2000 IEMOP_BODY_BINARY_rAX_Iz_RO(cmp, 0); 1891 
2001 } 1892 2002 … … 4120 4230 * iemOp_Grp1_Eb_Ib_80. 4121 4231 */ 4122 #define IEMOP_BODY_BINARY_Eb_Ib_RW(a_ fnNormalU8) \4232 #define IEMOP_BODY_BINARY_Eb_Ib_RW(a_InsNm, a_fRegNativeArchs, a_fMemNativeArchs) \ 4123 4233 if (IEM_IS_MODRM_REG_MODE(bRm)) \ 4124 4234 { \ 4125 4235 /* register target */ \ 4236 IEM_MC_BEGIN(3, 2, 0, 0); \ 4126 4237 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); \ 4127 IEM_MC_BEGIN(3, 0, 0, 0); \4128 4238 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 4129 IEM_MC_ARG(uint8_t *, pu8Dst, 0); \ 4130 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1); \ 4131 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4132 \ 4133 IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4134 IEM_MC_REF_EFLAGS(pEFlags); \ 4135 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU8, pu8Dst, u8Src, pEFlags); \ 4136 \ 4239 IEM_MC_NATIVE_IF(a_fRegNativeArchs) { \ 4240 IEM_MC_LOCAL(uint8_t, u8Dst); \ 4241 IEM_MC_FETCH_GREG_U8(u8Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4242 IEM_MC_LOCAL(uint32_t, uEFlags); \ 4243 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4244 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u8Dst, u8Imm, uEFlags, 8, 8); \ 4245 IEM_MC_STORE_GREG_U8(IEM_GET_MODRM_RM(pVCpu, bRm), u8Dst); \ 4246 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4247 } IEM_MC_NATIVE_ELSE() { \ 4248 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1); \ 4249 IEM_MC_ARG(uint8_t *, pu8Dst, 0); \ 4250 IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4251 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4252 IEM_MC_REF_EFLAGS(pEFlags); \ 4253 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u8), pu8Dst, u8Src, pEFlags); \ 4254 } IEM_MC_NATIVE_ENDIF(); \ 4137 4255 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4138 4256 IEM_MC_END(); \ … … 4156 4274 IEM_MC_MEM_MAP_U8_RW(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4157 4275 IEM_MC_FETCH_EFLAGS(EFlags); \ 4158 IEM_MC_CALL_VOID_AIMPL_3( a_fnNormalU8, pu8Dst, u8Src, pEFlags); \4276 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u8), pu8Dst, u8Src, pEFlags); \ 4159 4277 \ 4160 4278 IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \ … … 4165 4283 else \ 4166 4284 { \ 4167 (void)04168 4169 #define IEMOP_BODY_BINARY_Eb_Ib_LOCKED(a_fnLockedU8) \4170 4285 IEM_MC_BEGIN(3, 3, 0, 0); \ 4171 4286 IEM_MC_ARG(uint8_t *, pu8Dst, 0); \ … … 4181 4296 IEM_MC_MEM_MAP_U8_ATOMIC(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4182 4297 IEM_MC_FETCH_EFLAGS(EFlags); \ 4183 IEM_MC_CALL_VOID_AIMPL_3( a_fnLockedU8, pu8Dst, u8Src, pEFlags); \4298 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u8_locked), pu8Dst, u8Src, pEFlags); \ 4184 4299 \ 4185 4300 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ … … 4191 4306 (void)0 4192 4307 4193 #define IEMOP_BODY_BINARY_Eb_Ib_RO(a_ fnNormalU8) \4308 #define IEMOP_BODY_BINARY_Eb_Ib_RO(a_InsNm, a_fNativeArchs) \ 4194 4309 if (IEM_IS_MODRM_REG_MODE(bRm)) \ 4195 4310 { \ 4196 4311 /* register target */ \ 4312 IEM_MC_BEGIN(3, 2, 0, 0); \ 4197 4313 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); \ 4198 IEM_MC_BEGIN(3, 0, 0, 0); \4199 4314 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 4200 IEM_MC_ARG(uint8_t *, pu8Dst, 0); \ 4201 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1); \ 4202 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4203 \ 4204 IEM_MC_REF_GREG_U8(pu8Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4205 IEM_MC_REF_EFLAGS(pEFlags); \ 4206 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU8, pu8Dst, u8Src, pEFlags); \ 4207 \ 4315 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 4316 IEM_MC_LOCAL(uint8_t, u8Dst); \ 4317 IEM_MC_FETCH_GREG_U8(u8Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4318 
IEM_MC_LOCAL(uint32_t, uEFlags); \ 4319 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4320 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u8Dst, u8Imm, uEFlags, 8, 8); \ 4321 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4322 } IEM_MC_NATIVE_ELSE() { \ 4323 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1); \ 4324 IEM_MC_ARG(uint8_t const *, pu8Dst, 0); \ 4325 IEM_MC_REF_GREG_U8_CONST(pu8Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4326 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4327 IEM_MC_REF_EFLAGS(pEFlags); \ 4328 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u8), pu8Dst, u8Src, pEFlags); \ 4329 } IEM_MC_NATIVE_ENDIF(); \ 4208 4330 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4209 4331 IEM_MC_END(); \ … … 4215 4337 { \ 4216 4338 IEM_MC_BEGIN(3, 3, 0, 0); \ 4217 IEM_MC_ARG(uint8_t const *, pu8Dst, 0); \4218 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \4219 4339 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ 4220 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \4221 \4222 4340 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1); \ 4223 4341 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm); \ 4224 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1); \4225 4342 IEMOP_HLP_DONE_DECODING(); \ 4226 \ 4227 IEM_MC_MEM_MAP_U8_RO(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4228 IEM_MC_FETCH_EFLAGS(EFlags); \ 4229 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU8, pu8Dst, u8Src, pEFlags); \ 4230 \ 4231 IEM_MC_MEM_COMMIT_AND_UNMAP_RO(bUnmapInfo); \ 4232 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4343 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 4344 IEM_MC_LOCAL(uint8_t, u8Dst); \ 4345 IEM_MC_FETCH_MEM_U8(u8Dst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4346 IEM_MC_LOCAL(uint32_t, uEFlags); \ 4347 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4348 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u8Dst, u8Imm, uEFlags, 8, 8); \ 4349 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4350 } IEM_MC_NATIVE_ELSE() { \ 4351 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4352 IEM_MC_ARG(uint8_t const *, pu8Dst, 0); \ 4353 IEM_MC_MEM_MAP_U8_RO(pu8Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4354 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4355 IEM_MC_FETCH_EFLAGS(EFlags); \ 4356 IEM_MC_ARG_CONST(uint8_t, u8Src, /*=*/ u8Imm, 1); \ 4357 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u8), pu8Dst, u8Src, pEFlags); \ 4358 IEM_MC_MEM_COMMIT_AND_UNMAP_RO(bUnmapInfo); \ 4359 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4360 } IEM_MC_NATIVE_ENDIF(); \ 4233 4361 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4234 4362 IEM_MC_END(); \ … … 4236 4364 else \ 4237 4365 { \ 4238 (void)04239 4240 #define IEMOP_BODY_BINARY_Eb_Ib_NO_LOCK() \4241 4366 IEMOP_HLP_DONE_DECODING(); \ 4242 4367 IEMOP_RAISE_INVALID_LOCK_PREFIX_RET(); \ … … 4255 4380 { 4256 4381 IEMOP_MNEMONIC(add_Eb_Ib, "add Eb,Ib"); 4257 IEMOP_BODY_BINARY_Eb_Ib_RW( iemAImpl_add_u8); 4258 IEMOP_BODY_BINARY_Eb_Ib_LOCKED(iemAImpl_add_u8_locked); 4382 IEMOP_BODY_BINARY_Eb_Ib_RW(add, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64, 0); 4259 4383 } 4260 4384 … … 4268 4392 { 4269 4393 IEMOP_MNEMONIC(or_Eb_Ib, "or Eb,Ib"); 4270 IEMOP_BODY_BINARY_Eb_Ib_RW( iemAImpl_or_u8); 4271 IEMOP_BODY_BINARY_Eb_Ib_LOCKED(iemAImpl_or_u8_locked); 4394 IEMOP_BODY_BINARY_Eb_Ib_RW(or, 0, 0); 4272 4395 } 4273 4396 … … 4281 4404 { 4282 4405 IEMOP_MNEMONIC(adc_Eb_Ib, "adc Eb,Ib"); 4283 IEMOP_BODY_BINARY_Eb_Ib_RW( iemAImpl_adc_u8); 4284 IEMOP_BODY_BINARY_Eb_Ib_LOCKED(iemAImpl_adc_u8_locked); 4406 IEMOP_BODY_BINARY_Eb_Ib_RW(adc, 0, 0); 4285 4407 } 4286 4408 … … 4294 4416 { 4295 4417 IEMOP_MNEMONIC(sbb_Eb_Ib, "sbb Eb,Ib"); 4296 IEMOP_BODY_BINARY_Eb_Ib_RW( iemAImpl_sbb_u8); 4297 
IEMOP_BODY_BINARY_Eb_Ib_LOCKED(iemAImpl_sbb_u8_locked); 4418 IEMOP_BODY_BINARY_Eb_Ib_RW(sbb, 0, 0); 4298 4419 } 4299 4420 … … 4307 4428 { 4308 4429 IEMOP_MNEMONIC(and_Eb_Ib, "and Eb,Ib"); 4309 IEMOP_BODY_BINARY_Eb_Ib_RW( iemAImpl_and_u8); 4310 IEMOP_BODY_BINARY_Eb_Ib_LOCKED(iemAImpl_and_u8_locked); 4430 IEMOP_BODY_BINARY_Eb_Ib_RW(and, 0, 0); 4311 4431 } 4312 4432 … … 4320 4440 { 4321 4441 IEMOP_MNEMONIC(sub_Eb_Ib, "sub Eb,Ib"); 4322 IEMOP_BODY_BINARY_Eb_Ib_RW( iemAImpl_sub_u8); 4323 IEMOP_BODY_BINARY_Eb_Ib_LOCKED(iemAImpl_sub_u8_locked); 4442 IEMOP_BODY_BINARY_Eb_Ib_RW(sub, 0, 0); 4324 4443 } 4325 4444 … … 4333 4452 { 4334 4453 IEMOP_MNEMONIC(xor_Eb_Ib, "xor Eb,Ib"); 4335 IEMOP_BODY_BINARY_Eb_Ib_RW( iemAImpl_xor_u8); 4336 IEMOP_BODY_BINARY_Eb_Ib_LOCKED(iemAImpl_xor_u8_locked); 4454 IEMOP_BODY_BINARY_Eb_Ib_RW(xor, 0, 0); 4337 4455 } 4338 4456 … … 4346 4464 { 4347 4465 IEMOP_MNEMONIC(cmp_Eb_Ib, "cmp Eb,Ib"); 4348 IEMOP_BODY_BINARY_Eb_Ib_RO(iemAImpl_cmp_u8); 4349 IEMOP_BODY_BINARY_Eb_Ib_NO_LOCK(); 4466 IEMOP_BODY_BINARY_Eb_Ib_RO(cmp, 0); 4350 4467 } 4351 4468 … … 4375 4492 * Body for a group 1 binary operator. 4376 4493 */ 4377 #define IEMOP_BODY_BINARY_Ev_Iz_RW(a_ fnNormalU16, a_fnNormalU32, a_fnNormalU64) \4494 #define IEMOP_BODY_BINARY_Ev_Iz_RW(a_InsNm, a_fRegNativeArchs, a_fMemNativeArchs) \ 4378 4495 if (IEM_IS_MODRM_REG_MODE(bRm)) \ 4379 4496 { \ … … 4383 4500 case IEMMODE_16BIT: \ 4384 4501 { \ 4502 IEM_MC_BEGIN(3, 2, 0, 0); \ 4385 4503 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); \ 4386 IEM_MC_BEGIN(3, 0, 0, 0); \4387 4504 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 4388 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \ 4389 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1); \ 4390 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4391 \ 4392 IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4393 IEM_MC_REF_EFLAGS(pEFlags); \ 4394 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \ 4395 \ 4505 IEM_MC_NATIVE_IF(a_fRegNativeArchs) { \ 4506 IEM_MC_LOCAL(uint16_t, u16Dst); \ 4507 IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4508 IEM_MC_LOCAL(uint32_t, uEFlags); \ 4509 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4510 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, u16Imm, uEFlags, 16, 16); \ 4511 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4512 IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_RM(pVCpu, bRm), u16Dst); \ 4513 } IEM_MC_NATIVE_ELSE() { \ 4514 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \ 4515 IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4516 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4517 IEM_MC_REF_EFLAGS(pEFlags); \ 4518 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1); \ 4519 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u16), pu16Dst, u16Src, pEFlags); \ 4520 } IEM_MC_NATIVE_ENDIF(); \ 4396 4521 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4397 4522 IEM_MC_END(); \ … … 4401 4526 case IEMMODE_32BIT: \ 4402 4527 { \ 4528 IEM_MC_BEGIN(3, 2, IEM_MC_F_MIN_386, 0); \ 4403 4529 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); \ 4404 IEM_MC_BEGIN(3, 0, IEM_MC_F_MIN_386, 0); \4405 4530 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 4406 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \ 4407 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1); \ 4408 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4409 \ 4410 IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4411 IEM_MC_REF_EFLAGS(pEFlags); \ 4412 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \ 4413 IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4414 \ 4531 
IEM_MC_NATIVE_IF(a_fRegNativeArchs) { \ 4532 IEM_MC_LOCAL(uint32_t, u32Dst); \ 4533 IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4534 IEM_MC_LOCAL(uint32_t, uEFlags); \ 4535 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4536 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, u32Imm, uEFlags, 32, 32); \ 4537 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4538 IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_RM(pVCpu, bRm), u32Dst); \ 4539 } IEM_MC_NATIVE_ELSE() { \ 4540 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \ 4541 IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4542 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4543 IEM_MC_REF_EFLAGS(pEFlags); \ 4544 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1); \ 4545 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u32), pu32Dst, u32Src, pEFlags); \ 4546 IEM_MC_CLEAR_HIGH_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4547 } IEM_MC_NATIVE_ENDIF(); \ 4415 4548 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4416 4549 IEM_MC_END(); \ … … 4420 4553 case IEMMODE_64BIT: \ 4421 4554 { \ 4555 IEM_MC_BEGIN(3, 2, IEM_MC_F_64BIT, 0); \ 4422 4556 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); \ 4423 IEM_MC_BEGIN(3, 0, IEM_MC_F_64BIT, 0); \4424 4557 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 4425 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \ 4426 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1); \ 4427 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4428 \ 4429 IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4430 IEM_MC_REF_EFLAGS(pEFlags); \ 4431 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \ 4432 \ 4558 IEM_MC_NATIVE_IF(a_fRegNativeArchs) { \ 4559 IEM_MC_LOCAL(uint64_t, u64Dst); \ 4560 IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4561 IEM_MC_LOCAL(uint32_t, uEFlags); \ 4562 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4563 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, u64Imm, uEFlags, 64, 32); \ 4564 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4565 IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm), u64Dst); \ 4566 } IEM_MC_NATIVE_ELSE() { \ 4567 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \ 4568 IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4569 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4570 IEM_MC_REF_EFLAGS(pEFlags); \ 4571 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1); \ 4572 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u64), pu64Dst, u64Src, pEFlags); \ 4573 } IEM_MC_NATIVE_ENDIF(); \ 4433 4574 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4434 4575 IEM_MC_END(); \ … … 4462 4603 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4463 4604 IEM_MC_FETCH_EFLAGS(EFlags); \ 4464 IEM_MC_CALL_VOID_AIMPL_3( a_fnNormalU16, pu16Dst, u16Src, pEFlags); \4605 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u16), pu16Dst, u16Src, pEFlags); \ 4465 4606 \ 4466 4607 IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \ … … 4487 4628 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4488 4629 IEM_MC_FETCH_EFLAGS(EFlags); \ 4489 IEM_MC_CALL_VOID_AIMPL_3( a_fnNormalU32, pu32Dst, u32Src, pEFlags); \4630 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u32), pu32Dst, u32Src, pEFlags); \ 4490 4631 \ 4491 4632 IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \ … … 4513 4654 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4514 4655 IEM_MC_FETCH_EFLAGS(EFlags); \ 4515 IEM_MC_CALL_VOID_AIMPL_3( a_fnNormalU64, pu64Dst, u64Src, pEFlags); \4656 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u64), pu64Dst, u64Src, pEFlags); \ 4516 4657 \ 4517 4658 IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); \ … … 4527 4668 else \ 4528 4669 { 
\ 4529 (void)04530 /* This must be a separate macro due to parsing restrictions in IEMAllInstPython.py. */4531 #define IEMOP_BODY_BINARY_Ev_Iz_LOCKED(a_fnLockedU16, a_fnLockedU32, a_fnLockedU64) \4532 4670 switch (pVCpu->iem.s.enmEffOpSize) \ 4533 4671 { \ … … 4548 4686 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4549 4687 IEM_MC_FETCH_EFLAGS(EFlags); \ 4550 IEM_MC_CALL_VOID_AIMPL_3( a_fnLockedU16, pu16Dst, u16Src, pEFlags); \4688 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u16_locked), pu16Dst, u16Src, pEFlags); \ 4551 4689 \ 4552 4690 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ … … 4573 4711 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4574 4712 IEM_MC_FETCH_EFLAGS(EFlags); \ 4575 IEM_MC_CALL_VOID_AIMPL_3( a_fnLockedU32, pu32Dst, u32Src, pEFlags); \4713 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u32_locked), pu32Dst, u32Src, pEFlags); \ 4576 4714 \ 4577 4715 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ … … 4598 4736 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4599 4737 IEM_MC_FETCH_EFLAGS(EFlags); \ 4600 IEM_MC_CALL_VOID_AIMPL_3( a_fnLockedU64, pu64Dst, u64Src, pEFlags); \4738 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u64_locked), pu64Dst, u64Src, pEFlags); \ 4601 4739 \ 4602 4740 IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(bUnmapInfo); \ … … 4614 4752 4615 4753 /* read-only version */ 4616 #define IEMOP_BODY_BINARY_Ev_Iz_RO(a_ fnNormalU16, a_fnNormalU32, a_fnNormalU64) \4754 #define IEMOP_BODY_BINARY_Ev_Iz_RO(a_InsNm, a_fNativeArchs) \ 4617 4755 if (IEM_IS_MODRM_REG_MODE(bRm)) \ 4618 4756 { \ … … 4622 4760 case IEMMODE_16BIT: \ 4623 4761 { \ 4762 IEM_MC_BEGIN(3, 2, 0, 0); \ 4624 4763 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); \ 4625 IEM_MC_BEGIN(3, 0, 0, 0); \4626 4764 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 4627 IEM_MC_ARG(uint16_t *, pu16Dst, 0); \ 4628 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1); \ 4629 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4630 \ 4631 IEM_MC_REF_GREG_U16(pu16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4632 IEM_MC_REF_EFLAGS(pEFlags); \ 4633 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \ 4634 \ 4765 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 4766 IEM_MC_LOCAL(uint16_t, u16Dst); \ 4767 IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4768 IEM_MC_LOCAL(uint32_t, uEFlags); \ 4769 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4770 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, u16Imm, uEFlags, 16, 16); \ 4771 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4772 } IEM_MC_NATIVE_ELSE() { \ 4773 IEM_MC_ARG(uint16_t const *,pu16Dst, 0); \ 4774 IEM_MC_REF_GREG_U16_CONST(pu16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4775 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4776 IEM_MC_REF_EFLAGS(pEFlags); \ 4777 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u16Imm, 1); \ 4778 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u16), pu16Dst, u16Src, pEFlags); \ 4779 } IEM_MC_NATIVE_ENDIF(); \ 4635 4780 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4636 4781 IEM_MC_END(); \ … … 4640 4785 case IEMMODE_32BIT: \ 4641 4786 { \ 4787 IEM_MC_BEGIN(3, 2, IEM_MC_F_MIN_386, 0); \ 4642 4788 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); \ 4643 IEM_MC_BEGIN(3, 0, IEM_MC_F_MIN_386, 0); \4644 4789 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 4645 IEM_MC_ARG(uint32_t *, pu32Dst, 0); \ 4646 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1); \ 4647 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4648 \ 4649 IEM_MC_REF_GREG_U32(pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4650 IEM_MC_REF_EFLAGS(pEFlags); \ 4651 
IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \ 4652 \ 4790 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 4791 IEM_MC_LOCAL(uint32_t, u32Dst); \ 4792 IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4793 IEM_MC_LOCAL(uint32_t, uEFlags); \ 4794 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4795 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, u32Imm, uEFlags, 32, 32); \ 4796 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4797 } IEM_MC_NATIVE_ELSE() { \ 4798 IEM_MC_ARG(uint32_t const *,pu32Dst, 0); \ 4799 IEM_MC_REF_GREG_U32_CONST (pu32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4800 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4801 IEM_MC_REF_EFLAGS(pEFlags); \ 4802 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u32Imm, 1); \ 4803 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u32), pu32Dst, u32Src, pEFlags); \ 4804 } IEM_MC_NATIVE_ENDIF(); \ 4653 4805 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4654 4806 IEM_MC_END(); \ … … 4658 4810 case IEMMODE_64BIT: \ 4659 4811 { \ 4812 IEM_MC_BEGIN(3, 2, IEM_MC_F_64BIT, 0); \ 4660 4813 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); \ 4661 IEM_MC_BEGIN(3, 0, IEM_MC_F_64BIT, 0); \4662 4814 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); \ 4663 IEM_MC_ARG(uint64_t *, pu64Dst, 0); \ 4664 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1); \ 4665 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4666 \ 4667 IEM_MC_REF_GREG_U64(pu64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4668 IEM_MC_REF_EFLAGS(pEFlags); \ 4669 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \ 4670 \ 4815 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 4816 IEM_MC_LOCAL(uint64_t, u64Dst); \ 4817 IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4818 IEM_MC_LOCAL(uint32_t, uEFlags); \ 4819 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4820 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, u64Imm, uEFlags, 64, 32); \ 4821 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4822 } IEM_MC_NATIVE_ELSE() { \ 4823 IEM_MC_ARG(uint64_t const *,pu64Dst, 0); \ 4824 IEM_MC_REF_GREG_U64_CONST(pu64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 4825 IEM_MC_ARG(uint32_t *, pEFlags, 2); \ 4826 IEM_MC_REF_EFLAGS(pEFlags); \ 4827 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u64Imm, 1); \ 4828 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u64), pu64Dst, u64Src, pEFlags); \ 4829 } IEM_MC_NATIVE_ENDIF(); \ 4671 4830 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4672 4831 IEM_MC_END(); \ … … 4689 4848 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ 4690 4849 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2); \ 4691 \4692 4850 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm); \ 4693 4851 IEMOP_HLP_DONE_DECODING(); \ 4694 \ 4695 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4696 IEM_MC_ARG(uint16_t const *, pu16Dst, 0); \ 4697 IEM_MC_MEM_MAP_U16_RO(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4698 \ 4699 IEM_MC_ARG_CONST(uint16_t, u16Src, u16Imm, 1); \ 4700 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4701 IEM_MC_FETCH_EFLAGS(EFlags); \ 4702 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU16, pu16Dst, u16Src, pEFlags); \ 4703 \ 4704 IEM_MC_MEM_COMMIT_AND_UNMAP_RO(bUnmapInfo); \ 4705 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4852 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 4853 IEM_MC_LOCAL(uint16_t, u16Dst); \ 4854 IEM_MC_FETCH_MEM_U16(u16Dst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4855 IEM_MC_LOCAL(uint32_t, uEFlags); \ 4856 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4857 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, u16Imm, uEFlags, 16, 16); \ 4858 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4859 } IEM_MC_NATIVE_ELSE() { \ 4860 IEM_MC_LOCAL(uint8_t, 
bUnmapInfo); \ 4861 IEM_MC_ARG(uint16_t const *, pu16Dst, 0); \ 4862 IEM_MC_MEM_MAP_U16_RO(pu16Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4863 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4864 IEM_MC_FETCH_EFLAGS(EFlags); \ 4865 IEM_MC_ARG_CONST(uint16_t, u16Src, u16Imm, 1); \ 4866 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u16), pu16Dst, u16Src, pEFlags); \ 4867 IEM_MC_MEM_COMMIT_AND_UNMAP_RO(bUnmapInfo); \ 4868 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4869 } IEM_MC_NATIVE_ENDIF(); \ 4706 4870 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4707 4871 IEM_MC_END(); \ … … 4714 4878 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ 4715 4879 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); \ 4716 \4717 4880 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm); \ 4718 4881 IEMOP_HLP_DONE_DECODING(); \ 4719 \ 4720 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4721 IEM_MC_ARG(uint32_t const *, pu32Dst, 0); \ 4722 IEM_MC_MEM_MAP_U32_RO(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4723 \ 4724 IEM_MC_ARG_CONST(uint32_t, u32Src, u32Imm, 1); \ 4725 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4726 IEM_MC_FETCH_EFLAGS(EFlags); \ 4727 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU32, pu32Dst, u32Src, pEFlags); \ 4728 \ 4729 IEM_MC_MEM_COMMIT_AND_UNMAP_RO(bUnmapInfo); \ 4730 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4882 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 4883 IEM_MC_LOCAL(uint32_t, u32Dst); \ 4884 IEM_MC_FETCH_MEM_U32(u32Dst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4885 IEM_MC_LOCAL(uint32_t, uEFlags); \ 4886 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4887 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, u32Imm, uEFlags, 32, 32); \ 4888 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4889 } IEM_MC_NATIVE_ELSE() { \ 4890 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4891 IEM_MC_ARG(uint32_t const *, pu32Dst, 0); \ 4892 IEM_MC_MEM_MAP_U32_RO(pu32Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4893 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4894 IEM_MC_FETCH_EFLAGS(EFlags); \ 4895 IEM_MC_ARG_CONST(uint32_t, u32Src, u32Imm, 1); \ 4896 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u32), pu32Dst, u32Src, pEFlags); \ 4897 IEM_MC_MEM_COMMIT_AND_UNMAP_RO(bUnmapInfo); \ 4898 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4899 } IEM_MC_NATIVE_ENDIF(); \ 4731 4900 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4732 4901 IEM_MC_END(); \ … … 4739 4908 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); \ 4740 4909 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); \ 4741 \4742 4910 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); \ 4743 4911 IEMOP_HLP_DONE_DECODING(); \ 4744 \ 4745 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4746 IEM_MC_ARG(uint64_t const *, pu64Dst, 0); \ 4747 IEM_MC_MEM_MAP_U64_RO(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4748 \ 4749 IEM_MC_ARG_CONST(uint64_t, u64Src, u64Imm, 1); \ 4750 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4751 IEM_MC_FETCH_EFLAGS(EFlags); \ 4752 IEM_MC_CALL_VOID_AIMPL_3(a_fnNormalU64, pu64Dst, u64Src, pEFlags); \ 4753 \ 4754 IEM_MC_MEM_COMMIT_AND_UNMAP_RO(bUnmapInfo); \ 4755 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4912 IEM_MC_NATIVE_IF(a_fNativeArchs) { \ 4913 IEM_MC_LOCAL(uint64_t, u64Dst); \ 4914 IEM_MC_FETCH_MEM_U64(u64Dst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4915 IEM_MC_LOCAL(uint32_t, uEFlags); \ 4916 IEM_MC_FETCH_EFLAGS(uEFlags); \ 4917 IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, u64Imm, uEFlags, 64, 32); \ 4918 IEM_MC_COMMIT_EFLAGS(uEFlags); \ 4919 } IEM_MC_NATIVE_ELSE() { \ 4920 IEM_MC_LOCAL(uint8_t, bUnmapInfo); \ 4921 IEM_MC_ARG(uint64_t const *, pu64Dst, 0); \ 4922 
IEM_MC_MEM_MAP_U64_RO(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \ 4923 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2); \ 4924 IEM_MC_FETCH_EFLAGS(EFlags); \ 4925 IEM_MC_ARG_CONST(uint64_t, u64Src, u64Imm, 1); \ 4926 IEM_MC_CALL_VOID_AIMPL_3(RT_CONCAT3(iemAImpl_,a_InsNm,_u64), pu64Dst, u64Src, pEFlags); \ 4927 IEM_MC_MEM_COMMIT_AND_UNMAP_RO(bUnmapInfo); \ 4928 IEM_MC_COMMIT_EFLAGS(EFlags); \ 4929 } IEM_MC_NATIVE_ENDIF(); \ 4756 4930 IEM_MC_ADVANCE_RIP_AND_FINISH(); \ 4757 4931 IEM_MC_END(); \ … … 4779 4953 { 4780 4954 IEMOP_MNEMONIC(add_Ev_Iz, "add Ev,Iz"); 4781 IEMOP_BODY_BINARY_Ev_Iz_RW( iemAImpl_add_u16, iemAImpl_add_u32, iemAImpl_add_u64); 4782 IEMOP_BODY_BINARY_Ev_Iz_LOCKED(iemAImpl_add_u16_locked, iemAImpl_add_u32_locked, iemAImpl_add_u64_locked); 4955 IEMOP_BODY_BINARY_Ev_Iz_RW(add, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64, 0); 4783 4956 } 4784 4957 … … 4792 4965 { 4793 4966 IEMOP_MNEMONIC(or_Ev_Iz, "or Ev,Iz"); 4794 IEMOP_BODY_BINARY_Ev_Iz_RW( iemAImpl_or_u16, iemAImpl_or_u32, iemAImpl_or_u64); 4795 IEMOP_BODY_BINARY_Ev_Iz_LOCKED(iemAImpl_or_u16_locked, iemAImpl_or_u32_locked, iemAImpl_or_u64_locked); 4967 IEMOP_BODY_BINARY_Ev_Iz_RW(or, 0, 0); 4796 4968 } 4797 4969 … … 4805 4977 { 4806 4978 IEMOP_MNEMONIC(adc_Ev_Iz, "adc Ev,Iz"); 4807 IEMOP_BODY_BINARY_Ev_Iz_RW( iemAImpl_adc_u16, iemAImpl_adc_u32, iemAImpl_adc_u64); 4808 IEMOP_BODY_BINARY_Ev_Iz_LOCKED(iemAImpl_adc_u16_locked, iemAImpl_adc_u32_locked, iemAImpl_adc_u64_locked); 4979 IEMOP_BODY_BINARY_Ev_Iz_RW(adc, 0, 0); 4809 4980 } 4810 4981 … … 4818 4989 { 4819 4990 IEMOP_MNEMONIC(sbb_Ev_Iz, "sbb Ev,Iz"); 4820 IEMOP_BODY_BINARY_Ev_Iz_RW( iemAImpl_sbb_u16, iemAImpl_sbb_u32, iemAImpl_sbb_u64); 4821 IEMOP_BODY_BINARY_Ev_Iz_LOCKED(iemAImpl_sbb_u16_locked, iemAImpl_sbb_u32_locked, iemAImpl_sbb_u64_locked); 4991 IEMOP_BODY_BINARY_Ev_Iz_RW(sbb, 0, 0); 4822 4992 } 4823 4993 … … 4831 5001 { 4832 5002 IEMOP_MNEMONIC(and_Ev_Iz, "and Ev,Iz"); 4833 IEMOP_BODY_BINARY_Ev_Iz_RW( iemAImpl_and_u16, iemAImpl_and_u32, iemAImpl_and_u64); 4834 IEMOP_BODY_BINARY_Ev_Iz_LOCKED(iemAImpl_and_u16_locked, iemAImpl_and_u32_locked, iemAImpl_and_u64_locked); 5003 IEMOP_BODY_BINARY_Ev_Iz_RW(and, 0, 0); 4835 5004 } 4836 5005 … … 4844 5013 { 4845 5014 IEMOP_MNEMONIC(sub_Ev_Iz, "sub Ev,Iz"); 4846 IEMOP_BODY_BINARY_Ev_Iz_RW( iemAImpl_sub_u16, iemAImpl_sub_u32, iemAImpl_sub_u64); 4847 IEMOP_BODY_BINARY_Ev_Iz_LOCKED(iemAImpl_sub_u16_locked, iemAImpl_sub_u32_locked, iemAImpl_sub_u64_locked); 5015 IEMOP_BODY_BINARY_Ev_Iz_RW(sub, 0, 0); 4848 5016 } 4849 5017 … … 4857 5025 { 4858 5026 IEMOP_MNEMONIC(xor_Ev_Iz, "xor Ev,Iz"); 4859 IEMOP_BODY_BINARY_Ev_Iz_RW( iemAImpl_xor_u16, iemAImpl_xor_u32, iemAImpl_xor_u64); 4860 IEMOP_BODY_BINARY_Ev_Iz_LOCKED(iemAImpl_xor_u16_locked, iemAImpl_xor_u32_locked, iemAImpl_xor_u64_locked); 5027 IEMOP_BODY_BINARY_Ev_Iz_RW(xor, 0, 0); 4861 5028 } 4862 5029 … … 4870 5037 { 4871 5038 IEMOP_MNEMONIC(cmp_Ev_Iz, "cmp Ev,Iz"); 4872 IEMOP_BODY_BINARY_Ev_Iz_RO( iemAImpl_cmp_u16, iemAImpl_cmp_u32, iemAImpl_cmp_u64);5039 IEMOP_BODY_BINARY_Ev_Iz_RO(cmp, 0); 4873 5040 } 4874 5041 … … 7570 7737 IEMOP_MNEMONIC(test_al_Ib, "test al,Ib"); 7571 7738 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 7572 IEMOP_BODY_BINARY_AL_Ib( iemAImpl_test_u8);7739 IEMOP_BODY_BINARY_AL_Ib(test, 0); 7573 7740 } 7574 7741 … … 7582 7749 IEMOP_MNEMONIC(test_rAX_Iz, "test rAX,Iz"); 7583 7750 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF); 7584 IEMOP_BODY_BINARY_rAX_Iz (iemAImpl_test_u16, iemAImpl_test_u32, iemAImpl_test_u64, 0);7751 IEMOP_BODY_BINARY_rAX_Iz_RO(test, 0); 
7585 7752 } 7586 7753 -
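The reworked bodies above replace the old scheme of passing three or four iemAImpl_* worker pointers into each IEMOP_BODY_* macro: the macros now take the bare instruction name (a_InsNm) plus a native-architecture mask, split into an IEM_MC_NATIVE_IF/ELSE pair, and paste the worker symbols together at expansion time. In this revision only add passes RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64; the other instructions pass 0 and always take the C-fallback branch. A minimal standalone sketch of the token pasting (the RT_CONCAT3 here is a simplified stand-in for the IPRT macro, and the two functions are stubs rather than the real workers):

    #include <stdio.h>

    /* Simplified stand-in for iprt/cdefs.h's RT_CONCAT3: the extra helper
     * level makes sure macro arguments are expanded before pasting. */
    #define RT_CONCAT3(a,b,c)     RT_CONCAT3_HLP(a,b,c)
    #define RT_CONCAT3_HLP(a,b,c) a##b##c

    /* Stubs standing in for the real C fallback and native emitter. */
    static void iemAImpl_add_u8(void)           { puts("threaded fallback: iemAImpl_add_u8"); }
    static void iemNativeEmit_add_r_i_efl(void) { puts("native emitter:    iemNativeEmit_add_r_i_efl"); }

    /* One body macro reaches both workers by pasting the instruction name. */
    #define CALL_NATIVE(a_InsNm)   RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)()
    #define CALL_FALLBACK(a_InsNm) RT_CONCAT3(iemAImpl_,a_InsNm,_u8)()

    int main(void)
    {
        CALL_NATIVE(add);   /* expands to iemNativeEmit_add_r_i_efl() */
        CALL_FALLBACK(add); /* expands to iemAImpl_add_u8()           */
        return 0;
    }

Passing the name instead of the function pointers keeps the opcode wrappers short and lets a single macro body select between the native emitter and the fallback per host architecture.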
trunk/src/VBox/VMM/VMMAll/IEMAllN8veLiveness.cpp (r103667 → r103739)

@@ -439,3 +439,4 @@
 #define IEM_MC_STORE_GREG_U8_CONST_THREADED(a_iGRegEx, a_u8Value)  IEM_LIVENESS_GPR_MODIFY(a_iGRegEx & 15)
 #define IEM_MC_REF_GREG_U8_THREADED(a_pu8Dst, a_iGRegEx)           IEM_LIVENESS_GPR_MODIFY(a_iGRegEx & 15)
+#define IEM_MC_REF_GREG_U8_CONST_THREADED(a_pu8Dst, a_iGRegEx)     IEM_LIVENESS_GPR_INPUT(a_iGRegEx & 15)
 #define IEM_MC_ADD_GREG_U8_TO_LOCAL_THREADED(a_u8Value, a_iGRegEx) IEM_LIVENESS_GPR_INPUT(a_iGRegEx & 15)
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py (r103613 → r103739)

@@ -118,4 +118,5 @@
     'IEM_MC_FETCH_GREG_U8_ZX_U64_THREADED': (None, False, False, True, ),
     'IEM_MC_REF_GREG_U8_THREADED':          (None, True,  True,  True, ),
+    'IEM_MC_REF_GREG_U8_CONST_THREADED':    (None, True,  True,  True, ),
 
     'IEM_MC_REF_EFLAGS_EX':                 (None, False, False, True, ),
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp (r103729 → r103739)

@@ -11855,4 +11855,4 @@
     off = iemNativeEmitRefGregU8(pReNative, off, a_pu8Dst, a_iGRegEx, false /*fConst*/)
 
-#define IEM_MC_REF_GREG_U8_CONST_THREADED(a_pu8Dst, a_iGReg) \
+#define IEM_MC_REF_GREG_U8_CONST_THREADED(a_pu8Dst, a_iGRegEx) \
     off = iemNativeEmitRefGregU8(pReNative, off, a_pu8Dst, a_iGRegEx, true /*fConst*/)
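The hunk above is a macro-hygiene fix: the parameter was declared as a_iGReg while the body expanded a_iGRegEx, so the macro only compiled when an identically named identifier happened to exist at the expansion site. A toy demonstration of the pitfall (names are illustrative, not VirtualBox code):

    #include <stdio.h>

    /* The "broken" macro's body names a_iGRegEx, which is not its parameter,
     * so it silently binds to whatever a_iGRegEx is in scope at the call. */
    #define REF_BROKEN(a_iGReg)  printf("broken: %d\n", a_iGRegEx)
    #define REF_FIXED(a_iGRegEx) printf("fixed:  %d\n", a_iGRegEx)

    int main(void)
    {
        int a_iGRegEx = 7;  /* a stray identifier the broken macro captures */
        REF_BROKEN(3);      /* prints 7 - the argument 3 is ignored */
        REF_FIXED(3);       /* prints 3 as intended */
        return 0;
    }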
trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncs.cpp (r103233 → r103739)

@@ -418,4 +418,9 @@
 /** Variant of IEM_MC_REF_GREG_U8 with extended (20) register index. */
 #define IEM_MC_REF_GREG_U8_THREADED(a_pu8Dst, a_iGRegEx) \
+    (a_pu8Dst) = iemGRegRefU8Ex(pVCpu, (a_iGRegEx))
+#undef IEM_MC_REF_GREG_U8
+
+/** Variant of IEM_MC_REF_GREG_U8_CONST with extended (20) register index. */
+#define IEM_MC_REF_GREG_U8_CONST_THREADED(a_pu8Dst, a_iGRegEx) \
     (a_pu8Dst) = iemGRegRefU8Ex(pVCpu, (a_iGRegEx))
 #undef IEM_MC_REF_GREG_U8
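Note that the new IEM_MC_REF_GREG_U8_CONST_THREADED expands to exactly the same iemGRegRefU8Ex call as the mutable variant; the difference is declared intent: the liveness table earlier in this changeset marks it GPR_INPUT instead of GPR_MODIFY, and the native recompiler passes fConst=true. A toy model of that shared-accessor idea (every name in this sketch is a hypothetical stand-in):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct MINICPU { uint8_t abGRegs[16]; } MINICPU;
    static MINICPU g_Cpu;

    /* One accessor serves both reference macros; the real iemGRegRefU8Ex
     * additionally maps extended indexes 16..19 to the high bytes AH..BH. */
    static uint8_t *miniGRegRefU8Ex(MINICPU *pCpu, unsigned iGRegEx)
    {
        return &pCpu->abGRegs[iGRegEx & 15];
    }

    #define MINI_REF_GREG_U8_THREADED(a_pu8Dst, a_iGRegEx) \
        (a_pu8Dst) = miniGRegRefU8Ex(&g_Cpu, (a_iGRegEx))
    #define MINI_REF_GREG_U8_CONST_THREADED(a_pu8Dst, a_iGRegEx) \
        (a_pu8Dst) = miniGRegRefU8Ex(&g_Cpu, (a_iGRegEx))

    int main(void)
    {
        uint8_t       *pu8Dst; /* read-write reference (liveness: MODIFY) */
        uint8_t const *pu8Src; /* read-only reference  (liveness: INPUT)  */
        MINI_REF_GREG_U8_THREADED(pu8Dst, 0);
        MINI_REF_GREG_U8_CONST_THREADED(pu8Src, 0);
        *pu8Dst = 42;
        printf("const view reads %u\n", (unsigned)*pu8Src);
        return 0;
    }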
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h
r103693 r103739 114 114 } 115 115 116 117 /** 118 * Emits one of three opcodes with an immediate. 119 * 120 * These are expected to be a /idxRegReg form. 121 */ 122 DECL_FORCE_INLINE(uint32_t) 123 iemNativeEmitAmd64OneByteModRmInstrRIEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t bOpcode8, uint8_t bOpcodeOtherImm8, 124 uint8_t bOpcodeOther, uint8_t cOpBits, uint8_t cImmBits, uint8_t idxRegReg, 125 uint8_t idxRegRm, uint64_t uImmOp) 126 { 127 Assert(idxRegReg < 8); Assert(idxRegRm < 16); 128 if (cImmBits == 8 || uImmOp <= (uint64_t)0x7f) 129 { 130 switch (cOpBits) 131 { 132 case 16: 133 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP; 134 RT_FALL_THRU(); 135 case 32: 136 if (idxRegRm >= 8) 137 pCodeBuf[off++] = X86_OP_REX_B; 138 pCodeBuf[off++] = bOpcodeOtherImm8; 139 break; 140 141 default: AssertFailed(); RT_FALL_THRU(); 142 case 64: 143 pCodeBuf[off++] = X86_OP_REX_W | (idxRegRm >= 8 ? X86_OP_REX_B : 0); 144 pCodeBuf[off++] = bOpcodeOtherImm8; 145 break; 146 147 case 8: 148 if (idxRegRm >= 8) 149 pCodeBuf[off++] = X86_OP_REX_B; 150 else if (idxRegRm >= 4) 151 pCodeBuf[off++] = X86_OP_REX; 152 pCodeBuf[off++] = bOpcode8; 153 break; 154 } 155 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegReg, idxRegRm & 7); 156 pCodeBuf[off++] = (uint8_t)uImmOp; 157 } 158 else 159 { 160 switch (cOpBits) 161 { 162 case 32: 163 if (idxRegRm >= 8) 164 pCodeBuf[off++] = X86_OP_REX_B; 165 break; 166 167 default: AssertFailed(); RT_FALL_THRU(); 168 case 64: 169 pCodeBuf[off++] = X86_OP_REX_W | (idxRegRm >= 8 ? X86_OP_REX_B : 0); 170 break; 171 172 case 16: 173 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP; 174 if (idxRegRm >= 8) 175 pCodeBuf[off++] = X86_OP_REX_B; 176 pCodeBuf[off++] = bOpcodeOther; 177 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegReg, idxRegRm & 7); 178 pCodeBuf[off++] = RT_BYTE1(uImmOp); 179 pCodeBuf[off++] = RT_BYTE2(uImmOp); 180 Assert(cImmBits == 16); 181 return off; 182 } 183 pCodeBuf[off++] = bOpcodeOther; 184 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegReg, idxRegRm & 7); 185 pCodeBuf[off++] = RT_BYTE1(uImmOp); 186 pCodeBuf[off++] = RT_BYTE2(uImmOp); 187 pCodeBuf[off++] = RT_BYTE3(uImmOp); 188 pCodeBuf[off++] = RT_BYTE4(uImmOp); 189 Assert(cImmBits == 32); 190 } 191 return off; 192 } 193 116 194 #endif /* RT_ARCH_AMD64 */ 117 195 … … 215 293 #ifndef RT_ARCH_AMD64 216 294 , uint8_t cOpBits, uint8_t idxRegResult, uint8_t idxRegDstIn, uint8_t idxRegSrc 217 , bool fInvertCarry 295 , bool fInvertCarry, uint64_t uImmSrc 218 296 #endif 219 297 ) … … 286 364 pCodeBuf[off++] = Armv8A64MkInstrBfxil(idxRegEfl, idxRegResult, cOpBits, 1, false /*f64Bit*/); 287 365 288 /* The overflow flag is more work. See IEM_EFL_UPDATE_STATUS_BITS_FOR_ARITHMETIC. */ 289 if (fInvertCarry) /* sbb: ~((a_uDst) ^ ~(a_uSrcOf)) -> (a_uDst) ^ (a_uSrcOf); HACK ALERT: fInvertCarry == sbb */ 290 pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxRegDstIn, idxRegSrc, false); 291 else /* adc: ~((a_uDst) ^ (a_uSrcOf)) -> (a_uDst) ^ ~(a_uSrcOf) */ 292 pCodeBuf[off++] = Armv8A64MkInstrEon(idxTmpReg, idxRegDstIn, idxRegSrc, false); 293 pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg2, idxRegDstIn, idxRegResult, false); /* (a_uDst) ^ (a_uResult) */ 294 pCodeBuf[off++] = Armv8A64MkInstrAnd(idxTmpReg, idxTmpReg, idxTmpReg2, false /*f64Bit*/); 366 /* The overflow flag is more work. See IEM_EFL_UPDATE_STATUS_BITS_FOR_ARITHMETIC. 367 It is a bit simpler when the right side is constant. 
*/ 368 if (idxRegSrc != UINT8_MAX) 369 { 370 if (fInvertCarry) /* sbb: ~((a_uDst) ^ ~(a_uSrcOf)) -> (a_uDst) ^ (a_uSrcOf); HACK ALERT: fInvertCarry == sbb */ 371 pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxRegDstIn, idxRegSrc, false); 372 else /* adc: ~((a_uDst) ^ (a_uSrcOf)) -> (a_uDst) ^ ~(a_uSrcOf) */ 373 pCodeBuf[off++] = Armv8A64MkInstrEon(idxTmpReg, idxRegDstIn, idxRegSrc, false); 374 pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg2, idxRegDstIn, idxRegResult, false); /* (a_uDst) ^ (a_uResult) */ 375 pCodeBuf[off++] = Armv8A64MkInstrAnd(idxTmpReg, idxTmpReg, idxTmpReg2, false /*f64Bit*/); 376 } 377 else if (uImmSrc & RT_BIT_32(cOpBits - 1)) 378 { 379 if (fInvertCarry) /* sbb w/ top right 1: ~a_uDst & a_uResult ; HACK ALERT: fInvertCarry == sbb */ 380 pCodeBuf[off++] = Armv8A64MkInstrEon(idxTmpReg, idxRegResult, idxRegDstIn, false); 381 else /* adc w/ top right 1: a_uDst & ~a_uResult */ 382 pCodeBuf[off++] = Armv8A64MkInstrEon(idxTmpReg, idxRegDstIn, idxRegResult, false); 383 } 384 else 385 { 386 if (fInvertCarry) /* sbb w/ top right 0: a_uDst & ~a_uResult ; HACK ALERT: fInvertCarry == sbb */ 387 pCodeBuf[off++] = Armv8A64MkInstrEon(idxTmpReg, idxRegDstIn, idxRegResult, false); 388 else /* adc w/ top right 0: ~a_uDst & a_uResult */ 389 pCodeBuf[off++] = Armv8A64MkInstrEon(idxTmpReg, idxRegResult, idxRegDstIn, false); 390 } 295 391 pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxTmpReg, cOpBits - 1, false /*f64Bit*/); 296 392 pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxTmpReg, X86_EFL_OF_BIT, 1); … … 309 405 pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxTmpReg, X86_EFL_PF_BIT, 1, false /*f64Bit*/); 310 406 311 /* Calculate auxilary carry/borrow. This is related to 8-bit BCD.*/ 312 pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxRegDstIn, idxRegSrc, false /*f64Bit*/); 313 pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxTmpReg, idxRegResult, false /*f64Bit*/); 407 /* Calculate auxilary carry/borrow. This is related to 8-bit BCD. 408 General formula: ((uint32_t)(a_uResult) ^ (uint32_t)(a_uSrc) ^ (uint32_t)(a_uDst)) & X86_EFL_AF; 409 S D R 410 0 0 0 -> 0; \ 411 0 0 1 -> 1; \ regular 412 0 1 0 -> 1; / xor R, D 413 0 1 1 -> 0; / 414 1 0 0 -> 1; \ 415 1 0 1 -> 0; \ invert one of the two 416 1 1 0 -> 0; / xor not(R), D 417 1 1 1 -> 1; / 418 a_uSrc[bit 4]=0: ((uint32_t)(a_uResult) ^ (uint32_t)(a_uDst)) & X86_EFL_AF; 419 a_uSrc[bit 4]=1: ((uint32_t)~(a_uResult) ^ (uint32_t)(a_uDst)) & X86_EFL_AF; 420 */ 421 422 if (idxRegSrc != UINT8_MAX) 423 { 424 pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxRegDstIn, idxRegSrc, false /*f64Bit*/); 425 pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxTmpReg, idxRegResult, false /*f64Bit*/); 426 } 427 else if (uImmSrc & X86_EFL_AF) 428 pCodeBuf[off++] = Armv8A64MkInstrEon(idxTmpReg, idxRegDstIn, idxRegResult, false /*f64Bit*/); 429 else 430 pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg, idxRegDstIn, idxRegResult, false /*f64Bit*/); 314 431 pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxTmpReg, X86_EFL_AF_BIT, false /*f64Bit*/); 315 432 pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxTmpReg, X86_EFL_AF_BIT, 1, false /*f64Bit*/); … … 359 476 off = iemNativeEmitEFlagsForLogical(pReNative, off, idxVarEfl, cOpBits, idxRegDst); 360 477 iemNativeVarRegisterRelease(pReNative, idxVarDst); 478 return off; 479 } 480 481 482 /** 483 * The AND instruction with immediate value as right operand. 
484 */ 485 DECL_INLINE_THROW(uint32_t) 486 iemNativeEmit_and_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 487 uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits) 488 { 489 RT_NOREF(pReNative, off, idxVarDst, uImmOp, idxVarEfl, cOpBits, cImmBits); 361 490 return off; 362 491 } … … 408 537 409 538 /** 539 * The TEST instruction with immediate value as right operand. 540 */ 541 DECL_INLINE_THROW(uint32_t) 542 iemNativeEmit_test_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 543 uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits) 544 { 545 RT_NOREF(pReNative, off, idxVarDst, uImmOp, idxVarEfl, cOpBits, cImmBits); 546 return off; 547 } 548 549 550 /** 410 551 * The OR instruction will clear OF, CF and AF (latter is undefined) and 411 552 * set the other flags according to the result. … … 441 582 442 583 /** 584 * The OR instruction with immediate value as right operand. 585 */ 586 DECL_INLINE_THROW(uint32_t) 587 iemNativeEmit_or_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 588 uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits) 589 { 590 RT_NOREF(pReNative, off, idxVarDst, uImmOp, idxVarEfl, cOpBits, cImmBits); 591 return off; 592 } 593 594 595 /** 443 596 * The XOR instruction will clear OF, CF and AF (latter is undefined) and 444 597 * set the other flags according to the result. … … 469 622 off = iemNativeEmitEFlagsForLogical(pReNative, off, idxVarEfl, cOpBits, idxRegDst); 470 623 iemNativeVarRegisterRelease(pReNative, idxVarDst); 624 return off; 625 } 626 627 628 /** 629 * The XOR instruction with immediate value as right operand. 630 */ 631 DECL_INLINE_THROW(uint32_t) 632 iemNativeEmit_xor_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 633 uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits) 634 { 635 RT_NOREF(pReNative, off, idxVarDst, uImmOp, idxVarEfl, cOpBits, cImmBits); 471 636 return off; 472 637 } … … 519 684 520 685 off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegDst, 521 idxRegDstIn, idxRegSrc, false /*fInvertCarry*/ );686 idxRegDstIn, idxRegSrc, false /*fInvertCarry*/, 0); 522 687 523 688 iemNativeRegFreeTmp(pReNative, idxRegDstIn); 524 689 iemNativeVarRegisterRelease(pReNative, idxVarSrc); 525 690 iemNativeVarRegisterRelease(pReNative, idxVarDst); 691 692 #else 693 # error "port me" 694 #endif 695 return off; 696 } 697 698 699 /** 700 * The ADD instruction with immediate value as right operand. 701 */ 702 DECL_INLINE_THROW(uint32_t) 703 iemNativeEmit_add_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 704 uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits) 705 { 706 uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/); 707 708 #ifdef RT_ARCH_AMD64 709 /* On AMD64 we just use the correctly sized ADD instruction to get the right EFLAGS.SF value. 
*/ 710 PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8); 711 off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 0, idxRegDst, uImmOp); 712 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off); 713 714 iemNativeVarRegisterRelease(pReNative, idxVarDst); 715 716 off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX); 717 718 #elif defined(RT_ARCH_ARM64) 719 /* On ARM64 we'll need the two input operands as well as the result in order 720 to calculate the right flags, even if we use ADDS and translates NZCV into 721 OF, CF, ZF and SF. */ 722 uint8_t const idxRegDstIn = iemNativeRegAllocTmp(pReNative, &off); 723 PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8); 724 off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst); 725 if (cOpBits >= 32) 726 { 727 if (uImmOp <= 0xfffU) 728 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegDst, idxRegDst, uImmOp, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/); 729 else if (uImmOp <= 0xfff000U && !(uImmOp & 0xfff)) 730 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegDst, idxRegDst, uImmOp, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/, 731 true /*fShift12*/); 732 else 733 { 734 uint8_t const idxTmpImmReg = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp); 735 pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1); 736 pCodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegDst, idxRegDst, idxTmpImmReg, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/); 737 iemNativeRegFreeTmpImm(pReNative, idxTmpImmReg); 738 } 739 } 740 else 741 { 742 /* Shift the operands up so we can perform a 32-bit operation and get all four flags. */ 743 uint32_t const cShift = 32 - cOpBits; 744 uint8_t const idxTmpImmReg = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp << cShift); 745 pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2); 746 pCodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegDst, idxTmpImmReg, idxRegDstIn, false /*f64Bit*/, true /*fSetFlags*/, cShift); 747 pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegDst, idxRegDst, cShift, false /*f64Bit*/); 748 cOpBits = 32; 749 iemNativeRegFreeTmpImm(pReNative, idxTmpImmReg); 750 } 751 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off); 752 753 off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegDst, 754 idxRegDstIn, UINT8_MAX, false /*fInvertCarry*/, uImmOp); 755 756 iemNativeRegFreeTmp(pReNative, idxRegDstIn); 757 iemNativeVarRegisterRelease(pReNative, idxVarDst); 758 RT_NOREF(cImmBits); 526 759 527 760 #else … … 581 814 582 815 off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, cOpBits, idxRegDst, 583 idxRegDstIn, idxRegSrc, false /*fInvertCarry*/ );816 idxRegDstIn, idxRegSrc, false /*fInvertCarry*/, 0); 584 817 585 818 iemNativeRegFreeTmp(pReNative, idxRegDstIn); … … 593 826 #endif 594 827 iemNativeVarRegisterRelease(pReNative, idxVarEfl); 828 return off; 829 } 830 831 832 /** 833 * The ADC instruction with immediate value as right operand. 
834 */ 835 DECL_INLINE_THROW(uint32_t) 836 iemNativeEmit_adc_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 837 uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits) 838 { 839 RT_NOREF(pReNative, off, idxVarDst, uImmOp, idxVarEfl, cOpBits, cImmBits); 595 840 return off; 596 841 } … … 643 888 644 889 off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegDst, 645 idxRegDstIn, idxRegSrc, true /*fInvertCarry*/ );890 idxRegDstIn, idxRegSrc, true /*fInvertCarry*/, 0); 646 891 647 892 iemNativeRegFreeTmp(pReNative, idxRegDstIn); … … 652 897 # error "port me" 653 898 #endif 899 return off; 900 } 901 902 903 /** 904 * The SUB instruction with immediate value as right operand. 905 */ 906 DECL_INLINE_THROW(uint32_t) 907 iemNativeEmit_sub_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 908 uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits) 909 { 910 RT_NOREF(pReNative, off, idxVarDst, uImmOp, idxVarEfl, cOpBits, cImmBits); 654 911 return off; 655 912 } … … 698 955 699 956 off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegResult, 700 idxRegDst, idxRegSrc, true /*fInvertCarry*/ );957 idxRegDst, idxRegSrc, true /*fInvertCarry*/, 0); 701 958 702 959 iemNativeRegFreeTmp(pReNative, idxRegResult); … … 707 964 # error "port me" 708 965 #endif 966 return off; 967 } 968 969 970 /** 971 * The CMP instruction with immediate value as right operand. 972 */ 973 DECL_INLINE_THROW(uint32_t) 974 iemNativeEmit_cmp_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 975 uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits) 976 { 977 RT_NOREF(pReNative, off, idxVarDst, uImmOp, idxVarEfl, cOpBits, cImmBits); 709 978 return off; 710 979 } … … 762 1031 763 1032 off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, cOpBits, idxRegDst, 764 idxRegDstIn, idxRegSrc, true /*fInvertCarry*/ );1033 idxRegDstIn, idxRegSrc, true /*fInvertCarry*/, 0); 765 1034 766 1035 iemNativeRegFreeTmp(pReNative, idxRegDstIn); … … 774 1043 #endif 775 1044 iemNativeVarRegisterRelease(pReNative, idxVarEfl); 1045 return off; 1046 } 1047 1048 1049 /** 1050 * The SBB instruction with immediate value as right operand. 1051 */ 1052 DECL_INLINE_THROW(uint32_t) 1053 iemNativeEmit_sbb_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 1054 uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits) 1055 { 1056 RT_NOREF(pReNative, off, idxVarDst, uImmOp, idxVarEfl, cOpBits, cImmBits); 776 1057 return off; 777 1058 } -
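The constant-right-operand paths added to iemNativeEmitEFlagsForArithmetic above rest on the boolean identities spelled out in the new comments: once the immediate's sign bit (for OF) or bit 4 (for AF) is known, the three-operand formulas from IEM_EFL_UPDATE_STATUS_BITS_FOR_ARITHMETIC collapse to a single two-operand expression over the input destination and the result. A standalone brute-force check of those identities for the 8-bit case (plain C illustration, not VirtualBox code):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        for (unsigned uDst = 0; uDst < 256; uDst++)
            for (unsigned uSrc = 0; uSrc < 256; uSrc++)
                for (unsigned fC = 0; fC < 2; fC++) /* carry/borrow in */
                {
                    uint8_t const uResAdd = (uint8_t)(uDst + uSrc + fC);
                    uint8_t const uResSub = (uint8_t)(uDst - uSrc - fC);

                    /* OF, general forms: add/adc: ~(Dst^Src) & (Dst^Res);
                       sub/sbb/cmp: (Dst^Src) & (Dst^Res); sign bit only. */
                    unsigned const fOfAdd = ((~(uDst ^ uSrc) & (uDst ^ uResAdd)) >> 7) & 1;
                    unsigned const fOfSub = (( (uDst ^ uSrc) & (uDst ^ uResSub)) >> 7) & 1;

                    /* Constant-RHS forms from the new comments: */
                    unsigned const fOfAddImm = uSrc & 0x80
                                             ? ((uDst & ~uResAdd) >> 7) & 1  /* adc w/ top right 1 */
                                             : ((~uDst & uResAdd) >> 7) & 1; /* adc w/ top right 0 */
                    unsigned const fOfSubImm = uSrc & 0x80
                                             ? ((~uDst & uResSub) >> 7) & 1  /* sbb w/ top right 1 */
                                             : ((uDst & ~uResSub) >> 7) & 1; /* sbb w/ top right 0 */
                    assert(fOfAdd == fOfAddImm);
                    assert(fOfSub == fOfSubImm);

                    /* AF, general form: (Res ^ Src ^ Dst) bit 4, identical
                       for addition and subtraction. */
                    unsigned const fAfAdd    = ((uResAdd ^ uSrc ^ uDst) >> 4) & 1;
                    unsigned const fAfAddImm = uSrc & 0x10
                                             ? ((~uResAdd ^ uDst) >> 4) & 1  /* a_uSrc[bit 4]=1 */
                                             : (( uResAdd ^ uDst) >> 4) & 1; /* a_uSrc[bit 4]=0 */
                    assert(fAfAdd == fAfAddImm);
                }
        puts("all constant-RHS OF/AF identities hold for 8-bit add/adc and sub/sbb");
        return 0;
    }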
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp (r103737 → r103739)

@@ -918,5 +918,5 @@
 #if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
 /**
- * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
+ * @callback_method_impl{FNDBGFINFOARGVINT, tb}
  */
 static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h (r103728 → r103739)

@@ -40,5 +40,5 @@
 /** @def IEMNATIVE_WITH_TB_DEBUG_INFO
  * Enables generating internal debug info for better TB disassembly dumping. */
-#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
+#if defined(DEBUG) || defined(DOXYGEN_RUNNING) || 1
 # define IEMNATIVE_WITH_TB_DEBUG_INFO
 #endif
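The trailing "|| 1" in the hunk above force-enables IEMNATIVE_WITH_TB_DEBUG_INFO for every build type rather than just DEBUG and Doxygen runs, a common temporary toggle while the native recompiler is under active bring-up. A compilable sketch of the idiom (the demo body is hypothetical; only the #if pattern comes from the diff):

    #include <stdio.h>

    #if defined(DEBUG) || defined(DOXYGEN_RUNNING) || 1 /* "|| 1" wins regardless of build type */
    # define IEMNATIVE_WITH_TB_DEBUG_INFO
    #endif

    int main(void)
    {
    #ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
        puts("TB debug info enabled (forced by the trailing '|| 1')");
    #else
        puts("TB debug info disabled");
    #endif
        return 0;
    }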