Changeset 106199 in vbox for trunk/src/VBox
Timestamp: Oct 1, 2024 11:08:47 PM
svn:sync-xref-src-repo-rev: 164983
Location: trunk/src/VBox/VMM
Files: 3 edited
--- trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h (r106179)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h (r106199)
@@ -779,5 +779,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u8Dst, u8Imm, uEFlags, 8, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(8, 8), u8Dst, u8Imm, uEFlags); \
         IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Dst); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
@@ -810,5 +810,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, u16Imm, uEFlags, 16, 16); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(16, 16), u16Dst, u16Imm, uEFlags); \
         IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Dst); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
@@ -835,5 +835,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, u32Imm, uEFlags, 32, 32); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(32, 32), u32Dst, u32Imm, uEFlags); \
         IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Dst); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
@@ -861,5 +861,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, u64Imm, uEFlags, 64, 32); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(64, 32), u64Dst, u64Imm, uEFlags); \
         IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Dst); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
@@ -897,5 +897,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, u16Imm, uEFlags, 16, 16); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(16, 16), u16Dst, u16Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -921,5 +921,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, u32Imm, uEFlags, 32, 32); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(32, 32), u32Dst, u32Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -945,5 +945,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, u64Imm, uEFlags, 64, 32); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(64, 32), u64Dst, u64Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -4409,5 +4409,5 @@
         IEM_MC_FETCH_GREG_U8(u8Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
         IEM_MC_LOCAL_EFLAGS( uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u8Dst, u8Imm, uEFlags, 8, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(8, 8), u8Dst, u8Imm, uEFlags); \
         IEM_MC_STORE_GREG_U8(IEM_GET_MODRM_RM(pVCpu, bRm), u8Dst); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
@@ -4480,5 +4480,5 @@
         IEM_MC_FETCH_GREG_U8(u8Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
         IEM_MC_LOCAL_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u8Dst, u8Imm, uEFlags, 8, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(8, 8), u8Dst, u8Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -4507,5 +4507,5 @@
         IEM_MC_FETCH_MEM_U8(u8Dst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
         IEM_MC_LOCAL_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u8Dst, u8Imm, uEFlags, 8, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(8, 8), u8Dst, u8Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -4668,5 +4668,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, u16Imm, uEFlags, 16, 16); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(16, 16), u16Dst, u16Imm, uEFlags); \
         IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_RM(pVCpu, bRm), u16Dst); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
@@ -4694,5 +4694,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, u32Imm, uEFlags, 32, 32); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(32, 32), u32Dst, u32Imm, uEFlags); \
         IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_RM(pVCpu, bRm), u32Dst); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
@@ -4721,5 +4721,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, u64Imm, uEFlags, 64, 32); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(64, 32), u64Dst, u64Imm, uEFlags); \
         IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm), u64Dst); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
@@ -4922,5 +4922,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, u16Imm, uEFlags, 16, 16); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(16, 16), u16Dst, u16Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -4947,5 +4947,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, u32Imm, uEFlags, 32, 32); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(32, 32), u32Dst, u32Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -4972,5 +4972,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, u64Imm, uEFlags, 64, 32); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(64, 32), u64Dst, u64Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -5008,5 +5008,5 @@
         IEM_MC_FETCH_MEM_U16(u16Dst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
         IEM_MC_LOCAL_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, u16Imm, uEFlags, 16, 16); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(16, 16), u16Dst, u16Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -5036,5 +5036,5 @@
         IEM_MC_FETCH_MEM_U32(u32Dst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
         IEM_MC_LOCAL_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, u32Imm, uEFlags, 32, 32); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(32, 32), u32Dst, u32Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -5064,5 +5064,5 @@
         IEM_MC_FETCH_MEM_U64(u64Dst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
         IEM_MC_LOCAL_EFLAGS( uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, u64Imm, uEFlags, 64, 32); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(64, 32), u64Dst, u64Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -5243,5 +5243,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, (uint16_t)(int16_t)(int8_t)u8Imm, uEFlags, 16, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(16, 8), u16Dst, (uint16_t)(int16_t)(int8_t)u8Imm, uEFlags); \
         IEM_MC_STORE_GREG_U16(IEM_GET_MODRM_RM(pVCpu, bRm), u16Dst); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
@@ -5266,5 +5266,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, (uint32_t)(int32_t)(int8_t)u8Imm, uEFlags, 32, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(32, 8), u32Dst, (uint32_t)(int32_t)(int8_t)u8Imm, uEFlags); \
         IEM_MC_STORE_GREG_U32(IEM_GET_MODRM_RM(pVCpu, bRm), u32Dst); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
@@ -5290,5 +5290,5 @@
         IEM_MC_LOCAL(uint32_t, uEFlags); \
         IEM_MC_FETCH_EFLAGS(uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, (uint64_t)(int64_t)(int8_t)u8Imm, uEFlags, 64, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(64, 8), u64Dst, (uint64_t)(int64_t)(int8_t)u8Imm, uEFlags); \
         IEM_MC_STORE_GREG_U64(IEM_GET_MODRM_RM(pVCpu, bRm), u64Dst); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
@@ -5479,5 +5479,5 @@
         IEM_MC_FETCH_GREG_U16(u16Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
         IEM_MC_LOCAL_EFLAGS( uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, (uint16_t)(int16_t)(int8_t)u8Imm, uEFlags, 16, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(16, 8), u16Dst, (uint16_t)(int16_t)(int8_t)u8Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -5500,5 +5500,5 @@
         IEM_MC_FETCH_GREG_U32(u32Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
         IEM_MC_LOCAL_EFLAGS( uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, (uint32_t)(int32_t)(int8_t)u8Imm, uEFlags, 32, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(32, 8), u32Dst, (uint32_t)(int32_t)(int8_t)u8Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -5521,5 +5521,5 @@
         IEM_MC_FETCH_GREG_U64(u64Dst, IEM_GET_MODRM_RM(pVCpu, bRm)); \
         IEM_MC_LOCAL_EFLAGS( uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, (uint64_t)(int64_t)(int8_t)u8Imm, uEFlags, 64, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(64, 8), u64Dst, (uint64_t)(int64_t)(int8_t)u8Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -5557,5 +5557,5 @@
         IEM_MC_FETCH_MEM_U16(u16Dst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
         IEM_MC_LOCAL_EFLAGS( uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u16Dst, (uint16_t)(int16_t)(int8_t)u8Imm, uEFlags, 16, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(16, 8), u16Dst, (uint16_t)(int16_t)(int8_t)u8Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -5583,5 +5583,5 @@
         IEM_MC_FETCH_MEM_U32(u32Dst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
         IEM_MC_LOCAL_EFLAGS( uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u32Dst, (uint32_t)(int32_t)(int8_t)u8Imm, uEFlags, 32, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(32, 8), u32Dst, (uint32_t)(int32_t)(int8_t)u8Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
@@ -5609,5 +5609,5 @@
         IEM_MC_FETCH_MEM_U64(u64Dst, pVCpu->iem.s.iEffSeg, GCPtrEffDst); \
         IEM_MC_LOCAL_EFLAGS( uEFlags); \
-        IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl), u64Dst, (uint64_t)(int64_t)(int8_t)u8Imm, uEFlags, 64, 8); \
+        IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,a_InsNm,_r_i_efl)IEM_TEMPL_ARG_2(64, 8), u64Dst, (uint64_t)(int64_t)(int8_t)u8Imm, uEFlags); \
         IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
     } IEM_MC_NATIVE_ELSE() { \
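All 28 hunks in this file make the same substitution, so one expansion shows the shape of the change. A minimal sketch of what the preprocessor produces, assuming the IEM_MC_NATIVE_EMIT_n macros ultimately invoke their first argument as a function (the real macros also pass recompiler state through the call); shown for a_InsNm = and in the 8-bit case:

// r106179: the operand/immediate widths travel as two trailing runtime arguments.
//   IEM_MC_NATIVE_EMIT_5(RT_CONCAT3(iemNativeEmit_,and,_r_i_efl), u8Dst, u8Imm, uEFlags, 8, 8);
//     --> iemNativeEmit_and_r_i_efl(..., u8Dst, u8Imm, uEFlags, 8, 8);
// r106199: IEM_TEMPL_ARG_2(8, 8) expands to <8,8>, pasted directly after the
// callee name, so the widths become C++ template arguments instead.
//   IEM_MC_NATIVE_EMIT_3(RT_CONCAT3(iemNativeEmit_,and,_r_i_efl)IEM_TEMPL_ARG_2(8, 8), u8Dst, u8Imm, uEFlags);
//     --> iemNativeEmit_and_r_i_efl<8,8>(..., u8Dst, u8Imm, uEFlags);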
--- trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h (r106198)
+++ trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h (r106199)
@@ -873,5 +873,5 @@
     iemNativeVarRegisterRelease(pReNative, idxVarSrc);
 
-    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst, true /*fNativeFlags*/);
+    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
 #else
 # error "Port me"
@@ -885,7 +885,7 @@
  * The AND instruction with immediate value as right operand.
  */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmit_and_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                          uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
+template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_and_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
 {
     uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
@@ -893,6 +893,6 @@
     /* On AMD64 we just use the correctly sized AND instruction harvest the EFLAGS. */
     PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
-    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 4, idxRegDst, uImmOp);
-    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
-
-    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
+    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 4, idxRegDst, uImmOp);
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+
+    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
@@ -902,13 +902,13 @@
        course the immediate variant when possible to save a register load. */
     uint32_t uImmSizeLen, uImmRotations;
-    if (   cOpBits > 32
+    if (   a_cOpBits > 32
         ? Armv8A64ConvertMask64ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations)
         : Armv8A64ConvertMask32ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations))
     {
         PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        if (cOpBits >= 32)
-            pCodeBuf[off++] = Armv8A64MkInstrAndsImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
+        if (a_cOpBits >= 32)
+            pCodeBuf[off++] = Armv8A64MkInstrAndsImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
         else
-            pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
+            pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
     }
     else
@@ -916,15 +916,11 @@
         uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
         PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        if (cOpBits >= 32)
-            pCodeBuf[off++] = Armv8A64MkInstrAnds(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
+        if RT_CONSTEXPR_IF(a_cOpBits >= 32)
+            pCodeBuf[off++] = Armv8A64MkInstrAnds(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
         else
-            pCodeBuf[off++] = Armv8A64MkInstrAnd(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
+            pCodeBuf[off++] = Armv8A64MkInstrAnd(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
         iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
     }
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
 
-    if (cOpBits >= 32)
-        off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
-    else
-        off = iemNativeEmitEFlagsForLogical<true>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
-    RT_NOREF_PV(cImmBits);
+    off = iemNativeEmitEFlagsForLogical<a_cOpBits < 32>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
@@ -989,7 +985,7 @@
 * The TEST instruction with immediate value as right operand.
 */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmit_test_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                           uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
+template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_test_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
 {
     uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
@@ -997,9 +993,9 @@
     /* On AMD64 we just use the correctly sized AND instruction harvest the EFLAGS. */
     PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
-    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0xf6, 0xcc, 0xf7, cOpBits, cImmBits, 0, idxRegDst, uImmOp);
+    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0xf6, 0xcc, 0xf7, a_cOpBits, a_cImmBits, 0, idxRegDst, uImmOp);
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     iemNativeVarRegisterRelease(pReNative, idxVarDst);
 
-    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, UINT8_MAX);
+    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, a_cOpBits, UINT8_MAX);
 
 #elif defined(RT_ARCH_ARM64)
@@ -1009,12 +1005,12 @@
     uint8_t const idxRegResult = iemNativeRegAllocTmp(pReNative, &off);
     uint32_t uImmSizeLen, uImmRotations;
-    if (   cOpBits > 32
+    if (   a_cOpBits > 32
         ? Armv8A64ConvertMask64ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations)
         : Armv8A64ConvertMask32ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations))
     {
         PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        if (cOpBits >= 32)
-            pCodeBuf[off++] = Armv8A64MkInstrAndsImm(idxRegResult, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
+        if RT_CONSTEXPR_IF(a_cOpBits >= 32)
+            pCodeBuf[off++] = Armv8A64MkInstrAndsImm(idxRegResult, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
         else
-            pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegResult, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
+            pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegResult, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
     }
@@ -1023,8 +1019,8 @@
         uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
         PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        if (cOpBits >= 32)
-            pCodeBuf[off++] = Armv8A64MkInstrAnds(idxRegResult, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
+        if RT_CONSTEXPR_IF(a_cOpBits >= 32)
+            pCodeBuf[off++] = Armv8A64MkInstrAnds(idxRegResult, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
         else
-            pCodeBuf[off++] = Armv8A64MkInstrAnd(idxRegResult, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
+            pCodeBuf[off++] = Armv8A64MkInstrAnd(idxRegResult, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
         iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
     }
@@ -1032,9 +1028,5 @@
     iemNativeVarRegisterRelease(pReNative, idxVarDst);
 
-    if (cOpBits >= 32)
-        off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegResult);
-    else
-        off = iemNativeEmitEFlagsForLogical<true>(pReNative, off, idxVarEfl, cOpBits, idxRegResult);
+    off = iemNativeEmitEFlagsForLogical<a_cOpBits < 32>(pReNative, off, idxVarEfl, a_cOpBits, idxRegResult);
 
     iemNativeRegFreeTmp(pReNative, idxRegResult);
-    RT_NOREF_PV(cImmBits);
@@ -1086,7 +1078,7 @@
 * The OR instruction with immediate value as right operand.
 */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmit_or_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                         uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
+template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_or_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
 {
     uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
@@ -1094,6 +1086,6 @@
     /* On AMD64 we just use the correctly sized OR instruction harvest the EFLAGS. */
     PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
-    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 1, idxRegDst, uImmOp);
-    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
-
-    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
+    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 1, idxRegDst, uImmOp);
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+
+    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
@@ -1103,10 +1095,10 @@
        course the immediate variant when possible to save a register load. */
     uint32_t uImmSizeLen, uImmRotations;
-    if (   cOpBits > 32
+    if (   a_cOpBits > 32
         ? Armv8A64ConvertMask64ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations)
         : Armv8A64ConvertMask32ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations))
     {
         PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        pCodeBuf[off++] = Armv8A64MkInstrOrrImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
+        pCodeBuf[off++] = Armv8A64MkInstrOrrImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
     }
     else
@@ -1114,9 +1106,8 @@
         uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
         PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        pCodeBuf[off++] = Armv8A64MkInstrOrr(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
+        pCodeBuf[off++] = Armv8A64MkInstrOrr(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
         iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
     }
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
 
-    off = iemNativeEmitEFlagsForLogical<true>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
-    RT_NOREF_PV(cImmBits);
+    off = iemNativeEmitEFlagsForLogical<true>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
@@ -1169,7 +1160,7 @@
 * The XOR instruction with immediate value as right operand.
 */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmit_xor_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                          uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
+template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_xor_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
 {
     uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
@@ -1177,6 +1168,6 @@
     /* On AMD64 we just use the correctly sized XOR instruction harvest the EFLAGS. */
     PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
-    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 6, idxRegDst, uImmOp);
-    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
-
-    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
+    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 6, idxRegDst, uImmOp);
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+
+    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
@@ -1186,10 +1177,10 @@
        course the immediate variant when possible to save a register load. */
     uint32_t uImmSizeLen, uImmRotations;
-    if (   cOpBits > 32
+    if (   a_cOpBits > 32
        ? Armv8A64ConvertMask64ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations)
        : Armv8A64ConvertMask32ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations))
     {
         PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        pCodeBuf[off++] = Armv8A64MkInstrEorImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
+        pCodeBuf[off++] = Armv8A64MkInstrEorImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
     }
     else
@@ -1197,9 +1188,8 @@
         uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
         PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-        pCodeBuf[off++] = Armv8A64MkInstrEor(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
+        pCodeBuf[off++] = Armv8A64MkInstrEor(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
         iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
     }
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
 
-    off = iemNativeEmitEFlagsForLogical<true>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
-    RT_NOREF_PV(cImmBits);
+    off = iemNativeEmitEFlagsForLogical<true>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
@@ -1280,7 +1270,7 @@
 * The ADD instruction with immediate value as right operand.
 */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmit_add_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                          uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
+template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_add_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
 {
     uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
@@ -1289,4 +1279,4 @@
     /* On AMD64 we just use the correctly sized ADD instruction to get the right EFLAGS.SF value. */
     PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
-    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 0, idxRegDst, uImmOp);
+    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 0, idxRegDst, uImmOp);
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
@@ -1303,10 +1293,11 @@
     PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
     off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst);
-    if (cOpBits >= 32)
+    if RT_CONSTEXPR_IF(a_cOpBits >= 32)
     {
         if (uImmOp <= 0xfffU)
-            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegDst, idxRegDst, uImmOp, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
+            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegDst, idxRegDst, uImmOp, a_cOpBits > 32 /*f64Bit*/,
+                                                       true /*fSetFlags*/);
         else if (uImmOp <= 0xfff000U && !(uImmOp & 0xfff))
-            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegDst, idxRegDst, uImmOp >> 12, cOpBits > 32 /*f64Bit*/,
+            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegDst, idxRegDst, uImmOp >> 12, a_cOpBits > 32 /*f64Bit*/,
                                                        true /*fSetFlags*/, true /*fShift12*/);
         else
@@ -1314,4 +1305,5 @@
             uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
             pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-            pCodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
+            pCodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/,
+                                                    true /*fSetFlags*/);
             iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
@@ -1321,14 +1313,13 @@
     {
         /* Shift the operands up so we can perform a 32-bit operation and get all four flags. */
-        uint32_t const cShift = 32 - cOpBits;
+        uint32_t const cShift = 32 - a_cOpBits;
         uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp << cShift);
         pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
         pCodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegDst, idxRegTmpImm, idxRegDstIn, false /*f64Bit*/, true /*fSetFlags*/, cShift);
         pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegDst, idxRegDst, cShift, false /*f64Bit*/);
-        cOpBits = 32;
         iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
     }
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
 
-    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegDst,
+    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, a_cOpBits > 32 ? a_cOpBits : 32, idxRegDst,
                                            idxRegDstIn, UINT8_MAX, false /*fInvertCarry*/, uImmOp);
@@ -1336,3 +1327,2 @@
     iemNativeRegFreeTmp(pReNative, idxRegDstIn);
     iemNativeVarRegisterRelease(pReNative, idxVarDst);
-    RT_NOREF(cImmBits);
@@ -1413,7 +1403,7 @@
 * The ADC instruction with immediate value as right operand.
 */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmit_adc_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                          uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
+template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_adc_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
 {
     uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
@@ -1428,4 +1418,4 @@
     pCodeBuf[off++] = X86_EFL_CF_BIT;
 
-    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 2, idxRegDst, uImmOp);
+    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 2, idxRegDst, uImmOp);
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
@@ -1445,6 +1435,6 @@
     pCodeBuf[off++] = Armv8A64MkInstrRmif(idxRegEfl, (X86_EFL_CF_BIT - 1) & 63, RT_BIT_32(1) /*fMask=C*/);
     off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst);
-    if (cOpBits >= 32)
-        pCodeBuf[off++] = Armv8A64MkInstrAdcs(idxRegDst, idxRegDst, idxRegImm, cOpBits > 32 /*f64Bit*/);
+    if RT_CONSTEXPR_IF(a_cOpBits >= 32)
+        pCodeBuf[off++] = Armv8A64MkInstrAdcs(idxRegDst, idxRegDst, idxRegImm, a_cOpBits > 32 /*f64Bit*/);
     else
     {
@@ -1452,4 +1442,4 @@
            doesn't work. So, we have to calculate carry & overflow manually. */
         pCodeBuf[off++] = Armv8A64MkInstrAdc(idxRegDst, idxRegDst, idxRegImm, false /*f64Bit*/);
-        pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegDst, cOpBits > 8); /* NZ are okay, CV aren't.*/
+        pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegDst, a_cOpBits > 8); /* NZ are okay, CV aren't.*/
     }
@@ -1458,10 +1448,9 @@
     iemNativeRegFreeTmp(pReNative, idxRegImm);
 
-    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, cOpBits, idxRegDst,
+    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, a_cOpBits, idxRegDst,
                                            idxRegDstIn, UINT8_MAX, false /*fInvertCarry*/, uImmOp);
 
     iemNativeRegFreeTmp(pReNative, idxRegDstIn);
-    if (cOpBits < 32)
-        off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegDst, RT_BIT_32(cOpBits) - 1U);
+    if RT_CONSTEXPR_IF(a_cOpBits < 32)
+        off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegDst, RT_BIT_32(a_cOpBits) - 1U);
     iemNativeVarRegisterRelease(pReNative, idxVarDst);
-    RT_NOREF(cImmBits);
@@ -1537,7 +1526,7 @@
 * The SUB instruction with immediate value as right operand.
 */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmit_sub_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                          uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
+template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_sub_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
 {
     uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
@@ -1546,4 +1535,4 @@
     /* On AMD64 we just use the correctly sized SUB instruction to get the right EFLAGS.SF value. */
     PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
-    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 5, idxRegDst, uImmOp);
+    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 5, idxRegDst, uImmOp);
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
@@ -1560,10 +1549,11 @@
     PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
     off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst);
-    if (cOpBits >= 32)
+    if RT_CONSTEXPR_IF(a_cOpBits >= 32)
     {
         if (uImmOp <= 0xfffU)
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegDst, idxRegDst, uImmOp, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
+            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegDst, idxRegDst, uImmOp, a_cOpBits > 32 /*f64Bit*/,
+                                                       true /*fSetFlags*/);
         else if (uImmOp <= 0xfff000U && !(uImmOp & 0xfff))
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegDst, idxRegDst, uImmOp >> 12, cOpBits > 32 /*f64Bit*/,
+            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegDst, idxRegDst, uImmOp >> 12, a_cOpBits > 32 /*f64Bit*/,
                                                        true /*fSetFlags*/, true /*fShift12*/);
         else
@@ -1571,4 +1561,5 @@
             uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
             pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-            pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
+            pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/,
+                                                    true /*fSetFlags*/);
             iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
@@ -1578,5 +1569,5 @@
     {
         /* Shift the operands up so we can perform a 32-bit operation and get all four flags. */
-        uint32_t const cShift = 32 - cOpBits;
+        uint32_t const cShift = 32 - a_cOpBits;
         uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
         pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
@@ -1585,9 +1576,8 @@
         pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegDstIn, idxRegDstIn, cShift, false /*f64Bit*/);
         pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegDst, idxRegDst, cShift, false /*f64Bit*/);
-        cOpBits = 32;
         iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
     }
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
 
-    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegDst,
+    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, a_cOpBits > 32 ? a_cOpBits : 32, idxRegDst,
                                            idxRegDstIn, UINT8_MAX, true /*fInvertCarry*/, uImmOp);
@@ -1595,3 +1586,2 @@
     iemNativeRegFreeTmp(pReNative, idxRegDstIn);
     iemNativeVarRegisterRelease(pReNative, idxVarDst);
-    RT_NOREF(cImmBits);
@@ -1662,7 +1651,7 @@
 * The CMP instruction with immediate value as right operand.
 */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmit_cmp_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                          uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
+template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_cmp_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
 {
     uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
@@ -1671,4 +1660,4 @@
     /* On AMD64 we just use the correctly sized CMP instruction to get the right EFLAGS.SF value. */
     PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
-    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 7, idxRegDst, uImmOp);
+    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 7, idxRegDst, uImmOp);
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
@@ -1684,10 +1673,11 @@
     uint8_t const idxRegResult = iemNativeRegAllocTmp(pReNative, &off);
     PIEMNATIVEINSTR pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
-    if (cOpBits >= 32)
+    if RT_CONSTEXPR_IF(a_cOpBits >= 32)
     {
         if (uImmOp <= 0xfffU)
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegResult, idxRegDst, uImmOp, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
+            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegResult, idxRegDst, uImmOp, a_cOpBits > 32 /*f64Bit*/,
+                                                       true /*fSetFlags*/);
         else if (uImmOp <= 0xfff000U && !(uImmOp & 0xfff))
-            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegResult, idxRegDst, uImmOp >> 12, cOpBits > 32 /*f64Bit*/,
+            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegResult, idxRegDst, uImmOp >> 12, a_cOpBits > 32 /*f64Bit*/,
                                                        true /*fSetFlags*/, true /*fShift12*/);
         else
@@ -1695,4 +1685,5 @@
             uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
             pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
-            pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegResult, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
+            pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegResult, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/,
+                                                    true /*fSetFlags*/);
             iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
@@ -1702,5 +1693,5 @@
     {
         /* Shift the operands up so we can perform a 32-bit operation and get all four flags. */
-        uint32_t const cShift = 32 - cOpBits;
+        uint32_t const cShift = 32 - a_cOpBits;
         uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
         pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
@@ -1708,9 +1699,8 @@
         pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegResult, idxRegResult, idxRegTmpImm, false /*f64Bit*/, true /*fSetFlags*/, cShift);
         pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegResult, idxRegResult, cShift, false /*f64Bit*/);
-        cOpBits = 32;
         iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
     }
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
 
-    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegResult,
+    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, a_cOpBits > 32 ? a_cOpBits : 32, idxRegResult,
                                            idxRegDst, UINT8_MAX, true /*fInvertCarry*/, uImmOp);
@@ -1718,3 +1708,2 @@
     iemNativeRegFreeTmp(pReNative, idxRegResult);
     iemNativeVarRegisterRelease(pReNative, idxVarDst);
-    RT_NOREF(cImmBits);
@@ -1796,7 +1785,7 @@
 * The SBB instruction with immediate value as right operand.
 */
-DECL_INLINE_THROW(uint32_t)
-iemNativeEmit_sbb_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                          uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
+template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmit_sbb_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
 {
     uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
@@ -1811,4 +1800,4 @@
     pCodeBuf[off++] = X86_EFL_CF_BIT;
 
-    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 3, idxRegDst, uImmOp);
+    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 3, idxRegDst, uImmOp);
     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
@@ -1830,6 +1819,6 @@
     pCodeBuf[off++] = ARMV8_A64_INSTR_CFINV;
     off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst);
-    if (cOpBits >= 32)
-        pCodeBuf[off++] = Armv8A64MkInstrSbcs(idxRegDst, idxRegDst, idxRegImm, cOpBits > 32 /*f64Bit*/);
+    if RT_CONSTEXPR_IF(a_cOpBits >= 32)
+        pCodeBuf[off++] = Armv8A64MkInstrSbcs(idxRegDst, idxRegDst, idxRegImm, a_cOpBits > 32 /*f64Bit*/);
     else
     {
@@ -1837,4 +1826,4 @@
           doesn't work. So, we have to calculate carry & overflow manually. */
         pCodeBuf[off++] = Armv8A64MkInstrSbc(idxRegDst, idxRegDst, idxRegImm, false /*f64Bit*/);
-        pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegDst, cOpBits > 8); /* NZ are okay, CV aren't.*/
+        pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegDst, a_cOpBits > 8); /* NZ are okay, CV aren't.*/
     }
@@ -1843,10 +1832,9 @@
     iemNativeRegFreeTmp(pReNative, idxRegImm);
 
-    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, cOpBits, idxRegDst,
+    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, a_cOpBits, idxRegDst,
                                            idxRegDstIn, UINT8_MAX, true /*fInvertCarry*/, uImmOp);
 
     iemNativeRegFreeTmp(pReNative, idxRegDstIn);
-    if (cOpBits < 32)
-        off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegDst, RT_BIT_32(cOpBits) - 1U);
+    if RT_CONSTEXPR_IF(a_cOpBits < 32)
+        off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegDst, RT_BIT_32(a_cOpBits) - 1U);
     iemNativeVarRegisterRelease(pReNative, idxVarDst);
-    RT_NOREF(cImmBits);
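The payoff on the emitter side is that every width-dependent branch above becomes decidable at compile time. RT_CONSTEXPR_IF presumably maps to C++17 `if constexpr` where available; that mapping and the helper definitions below are assumptions for illustration, mirroring the masking step in the ADC/SBB emitters. A minimal self-contained sketch (C++17, hypothetical function name iemMaskResult):

#include <cstdint>

// Assumed stand-ins for the IPRT definitions used in the diff above.
#define RT_CONSTEXPR_IF(a_Expr) constexpr (a_Expr)  /* 'if RT_CONSTEXPR_IF(x)' reads as 'if constexpr (x)' */
#define RT_BIT_32(a_iBit)       (UINT32_C(1) << (a_iBit))

// With the width as a template parameter, the masking compiles away entirely
// for 32/64-bit operands; the discarded branch is never instantiated, so
// RT_BIT_32(a_cOpBits) is never formed with a_cOpBits >= 32.
template<uint8_t const a_cOpBits>
uint32_t iemMaskResult(uint32_t uResult)
{
    if RT_CONSTEXPR_IF(a_cOpBits < 32)
        return uResult & (RT_BIT_32(a_cOpBits) - 1U);
    else
        return uResult;
}

template uint32_t iemMaskResult<8>(uint32_t);   // instantiates only the AND path
template uint32_t iemMaskResult<32>(uint32_t);  // instantiates only the pass-through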
--- trunk/src/VBox/VMM/include/IEMInternal.h (r106192)
+++ trunk/src/VBox/VMM/include/IEMInternal.h (r106199)
@@ -352,4 +352,13 @@
 # define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) (a_pfnFallback)
 #endif
 
+/** @name Helpers for passing C++ template arguments to an
+ *        IEM_MC_NATIVE_EMIT_3/4/5 style macro.
+ * @{
+ */
+#define IEM_TEMPL_ARG_1(a1)         <a1>
+#define IEM_TEMPL_ARG_2(a1, a2)     <a1,a2>
+#define IEM_TEMPL_ARG_3(a1, a2, a3) <a1,a2,a3>
+/** @} */
+
 
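Why these helpers must exist at all: a bare <8,8> cannot be passed through a function-style macro, because the comma between the template arguments would be parsed as a macro-argument separator. Packing the list into a parenthesized helper invocation sidesteps that. A self-contained sketch, where the EMIT macro bodies and the emitOld/emitNew names are simplified stand-ins rather than the real IEM definitions:

#include <cstdint>
#include <cstdio>

// Simplified stand-ins: the real IEM_MC_NATIVE_EMIT_3/5 also pass the
// recompiler state to the emitter.
#define IEM_MC_NATIVE_EMIT_5(a_fn, a0, a1, a2, a3, a4)  a_fn(a0, a1, a2, a3, a4)
#define IEM_MC_NATIVE_EMIT_3(a_fn, a0, a1, a2)          a_fn(a0, a1, a2)

// The helpers added above.
#define IEM_TEMPL_ARG_1(a1)         <a1>
#define IEM_TEMPL_ARG_2(a1, a2)     <a1,a2>
#define IEM_TEMPL_ARG_3(a1, a2, a3) <a1,a2,a3>

// Old-style emitter: widths arrive as runtime values.
static void emitOld(unsigned uDst, unsigned uImm, unsigned fEfl, uint8_t cOpBits, uint8_t cImmBits)
{
    std::printf("old: %u-bit op, %u-bit imm\n", (unsigned)cOpBits, (unsigned)cImmBits);
    (void)uDst; (void)uImm; (void)fEfl;
}

// New-style emitter: widths are template parameters, usable in 'if constexpr'.
template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
static void emitNew(unsigned uDst, unsigned uImm, unsigned fEfl)
{
    std::printf("new: %u-bit op, %u-bit imm\n", (unsigned)a_cOpBits, (unsigned)a_cImmBits);
    (void)uDst; (void)uImm; (void)fEfl;
}

int main()
{
    IEM_MC_NATIVE_EMIT_5(emitOld, 0x11u, 0x0fu, 0u, 8, 8);
    // IEM_TEMPL_ARG_2's own parentheses shield the comma in <8,8>, so the
    // whole token sequence 'emitNew<8,8>' lands in the macro's first slot.
    IEM_MC_NATIVE_EMIT_3(emitNew IEM_TEMPL_ARG_2(8, 8), 0x11u, 0x0fu, 0u);
    return 0;
}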