VirtualBox

Ignore:
Timestamp:
Oct 1, 2024 11:08:47 PM (4 months ago)
Author:
vboxsync
Message:

VMM/IEM: Refactored the xxxxx_r_i_efl functions to take the constant arguments (cOpBits & cImmBits) as template arguments. Fixed some arm build issues from previous commit. bugref:10720

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h

    r106198 r106199  
    873873    iemNativeVarRegisterRelease(pReNative, idxVarSrc);
    874874
    875     off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst, true /*fNativeFlags*/);
     875    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
    876876#else
    877877# error "Port me"
     
    885885 * The AND instruction with immediate value as right operand.
    886886 */
    887 DECL_INLINE_THROW(uint32_t)
    888 iemNativeEmit_and_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    889                           uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
     887template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
     888DECL_INLINE_THROW(uint32_t)
     889iemNativeEmit_and_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
    890890{
    891891    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
     
    893893    /* On AMD64 we just use the correctly sized AND instruction harvest the EFLAGS. */
    894894    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    895     off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 4, idxRegDst, uImmOp);
    896     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    897 
    898     off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
     895    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 4, idxRegDst, uImmOp);
     896    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     897
     898    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
    899899
    900900#elif defined(RT_ARCH_ARM64)
     
    902902       course the immediate variant when possible to save a register load. */
    903903    uint32_t uImmSizeLen, uImmRotations;
    904     if (  cOpBits > 32
     904    if (  a_cOpBits > 32
    905905        ? Armv8A64ConvertMask64ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations)
    906906        : Armv8A64ConvertMask32ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations))
    907907    {
    908908        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    909         if (cOpBits >= 32)
    910             pCodeBuf[off++] = Armv8A64MkInstrAndsImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
     909        if (a_cOpBits >= 32)
     910            pCodeBuf[off++] = Armv8A64MkInstrAndsImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
    911911        else
    912             pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
     912            pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
    913913    }
    914914    else
     
    916916        uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
    917917        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    918         if (cOpBits >= 32)
    919             pCodeBuf[off++] = Armv8A64MkInstrAnds(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
     918        if RT_CONSTEXPR_IF(a_cOpBits >= 32)
     919            pCodeBuf[off++] = Armv8A64MkInstrAnds(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
    920920        else
    921             pCodeBuf[off++] = Armv8A64MkInstrAnd(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
     921            pCodeBuf[off++] = Armv8A64MkInstrAnd(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
    922922        iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
    923923    }
    924924    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    925925
    926     if (cOpBits >= 32)
    927         off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
    928     else
    929         off = iemNativeEmitEFlagsForLogical<true>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
    930     RT_NOREF_PV(cImmBits);
     926    off = iemNativeEmitEFlagsForLogical<a_cOpBits < 32>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
    931927
    932928#else
     
    989985 * The TEST instruction with immediate value as right operand.
    990986 */
    991 DECL_INLINE_THROW(uint32_t)
    992 iemNativeEmit_test_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    993                           uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
     987template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
     988DECL_INLINE_THROW(uint32_t)
     989iemNativeEmit_test_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
    994990{
    995991    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
     
    997993    /* On AMD64 we just use the correctly sized AND instruction harvest the EFLAGS. */
    998994    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    999     off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0xf6, 0xcc, 0xf7, cOpBits, cImmBits, 0, idxRegDst, uImmOp);
     995    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0xf6, 0xcc, 0xf7, a_cOpBits, a_cImmBits, 0, idxRegDst, uImmOp);
    1000996    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    1001997    iemNativeVarRegisterRelease(pReNative, idxVarDst);
    1002998
    1003     off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, UINT8_MAX);
     999    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, a_cOpBits, UINT8_MAX);
    10041000
    10051001#elif defined(RT_ARCH_ARM64)
     
    10091005    uint8_t const         idxRegResult = iemNativeRegAllocTmp(pReNative, &off);
    10101006    uint32_t uImmSizeLen, uImmRotations;
    1011     if (  cOpBits > 32
     1007    if (  a_cOpBits > 32
    10121008        ? Armv8A64ConvertMask64ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations)
    10131009        : Armv8A64ConvertMask32ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations))
    10141010    {
    10151011        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    1016         if (cOpBits >= 32)
    1017             pCodeBuf[off++] = Armv8A64MkInstrAndsImm(idxRegResult, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
     1012        if RT_CONSTEXPR_IF(a_cOpBits >= 32)
     1013            pCodeBuf[off++] = Armv8A64MkInstrAndsImm(idxRegResult, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
    10181014        else
    1019             pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegResult, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
     1015            pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegResult, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
    10201016    }
    10211017    else
     
    10231019        uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
    10241020        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    1025         if (cOpBits >= 32)
    1026             pCodeBuf[off++] = Armv8A64MkInstrAnds(idxRegResult, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
     1021        if RT_CONSTEXPR_IF(a_cOpBits >= 32)
     1022            pCodeBuf[off++] = Armv8A64MkInstrAnds(idxRegResult, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
    10271023        else
    1028             pCodeBuf[off++] = Armv8A64MkInstrAnd(idxRegResult, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
     1024            pCodeBuf[off++] = Armv8A64MkInstrAnd(idxRegResult, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
    10291025        iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
    10301026    }
     
    10321028    iemNativeVarRegisterRelease(pReNative, idxVarDst);
    10331029
    1034     if (cOpBits >= 32)
    1035         off = iemNativeEmitEFlagsForLogical<>(pReNative, off, idxVarEfl, cOpBits, idxRegResult);
    1036     else
    1037         off = iemNativeEmitEFlagsForLogical<>(pReNative, off, idxVarEfl, cOpBits, idxRegResult);
     1030    off = iemNativeEmitEFlagsForLogical<a_cOpBits < 32>(pReNative, off, idxVarEfl, a_cOpBits, idxRegResult);
    10381031
    10391032    iemNativeRegFreeTmp(pReNative, idxRegResult);
    1040     RT_NOREF_PV(cImmBits);
    10411033
    10421034#else
     
    10861078 * The OR instruction with immediate value as right operand.
    10871079 */
    1088 DECL_INLINE_THROW(uint32_t)
    1089 iemNativeEmit_or_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    1090                           uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
     1080template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
     1081DECL_INLINE_THROW(uint32_t)
     1082iemNativeEmit_or_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
    10911083{
    10921084    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
     
    10941086    /* On AMD64 we just use the correctly sized OR instruction harvest the EFLAGS. */
    10951087    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    1096     off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 1, idxRegDst, uImmOp);
    1097     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    1098 
    1099     off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
     1088    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 1, idxRegDst, uImmOp);
     1089    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1090
     1091    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
    11001092
    11011093#elif defined(RT_ARCH_ARM64)
     
    11031095       course the immediate variant when possible to save a register load.  */
    11041096    uint32_t uImmSizeLen, uImmRotations;
    1105     if (  cOpBits > 32
     1097    if (  a_cOpBits > 32
    11061098        ? Armv8A64ConvertMask64ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations)
    11071099        : Armv8A64ConvertMask32ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations))
    11081100    {
    11091101        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    1110         pCodeBuf[off++] = Armv8A64MkInstrOrrImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
     1102        pCodeBuf[off++] = Armv8A64MkInstrOrrImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
    11111103    }
    11121104    else
     
    11141106        uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
    11151107        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    1116         pCodeBuf[off++] = Armv8A64MkInstrOrr(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
     1108        pCodeBuf[off++] = Armv8A64MkInstrOrr(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
    11171109        iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
    11181110    }
    11191111    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    11201112
    1121     off = iemNativeEmitEFlagsForLogical<true>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
    1122     RT_NOREF_PV(cImmBits);
     1113    off = iemNativeEmitEFlagsForLogical<true>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
    11231114
    11241115#else
     
    11691160 * The XOR instruction with immediate value as right operand.
    11701161 */
    1171 DECL_INLINE_THROW(uint32_t)
    1172 iemNativeEmit_xor_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    1173                           uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
     1162template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
     1163DECL_INLINE_THROW(uint32_t)
     1164iemNativeEmit_xor_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
    11741165{
    11751166    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
     
    11771168    /* On AMD64 we just use the correctly sized XOR instruction harvest the EFLAGS. */
    11781169    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    1179     off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 6, idxRegDst, uImmOp);
    1180     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    1181 
    1182     off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
     1170    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 6, idxRegDst, uImmOp);
     1171    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1172
     1173    off = iemNativeEmitEFlagsForLogical<false>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
    11831174
    11841175#elif defined(RT_ARCH_ARM64)
     
    11861177       course the immediate variant when possible to save a register load.  */
    11871178    uint32_t uImmSizeLen, uImmRotations;
    1188     if (  cOpBits > 32
     1179    if (  a_cOpBits > 32
    11891180        ? Armv8A64ConvertMask64ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations)
    11901181        : Armv8A64ConvertMask32ToImmRImmS(uImmOp, &uImmSizeLen, &uImmRotations))
    11911182    {
    11921183        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    1193         pCodeBuf[off++] = Armv8A64MkInstrEorImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, cOpBits > 32 /*f64Bit*/);
     1184        pCodeBuf[off++] = Armv8A64MkInstrEorImm(idxRegDst, idxRegDst, uImmSizeLen, uImmRotations, a_cOpBits > 32 /*f64Bit*/);
    11941185    }
    11951186    else
     
    11971188        uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
    11981189        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    1199         pCodeBuf[off++] = Armv8A64MkInstrEor(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/);
     1190        pCodeBuf[off++] = Armv8A64MkInstrEor(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/);
    12001191        iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
    12011192    }
    12021193    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    12031194
    1204     off = iemNativeEmitEFlagsForLogical<true>(pReNative, off, idxVarEfl, cOpBits, idxRegDst);
    1205     RT_NOREF_PV(cImmBits);
     1195    off = iemNativeEmitEFlagsForLogical<true>(pReNative, off, idxVarEfl, a_cOpBits, idxRegDst);
    12061196
    12071197#else
     
    12801270 * The ADD instruction with immediate value as right operand.
    12811271 */
    1282 DECL_INLINE_THROW(uint32_t)
    1283 iemNativeEmit_add_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    1284                           uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
     1272template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
     1273DECL_INLINE_THROW(uint32_t)
     1274iemNativeEmit_add_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
    12851275{
    12861276    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
     
    12891279    /* On AMD64 we just use the correctly sized ADD instruction to get the right EFLAGS.SF value. */
    12901280    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    1291     off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 0, idxRegDst, uImmOp);
     1281    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 0, idxRegDst, uImmOp);
    12921282    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    12931283
     
    13031293    PIEMNATIVEINSTR pCodeBuf     = iemNativeInstrBufEnsure(pReNative, off, 8);
    13041294    off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst);
    1305     if (cOpBits >= 32)
     1295    if RT_CONSTEXPR_IF(a_cOpBits >= 32)
    13061296    {
    13071297        if (uImmOp <= 0xfffU)
    1308             pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegDst, idxRegDst, uImmOp, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
     1298            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegDst, idxRegDst, uImmOp, a_cOpBits > 32 /*f64Bit*/,
     1299                                                       true /*fSetFlags*/);
    13091300        else if (uImmOp <= 0xfff000U && !(uImmOp & 0xfff))
    1310             pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegDst, idxRegDst, uImmOp >> 12, cOpBits > 32 /*f64Bit*/,
     1301            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegDst, idxRegDst, uImmOp >> 12, a_cOpBits > 32 /*f64Bit*/,
    13111302                                                       true /*fSetFlags*/, true /*fShift12*/);
    13121303        else
     
    13141305            uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
    13151306            pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    1316             pCodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
     1307            pCodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/,
     1308                                                    true /*fSetFlags*/);
    13171309            iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
    13181310        }
     
    13211313    {
    13221314        /* Shift the operands up so we can perform a 32-bit operation and get all four flags. */
    1323         uint32_t const cShift = 32 - cOpBits;
     1315        uint32_t const cShift = 32 - a_cOpBits;
    13241316        uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp << cShift);
    13251317        pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
    13261318        pCodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegDst, idxRegTmpImm, idxRegDstIn, false /*f64Bit*/, true /*fSetFlags*/, cShift);
    13271319        pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegDst, idxRegDst, cShift, false /*f64Bit*/);
    1328         cOpBits = 32;
    13291320        iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
    13301321    }
    13311322    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    13321323
    1333     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegDst,
     1324    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, a_cOpBits > 32 ? a_cOpBits : 32, idxRegDst,
    13341325                                           idxRegDstIn, UINT8_MAX, false /*fInvertCarry*/, uImmOp);
    13351326
    13361327    iemNativeRegFreeTmp(pReNative, idxRegDstIn);
    13371328    iemNativeVarRegisterRelease(pReNative, idxVarDst);
    1338     RT_NOREF(cImmBits);
    13391329
    13401330#else
     
    14131403 * The ADC instruction with immediate value as right operand.
    14141404 */
    1415 DECL_INLINE_THROW(uint32_t)
    1416 iemNativeEmit_adc_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    1417                           uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
     1405template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
     1406DECL_INLINE_THROW(uint32_t)
     1407iemNativeEmit_adc_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
    14181408{
    14191409    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
     
    14281418    pCodeBuf[off++] = X86_EFL_CF_BIT;
    14291419
    1430     off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 2, idxRegDst, uImmOp);
     1420    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 2, idxRegDst, uImmOp);
    14311421    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    14321422
     
    14451435    pCodeBuf[off++] = Armv8A64MkInstrRmif(idxRegEfl, (X86_EFL_CF_BIT - 1) & 63, RT_BIT_32(1) /*fMask=C*/);
    14461436    off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst);
    1447     if (cOpBits >= 32)
    1448         pCodeBuf[off++] = Armv8A64MkInstrAdcs(idxRegDst, idxRegDst, idxRegImm, cOpBits > 32 /*f64Bit*/);
     1437    if RT_CONSTEXPR_IF(a_cOpBits >= 32)
     1438        pCodeBuf[off++] = Armv8A64MkInstrAdcs(idxRegDst, idxRegDst, idxRegImm, a_cOpBits > 32 /*f64Bit*/);
    14491439    else
    14501440    {
     
    14521442           doesn't work. So, we have to calculate carry & overflow manually. */
    14531443        pCodeBuf[off++] = Armv8A64MkInstrAdc(idxRegDst, idxRegDst, idxRegImm, false /*f64Bit*/);
    1454         pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegDst, cOpBits > 8); /* NZ are okay, CV aren't.*/
     1444        pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegDst, a_cOpBits > 8); /* NZ are okay, CV aren't.*/
    14551445    }
    14561446    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     
    14581448    iemNativeRegFreeTmp(pReNative, idxRegImm);
    14591449
    1460     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, cOpBits, idxRegDst,
     1450    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, a_cOpBits, idxRegDst,
    14611451                                           idxRegDstIn, UINT8_MAX, false /*fInvertCarry*/, uImmOp);
    14621452
    14631453    iemNativeRegFreeTmp(pReNative, idxRegDstIn);
    1464     if (cOpBits < 32)
    1465         off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegDst, RT_BIT_32(cOpBits) - 1U);
     1454    if RT_CONSTEXPR_IF(a_cOpBits < 32)
     1455        off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegDst, RT_BIT_32(a_cOpBits) - 1U);
    14661456    iemNativeVarRegisterRelease(pReNative, idxVarDst);
    1467     RT_NOREF(cImmBits);
    14681457
    14691458#else
     
    15371526 * The SUB instruction with immediate value as right operand.
    15381527 */
    1539 DECL_INLINE_THROW(uint32_t)
    1540 iemNativeEmit_sub_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    1541                           uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
     1528template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
     1529DECL_INLINE_THROW(uint32_t)
     1530iemNativeEmit_sub_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
    15421531{
    15431532    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
     
    15461535    /* On AMD64 we just use the correctly sized SUB instruction to get the right EFLAGS.SF value. */
    15471536    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    1548     off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 5, idxRegDst, uImmOp);
     1537    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 5, idxRegDst, uImmOp);
    15491538    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    15501539
     
    15601549    PIEMNATIVEINSTR pCodeBuf     = iemNativeInstrBufEnsure(pReNative, off, 8);
    15611550    off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst);
    1562     if (cOpBits >= 32)
     1551    if RT_CONSTEXPR_IF(a_cOpBits >= 32)
    15631552    {
    15641553        if (uImmOp <= 0xfffU)
    1565             pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegDst, idxRegDst, uImmOp, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
     1554            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegDst, idxRegDst, uImmOp, a_cOpBits > 32 /*f64Bit*/,
     1555                                                       true /*fSetFlags*/);
    15661556        else if (uImmOp <= 0xfff000U && !(uImmOp & 0xfff))
    1567             pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegDst, idxRegDst, uImmOp >> 12, cOpBits > 32 /*f64Bit*/,
     1557            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegDst, idxRegDst, uImmOp >> 12, a_cOpBits > 32 /*f64Bit*/,
    15681558                                                       true /*fSetFlags*/, true /*fShift12*/);
    15691559        else
     
    15711561            uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
    15721562            pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    1573             pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegDst, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
     1563            pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegDst, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/,
     1564                                                    true /*fSetFlags*/);
    15741565            iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
    15751566        }
     
    15781569    {
    15791570        /* Shift the operands up so we can perform a 32-bit operation and get all four flags. */
    1580         uint32_t const cShift       = 32 - cOpBits;
     1571        uint32_t const cShift       = 32 - a_cOpBits;
    15811572        uint8_t const  idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
    15821573        pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
     
    15851576        pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegDstIn, idxRegDstIn, cShift, false /*f64Bit*/);
    15861577        pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegDst,   idxRegDst,   cShift, false /*f64Bit*/);
    1587         cOpBits = 32;
    15881578        iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
    15891579    }
    15901580    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    15911581
    1592     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegDst,
     1582    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, a_cOpBits > 32 ? a_cOpBits : 32, idxRegDst,
    15931583                                           idxRegDstIn, UINT8_MAX, true /*fInvertCarry*/, uImmOp);
    15941584
    15951585    iemNativeRegFreeTmp(pReNative, idxRegDstIn);
    15961586    iemNativeVarRegisterRelease(pReNative, idxVarDst);
    1597     RT_NOREF(cImmBits);
    15981587
    15991588#else
     
    16621651 * The CMP instruction with immediate value as right operand.
    16631652 */
    1664 DECL_INLINE_THROW(uint32_t)
    1665 iemNativeEmit_cmp_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    1666                           uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
     1653template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
     1654DECL_INLINE_THROW(uint32_t)
     1655iemNativeEmit_cmp_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
    16671656{
    16681657    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
     
    16711660    /* On AMD64 we just use the correctly sized CMP instruction to get the right EFLAGS.SF value. */
    16721661    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    1673     off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 7, idxRegDst, uImmOp);
     1662    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 7, idxRegDst, uImmOp);
    16741663    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    16751664
     
    16841673    uint8_t const   idxRegResult = iemNativeRegAllocTmp(pReNative, &off);
    16851674    PIEMNATIVEINSTR pCodeBuf     = iemNativeInstrBufEnsure(pReNative, off, 8);
    1686     if (cOpBits >= 32)
     1675    if RT_CONSTEXPR_IF(a_cOpBits >= 32)
    16871676    {
    16881677        if (uImmOp <= 0xfffU)
    1689             pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegResult, idxRegDst, uImmOp, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
     1678            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegResult, idxRegDst, uImmOp, a_cOpBits > 32 /*f64Bit*/,
     1679                                                       true /*fSetFlags*/);
    16901680        else if (uImmOp <= 0xfff000U && !(uImmOp & 0xfff))
    1691             pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegResult, idxRegDst, uImmOp >> 12, cOpBits > 32 /*f64Bit*/,
     1681            pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(idxRegResult, idxRegDst, uImmOp >> 12, a_cOpBits > 32 /*f64Bit*/,
    16921682                                                       true /*fSetFlags*/, true /*fShift12*/);
    16931683        else
     
    16951685            uint8_t const idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
    16961686            pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    1697             pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegResult, idxRegDst, idxRegTmpImm, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/);
     1687            pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegResult, idxRegDst, idxRegTmpImm, a_cOpBits > 32 /*f64Bit*/,
     1688                                                    true /*fSetFlags*/);
    16981689            iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
    16991690        }
     
    17021693    {
    17031694        /* Shift the operands up so we can perform a 32-bit operation and get all four flags. */
    1704         uint32_t const cShift       = 32 - cOpBits;
     1695        uint32_t const cShift       = 32 - a_cOpBits;
    17051696        uint8_t const  idxRegTmpImm = iemNativeRegAllocTmpImm(pReNative, &off, uImmOp);
    17061697        pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
     
    17081699        pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegResult, idxRegResult, idxRegTmpImm, false /*f64Bit*/, true /*fSetFlags*/, cShift);
    17091700        pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegResult, idxRegResult, cShift, false /*f64Bit*/);
    1710         cOpBits = 32;
    17111701        iemNativeRegFreeTmpImm(pReNative, idxRegTmpImm);
    17121702    }
    17131703    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    17141704
    1715     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegResult,
     1705    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, a_cOpBits > 32 ? a_cOpBits : 32, idxRegResult,
    17161706                                           idxRegDst, UINT8_MAX, true /*fInvertCarry*/, uImmOp);
    17171707
    17181708    iemNativeRegFreeTmp(pReNative, idxRegResult);
    17191709    iemNativeVarRegisterRelease(pReNative, idxVarDst);
    1720     RT_NOREF(cImmBits);
    17211710
    17221711#else
     
    17961785 * The SBB instruction with immediate value as right operand.
    17971786 */
    1798 DECL_INLINE_THROW(uint32_t)
    1799 iemNativeEmit_sbb_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    1800                           uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl, uint8_t cOpBits, uint8_t cImmBits)
     1787template<uint8_t const a_cOpBits, uint8_t const a_cImmBits>
     1788DECL_INLINE_THROW(uint32_t)
     1789iemNativeEmit_sbb_r_i_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarDst, uint64_t uImmOp, uint8_t idxVarEfl)
    18011790{
    18021791    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
     
    18111800    pCodeBuf[off++] = X86_EFL_CF_BIT;
    18121801
    1813     off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, cOpBits, cImmBits, 3, idxRegDst, uImmOp);
     1802    off = iemNativeEmitAmd64OneByteModRmInstrRIEx(pCodeBuf, off, 0x80, 0x83, 0x81, a_cOpBits, a_cImmBits, 3, idxRegDst, uImmOp);
    18141803    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    18151804
     
    18301819    pCodeBuf[off++] = ARMV8_A64_INSTR_CFINV;
    18311820    off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst);
    1832     if (cOpBits >= 32)
    1833         pCodeBuf[off++] = Armv8A64MkInstrSbcs(idxRegDst, idxRegDst, idxRegImm, cOpBits > 32 /*f64Bit*/);
     1821    if RT_CONSTEXPR_IF(a_cOpBits >= 32)
     1822        pCodeBuf[off++] = Armv8A64MkInstrSbcs(idxRegDst, idxRegDst, idxRegImm, a_cOpBits > 32 /*f64Bit*/);
    18341823    else
    18351824    {
     
    18371826           doesn't work. So, we have to calculate carry & overflow manually. */
    18381827        pCodeBuf[off++] = Armv8A64MkInstrSbc(idxRegDst, idxRegDst, idxRegImm, false /*f64Bit*/);
    1839         pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegDst, cOpBits > 8); /* NZ are okay, CV aren't.*/
     1828        pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegDst, a_cOpBits > 8); /* NZ are okay, CV aren't.*/
    18401829    }
    18411830    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     
    18431832    iemNativeRegFreeTmp(pReNative, idxRegImm);
    18441833
    1845     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, cOpBits, idxRegDst,
     1834    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, a_cOpBits, idxRegDst,
    18461835                                           idxRegDstIn, UINT8_MAX, true /*fInvertCarry*/, uImmOp);
    18471836
    18481837    iemNativeRegFreeTmp(pReNative, idxRegDstIn);
    1849     if (cOpBits < 32)
    1850         off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegDst, RT_BIT_32(cOpBits) - 1U);
     1838    if RT_CONSTEXPR_IF(a_cOpBits < 32)
     1839        off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegDst, RT_BIT_32(a_cOpBits) - 1U);
    18511840    iemNativeVarRegisterRelease(pReNative, idxVarDst);
    1852     RT_NOREF(cImmBits);
    18531841
    18541842#else
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette