VirtualBox

Changeset 103691 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp: Mar 5, 2024 7:31:03 PM
Author: vboxsync
Message: VMM/IEM: Implemented iemNativeEmit_adc_r_r_efl and enabled it on both hosts. bugref:10376
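
For orientation, the contract the new native emitter has to honour is that of the interpreted adc helpers: the incoming carry flag participates in the sum, and CF, PF, AF, ZF, SF and OF are all recomputed from the operands and result. A minimal illustrative 8-bit model (a sketch for explanation only, not the VirtualBox code in IEMAllAImplC.cpp):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative 8-bit ADC model (not the VirtualBox code): the carry flag is an
       input, and the carry-out, zero and sign flags are recomputed from the result.
       AF, OF and PF are left out here; see the note under IEMAllAImplC.cpp below. */
    static uint8_t adc8(uint8_t uDst, uint8_t uSrc, unsigned *pfEfl)
    {
        unsigned const fCarryIn = *pfEfl & 1;              /* X86_EFL_CF is bit 0 */
        unsigned const uFull    = (unsigned)uDst + uSrc + fCarryIn;
        uint8_t  const uResult  = (uint8_t)uFull;

        unsigned fEfl = 0;
        fEfl |= (uFull >> 8) & 1;                          /* CF: carry out of bit 7 */
        fEfl |= uResult == 0 ? 0x40 : 0;                   /* ZF */
        fEfl |= (uResult & 0x80) ? 0x80 : 0;               /* SF */
        *pfEfl = fEfl;
        return uResult;
    }

    int main(void)
    {
        unsigned fEfl = 1;                                 /* CF set on input */
        uint8_t const uRes = adc8(0xff, 0x00, &fEfl);      /* 0xff + 0x00 + 1 wraps to 0x00 */
        printf("result=%#x eflags=%#x\n", uRes, fEfl);     /* -> result=0 eflags=0x41 (CF|ZF) */
        return 0;
    }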

Location: trunk/src/VBox/VMM/VMMAll
Files: 3 edited

Legend:

      unmodified (no prefix)
    + added
    - removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp

    r103558 → r103691

       * @param   a_uResult       Unsigned result value.
       * @param   a_uSrc          The source value (for AF calc).
    -  * @param   a_uDst          The original destination value (for AF calc).
    +  * @param   a_uDst          The original destination value (for AF+OF calc).
       * @param   a_cBitsWidth    The width of the result (8, 16, 32, 64).
       * @param   a_CfExpr        Bool expression for the carry flag (CF).
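
    The comment change above records that a_uDst is needed for the overflow-flag calculation as well as the adjust flag. Roughly, and only as an illustration of the two formulas the IEM_EFL_UPDATE_STATUS_BITS_FOR_ARITHMETIC macro documents (the helper names below are made up):

        #include <assert.h>
        #include <stdint.h>

        /* Illustrative only: how a_uDst enters both the AF and the OF calculation for an
           addition-style operation, with the width fixed at 8 bits for brevity. */
        static unsigned calc_af(uint8_t uDst, uint8_t uSrc, uint8_t uResult)
        {
            return (uDst ^ uSrc ^ uResult) & 0x10 ? 1 : 0;            /* carry out of bit 3 */
        }

        static unsigned calc_of_add(uint8_t uDst, uint8_t uSrc, uint8_t uResult)
        {
            /* Signed overflow: operands share a sign and the result has the other one. */
            return (~(uDst ^ uSrc) & (uDst ^ uResult) & 0x80) ? 1 : 0;
        }

        int main(void)
        {
            assert(calc_of_add(0x7f, 0x01, 0x80) == 1);               /* 127 + 1 overflows to -128 */
            assert(calc_af   (0x0f, 0x01, 0x10) == 1);                /* nibble carry */
            return 0;
        }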
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h

    r103683 → r103691

          IEMOP_MNEMONIC2(MR, ADC, adc, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES | IEMOPHINT_LOCK_ALLOWED);
          uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    -     IEMOP_BODY_BINARY_rm_r8_RW(bRm, adc, 0, 0);
    +     IEMOP_BODY_BINARY_rm_r8_RW(bRm, adc, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64, 0);
      }

          IEMOP_MNEMONIC2(MR, ADC, adc, Ev, Gv, DISOPTYPE_HARMLESS, IEMOPHINT_LOCK_ALLOWED);
          uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    -     IEMOP_BODY_BINARY_rm_rv_RW(    bRm, adc, 0, 0);
    +     IEMOP_BODY_BINARY_rm_rv_RW(    bRm, adc, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64, 0);
          IEMOP_BODY_BINARY_rm_rv_LOCKED(bRm, adc);
      }

          IEMOP_MNEMONIC2(RM, ADC, adc, Gb, Eb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES);
          uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    -     IEMOP_BODY_BINARY_r8_rm(bRm, adc, 0);
    +     IEMOP_BODY_BINARY_r8_rm(bRm, adc, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64);
      }

          IEMOP_MNEMONIC2(RM, ADC, adc, Gv, Ev, DISOPTYPE_HARMLESS, 0);
          uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    -     IEMOP_BODY_BINARY_rv_rm(bRm, iemAImpl_adc_u16, iemAImpl_adc_u32, iemAImpl_adc_u64, 1, 0, adc, 0);
    +     IEMOP_BODY_BINARY_rv_rm(bRm, iemAImpl_adc_u16, iemAImpl_adc_u32, iemAImpl_adc_u64, 1, 0, adc, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64);
      }
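
    The only functional change in this file is the arch-mask argument to the IEMOP_BODY_BINARY_* macros, which goes from 0 to RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64, i.e. the set of hosts on which the native adc emitter may now be used. Conceptually the gate is a mask test against the host architecture value; the sketch below is a hypothetical, self-contained illustration (the DEMO_* constants are stand-ins, not the real RT_ARCH_VAL_* values from iprt/cdefs.h):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define DEMO_ARCH_VAL_AMD64  UINT32_C(0x01)       /* stand-in for RT_ARCH_VAL_AMD64 */
        #define DEMO_ARCH_VAL_ARM64  UINT32_C(0x02)       /* stand-in for RT_ARCH_VAL_ARM64 */
        #define DEMO_ARCH_VAL        DEMO_ARCH_VAL_ARM64  /* stand-in for RT_ARCH_VAL (this host) */

        /* Hypothetical helper: the decoder body passes the mask of hosts that have a
           native emitter; the gate checks whether the current host is in that set. */
        static bool demoUseNativeEmitter(uint32_t fNativeArchs)
        {
            return (fNativeArchs & DEMO_ARCH_VAL) != 0;
        }

        int main(void)
        {
            printf("adc native emitter on this host: %d\n",
                   demoUseNativeEmitter(DEMO_ARCH_VAL_AMD64 | DEMO_ARCH_VAL_ARM64));  /* -> 1 */
            return 0;
        }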
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h

    r103683 → r103691

      #ifdef RT_ARCH_AMD64
    +
    + /**
    +  * Emits a ModR/M instruction with one opcode byte and only register operands.
    +  */
      DECL_FORCE_INLINE(uint32_t)
    - iemNativeEmitAmd64ModRmInstrRREx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t bOpcode8, uint8_t bOpcodeOther,
    -                                  uint8_t cOpBits, uint8_t idxRegReg, uint8_t idxRegRm)
    + iemNativeEmitAmd64OneByteModRmInstrRREx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t bOpcode8, uint8_t bOpcodeOther,
    +                                         uint8_t cOpBits, uint8_t idxRegReg, uint8_t idxRegRm)
      {
          Assert(idxRegReg < 16); Assert(idxRegRm < 16);

          return off;
      }
    +
    +
    + /**
    +  * Emits a ModR/M instruction with two opcode bytes and only register operands.
    +  */
    + DECL_FORCE_INLINE(uint32_t)
    + iemNativeEmitAmd64TwoByteModRmInstrRREx(PIEMNATIVEINSTR pCodeBuf, uint32_t off,
    +                                         uint8_t bOpcode0, uint8_t bOpcode8, uint8_t bOpcodeOther,
    +                                         uint8_t cOpBits, uint8_t idxRegReg, uint8_t idxRegRm)
    + {
    +     Assert(idxRegReg < 16); Assert(idxRegRm < 16);
    +     switch (cOpBits)
    +     {
    +         case 16:
    +             pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    +             RT_FALL_THRU();
    +         case 32:
    +             if (idxRegReg >= 8 || idxRegRm >= 8)
    +                 pCodeBuf[off++] = (idxRegReg >= 8 ? X86_OP_REX_R : 0) | (idxRegRm >= 8 ? X86_OP_REX_B : 0);
    +             pCodeBuf[off++] = bOpcode0;
    +             pCodeBuf[off++] = bOpcodeOther;
    +             break;
    +
    +         default: AssertFailed(); RT_FALL_THRU();
    +         case 64:
    +             pCodeBuf[off++] = X86_OP_REX_W | (idxRegReg >= 8 ? X86_OP_REX_R : 0) | (idxRegRm >= 8 ? X86_OP_REX_B : 0);
    +             pCodeBuf[off++] = bOpcode0;
    +             pCodeBuf[off++] = bOpcodeOther;
    +             break;
    +
    +         case 8:
    +             if (idxRegReg >= 8 || idxRegRm >= 8)
    +                 pCodeBuf[off++] = (idxRegReg >= 8 ? X86_OP_REX_R : 0) | (idxRegRm >= 8 ? X86_OP_REX_B : 0);
    +             else if (idxRegReg >= 4 || idxRegRm >= 4)
    +                 pCodeBuf[off++] = X86_OP_REX;
    +             pCodeBuf[off++] = bOpcode0;
    +             pCodeBuf[off++] = bOpcode8;
    +             break;
    +     }
    +     pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxRegReg & 7, idxRegRm & 7);
    +     return off;
    + }
    +
      #endif /* RT_ARCH_AMD64 */
    -

      /**
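
    The new two-byte variant is what the adc emitter further down uses to generate BT (0F BA /4 ib) against a copy of the guest EFLAGS. As a sanity check of the register-only encoding it produces, here is a small stand-alone model of the 32-bit case (a simplified stand-in, not the VBox helper):

        #include <stdint.h>
        #include <stdio.h>

        /* Simplified stand-in for the 32-bit, register-only path of the new two-byte
           ModR/M emitter: optional REX prefix, two opcode bytes, then a mod=11 ModR/M. */
        static unsigned demoEmitTwoByteModRmRR32(uint8_t *pb, unsigned off,
                                                 uint8_t bOpcode0, uint8_t bOpcodeOther,
                                                 uint8_t idxRegReg, uint8_t idxRegRm)
        {
            if (idxRegReg >= 8 || idxRegRm >= 8)
                pb[off++] = 0x40 | (idxRegReg >= 8 ? 0x04 /*REX.R*/ : 0) | (idxRegRm >= 8 ? 0x01 /*REX.B*/ : 0);
            pb[off++] = bOpcode0;
            pb[off++] = bOpcodeOther;
            pb[off++] = (uint8_t)(0xc0 | ((idxRegReg & 7) << 3) | (idxRegRm & 7)); /* mod=11 */
            return off;
        }

        int main(void)
        {
            /* BT ecx, imm8 is 0F BA /4 ib: reg field 4 is the opcode extension, r/m is ECX (1). */
            uint8_t  ab[8];
            unsigned off = demoEmitTwoByteModRmRR32(ab, 0, 0x0f, 0xba, 4 /* '/4' */, 1 /*ecx*/);
            ab[off++] = 0; /* imm8: bit 0, i.e. X86_EFL_CF_BIT */
            for (unsigned i = 0; i < off; i++)
                printf("%02x ", ab[i]);                   /* expected: 0f ba e1 00 */
            printf("\n");
            return 0;
        }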
     
       */
      DECL_FORCE_INLINE_THROW(uint32_t)
    - iemNativeEmitEFlagsForArithmetic(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEfl
    + iemNativeEmitEFlagsForArithmetic(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarEfl, uint8_t idxRegEflIn
      #ifndef RT_ARCH_AMD64
                                       , uint8_t cOpBits, uint8_t idxRegResult, uint8_t idxRegDstIn, uint8_t idxRegSrc
    -                                  , bool fNativeFlags, bool fInvertCarry
    +                                  , bool fInvertCarry
      #endif
                                       )

              pCodeBuf[off++] = 0x9c;

    -         uint8_t const idxRegEfl = iemNativeVarRegisterAcquire(pReNative, idxVarEfl, &off, true /*fInitialized*/);
    +         uint8_t const idxRegEfl = idxRegEflIn != UINT8_MAX ? idxRegEflIn
    +                                 : iemNativeVarRegisterAcquire(pReNative, idxVarEfl, &off, true /*fInitialized*/);
              uint8_t const idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
              pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2 + 7 + 7 + 3);

              /* OR in the flags we collected. */
              off = iemNativeEmitOrGpr32ByGprEx(pCodeBuf, off, idxRegEfl, idxTmpReg);
    -         iemNativeVarRegisterRelease(pReNative, idxVarEfl);
    +         if (idxRegEflIn != idxRegEfl)
    +             iemNativeVarRegisterRelease(pReNative, idxVarEfl);
              iemNativeRegFreeTmp(pReNative, idxTmpReg);


               * Calculate flags.
               */
    -         uint8_t const         idxRegEfl = iemNativeVarRegisterAcquire(pReNative, idxVarEfl, &off, true /*fInitialized*/);
    -         uint8_t const         idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
    -         PIEMNATIVEINSTR const pCodeBuf  = iemNativeInstrBufEnsure(pReNative, off, 18);
    -
    -         if (fNativeFlags && cOpBits >= 32)
    +         uint8_t const         idxRegEfl  = idxRegEflIn != UINT8_MAX ? idxRegEflIn
    +                                          : iemNativeVarRegisterAcquire(pReNative, idxVarEfl, &off, true /*fInitialized*/);
    +         uint8_t const         idxTmpReg  = iemNativeRegAllocTmp(pReNative, &off);
    +         uint8_t const         idxTmpReg2 = cOpBits >= 32 ? UINT8_MAX : iemNativeRegAllocTmp(pReNative, &off);
    +         PIEMNATIVEINSTR const pCodeBuf   = iemNativeInstrBufEnsure(pReNative, off, 20);
    +
    +         /* Invert CF (stored inverted on ARM) and load the flags into the temporary register. */
    +         if (fInvertCarry)
    +             pCodeBuf[off++] = ARMV8_A64_INSTR_CFINV;
    +         pCodeBuf[off++] = Armv8A64MkInstrMrs(idxTmpReg, ARMV8_AARCH64_SYSREG_NZCV); /* Bits: 31=N; 30=Z; 29=C; 28=V; */
    +
    +         if (cOpBits >= 32)
              {
    -             /* Invert CF (stored inverted on ARM) and load the flags into the temporary register. */
    -             if (fInvertCarry)
    -                 pCodeBuf[off++] = ARMV8_A64_INSTR_CFINV;
    -             pCodeBuf[off++] = Armv8A64MkInstrMrs(idxTmpReg, ARMV8_AARCH64_SYSREG_NZCV); /* Bits: 31=N; 30=Z; 29=C; 28=V; */
    -
                  /* V -> OF */
                  pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxTmpReg, 28);

                  pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxTmpReg, 1);
                  pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxTmpReg, X86_EFL_CF_BIT, 1, false /*f64Bit*/);
    -
    -             /* N,Z -> SF,ZF */
    -             pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxTmpReg, 1);
    -             pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxTmpReg, X86_EFL_ZF_BIT, 2, false /*f64Bit*/);
              }
    -         else
    +
    +         /* N,Z -> SF,ZF */
    +         pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxTmpReg, cOpBits >= 32 ? 1 : 30);
    +         pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxTmpReg, X86_EFL_ZF_BIT, 2, false /*f64Bit*/);
    +
    +         /* For ADC and SBB we have to calculate overflow and carry ourselves. */
    +         if (cOpBits < 32)
              {
    - #if 0
    -             pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegResult, cOpBits > 8);
    -             pCodeBuf[off++] = Armv8A64MkInstrMrs(idxTmpReg, ARMV8_AARCH64_SYSREG_NZCV); /* Bits: 31=N; 30=Z; 29=C; 28=V; */
    -
    -             /* V -> OF */
    -             pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxTmpReg, 28);
    -             pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxTmpReg, X86_EFL_OF_BIT, 1, false /*f64Bit*/);
    -
    -             /* N,Z -> SF,ZF */
    -             pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxTmpReg, 2);
    -             pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxTmpReg, X86_EFL_ZF_BIT, 2, false /*f64Bit*/);
    - #else
    -             pCodeBuf[off++] = Armv8A64MkInstrBrk(0x1010);
    - #endif
    +             /* Since the carry flag is the zeroth flag, we just use BFXIL to copy it over. */
    +             AssertCompile(X86_EFL_CF_BIT == 0);
    +             pCodeBuf[off++] = Armv8A64MkInstrBfxil(idxRegEfl, idxRegResult, cOpBits, 1, false /*f64Bit*/);
    +
    +             /* The overflow flag is more work. See IEM_EFL_UPDATE_STATUS_BITS_FOR_ARITHMETIC. */
    +             pCodeBuf[off++] = Armv8A64MkInstrEon(idxTmpReg,  idxRegDstIn, idxRegSrc,    false); /* ~((a_uDst) ^ (a_uSrcOf)) */
    +             pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg2, idxRegDstIn, idxRegResult, false); /*  (a_uDst) ^ (a_uResult) */
    +             pCodeBuf[off++] = Armv8A64MkInstrAnd(idxTmpReg,  idxTmpReg,   idxTmpReg2,   false /*f64Bit*/);
    +             pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxTmpReg, cOpBits - 1,  false /*f64Bit*/);
    +             pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl,  idxTmpReg, X86_EFL_OF_BIT, 1);
    +             iemNativeRegFreeTmp(pReNative, idxTmpReg2);
              }


              pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl, idxTmpReg, X86_EFL_AF_BIT, 1,  false /*f64Bit*/);

    -         iemNativeVarRegisterRelease(pReNative, idxVarEfl);
    +         if (idxRegEflIn != idxRegEfl)
    +             iemNativeVarRegisterRelease(pReNative, idxVarEfl);
              iemNativeRegFreeTmp(pReNative, idxTmpReg);
      #else
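
    Summary of the helper change: it now accepts an already-acquired EFLAGS register via the new idxRegEflIn parameter (used by the adc emitter, which passes UINT8_MAX for idxVarEfl), and for 8/16-bit operations on ARM64 it derives CF and OF manually because SETF8/SETF16 only fix N and Z. The flag mapping it performs can be illustrated like this (demo code, not the emitter):

        #include <assert.h>
        #include <stdint.h>

        /* Illustration of the ARM64 flag mapping (not the emitter itself): NZCV lives in
           bits 31..28 of the system register read by MRS, while the x86 bits are SF=7,
           ZF=6, CF=0, OF=11.  For 8/16-bit ADC/SBB the C and V bits of NZCV are not
           usable, so CF is taken from bit cOpBits of the unclamped result instead. */
        static uint32_t demoMapNzcvToEfl32(uint32_t fNzcv)
        {
            uint32_t fEfl = 0;
            fEfl |= (fNzcv >> 28) & 1 ? UINT32_C(0x800) : 0;   /* V -> OF (bit 11) */
            fEfl |= (fNzcv >> 29) & 1 ? UINT32_C(0x001) : 0;   /* C -> CF (bit 0)  */
            fEfl |= (fNzcv >> 30) & 1 ? UINT32_C(0x040) : 0;   /* Z -> ZF (bit 6)  */
            fEfl |= (fNzcv >> 31) & 1 ? UINT32_C(0x080) : 0;   /* N -> SF (bit 7)  */
            return fEfl;
        }

        static uint32_t demoCarryFromResult(uint32_t uUnclampedResult, unsigned cOpBits)
        {
            /* What the BFXIL in the sub-32-bit path extracts: the carry-out is bit cOpBits. */
            return (uUnclampedResult >> cOpBits) & 1;
        }

        int main(void)
        {
            assert(demoMapNzcvToEfl32(UINT32_C(0x60000000)) == 0x41);  /* Z+C -> ZF+CF */
            assert(demoCarryFromResult(0x1fe, 8) == 1);                /* 0xff + 0xff = 0x1fe: CF=1 */
            return 0;
        }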
     
      #ifdef RT_ARCH_AMD64
          /* On AMD64 we just use the correctly sized AND instruction to harvest the EFLAGS. */
    -     off = iemNativeEmitAmd64ModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    -                                            0x22, 0x23, cOpBits, idxRegDst, idxRegSrc);
    +     off = iemNativeEmitAmd64OneByteModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    +                                                   0x22, 0x23, cOpBits, idxRegDst, idxRegSrc);
          IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);


      #ifdef RT_ARCH_AMD64
          /* On AMD64 we just use the correctly sized TEST instruction to harvest the EFLAGS. */
    -     off = iemNativeEmitAmd64ModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    -                                            0x84, 0x85, cOpBits, idxRegSrc, idxRegDst);
    +     off = iemNativeEmitAmd64OneByteModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    +                                                   0x84, 0x85, cOpBits, idxRegSrc, idxRegDst);
          IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);


      #ifdef RT_ARCH_AMD64
          /* On AMD64 we just use the correctly sized OR instruction to harvest the EFLAGS. */
    -     off = iemNativeEmitAmd64ModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    -                                            0x0a, 0x0b, cOpBits, idxRegDst, idxRegSrc);
    +     off = iemNativeEmitAmd64OneByteModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    +                                                   0x0a, 0x0b, cOpBits, idxRegDst, idxRegSrc);
          IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);


      #ifdef RT_ARCH_AMD64
          /* On AMD64 we just use the correctly sized XOR instruction to harvest the EFLAGS. */
    -     off = iemNativeEmitAmd64ModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    -                                            0x32, 0x33, cOpBits, idxRegDst, idxRegSrc);
    +     off = iemNativeEmitAmd64OneByteModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    +                                                   0x32, 0x33, cOpBits, idxRegDst, idxRegSrc);
          IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

     
      #ifdef RT_ARCH_AMD64
          /* On AMD64 we just use the correctly sized ADD instruction to get the right EFLAGS.SF value. */
    -     off = iemNativeEmitAmd64ModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    -                                            0x02, 0x03, cOpBits, idxRegDst, idxRegSrc);
    -     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    -
    -     iemNativeVarRegisterRelease(pReNative, idxVarSrc);
    -     iemNativeVarRegisterRelease(pReNative, idxVarDst);
    -
    -     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl);
    +     off = iemNativeEmitAmd64OneByteModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    +                                                   0x02, 0x03, cOpBits, idxRegDst, idxRegSrc);
    +     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    +
    +     iemNativeVarRegisterRelease(pReNative, idxVarSrc);
    +     iemNativeVarRegisterRelease(pReNative, idxVarDst);
    +
    +     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX);

      #elif defined(RT_ARCH_ARM64)

             OF, CF, ZF and SF. */
          uint8_t const         idxRegDstIn = iemNativeRegAllocTmp(pReNative, &off);
    -     PIEMNATIVEINSTR const pCodeBuf    = iemNativeInstrBufEnsure(pReNative, off, 5);
    +     PIEMNATIVEINSTR const pCodeBuf    = iemNativeInstrBufEnsure(pReNative, off, 4);
          if (cOpBits >= 32)
          {


          /** @todo Explain why the carry flag shouldn't be inverted for ADDS. */
    -     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, cOpBits, idxRegDst,
    -                                            idxRegDstIn, idxRegSrc, true /*fNativeFlags*/, false /*fInvertCarry*/);
    +     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegDst,
    +                                            idxRegDstIn, idxRegSrc, false /*fInvertCarry*/);

          iemNativeRegFreeTmp(pReNative, idxRegDstIn);
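
    A note on fInvertCarry, which the @todo above asks about: for subtraction x86 defines CF as the borrow, while AArch64 SUBS sets C when no borrow occurred, so the SUB/CMP paths pass true; for addition the carry-out has the same meaning on both architectures, which is presumably why ADD (and ADC below) pass false. A tiny illustration:

        #include <assert.h>
        #include <stdint.h>

        /* Illustration of the carry/borrow convention (not VBox code): for a subtraction
           a - b, x86 sets CF when a borrow occurs (a < b), while AArch64 SUBS sets C when
           NO borrow occurs (a >= b).  For additions the carry-out means the same thing on
           both architectures, so only the subtraction paths need the inversion. */
        int main(void)
        {
            uint8_t const a = 5, b = 7;
            unsigned const fX86CfSub = a < b;     /* x86: CF = borrow      -> 1 */
            unsigned const fArmCSub  = a >= b;    /* ARM: C  = NOT borrow  -> 0 */
            assert(fX86CfSub == (unsigned)!fArmCSub);
            return 0;
        }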
     


    + /**
    +  * The ADC instruction takes CF as input and will set all status flags.
    +  */
      DECL_INLINE_THROW(uint32_t)
      iemNativeEmit_adc_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
      {
    -     RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
    -     AssertFailed();
    -     return iemNativeEmitBrk(pReNative, off, 0x666);
    +     uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
    +     uint8_t const idxRegSrc = iemNativeVarRegisterAcquire(pReNative, idxVarSrc, &off, true /*fInitialized*/);
    +     uint8_t const idxRegEfl = iemNativeVarRegisterAcquire(pReNative, idxVarEfl, &off, true /*fInitialized*/);
    +
    + #ifdef RT_ARCH_AMD64
    +     /* On AMD64 we use BT to set EFLAGS.CF and then issue an ADC instruction
    +        with matching size to get the correct flags. */
    +     PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 9);
    +
    +     /* Use the BT instruction to set CF according to idxRegEfl. */
    +     off = iemNativeEmitAmd64TwoByteModRmInstrRREx(pCodeBuf, off, 0x0f, 0x0b, 0xba, 32 /*cOpBits*/, 4, idxRegEfl);
    +     pCodeBuf[off++] = X86_EFL_CF_BIT;
    +
    +     off = iemNativeEmitAmd64OneByteModRmInstrRREx(pCodeBuf, off, 0x12, 0x13, cOpBits, idxRegDst, idxRegSrc);
    +     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    +
    +     iemNativeVarRegisterRelease(pReNative, idxVarSrc);
    +     iemNativeVarRegisterRelease(pReNative, idxVarDst);
    +
    +     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl);
    +
    + #elif defined(RT_ARCH_ARM64)
    +     /* On ARM64 we use the RMIF instruction to load PSTATE.CF from idxRegEfl and
    +        then ADCS for the calculation.  We need all inputs and result for the two
    +        flags (AF,PF) that can't be directly derived from PSTATE.NZCV. */
    +     uint8_t const         idxRegDstIn = iemNativeRegAllocTmp(pReNative, &off);
    +     PIEMNATIVEINSTR const pCodeBuf    = iemNativeInstrBufEnsure(pReNative, off, 7);
    +
    +     pCodeBuf[off++] = Armv8A64MkInstrRmif(idxRegEfl, (X86_EFL_CF_BIT - 1) & 63, RT_BIT_32(1) /*fMask=C*/);
    +     off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst);
    +     if (cOpBits >= 32)
    +         pCodeBuf[off++] = Armv8A64MkInstrAdcs(idxRegDst, idxRegDst, idxRegSrc, cOpBits > 32 /*f64Bit*/);
    +     else
    +     {
    +         /* Since we're also adding in the carry flag here, shifting operands up
    +            doesn't work. So, we have to calculate carry & overflow manually. */
    +         pCodeBuf[off++] = Armv8A64MkInstrAdc(idxRegDst, idxRegDst, idxRegSrc, false /*f64Bit*/);
    +         pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegDst, cOpBits > 8); /* NZ are okay, CV aren't. */
    +     }
    +     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    +
    +     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, cOpBits, idxRegDst,
    +                                            idxRegDstIn, idxRegSrc, false /*fInvertCarry*/);
    +
    +     iemNativeRegFreeTmp(pReNative, idxRegDstIn);
    +     iemNativeVarRegisterRelease(pReNative, idxVarSrc);
    +     if (cOpBits < 32)
    +         off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegDst, RT_BIT_32(cOpBits) - 1U);
    +     iemNativeVarRegisterRelease(pReNative, idxVarDst);
    +
    + #else
    + # error "port me"
    + #endif
    +     iemNativeVarRegisterRelease(pReNative, idxVarEfl);
    +     return off;
      }

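
    Putting the 8/16-bit ARM64 pieces together: ADC leaves the unclamped sum in the destination register, SETF8/SETF16 repairs N and Z, the EFLAGS helper then takes CF from bit cOpBits of that sum and OF from the operands, and finally the result is masked back to its width. A small illustrative model of that flag derivation (not the emitted code):

        #include <assert.h>
        #include <stdint.h>

        /* End-to-end model of the 8-bit ADC strategy used on ARM64 above: add without
           clamping, take CF from bit 8 of the unclamped sum, derive OF from operands and
           result, then mask the result back to 8 bits. */
        int main(void)
        {
            uint8_t const  uDst = 0x7f, uSrc = 0x00;
            unsigned const fCarryIn = 1;

            uint32_t const uUnclamped = (uint32_t)uDst + uSrc + fCarryIn;  /* what ADC leaves in the register */
            unsigned const fCf        = (uUnclamped >> 8) & 1;             /* BFXIL: bit cOpBits (8)          */
            uint8_t  const uResult    = (uint8_t)uUnclamped;               /* AND with (1 << cOpBits) - 1     */
            unsigned const fOf        = ((~(uDst ^ uSrc) & (uDst ^ uResult)) >> 7) & 1; /* EON/EOR/AND/LSR    */

            assert(uResult == 0x80);   /* 0x7f + 0 + 1 */
            assert(fCf == 0);          /* no carry out of bit 7 */
            assert(fOf == 1);          /* signed overflow: 127 + 1 -> -128 */
            return 0;
        }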
     
      #ifdef RT_ARCH_AMD64
          /* On AMD64 we just use the correctly sized SUB instruction to get the right EFLAGS.SF value. */
    -     off = iemNativeEmitAmd64ModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    -                                            0x2a, 0x2b, cOpBits, idxRegDst, idxRegSrc);
    -     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    -
    -     iemNativeVarRegisterRelease(pReNative, idxVarSrc);
    -     iemNativeVarRegisterRelease(pReNative, idxVarDst);
    -
    -     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl);
    +     off = iemNativeEmitAmd64OneByteModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    +                                                   0x2a, 0x2b, cOpBits, idxRegDst, idxRegSrc);
    +     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    +
    +     iemNativeVarRegisterRelease(pReNative, idxVarSrc);
    +     iemNativeVarRegisterRelease(pReNative, idxVarDst);
    +
    +     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX);

      #elif defined(RT_ARCH_ARM64)

          IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    -     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, cOpBits, idxRegDst,
    -                                            idxRegDstIn, idxRegSrc, true /*fNativeFlags*/, true /*fInvertCarry*/);
    +     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegDst,
    +                                            idxRegDstIn, idxRegSrc, true /*fInvertCarry*/);

          iemNativeRegFreeTmp(pReNative, idxRegDstIn);
     
      #ifdef RT_ARCH_AMD64
          /* On AMD64 we just use the correctly sized CMP instruction to get the right EFLAGS.SF value. */
    -     off = iemNativeEmitAmd64ModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    -                                            0x3a, 0x3b, cOpBits, idxRegDst, idxRegSrc);
    -     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    -
    -     iemNativeVarRegisterRelease(pReNative, idxVarSrc);
    -     iemNativeVarRegisterRelease(pReNative, idxVarDst);
    -
    -     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl);
    +     off = iemNativeEmitAmd64OneByteModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off,
    +                                                   0x3a, 0x3b, cOpBits, idxRegDst, idxRegSrc);
    +     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    +
    +     iemNativeVarRegisterRelease(pReNative, idxVarSrc);
    +     iemNativeVarRegisterRelease(pReNative, idxVarDst);
    +
    +     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX);

      #elif defined(RT_ARCH_ARM64)

          IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

    -     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, cOpBits, idxRegResult,
    -                                            idxRegDst, idxRegSrc, true /*fNativeFlags*/, true /*fInvertCarry*/);
    +     off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegResult,
    +                                            idxRegDst, idxRegSrc, true /*fInvertCarry*/);

          iemNativeRegFreeTmp(pReNative, idxRegResult);