VirtualBox

Changeset 103693 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Mar 5, 2024 8:16:32 PM (12 months ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
162060
Message:

VMM/IEM: Implemented iemNativeEmit_sbb_r_r_efl and enabled it for both hosts. bugref:10376

Location:
trunk/src/VBox/VMM/VMMAll
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h

    r103691 r103693  
    1269 1269    IEMOP_MNEMONIC2(MR, SBB, sbb, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES | IEMOPHINT_LOCK_ALLOWED);
    1270 1270    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    1271     IEMOP_BODY_BINARY_rm_r8_RW(bRm, sbb, 0, 0);
         1271    IEMOP_BODY_BINARY_rm_r8_RW(bRm, sbb, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64, 0);
    1272 1272 }
    1273 1273
     
    1282 1282    IEMOP_MNEMONIC2(MR, SBB, sbb, Ev, Gv, DISOPTYPE_HARMLESS, IEMOPHINT_LOCK_ALLOWED);
    1283 1283    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    1284     IEMOP_BODY_BINARY_rm_rv_RW(    bRm, sbb, 0, 0);
         1284    IEMOP_BODY_BINARY_rm_rv_RW(    bRm, sbb, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64, 0);
    1285 1285    IEMOP_BODY_BINARY_rm_rv_LOCKED(bRm, sbb);
    1286 1286 }
     
    1296 1296    IEMOP_MNEMONIC2(RM, SBB, sbb, Gb, Eb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES);
    1297 1297    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    1298     IEMOP_BODY_BINARY_r8_rm(bRm, sbb, 0);
         1298    IEMOP_BODY_BINARY_r8_rm(bRm, sbb, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64);
    1299 1299 }
    1300 1300
     
    1309 1309    IEMOP_MNEMONIC2(RM, SBB, sbb, Gv, Ev, DISOPTYPE_HARMLESS, 0);
    1310 1310    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    1311     IEMOP_BODY_BINARY_rv_rm(bRm, iemAImpl_sbb_u16, iemAImpl_sbb_u32, iemAImpl_sbb_u64, 1, 0, sbb, 0);
         1311    IEMOP_BODY_BINARY_rv_rm(bRm, iemAImpl_sbb_u16, iemAImpl_sbb_u32, iemAImpl_sbb_u64, 1, 0, sbb, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64);
    1312 1312 }
    13131313
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h

    r103692 r103693  
    287 287
    288 288            /* The overflow flag is more work. See IEM_EFL_UPDATE_STATUS_BITS_FOR_ARITHMETIC. */
    289             pCodeBuf[off++] = Armv8A64MkInstrEon(idxTmpReg,  idxRegDstIn, idxRegSrc,    false); /* ~((a_uDst) ^ (a_uSrcOf)) */
    290             pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg2, idxRegDstIn, idxRegResult, false); /*  (a_uDst) ^ (a_uResult) */
    291             pCodeBuf[off++] = Armv8A64MkInstrAnd(idxTmpReg,  idxTmpReg,   idxTmpReg2,   false /*f64Bit*/);
    292             pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxTmpReg, cOpBits - 1,  false /*f64Bit*/);
    293             pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl,  idxTmpReg, X86_EFL_OF_BIT, 1);
     289            if (fInvertCarry) /* sbb:  ~((a_uDst) ^ ~(a_uSrcOf)) ->  (a_uDst) ^  (a_uSrcOf); HACK ALERT: fInvertCarry == sbb */
     290                pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg,  idxRegDstIn, idxRegSrc,  false);
     291            else              /* adc:  ~((a_uDst) ^ (a_uSrcOf))  ->  (a_uDst) ^ ~(a_uSrcOf) */
     292                pCodeBuf[off++] = Armv8A64MkInstrEon(idxTmpReg,  idxRegDstIn, idxRegSrc,  false);
     293            pCodeBuf[off++] = Armv8A64MkInstrEor(idxTmpReg2,   idxRegDstIn, idxRegResult, false); /*  (a_uDst) ^ (a_uResult) */
     294            pCodeBuf[off++] = Armv8A64MkInstrAnd(idxTmpReg,    idxTmpReg,   idxTmpReg2,   false /*f64Bit*/);
     295            pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxTmpReg, idxTmpReg,   cOpBits - 1,  false /*f64Bit*/);
     296            pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegEfl,    idxTmpReg,   X86_EFL_OF_BIT, 1);
    294 297            iemNativeRegFreeTmp(pReNative, idxTmpReg2);
    295 298        }
     
    515 518    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    516 519
    517     /** @todo Explain why the carry flag shouldn't be inverted for ADDS. */
    518 520    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, UINT8_MAX, cOpBits, idxRegDst,
    519 521                                           idxRegDstIn, idxRegSrc, false /*fInvertCarry*/);
     
    709 711
    710 712
         713 /**
         714 * The SBB instruction takes CF as input and will set all status flags.
         715 */
    711 716 DECL_INLINE_THROW(uint32_t)
    712 717 iemNativeEmit_sbb_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    713 718                          uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
         719 {
     720    uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/);
     721    uint8_t const idxRegSrc = iemNativeVarRegisterAcquire(pReNative, idxVarSrc, &off, true /*fInitialized*/);
     722    uint8_t const idxRegEfl = iemNativeVarRegisterAcquire(pReNative, idxVarEfl, &off, true /*fInitialized*/);
     723
     724 #ifdef RT_ARCH_AMD64
     725    /* On AMD64 we use BT to set EFLAGS.CF and then issue an SBB instruction
     726       with matching size to get the correct flags. */
     727    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 9);
     728
     729    /* Use the BT instruction to set CF according to idxRegEfl. */
     730    off = iemNativeEmitAmd64TwoByteModRmInstrRREx(pCodeBuf, off, 0x0f, 0x0b, 0xba, 32 /*cOpBits*/, 4, idxRegEfl);
     731    pCodeBuf[off++] = X86_EFL_CF_BIT;
     732
     733    off = iemNativeEmitAmd64OneByteModRmInstrRREx(pCodeBuf, off, 0x1a, 0x1b, cOpBits, idxRegDst, idxRegSrc);
     734    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     735
     736    iemNativeVarRegisterRelease(pReNative, idxVarSrc);
     737    iemNativeVarRegisterRelease(pReNative, idxVarDst);
     738
     739    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl);
     740
     741 #elif defined(RT_ARCH_ARM64)
     742    /* On ARM64 we use the RMIF+CFINV instructions to load PSTATE.CF from
     743       idxRegEfl and then SBCS for the calculation.  We need all inputs and
     744       result for the two flags (AF,PF) that can't be directly derived from
     745       PSTATE.NZCV. */
     746    uint8_t const         idxRegDstIn = iemNativeRegAllocTmp(pReNative, &off);
     747    PIEMNATIVEINSTR const pCodeBuf    = iemNativeInstrBufEnsure(pReNative, off, 5);
     748
     749    pCodeBuf[off++] = Armv8A64MkInstrRmif(idxRegEfl, (X86_EFL_CF_BIT - 1) & 63, RT_BIT_32(1) /*fMask=C*/);
     750    pCodeBuf[off++] = ARMV8_A64_INSTR_CFINV;
     751    off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, idxRegDstIn, idxRegDst);
     752    if (cOpBits >= 32)
     753        pCodeBuf[off++] = Armv8A64MkInstrSbcs(idxRegDst, idxRegDst, idxRegSrc, cOpBits > 32 /*f64Bit*/);
     754    else
     755    {
     756        /* Since we're also adding in the carry flag here, shifting operands up
     757           doesn't work. So, we have to calculate carry & overflow manually. */
     758        pCodeBuf[off++] = Armv8A64MkInstrSbc(idxRegDst, idxRegDst, idxRegSrc, false /*f64Bit*/);
     759        pCodeBuf[off++] = Armv8A64MkInstrSetF8SetF16(idxRegDst, cOpBits > 8); /* NZ are okay, CV aren't.*/
     760    }
     761    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     762
     763    off = iemNativeEmitEFlagsForArithmetic(pReNative, off, UINT8_MAX, idxRegEfl, cOpBits, idxRegDst,
     764                                           idxRegDstIn, idxRegSrc, true /*fInvertCarry*/);
     765
     766    iemNativeRegFreeTmp(pReNative, idxRegDstIn);
     767    iemNativeVarRegisterRelease(pReNative, idxVarSrc);
     768    if (cOpBits < 32)
     769        off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegDst, RT_BIT_32(cOpBits) - 1U);
     770    iemNativeVarRegisterRelease(pReNative, idxVarDst);
     771
     772 #else
     773 # error "port me"
     774 #endif
     775    iemNativeVarRegisterRelease(pReNative, idxVarEfl);
     776    return off;
     777 }
     778
     779
     780DECL_INLINE_THROW(uint32_t)
     781iemNativeEmit_imul_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     782                           uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
    714 783 {
    715 784    RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
     
    720 789
    721 790 DECL_INLINE_THROW(uint32_t)
    722 iemNativeEmit_imul_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    723                            uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
         791 iemNativeEmit_popcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
         792                             uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
    724 793 {
    725 794    RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
     
    730 799
    731 800 DECL_INLINE_THROW(uint32_t)
    732 iemNativeEmit_popcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    733                              uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
         801 iemNativeEmit_tzcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
         802                            uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
    734 803 {
    735 804    RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
     
    740 809
    741 810 DECL_INLINE_THROW(uint32_t)
    742 iemNativeEmit_tzcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
         811 iemNativeEmit_lzcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    743 812                            uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
    744 813 {
     
    749 818
    750 819
    751 DECL_INLINE_THROW(uint32_t)
    752 iemNativeEmit_lzcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    753                             uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)
    754 {
    755     RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);
    756     AssertFailed();
    757     return iemNativeEmitBrk(pReNative, off, 0x666);
    758 }
    759 
    760 
    761 820 #endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllN8veEmit_x86_h */
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette