VirtualBox

Changeset 101640 in vbox for trunk/src/VBox/VMM/include


Ignore:
Timestamp:
Oct 28, 2023 1:01:28 AM (15 months ago)
Author:
vboxsync
Message:

VMM/IEM: Emit native code for BltIn_CheckIrq. bugref:10371

Location:
trunk/src/VBox/VMM/include
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r101547 r101640  
    929929    {
    930930        /* kIemTbDbgEntryType_ThreadedCall. */
    931         uint32_t    uType      : 4;
    932         uint32_t    uUnused    : 12;
     931        uint32_t    uType       : 4;
     932        /** Set if the call was recompiled to native code, clear if just calling
     933         *  threaded function. */
     934        uint32_t    fRecompiled : 1;
     935        uint32_t    uUnused     : 11;
    933936        /** The threaded call number (IEMTHREADEDFUNCS). */
    934         uint32_t    enmCall    : 16;
     937        uint32_t    enmCall     : 16;
    935938    } ThreadedCall;
    936939
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

    r101626 r101640  
    263263{
    264264    kIemNativeLabelType_Invalid = 0,
     265    /* Labels w/o data, only one instance per TB: */
    265266    kIemNativeLabelType_Return,
    266267    kIemNativeLabelType_ReturnBreak,
     268    kIemNativeLabelType_NonZeroRetOrPassUp,
     269    kIemNativeLabelType_RaiseGp0,
     270    /* Labels with data, potentially multiple instances per TB: */
    267271    kIemNativeLabelType_If,
    268272    kIemNativeLabelType_Else,
    269273    kIemNativeLabelType_Endif,
    270     kIemNativeLabelType_NonZeroRetOrPassUp,
    271     kIemNativeLabelType_RaiseGp0,
     274    kIemNativeLabelType_CheckIrq,
    272275    kIemNativeLabelType_End
    273276} IEMNATIVELABELTYPE;
     
    338341    kIemNativeGstReg_End
    339342} IEMNATIVEGSTREG;
     343
     344/**
     345 * Intended use statement for iemNativeRegAllocTmpForGuestReg().
     346 */
     347typedef enum IEMNATIVEGSTREGUSE
     348{
     349    /** The usage is read-only, the register holding the guest register
     350     * shadow copy will not be modified by the caller. */
     351    kIemNativeGstRegUse_ReadOnly = 0,
     352    /** The caller will update the guest register (think: PC += cbInstr).
     353     * The guest shadow copy will follow the returned register. */
     354    kIemNativeGstRegUse_ForUpdate,
     355    /** The caller will use the guest register value as input in a calculation
     356     * and the host register will be modified.
     357     * This means that the returned host register will not be marked as a shadow
     358     * copy of the guest register. */
     359    kIemNativeGstRegUse_Calculation
     360} IEMNATIVEGSTREGUSE;
    340361
    341362/**
     
    591612    /** Condition sequence number (for generating unique labels). */
    592613    uint16_t                    uCondSeqNo;
    593     uint32_t                    uPadding3;
      614    /** Check IRQ sequence number (for generating unique labels). */
     615    uint16_t                    uCheckIrqSeqNo;
     616    uint16_t                    uPadding3;
    594617
    595618    /** Core state requiring care with branches. */
     
    624647#define IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(a_Name) FNIEMNATIVERECOMPFUNC a_Name
    625648
    626 
    627649DECLHIDDEN(uint32_t)        iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
    628650                                                 uint32_t offWhere = UINT32_MAX, uint16_t uData = 0) RT_NOEXCEPT;
     
    637659DECLHIDDEN(uint8_t)         iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm,
    638660                                                    bool fPreferVolatile = true) RT_NOEXCEPT;
    639 DECLHIDDEN(uint8_t)         iemNativeRegAllocTmpForGuest(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
    640                                                          IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT;
     661DECLHIDDEN(uint8_t)         iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
     662                                                            IEMNATIVEGSTREG enmGstReg,
     663                                                            IEMNATIVEGSTREGUSE enmIntendedUse) RT_NOEXCEPT;
     664DECLHIDDEN(uint8_t)         iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
     665                                                                            IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT;
     666
    641667DECLHIDDEN(uint8_t)         iemNativeRegAllocVar(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t idxVar) RT_NOEXCEPT;
    642668DECLHIDDEN(uint32_t)        iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs) RT_NOEXCEPT;
     
    970996        pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
    971997    pbCodeBuf[off++] = 0x8b;
    972     off = iemNativeEmitGprByVCpuDisp(pbCodeBuf,off,iGpr, offVCpu);
     998    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off,iGpr, offVCpu);
    973999    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    9741000
     
    12631290 * Emits a load effective address to a GPR with a BP relative source address.
    12641290 */
    1265 DECLINLINE(uint32_t) iemNativeEmitLeaGrpByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
     1291DECLINLINE(uint32_t) iemNativeEmitLeaGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
    12661292{
    12671293    /* lea gprdst, [rbp + offDisp] */
     
    13741400
    13751401
     1402#ifdef RT_ARCH_AMD64
     1403/**
     1404 * Common bit of iemNativeEmitLoadGprByGpr and friends.
     1405 */
     1406DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByGprDisp(uint8_t *pbCodeBuf, uint32_t off,
     1407                                                      uint8_t iGprReg, uint8_t iGprBase, int32_t offDisp)
     1408{
     1409    if (offDisp == 0 && (iGprBase & 7) != X86_GREG_xBP) /* Can use encoding w/o displacement field. */
     1410    {
     1411        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM0, iGprReg & 7, iGprBase & 7);
     1412        if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
     1413            pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
     1414    }
     1415    else if (offDisp == (int8_t)offDisp)
     1416    {
     1417        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, iGprBase & 7);
     1418        if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
     1419            pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
     1420        pbCodeBuf[off++] = (uint8_t)offDisp;
     1421    }
     1422    else
     1423    {
     1424        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, iGprBase & 7);
     1425        if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
     1426            pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
     1427        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
     1428        pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
     1429        pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
     1430        pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
     1431    }
     1432    return off;
     1433}
     1434#elif RT_ARCH_ARM64
     1435/**
     1436 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends.
     1437 */
     1438DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByGprLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg,
     1439                                                      uint8_t iGprBase, int32_t offDisp,
     1440                                                      ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData)
     1441{
     1442    /*
     1443     * There are a couple of ldr variants that takes an immediate offset, so
     1444     * try use those if we can, otherwise we have to use the temporary register
     1445     * help with the addressing.
     1446     */
     1447    if ((uint32_t)offDisp < _4K * cbData && !((uint32_t)offDisp & (cbData - 1)))
     1448    {
     1449        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
     1450        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1451        AssertReturn(pu32CodeBuf, UINT32_MAX);
     1452        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, iGprBase, (uint32_t)offDisp / cbData);
     1453    }
     1454    else
     1455    {
     1456        /* The offset is too large, so we must load it into a register and use
     1457           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
     1458        /** @todo reduce by offVCpu by >> 3 or >> 2? if it saves instructions? */
     1459        uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, off, (uint64)offDisp);
     1460        AssertReturn(idxTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
     1461
     1462        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1463        AssertReturn(pu32CodeBuf, UINT32_MAX);
     1464        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGprReg, iGprBase, idxTmpReg);
     1465
     1466        iemNativeRegFreeTmpImm(pReNative, idxTmpReg);
     1467    }
     1468    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1469    return off;
     1470}
     1471#endif
     1472
     1473
     1474/**
     1475 * Emits a 64-bit GPR load via a GPR base address with a displacement.
     1476 */
     1477DECLINLINE(uint32_t) iemNativeEmitLoadGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1478                                               uint8_t iGprDst, uint8_t iGprBase, int32_t offDisp)
     1479{
     1480#ifdef RT_ARCH_AMD64
     1481    /* mov reg64, mem64 */
     1482    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
     1483    AssertReturn(pbCodeBuf, UINT32_MAX);
     1484    pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
     1485    pbCodeBuf[off++] = 0x8b;
     1486    off = iemNativeEmitGprByGprDisp(pbCodeBuf, off, iGprDst, iGprBase, offDisp);
     1487    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1488
     1489#elif RT_ARCH_ARM64
     1490    off = iemNativeEmitGprByGprLdSt(pReNative, off, iGprDst, offDisp, kArmv8A64InstrLdStType_Ld_Dword, sizeof(uint64_t));
     1491
     1492#else
     1493# error "port me"
     1494#endif
     1495    return off;
     1496}
     1497
     1498
     1499/**
     1500 * Emits a 32-bit GPR load via a GPR base address with a displacement.
     1501 * @note Bits 63 thru 32 in @a iGprDst will be cleared.
     1502 */
     1503DECLINLINE(uint32_t) iemNativeEmitLoadGpr32ByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1504                                                 uint8_t iGprDst, uint8_t iGprBase, int32_t offDisp)
     1505{
     1506#ifdef RT_ARCH_AMD64
     1507    /* mov reg32, mem32 */
     1508    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
     1509    AssertReturn(pbCodeBuf, UINT32_MAX);
     1510    if (iGprDst >= 8 || iGprBase >= 8)
     1511        pbCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
     1512    pbCodeBuf[off++] = 0x8b;
     1513    off = iemNativeEmitGprByGprDisp(pbCodeBuf, off, iGprDst, iGprBase, offDisp);
     1514    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1515
     1516#elif RT_ARCH_ARM64
     1517    off = iemNativeEmitGprByGprLdSt(pReNative, off, iGprDst, offDisp, kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));
     1518
     1519#else
     1520# error "port me"
     1521#endif
     1522    return off;
     1523}
     1524
     1525
    13761526/*********************************************************************************************************************************
    13771527*   Subtraction and Additions                                                                                                    *
     
    16831833/**
    16841834 * Emits code for AND'ing two 64-bit GPRs.
    1685  */
    1686 DECLINLINE(uint32_t ) iemNativeEmitAndGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
     1835 *
     1836 * @note When fSetFlags=true, JZ/JNZ jumps can be used afterwards on both AMD64
     1837 *       and ARM64 hosts.
     1838 */
     1839DECLINLINE(uint32_t ) iemNativeEmitAndGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc,
     1840                                               bool fSetFlags = false)
    16871841{
    16881842#if defined(RT_ARCH_AMD64)
     
    16931847    pbCodeBuf[off++] = 0x23;
    16941848    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
     1849    RT_NOREF(fSetFlags);
    16951850
    16961851#elif defined(RT_ARCH_ARM64)
    16971852    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    16981853    AssertReturn(pu32CodeBuf, UINT32_MAX);
    1699     pu32CodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iGprSrc);
     1854    if (!fSetFlags)
     1855        pu32CodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iGprSrc);
     1856    else
     1857        pu32CodeBuf[off++] = Armv8A64MkInstrAnds(iGprDst, iGprDst, iGprSrc);
    17001858
    17011859#else
     
    17351893
    17361894/**
    1737  * Emits code for AND'ing an 32-bit GPRs with a constant.
    1738  */
    1739 DECLINLINE(uint32_t ) iemNativeEmitAndGpr32ByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint32_t uImm)
     1895 * Emits code for AND'ing a 64-bit GPRs with a constant.
     1896 *
     1897 * @note When fSetFlags=true, JZ/JNZ jumps can be used afterwards on both AMD64
     1898 *       and ARM64 hosts.
     1899 */
     1900DECLINLINE(uint32_t ) iemNativeEmitAndGprByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint64_t uImm,
     1901                                               bool fSetFlags = false)
    17401902{
    17411903#if defined(RT_ARCH_AMD64)
    1742     /* and Ev, imm */
    1743     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    1744     AssertReturn(pbCodeBuf, UINT32_MAX);
    1745     if (iGprDst >= 8)
    1746         pbCodeBuf[off++] = X86_OP_REX_R;
    1747     if (uImm < 128)
    1748     {
     1904    if ((int64_t)uImm == (int8_t)uImm)
     1905    {
     1906        /* and Ev, imm8 */
     1907        uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
     1908        AssertReturn(pbCodeBuf, UINT32_MAX);
     1909        pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R);
    17491910        pbCodeBuf[off++] = 0x83;
    17501911        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
    17511912        pbCodeBuf[off++] = (uint8_t)uImm;
    17521913    }
    1753     else
    1754     {
     1914    else if ((int64_t)uImm == (int32_t)uImm)
     1915    {
     1916        /* and Ev, imm32 */
     1917        uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     1918        AssertReturn(pbCodeBuf, UINT32_MAX);
     1919        pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R);
    17551920        pbCodeBuf[off++] = 0x81;
    17561921        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
     
    17601925        pbCodeBuf[off++] = RT_BYTE4(uImm);
    17611926    }
     1927    else
     1928    {
     1929        /* Use temporary register for the 64-bit immediate. */
     1930        uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
     1931        AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
     1932        off = iemNativeEmitAndGprByGpr(pReNative, off, iGprDst, iTmpReg);
     1933        iemNativeRegFreeTmpImm(pReNative, iTmpReg);
     1934    }
     1935    RT_NOREF(fSetFlags);
    17621936
    17631937#elif defined(RT_ARCH_ARM64)
     
    17681942        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    17691943        AssertReturn(pu32CodeBuf, UINT32_MAX);
    1770         pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
    1771     }
    1772     else
    1773     {
    1774         /* Use temporary register for the immediate. */
     1944        if (!fSetFlags)
     1945            pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR);
     1946        else
     1947            pu32CodeBuf[off++] = Armv8A64MkInstrAndsImm(iGprDst, iGprDst, uImmNandS, uImmR);
     1948    }
     1949    else
     1950    {
     1951        /* Use temporary register for the 64-bit immediate. */
    17751952        uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
    17761953        AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
    1777 
    1778         /* and gprdst, gprdst, tmpreg */
     1954        off = iemNativeEmitAndGprByGpr(pReNative, off, iGprDst, iTmpReg, fSetFlags);
     1955        iemNativeRegFreeTmpImm(pReNative, iTmpReg);
     1956    }
     1957
     1958#else
     1959# error "Port me"
     1960#endif
     1961    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1962    return off;
     1963}
     1964
     1965
     1966/**
     1967 * Emits code for AND'ing an 32-bit GPRs with a constant.
     1968 */
     1969DECLINLINE(uint32_t ) iemNativeEmitAndGpr32ByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint32_t uImm,
     1970                                                 bool fSetFlags = false)
     1971{
     1972#if defined(RT_ARCH_AMD64)
     1973    /* and Ev, imm */
     1974    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     1975    AssertReturn(pbCodeBuf, UINT32_MAX);
     1976    if (iGprDst >= 8)
     1977        pbCodeBuf[off++] = X86_OP_REX_R;
     1978    if ((int32_t)uImm == (int8_t)uImm)
     1979    {
     1980        pbCodeBuf[off++] = 0x83;
     1981        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
     1982        pbCodeBuf[off++] = (uint8_t)uImm;
     1983    }
     1984    else
     1985    {
     1986        pbCodeBuf[off++] = 0x81;
     1987        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
     1988        pbCodeBuf[off++] = RT_BYTE1(uImm);
     1989        pbCodeBuf[off++] = RT_BYTE2(uImm);
     1990        pbCodeBuf[off++] = RT_BYTE3(uImm);
     1991        pbCodeBuf[off++] = RT_BYTE4(uImm);
     1992    }
     1993    RT_NOREF(fSetFlags);
     1994
     1995#elif defined(RT_ARCH_ARM64)
     1996    uint32_t uImmR     = 0;
     1997    uint32_t uImmNandS = 0;
     1998    if (Armv8A64ConvertMaskToImmRImmS(uImm, &uImmNandS, &uImmR))
     1999    {
    17792000        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    17802001        AssertReturn(pu32CodeBuf, UINT32_MAX);
    1781         pu32CodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iTmpReg, false /*f64Bit*/);
    1782 
     2002        if (!fSetFlags)
     2003            pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
     2004        else
     2005            pu32CodeBuf[off++] = Armv8A64MkInstrAndsImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
     2006    }
     2007    else
     2008    {
     2009        /* Use temporary register for the 64-bit immediate. */
     2010        uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
     2011        AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
     2012        if (!fSetFlags)
     2013            off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, iGprDst, iTmpReg);
     2014        else
     2015            off = iemNativeEmitAndsGpr32ByGpr32(pReNative, off, iGprDst, iTmpReg);
    17832016        iemNativeRegFreeTmpImm(pReNative, iTmpReg);
    17842017    }
     
    27242957        iemNativeRegFreeTmpImm(pReNative, iTmpReg);
    27252958    }
    2726     else
     2959    else if (fBits <= UINT32_MAX)
    27272960    {
    27282961        /* test Eb, imm8 or test Ev, imm32 */
     
    27472980        }
    27482981    }
     2982    /** @todo implement me. */
     2983    else
     2984        AssertFailedReturn(UINT32_MAX);
    27492985
    27502986#elif defined(RT_ARCH_ARM64)
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette