VirtualBox

Changeset 101304 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Sep 29, 2023 1:02:02 AM (16 months ago)
Author:
vboxsync
Message:

VMM/IEM: Emit native code for pure defer-CImpl instructions. Tested on linux.amd64 only and this doesn't cover cases with zero parameters. bugref:10371

Location:
trunk/src/VBox/VMM
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp

    r100787 r101304  
    98899889 *
    98909890 * @param   iStReg              The other stack register.
    9891  * @param   pfnAImpl            The assembly comparison implementation.
     9891 * @param   fUCmp               true for FUCOMI[P], false for FCOMI[P].
    98929892 * @param   uPopAndFpuOpcode    Bits 15-0: The FPU opcode.
    98939893 *                              Bit  31: Whether we should pop the stack when
    98949894 *                              done or not.
    98959895 */
    9896 IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, uint32_t, uPopAndFpuOpcode)
     9896IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode)
    98979897{
    98989898    Assert(iStReg < 8);
     
    99189918    if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
    99199919    {
    9920         uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
     9920        uint32_t u32Eflags;
     9921        if (!fUCmp)
     9922            u32Eflags = iemAImpl_fcomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
     9923        else
     9924            u32Eflags = iemAImpl_fucomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
    99219925
    99229926        pFpuCtx->FSW &= ~X86_FSW_C1;
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h

    r100858 r101304  
    1053810538    IEMOP_MNEMONIC(fucomi_st0_stN, "fucomi st0,stN");
    1053910539    IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
    10540                                 iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fucomi_r80_by_r80,
     10540                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), true /*fUCmp*/,
    1054110541                                0 /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
    1054210542}
     
    1054810548    IEMOP_MNEMONIC(fcomi_st0_stN, "fcomi st0,stN");
    1054910549    IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
    10550                                 iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
     10550                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
    1055110551                                false /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
    1055210552}
     
    1146711467    IEMOP_MNEMONIC(fucomip_st0_stN, "fucomip st0,stN");
    1146811468    IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
    11469                                 iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
     11469                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
    1147011470                                RT_BIT_32(31) /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
    1147111471}
     
    1147711477    IEMOP_MNEMONIC(fcomip_st0_stN, "fcomip st0,stN");
    1147811478    IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
    11479                                 iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
     11479                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
    1148011480                                RT_BIT_32(31) /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
    1148111481}
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py

    r101275 r101304  
    3737__version__ = "$Revision$"
    3838
    39 # Standard python imports.
     39# Standard python imports:
    4040#import sys;
    4141
    42 #import IEMAllInstPython as iai;
     42# Our python imports:
     43import IEMAllInstPython as iai;
    4344
    4445
     
    4748    Class that deals with transforming a threaded function variation into a
    4849    native recompiler function.
     50
     51    This base class doesn't do any transforming and just renders the same
     52    code as for the threaded function.
    4953    """
    5054
     
    5862        (for the selected host architecture).
    5963        """
    60         return False;
     64        return True;
    6165
    6266    def renderCode(self, cchIndent):
     
    6468        Returns the native recompiler function body for this threaded variant.
    6569        """
    66         return ' ' * cchIndent + '    AssertFailed();';
     70        aoStmts = self.oVariation.aoStmtsForThreadedFunction # type: list(McStmt)
     71        return iai.McStmt.renderCodeForList(aoStmts, cchIndent);
    6772
    6873
     
    7883    """
    7984
    80     _ = oVariation;
    81     _ = sHostArch;
     85    #
     86    # Analyze the statements.
     87    #
     88    aoStmts = oVariation.aoStmtsForThreadedFunction # type: list(McStmt)
     89
     90    # The simplest case are the IEM_MC_DEFER_TO_CIMPL_*_RET_THREADED ones, just pass them thru:
     91    if (    len(aoStmts) == 1
     92        and aoStmts[0].sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
     93        and aoStmts[0].sName.endswith('_RET_THREADED')
     94        and sHostArch in ('amd64',)):
     95        return NativeRecompFunctionVariation(oVariation, sHostArch);
    8296
    8397    return None;
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r101275 r101304  
    14051405
    14061406/**
     1407 * Emits a call to a CImpl function or something similar.
     1408 */
     1409static int32_t iemNativeEmitCImplCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
     1410                                      uintptr_t pfnCImpl, uint8_t cbInstr, uint8_t cAddParams,
     1411                                      uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
     1412{
     1413#ifdef VBOX_STRICT
     1414    off = iemNativeEmitMarker(pReNative, off);
     1415    AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1416#endif
     1417
     1418    /*
     1419     * Load the parameters.
     1420     */
     1421#if defined(RT_OS_WINDOWS) && defined(VBOXSTRICTRC_STRICT_ENABLED)
     1422    /* Special code the hidden VBOXSTRICTRC pointer. */
     1423    off = iemNativeEmitLoadGprFromGpr(  pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
     1424    off = iemNativeEmitLoadGprImm64(    pReNative, off, IEMNATIVE_CALL_ARG2_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
     1425    if (cAddParams > 0)
     1426        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam0);
     1427    if (cAddParams > 1)
     1428        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam1);
     1429    if (cAddParams > 2)
     1430        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG1, uParam2);
     1431    off = iemNativeEmitLeaGrpByBp(pReNative, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
     1432
     1433#else
     1434    AssertCompile(IEMNATIVE_CALL_ARG_GREG_COUNT >= 4);
     1435    off = iemNativeEmitLoadGprFromGpr(  pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
     1436    off = iemNativeEmitLoadGprImm64(    pReNative, off, IEMNATIVE_CALL_ARG1_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
     1437    if (cAddParams > 0)
     1438        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, uParam0);
     1439    if (cAddParams > 1)
     1440        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam1);
     1441    if (cAddParams > 2)
     1442# if IEMNATIVE_CALL_ARG_GREG_COUNT >= 5
     1443        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG4_GREG, uParam2);
     1444# else
     1445        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam2);
     1446# endif
     1447#endif
     1448    AssertReturn(off != UINT32_MAX, off);
     1449
     1450    /*
     1451     * Make the call.
     1452     */
     1453#ifdef RT_ARCH_AMD64
     1454    off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, pfnCImpl);
     1455
     1456    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
     1457    AssertReturn(pbCodeBuf, UINT32_MAX);
     1458    pbCodeBuf[off++] = 0xff;                    /* call rax */
     1459    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
     1460
     1461# if defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS)
     1462    off = iemNativeEmitLoadGprByBpU32(pReNative, off, X86_GREG_xAX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict (see above) */
     1463# endif
     1464
     1465#elif defined(RT_ARCH_ARM64)
     1466    off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, pfnCImpl);
     1467
     1468    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1469    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1470    pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
     1471
     1472#else
     1473# error "Port me!"
     1474#endif
     1475
     1476    /*
     1477     * Check the status code.
     1478     */
     1479    return iemNativeEmitCheckCallRetAndPassUp(pReNative, off, idxInstr);
     1480}
     1481
     1482
     1483/**
    14071484 * Emits a call to a threaded worker function.
    14081485 */
     
    15241601
    15251602/**
    1526  * Emits a standard epilog.
     1603 * Emits the RC fiddling code for handling non-zero return code or rcPassUp.
    15271604 */
    15281605static uint32_t iemNativeEmitRcFiddling(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
     
    18081885
    18091886
     1887DECLINLINE(uint32_t) iemNativeEmitCImplCall1(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
     1888                                             uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0)
     1889{
     1890    return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 1, uArg0, 0, 0);
     1891}
     1892
     1893
     1894DECLINLINE(uint32_t) iemNativeEmitCImplCall2(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
     1895                                             uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1)
     1896{
     1897    return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 2, uArg0, uArg1, 0);
     1898}
     1899
     1900
     1901DECLINLINE(uint32_t) iemNativeEmitCImplCall3(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
     1902                                             uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1, uint64_t uArg2)
     1903{
     1904    return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 3, uArg0, uArg1, uArg2);
     1905}
     1906
     1907
     1908/*
     1909 * MC definitions for the native recompiler.
     1910 */
     1911
     1912#define IEM_MC_DEFER_TO_CIMPL_0_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl) \
     1913    return iemNativeEmitCImplCall0(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr) /** @todo not used ... */
     1914
     1915#define IEM_MC_DEFER_TO_CIMPL_1_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0) \
     1916    return iemNativeEmitCImplCall1(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0)
     1917
     1918#define IEM_MC_DEFER_TO_CIMPL_2_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1) \
     1919    return iemNativeEmitCImplCall2(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1)
     1920
     1921#define IEM_MC_DEFER_TO_CIMPL_3_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1, a2) \
     1922    return iemNativeEmitCImplCall3(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1, a2)
     1923
     1924
     1925
    18101926/*
    18111927 * Include g_apfnIemNativeRecompileFunctions and associated functions.
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r101262 r101304  
    50925092IEM_CIMPL_PROTO_1(iemCImpl_fldcw, uint16_t, u16Fcw);
    50935093IEM_CIMPL_PROTO_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode);
    5094 IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, uint32_t, uPopAndFpuOpcode);
     5094IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode);
    50955095/** @} */
    50965096
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

    r101275 r101304  
    114114/** @def IEMNATIVE_REG_FIXED_PVMCPU
    115115 * The register number hold in pVCpu pointer.  */
     116/** @def IEMNATIVE_REG_FIXED_TMP0
     117 * Dedicated temporary register.
     118 * @todo replace this by a register allocator and content tracker.  */
    116119#ifdef RT_ARCH_AMD64
    117120# define IEMNATIVE_REG_FIXED_PVMCPU         X86_GREG_xBX
     121# define IEMNATIVE_REG_FIXED_TMP0           X86_GREG_x11
    118122
    119123#elif defined(RT_ARCH_ARM64)
    120124# define IEMNATIVE_REG_FIXED_PVMCPU         ARMV8_A64_REG_X28
    121 /** Dedicated temporary register.
    122  * @todo replace this by a register allocator and content tracker.  */
    123125# define IEMNATIVE_REG_FIXED_TMP0           ARMV8_A64_REG_X15
    124126
     
    584586    /* mov gprdst, qword [rbp + offDisp]  */
    585587    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     588    AssertReturn(pbCodeBuf, UINT32_MAX);
    586589    if (iGprDst < 8)
    587590        pbCodeBuf[off++] = X86_OP_REX_W;
     
    602605    /* mov gprdst, dword [rbp + offDisp]  */
    603606    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     607    AssertReturn(pbCodeBuf, UINT32_MAX);
    604608    if (iGprDst >= 8)
    605609        pbCodeBuf[off++] = X86_OP_REX_R;
     
    618622    /* lea gprdst, [rbp + offDisp] */
    619623    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     624    AssertReturn(pbCodeBuf, UINT32_MAX);
    620625    if (iGprDst < 8)
    621626        pbCodeBuf[off++] = X86_OP_REX_W;
     
    628633
    629634
    630 #ifdef RT_ARCH_AMD64
    631635/**
    632636 * Emits a 64-bit GPR store with an BP relative destination address.
     637 *
     638 * @note May trash IEMNATIVE_REG_FIXED_TMP0.
    633639 */
    634640DECLINLINE(uint32_t) iemNativeEmitStoreGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint8_t iGprSrc)
    635641{
     642#ifdef RT_ARCH_AMD64
    636643    /* mov qword [rbp + offDisp], gprdst */
    637644    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     645    AssertReturn(pbCodeBuf, UINT32_MAX);
    638646    if (iGprSrc < 8)
    639647        pbCodeBuf[off++] = X86_OP_REX_W;
     
    642650    pbCodeBuf[off++] = 0x89;
    643651    return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprSrc, offDisp);
    644 }
    645 #endif
     652
     653#elif defined(RT_ARCH_ARM64)
     654    if (offDisp >= 0 && offDisp < 4096 * 8 && !((uint32_t)offDisp & 7))
     655    {
     656        /* str w/ unsigned imm12 (scaled) */
     657        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     658        AssertReturn(pu32CodeBuf, UINT32_MAX);
     659        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Dword, iGprSrc,
     660                                                      ARMV8_A64_BP, (uint32_t)offDisp / 8);
     661    }
     662    else if (offDisp >= -256 && offDisp <= 256)
     663    {
     664        /* stur w/ signed imm9 (unscaled) */
     665        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     666        AssertReturn(pu32CodeBuf, UINT32_MAX);
     667        pu32CodeBuf[off++] = Armv8A64MkInstrSturLdur(kArmv8A64InstrLdStType_St_Dword, iGprSrc, ARMV8_A64_BP, offDisp);
     668    }
     669    else
     670    {
     671        /* Use temporary indexing register. */
     672        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, (uint32_t)offDisp);
     673        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     674        AssertReturn(pu32CodeBuf, UINT32_MAX);
     675        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_St_Dword, iGprSrc, ARMV8_A64_BP,
     676                                                       IEMNATIVE_REG_FIXED_TMP0, kArmv8A64InstrLdStExtend_Sxtw);
     677    }
     678    return off;
     679
     680#else
     681# error "Port me!"
     682#endif
     683}
     684
     685
     686/**
     687 * Emits a 64-bit immediate store with an BP relative destination address.
     688 *
     689 * @note May trash IEMNATIVE_REG_FIXED_TMP0.
     690 */
     691DECLINLINE(uint32_t) iemNativeEmitStoreImm64ByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint64_t uImm64)
     692{
     693#ifdef RT_ARCH_AMD64
     694    if ((int64_t)uImm64 == (int32_t)uImm64)
     695    {
     696        /* mov qword [rbp + offDisp], imm32 - sign extended */
     697        uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 11);
     698        AssertReturn(pbCodeBuf, UINT32_MAX);
     699
     700        pbCodeBuf[off++] = X86_OP_REX_W;
     701        pbCodeBuf[off++] = 0xc7;
     702        if (offDisp < 128 && offDisp >= -128)
     703        {
     704            pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, 0, X86_GREG_xBP);
     705            pbCodeBuf[off++] = (uint8_t)offDisp;
     706        }
     707        else
     708        {
     709            pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, 0, X86_GREG_xBP);
     710            pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
     711            pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
     712            pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
     713            pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
     714        }
     715        pbCodeBuf[off++] = RT_BYTE1(uImm64);
     716        pbCodeBuf[off++] = RT_BYTE2(uImm64);
     717        pbCodeBuf[off++] = RT_BYTE3(uImm64);
     718        pbCodeBuf[off++] = RT_BYTE4(uImm64);
     719        return off;
     720    }
     721#endif
     722
     723    /* Load tmp0, imm64; Store tmp to bp+disp. */
     724    off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, uImm64);
     725    return iemNativeEmitStoreGprByBp(pReNative, off, offDisp, IEMNATIVE_REG_FIXED_TMP0);
     726}
    646727
    647728
     
    654735    /* sub gprdst, imm8/imm32 */
    655736    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     737    AssertReturn(pbCodeBuf, UINT32_MAX);
    656738    if (iGprDst < 7)
    657739        pbCodeBuf[off++] = X86_OP_REX_W;
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette