VirtualBox

Changeset 102699 in vbox for trunk/src


Timestamp: Dec 25, 2023 10:22:01 PM
Author: vboxsync
Message: VMM/IEM: Native translation of BODY_LOAD_TLB_AFTER_BRANCH. (only tested on amd64) bugref:10371
Location: trunk/src/VBox/VMM
Files: 6 edited
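
For orientation: the C-level check that this changeset translates to native code is the one spelled out in the step comments and the disabled threaded variant further down in IEMAllN8veRecompBltIn.cpp. The following is a condensed, illustrative sketch only; the function name tbCheckTlbAfterBranchSketch is hypothetical, and it assumes the usual VBox/IEM internal headers and types:

    /* Illustrative sketch, not actual VBox code: the emitted native code has this shape. */
    static VBOXSTRICTRC tbCheckTlbAfterBranchSketch(PVMCPUCC pVCpu, RTGCPHYS GCPhysRangePageWithOffset)
    {
        /* 1+2. Flat PC (RIP + CS.BASE) and its offset into the current instruction buffer. */
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc;

        /* 3. Not within the current page/buffer? The native code reloads via the TLB and re-checks once. */
        if (off >= pVCpu->iem.s.cbInstrBufTotal)
            return VINF_IEM_REEXEC_BREAK;

        /* 4. Does the physical address match what the TB range expects? */
        if (pVCpu->iem.s.GCPhysInstrBuf + off != GCPhysRangePageWithOffset)
            return VINF_IEM_REEXEC_BREAK; /* branch miss: leave the TB */

        return VINF_SUCCESS;
    }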

  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp

    r102695 r102699  
    6969 * Used by TB code to deal with a TLB miss for a new page.
    7070 */
    71 IEM_DECL_NATIVE_HLP_DEF(RTGCPHYS, iemNativeHlpMemCodeNewPageTlbMiss,(PVMCPUCC pVCpu, uint8_t offInstr))
     71IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpMemCodeNewPageTlbMiss,(PVMCPUCC pVCpu))
     72{
     73    pVCpu->iem.s.pbInstrBuf       = NULL;
     74    pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE;
     75    pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE;
     76    iemOpcodeFetchBytesJmp(pVCpu, 0, NULL);
     77    if (pVCpu->iem.s.pbInstrBuf)
     78    { /* likely */ }
     79    else
     80    {
     81        IEM_DO_LONGJMP(pVCpu, VINF_IEM_REEXEC_BREAK);
     82    }
     83}
     84
     85
     86/**
     87 * Used by TB code to deal with a TLB miss for a new page.
     88 */
     89IEM_DECL_NATIVE_HLP_DEF(RTGCPHYS, iemNativeHlpMemCodeNewPageTlbMissWithOff,(PVMCPUCC pVCpu, uint8_t offInstr))
    7290{
    7391    pVCpu->iem.s.pbInstrBuf       = NULL;
     
    196214    /* If we end up with ZERO in idxTmpReg there is nothing to do.*/
    197215    uint32_t const offFixupJumpToVmCheck1 = off;
    198     off = iemNativeEmitJzToFixed(pReNative, off, 0);
     216    off = iemNativeEmitJzToFixed(pReNative, off, off /* ASSUME jz rel8 suffices */);
    199217
    200218    /* Some relevant FFs are set, but if's only APIC or/and PIC being set,
     
    11301148# ifdef VBOX_STRICT /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); */
    11311149    off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegTmp2, X86_PAGE_OFFSET_MASK, true /*fSetFlags*/);
    1132     off = iemNativeEmitJzToFixed(pReNative, off, 1);
     1150    off = iemNativeEmitJzToFixed(pReNative, off, off + 1 /* correct for ARM64 */);
    11331151    off = iemNativeEmitBrk(pReNative, off, 0x2005);
    11341152# endif
     
    11881206
    11891207    /*
    1190      * TLB miss: Call iemNativeHlpMemCodeNewPageTlbMiss to do the work.
     1208     * TLB miss: Call iemNativeHlpMemCodeNewPageTlbMissWithOff to do the work.
    11911209     */
    11921210    iemNativeLabelDefine(pReNative, idxLabelTlbMiss, off);
     
    11991217
    12001218    /* Done setting up parameters, make the call. */
    1201     off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpMemCodeNewPageTlbMiss);
     1219    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpMemCodeNewPageTlbMissWithOff);
    12021220
    12031221    /* Move the result to the right register. */
     
    12151233
    12161234    iemNativeRegFreeTmp(pReNative, idxRegGCPhys);
     1235    return off;
     1236}
     1237
     1238
     1239/**
      1240 * Macro that implements TLB loading and pbInstrBuf updating when
      1241 * branching or when crossing a page on an instruction boundary.
     1242 *
     1243 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
     1244 * it is an inter-page branch and also check the page offset.
     1245 *
     1246 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
     1247 */
     1248#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) \
     1249    RT_NOREF(a_cbInstr); \
     1250    off = iemNativeEmitBltLoadTlbAfterBranch(pReNative, off, pTb, a_idxRange)
     1251
     1252#if 0
     1253do { \
     1254        /* Is RIP within the current code page? */ \
     1255        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
     1256        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
     1257        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
     1258        if (off < pVCpu->iem.s.cbInstrBufTotal) \
     1259        { \
     1260            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
     1261            Assert(pVCpu->iem.s.pbInstrBuf); \
     1262            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
     1263                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
     1264            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
     1265            { /* we're good */ } \
     1266            else \
     1267            { \
     1268                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
     1269                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
     1270                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
     1271                RT_NOREF(a_cbInstr); \
     1272                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
     1273                return VINF_IEM_REEXEC_BREAK; \
     1274            } \
     1275        } \
     1276        else \
     1277        { \
     1278            /* Must translate new RIP. */ \
     1279            pVCpu->iem.s.pbInstrBuf       = NULL; \
     1280            pVCpu->iem.s.offCurInstrStart = 0; \
     1281            pVCpu->iem.s.offInstrNextByte = 0; \
     1282            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
     1283            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
     1284            \
     1285            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
     1286                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
     1287            uint64_t const offNew                    = uPc - pVCpu->iem.s.uInstrBufPc; \
     1288            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
     1289                && pVCpu->iem.s.pbInstrBuf) \
     1290            { /* likely */ } \
     1291            else \
     1292            { \
     1293                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
     1294                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
     1295                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
     1296                RT_NOREF(a_cbInstr); \
     1297                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
     1298                return VINF_IEM_REEXEC_BREAK; \
     1299            } \
     1300        } \
     1301    } while(0)
     1302#endif
     1303
     1304DECL_FORCE_INLINE(uint32_t)
     1305iemNativeEmitBltLoadTlbAfterBranch(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTB pTb, uint8_t idxRange)
     1306{
     1307//    off = iemNativeEmitBrk(pReNative, off, 0x1010);
     1308#ifdef VBOX_STRICT
     1309    off = iemNativeEmitMarker(pReNative, off, 0x80000006);
     1310#endif
     1311
     1312    /*
     1313     * Define labels and allocate the register for holding the GCPhys of the new page.
     1314     */
     1315    uint32_t const idxLabelCheckBranchMiss = iemNativeLabelCreate(pReNative, kIemNativeLabelType_CheckBranchMiss);
     1316    uint16_t const uTlbSeqNo        = pReNative->uTlbSeqNo++;
     1317    uint32_t const idxLabelTlbMiss  = iemNativeLabelCreate(pReNative, kIemNativeLabelType_TlbMiss, UINT32_MAX, uTlbSeqNo);
     1318    //
     1319
     1320    RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(pTb, idxRange)
     1321                                             | pTb->aRanges[idxRange].offPhysPage;
     1322
     1323    /*
     1324     *
     1325     * First check if RIP is within the current code.
     1326     *
     1327     * This is very similar to iemNativeEmitBltInCheckPcAfterBranch, the only
     1328     * difference is what we do when stuff doesn't match up.
     1329     *
      1330     * What we do is:
     1331     *      1. Calculate the FLAT PC (RIP + CS.BASE).
      1332     *      2. Subtract iem.s.uInstrBufPc from it, getting 'off'.
     1333     *      3. The 'off' must be less than X86_PAGE_SIZE/cbInstrBufTotal or
     1334     *         we need to retranslate RIP via the TLB.
     1335     *      4. Add 'off' to iem.s.GCPhysInstrBuf and compare with the
     1336     *         GCPhysRangePageWithOffset constant mentioned above.
     1337     *
     1338     * The adding of CS.BASE to RIP can be skipped in the first step if we're
     1339     * in 64-bit code or flat 32-bit.
     1340     *
     1341     */
     1342
     1343    /* Allocate registers for step 1. Get the shadowed stuff before allocating
     1344       the temp register, so we don't accidentally clobber something we'll be
     1345       needing again immediately.  This is why we get idxRegCsBase here. */
     1346    /** @todo save+restore active registers and guest shadows in tlb-miss! */
     1347    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /* vacate all non-volatile regs */);
     1348    uint8_t const  idxRegPc     = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
     1349                                                                  kIemNativeGstRegUse_ReadOnly, true /*fNoVolatileRegs*/);
     1350    uint8_t const  idxRegCsBase = IEM_F_MODE_X86_IS_FLAT(pReNative->fExec) ? UINT8_MAX
     1351                                : iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_BASE(X86_SREG_CS),
     1352                                                                 kIemNativeGstRegUse_ReadOnly, true /*fNoVolatileRegs*/);
     1353
     1354    uint8_t const  idxRegTmp    = iemNativeRegAllocTmp(pReNative, &off); /* volatile reg is okay for these two */
     1355    uint8_t const  idxRegTmp2   = iemNativeRegAllocTmp(pReNative, &off);
     1356
     1357#ifdef VBOX_STRICT
     1358    /* Do assertions before idxRegTmp contains anything. */
     1359    Assert(RT_SIZEOFMEMB(VMCPUCC, iem.s.cbInstrBufTotal) == sizeof(uint16_t));
     1360# ifdef RT_ARCH_AMD64
     1361    {
     1362        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8+2+1 + 11+2+1);
     1363        /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
     1364        if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
     1365        {
     1366            /* cmp r/m64, imm8 */
     1367            pbCodeBuf[off++] = X86_OP_REX_W;
     1368            pbCodeBuf[off++] = 0x83;
     1369            off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 7, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.cs.u64Base));
     1370            pbCodeBuf[off++] = 0;
     1371            /* je rel8 */
     1372            pbCodeBuf[off++] = 0x74;
     1373            pbCodeBuf[off++] = 1;
     1374            /* int3 */
     1375            pbCodeBuf[off++] = 0xcc;
     1376
     1377        }
     1378
     1379        /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); - done later by the non-x86 code */
     1380        /* test r/m64, imm32 */
     1381        pbCodeBuf[off++] = X86_OP_REX_W;
     1382        pbCodeBuf[off++] = 0xf7;
     1383        off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 0, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
     1384        pbCodeBuf[off++] = RT_BYTE1(X86_PAGE_OFFSET_MASK);
     1385        pbCodeBuf[off++] = RT_BYTE2(X86_PAGE_OFFSET_MASK);
     1386        pbCodeBuf[off++] = RT_BYTE3(X86_PAGE_OFFSET_MASK);
     1387        pbCodeBuf[off++] = RT_BYTE4(X86_PAGE_OFFSET_MASK);
     1388        /* jz rel8 */
     1389        pbCodeBuf[off++] = 0x74;
     1390        pbCodeBuf[off++] = 1;
     1391        /* int3 */
     1392        pbCodeBuf[off++] = 0xcc;
     1393        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1394    }
     1395# else
     1396
     1397    /* Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_F_MODE_X86_IS_FLAT(pReNative->fExec)); */
     1398    if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
     1399    {
     1400        off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.cs.u64Base));
     1401# ifdef RT_ARCH_ARM64
     1402        uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
     1403        pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 2, idxRegTmp);
     1404        pu32CodeBuf[off++] = Armv8A64MkInstrBrk(0x2006);
     1405        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1406# else
     1407#  error "Port me!"
     1408# endif
     1409    }
     1410# endif
     1411
     1412#endif /* VBOX_STRICT */
     1413
     1414    /* Because we're lazy, we'll jump back here to recalc 'off' and share the
     1415       GCPhysRangePageWithOffset check.  This is a little risky, so we use the
     1416       2nd register to check if we've looped more than once already.*/
     1417    off = iemNativeEmitGprZero(pReNative, off, idxRegTmp2);
     1418
     1419    uint32_t const offLabelRedoChecks = off;
     1420
     1421    /* 1+2. Calculate 'off' first (into idxRegTmp). */
     1422    off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.uInstrBufPc));
     1423    if (IEM_F_MODE_X86_IS_FLAT(pReNative->fExec))
     1424    {
     1425#ifdef RT_ARCH_ARM64
     1426        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1427        pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegPc, idxRegTmp);
     1428        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1429#else
     1430        off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
     1431        off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
     1432#endif
     1433    }
     1434    else
     1435    {
     1436#ifdef RT_ARCH_ARM64
     1437        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
     1438        pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegTmp, idxRegCsBase, idxRegTmp);
     1439        pu32CodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegTmp, idxRegTmp, idxRegPc);
     1440        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1441#else
     1442        off = iemNativeEmitNegGpr(pReNative, off, idxRegTmp);
     1443        off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegCsBase);
     1444        off = iemNativeEmitAddTwoGprs(pReNative, off, idxRegTmp, idxRegPc);
     1445#endif
     1446    }
     1447
     1448    /* 3. Check that off is less than X86_PAGE_SIZE/cbInstrBufTotal.
     1449          Unlike iemNativeEmitBltInCheckPcAfterBranch we'll jump to the TLB loading if this fails. */
     1450    off = iemNativeEmitCmpGprWithImm(pReNative, off, idxRegTmp, X86_PAGE_SIZE - 1);
     1451    uint32_t const offFixedJumpToTlbLoad = off;
     1452    off = iemNativeEmitJaToFixed(pReNative, off, off /* (ASSUME ja rel8 suffices) */);
     1453
     1454    /* 4a. Add iem.s.GCPhysInstrBuf to off ... */
     1455#ifdef RT_ARCH_AMD64
     1456    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     1457    pbCodeBuf[off++] = idxRegTmp < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R;
     1458    pbCodeBuf[off++] = 0x03; /* add r64, r/m64 */
     1459    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, idxRegTmp, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
     1460    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1461
     1462#elif defined(RT_ARCH_ARM64)
     1463
     1464    off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxRegTmp2, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf));
     1465    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1466    pu32CodeBuf[off++] = Armv8A64MkInstrAddReg(idxRegTmp, idxRegTmp, idxRegTmp2);
     1467    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     1468
     1469# ifdef VBOX_STRICT /* Assert(!(pVCpu->iem.s.GCPhysInstrBuf & X86_PAGE_OFFSET_MASK)); */
     1470    off = iemNativeEmitAndGpr32ByImm(pReNative, off, idxRegTmp2, X86_PAGE_OFFSET_MASK, true /*fSetFlags*/);
     1471    off = iemNativeEmitJzToFixed(pReNative, off, off + 1 /* correct for ARM64 */);
     1472    off = iemNativeEmitBrk(pReNative, off, 0x2005);
     1473# endif
     1474#else
     1475# error "Port me"
     1476#endif
     1477
     1478    /* 4b. ... and compare with GCPhysRangePageWithOffset.
     1479
     1480       Unlike iemNativeEmitBltInCheckPcAfterBranch we'll have to be more
     1481       careful and avoid implicit temporary register usage here.
     1482
     1483       Unlike the threaded version of this code, we do not obsolete TBs here to
     1484       reduce the code size and because indirect calls may legally end at the
     1485       same offset in two different pages depending on the program state. */
     1486    /** @todo synch the threaded BODY_LOAD_TLB_AFTER_BRANCH version with this. */
     1487    off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegTmp2, GCPhysRangePageWithOffset);
     1488    off = iemNativeEmitCmpGprWithGpr(pReNative, off, idxRegTmp, idxRegTmp2);
     1489    off = iemNativeEmitJnzToLabel(pReNative, off, idxLabelCheckBranchMiss);
     1490    uint32_t const offFixedJumpToEnd = off;
     1491    off = iemNativeEmitJmpToFixed(pReNative, off, off + 512 /* force rel32 */);
     1492
     1493    /*
     1494     * First we try to go via the TLB.
     1495     */
     1496    iemNativeFixupFixedJump(pReNative, offFixedJumpToTlbLoad, off);
     1497//off = iemNativeEmitBrk(pReNative, off, 0x1111);
     1498
     1499    /* Check that we haven't been here before. */
     1500    off = iemNativeEmitTestIfGprIsNotZeroAndJmpToLabel(pReNative, off, idxRegTmp2,  false /*f64Bit*/, idxLabelCheckBranchMiss);
     1501
     1502    /*
     1503     * TLB miss: Call iemNativeHlpMemCodeNewPageTlbMiss to do the work.
     1504     */
     1505    iemNativeLabelDefine(pReNative, idxLabelTlbMiss, off);
     1506
     1507    /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
     1508    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
     1509
     1510    /* Done setting up parameters, make the call. */
     1511    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpMemCodeNewPageTlbMiss);
     1512
     1513    /* Jmp back to the start and redo the checks. */
     1514    off = iemNativeEmitLoadGpr8Imm(pReNative, off, idxRegTmp2, 1); /* indicate that we've looped once already */
     1515    off = iemNativeEmitJmpToFixed(pReNative, off, offLabelRedoChecks);
     1516
     1517    /* The end. */
     1518    iemNativeFixupFixedJump(pReNative, offFixedJumpToEnd, off);
     1519
     1520    iemNativeRegFreeTmp(pReNative, idxRegTmp2);
     1521    iemNativeRegFreeTmp(pReNative, idxRegTmp);
     1522    iemNativeRegFreeTmp(pReNative, idxRegPc);
     1523    if (idxRegCsBase != UINT8_MAX)
     1524        iemNativeRegFreeTmp(pReNative, idxRegCsBase);
    12171525    return off;
    12181526}
     
    13931701    BODY_FLUSH_PENDING_WRITES();
    13941702    BODY_CHECK_CS_LIM(cbInstr);
     1703    Assert(offRange == 0);
    13951704    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    13961705    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     
    14201729    BODY_SET_CUR_INSTR();
    14211730    BODY_FLUSH_PENDING_WRITES();
     1731    Assert(offRange == 0);
    14221732    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    14231733    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
     
    14481758    BODY_FLUSH_PENDING_WRITES();
    14491759    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
     1760    Assert(offRange == 0);
    14501761    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    14511762    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
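
To make the labels and jump fixups in the new iemNativeEmitBltLoadTlbAfterBranch easier to follow, here is the control flow of the code it emits, rendered as hypothetical C with gotos. The function and variable names are invented for illustration; fLoopedOnce corresponds to idxRegTmp2, and CheckBranchMiss stands for the kIemNativeLabelType_CheckBranchMiss label:

    /* Hypothetical rendering of the emitted control flow; not actual VBox code. */
    static void tbLoadTlbAfterBranchShape(PVMCPUCC pVCpu, uint64_t uFlatPc, RTGCPHYS GCPhysRangePageWithOffset)
    {
        bool fLoopedOnce = false;                           /* idxRegTmp2 is zeroed up front */
    RedoChecks:
        {
            uint64_t const off = uFlatPc - pVCpu->iem.s.uInstrBufPc;               /* steps 1+2 */
            if (off > X86_PAGE_SIZE - 1)                                           /* step 3    */
                goto TlbLoad;
            if (pVCpu->iem.s.GCPhysInstrBuf + off != GCPhysRangePageWithOffset)    /* step 4    */
                goto CheckBranchMiss;
            return;                                         /* the offFixedJumpToEnd target */
        }
    TlbLoad:
        if (fLoopedOnce)                                    /* second time around: give up */
            goto CheckBranchMiss;
        iemNativeHlpMemCodeNewPageTlbMiss(pVCpu);           /* the TLB-miss call */
        fLoopedOnce = true;
        goto RedoChecks;
    CheckBranchMiss:
        ;                                                   /* ends the TB, counts a branch miss */
    }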
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r102695 r102699  
     36313631 * @param   enmGstReg       The guest register that is to be updated.
    36323632 * @param   enmIntendedUse  How the caller will be using the host register.
     3633 * @param   fNoVolatileRegs Set if no volatile register allowed, clear if any
     3634 *                          register is okay (default).  The ASSUMPTION here is
     3635 *                          that the caller has already flushed all volatile
     3636 *                          registers, so this is only applied if we allocate a
     3637 *                          new register.
    36333638 * @sa      iemNativeRegAllocTmpForGuestRegIfAlreadyPresent
    36343639 */
    36353640DECL_HIDDEN_THROW(uint8_t)
    3636 iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
    3637                                 IEMNATIVEGSTREG enmGstReg, IEMNATIVEGSTREGUSE enmIntendedUse)
     3641iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, IEMNATIVEGSTREG enmGstReg,
     3642                                IEMNATIVEGSTREGUSE enmIntendedUse, bool fNoVolatileRegs /*=false*/)
    36383643{
    36393644    Assert(enmGstReg < kIemNativeGstReg_End && g_aGstShadowInfo[enmGstReg].cb != 0);
     
    36413646    static const char * const s_pszIntendedUse[] = { "fetch", "update", "full write", "destructive calc" };
    36423647#endif
     3648    uint32_t const fRegMask = !fNoVolatileRegs
     3649                            ? IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK
     3650                            : IEMNATIVE_HST_GREG_MASK & ~IEMNATIVE_REG_FIXED_MASK & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK;
    36433651
    36443652    /*
     
    36653673                    & (~IEMNATIVE_REG_FIXED_MASK & IEMNATIVE_HST_GREG_MASK)))
    36663674            {
    3667                 uint8_t const idxRegNew = iemNativeRegAllocTmp(pReNative, poff);
     3675                uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask);
    36683676
    36693677                *poff = iemNativeEmitLoadGprFromGpr(pReNative, *poff, idxRegNew, idxReg);
     
    37023710             */
    37033711            /** @todo share register for readonly access. */
    3704             uint8_t const idxRegNew = iemNativeRegAllocTmp(pReNative, poff, enmIntendedUse == kIemNativeGstRegUse_Calculation);
     3712            uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask,
     3713                                                             enmIntendedUse == kIemNativeGstRegUse_Calculation);
    37053714
    37063715            if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
     
    37213730            idxReg = idxRegNew;
    37223731        }
     3732        Assert(RT_BIT_32(idxReg) & fRegMask); /* See assumption in fNoVolatileRegs docs. */
    37233733
    37243734#ifdef VBOX_STRICT
     
    37333743     * Allocate a new register, load it with the guest value and designate it as a copy of the
    37343744     */
    3735     uint8_t const idxRegNew = iemNativeRegAllocTmp(pReNative, poff, enmIntendedUse == kIemNativeGstRegUse_Calculation);
     3745    uint8_t const idxRegNew = iemNativeRegAllocTmpEx(pReNative, poff, fRegMask, enmIntendedUse == kIemNativeGstRegUse_Calculation);
    37363746
    37373747    if (enmIntendedUse != kIemNativeGstRegUse_ForFullWrite)
     
    87648774#ifdef VBOX_STRICT
    87658775    off = iemNativeEmitTestAnyBitsInGpr(pReNative, off, idxReg, X86_EFL_RA1_MASK);
    8766     off = iemNativeEmitJnzToFixed(pReNative, off, 1);
     8776    uint32_t offFixup = off;
     8777    off = iemNativeEmitJnzToFixed(pReNative, off, off);
    87678778    off = iemNativeEmitBrk(pReNative, off, UINT32_C(0x2001));
     8779    iemNativeFixupFixedJump(pReNative, offFixup, off);
    87688780
    87698781    off = iemNativeEmitTestAnyBitsInGpr(pReNative, off, idxReg, X86_EFL_RAZ_MASK & CPUMX86EFLAGS_HW_MASK_32);
    8770     off = iemNativeEmitJzToFixed(pReNative, off, 1);
     8782    offFixup = off;
     8783    off = iemNativeEmitJzToFixed(pReNative, off, off);
    87718784    off = iemNativeEmitBrk(pReNative, off, UINT32_C(0x2002));
     8785    iemNativeFixupFixedJump(pReNative, offFixup, off);
    87728786#endif
    87738787
     
    1102311037    }
    1102411038    uint32_t const offJmpFixup = off;
    11025     off = iemNativeEmitJzToFixed(pReNative, off, 0);
     11039    off = iemNativeEmitJzToFixed(pReNative, off, off /* ASSUME jz rel8 suffices*/);
    1102611040
    1102711041    /*
     
    1121011224    static const char * const a_apszMarkers[] =
    1121111225    {
    11212         "unknown0", "CheckCsLim", "ConsiderLimChecking", "CheckOpcodes", "PcAfterBranch", "LoadTlbForNewPage"
     11226        /*[0]=*/ "unknown0",        "CheckCsLim",           "ConsiderLimChecking",  "CheckOpcodes",
     11227        /*[4]=*/ "PcAfterBranch",   "LoadTlbForNewPage",    "LoadTlbAfterBranch"
    1121311228    };
    1121411229#endif
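
The new fNoVolatileRegs parameter matches the pattern used by the TLB emitter above: the caller has already flushed the volatile (call-clobbered) registers ahead of a possible helper call, so a freshly allocated guest-register shadow must land in a call-preserved register or it would not survive that call. A usage sketch, lifted from the new emitter (fragment, assumes the surrounding recompiler state):

    /* Make room ahead of a helper call, then allocate a read-only PC shadow in a
       call-preserved register so it survives the call. */
    off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /* vacate all non-volatile regs */);
    uint8_t const idxRegPc = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
                                                             kIemNativeGstRegUse_ReadOnly, true /*fNoVolatileRegs*/);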
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp

    r102663 r102699  
    341341            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
    342342            { /* we're good */ } \
     343            /** @todo r=bird: Not sure if we need the TB obsolete complication here. \
     344             * If we're preceeded by an indirect jump, there is no reason why the TB \
     345             * would be 'obsolete' just because this time around the indirect jump ends \
     346             * up at the same offset in a different page.  This would be real bad for \
     347             * indirect trampolines/validators. */ \
    343348            else if (pTb->aRanges[(a_idxRange)].offPhysPage != off) \
    344349            { \
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py

    r102695 r102699  
    19811981        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim',      2, True  ),
    19821982
    1983         ( 'CheckCsLimAndOpcodesLoadingTlb',                     3, False ),
    1984         ( 'CheckOpcodesLoadingTlb',                             3, False ),
    1985         ( 'CheckOpcodesLoadingTlbConsiderCsLim',                3, False ),
     1983        ( 'CheckCsLimAndOpcodesLoadingTlb',                     3, True ),
     1984        ( 'CheckOpcodesLoadingTlb',                             3, True ),
     1985        ( 'CheckOpcodesLoadingTlbConsiderCsLim',                3, True ),
    19861986
    19871987        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb',           2, True  ),
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

    r102684 r102699  
    814814                                                    bool fPreferVolatile = true);
    815815DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
    816                                                             IEMNATIVEGSTREG enmGstReg, IEMNATIVEGSTREGUSE enmIntendedUse);
     816                                                            IEMNATIVEGSTREG enmGstReg, IEMNATIVEGSTREGUSE enmIntendedUse,
     817                                                            bool fNoVoltileRegs = false);
    817818DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
    818819                                                                            IEMNATIVEGSTREG enmGstReg);
  • trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h

    r102662 r102699  
    29032903 * Emits a Jcc rel32 / B.cc imm19 with a fixed displacement.
    29042904 *
    2905  * The @a offTarget is applied x86-style, so zero means the next instruction.
    2906  * The unit is IEMNATIVEINSTR.
    2907  */
    2908 DECL_INLINE_THROW(uint32_t)
    2909 iemNativeEmitJccToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget, IEMNATIVEINSTRCOND enmCond)
    2910 {
    2911 #ifdef RT_ARCH_AMD64
    2912     /* jcc rel32 */
     2905 * @note The @a offTarget is the absolute jump target (unit is IEMNATIVEINSTR).
     2906 *
     2907 *       Only use hardcoded jumps forward when emitting for exactly one
     2908 *       platform, otherwise apply iemNativeFixupFixedJump() to ensure hitting
     2909 *       the right target address on all platforms!
     2910 *
      2911 *       Please also note that on x86 it is necessary to pass off + 256 or
      2912 *       higher for @a offTarget if one believes the intervening code is more
      2913 *       than 127 bytes long.
     2914 */
     2915DECL_INLINE_THROW(uint32_t)
     2916iemNativeEmitJccToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget, IEMNATIVEINSTRCOND enmCond)
     2917{
     2918#ifdef RT_ARCH_AMD64
     2919    /* jcc rel8 / rel32 */
    29132920    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
    2914     if (offTarget < 128 && offTarget >= -128)
     2921    int32_t         offDisp   = (int32_t)(offTarget - (off + 2));
     2922    if (offDisp < 128 && offDisp >= -128)
    29152923    {
    29162924        pbCodeBuf[off++] = (uint8_t)enmCond | 0x70;
    2917         pbCodeBuf[off++] = RT_BYTE1((uint32_t)offTarget);
    2918     }
    2919     else
    2920     {
     2925        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
     2926    }
     2927    else
     2928    {
     2929        offDisp -= 4;
    29212930        pbCodeBuf[off++] = 0x0f;
    29222931        pbCodeBuf[off++] = (uint8_t)enmCond | 0x80;
    2923         pbCodeBuf[off++] = RT_BYTE1((uint32_t)offTarget);
    2924         pbCodeBuf[off++] = RT_BYTE2((uint32_t)offTarget);
    2925         pbCodeBuf[off++] = RT_BYTE3((uint32_t)offTarget);
    2926         pbCodeBuf[off++] = RT_BYTE4((uint32_t)offTarget);
     2932        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
     2933        pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
     2934        pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
     2935        pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
    29272936    }
    29282937
    29292938#elif defined(RT_ARCH_ARM64)
    29302939    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    2931     pu32CodeBuf[off++] = Armv8A64MkInstrBCond(enmCond, offTarget + 1);
     2940    pu32CodeBuf[off++] = Armv8A64MkInstrBCond(enmCond, (int32_t)(offTarget - off));
    29322941
    29332942#else
     
    29422951 * Emits a JZ/JE rel32 / B.EQ imm19 with a fixed displacement.
    29432952 *
    2944  * The @a offTarget is applied x86-style, so zero means the next instruction.
    2945  * The unit is IEMNATIVEINSTR.
    2946  */
    2947 DECL_INLINE_THROW(uint32_t) iemNativeEmitJzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
     2953 * See notes on @a offTarget in the iemNativeEmitJccToFixed() documentation.
     2954 */
     2955DECL_INLINE_THROW(uint32_t) iemNativeEmitJzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget)
    29482956{
    29492957#ifdef RT_ARCH_AMD64
     
    29602968 * Emits a JNZ/JNE rel32 / B.NE imm19 with a fixed displacement.
    29612969 *
    2962  * The @a offTarget is applied x86-style, so zero means the next instruction.
    2963  * The unit is IEMNATIVEINSTR.
    2964  */
    2965 DECL_INLINE_THROW(uint32_t) iemNativeEmitJnzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
     2970 * See notes on @a offTarget in the iemNativeEmitJccToFixed() documentation.
     2971 */
     2972DECL_INLINE_THROW(uint32_t) iemNativeEmitJnzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget)
    29662973{
    29672974#ifdef RT_ARCH_AMD64
     
    29782985 * Emits a JBE/JNA rel32 / B.LS imm19 with a fixed displacement.
    29792986 *
    2980  * The @a offTarget is applied x86-style, so zero means the next instruction.
    2981  * The unit is IEMNATIVEINSTR.
    2982  */
    2983 DECL_INLINE_THROW(uint32_t) iemNativeEmitJbeToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
     2987 * See notes on @a offTarget in the iemNativeEmitJccToFixed() documentation.
     2988 */
     2989DECL_INLINE_THROW(uint32_t) iemNativeEmitJbeToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget)
    29842990{
    29852991#ifdef RT_ARCH_AMD64
     
    29963002 * Emits a JA/JNBE rel32 / B.HI imm19 with a fixed displacement.
    29973003 *
    2998  * The @a offTarget is applied x86-style, so zero means the next instruction.
    2999  * The unit is IEMNATIVEINSTR.
    3000  */
    3001 DECL_INLINE_THROW(uint32_t) iemNativeEmitJaToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
     3004 * See notes on @a offTarget in the iemNativeEmitJccToFixed() documentation.
     3005 */
     3006DECL_INLINE_THROW(uint32_t) iemNativeEmitJaToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget)
    30023007{
    30033008#ifdef RT_ARCH_AMD64
     
    30123017
    30133018/**
     3019 * Emits a JMP rel32/rel8 / B imm26 with a fixed displacement.
     3020 *
     3021 * See notes on @a offTarget in the iemNativeEmitJccToFixed() documentation.
     3022 */
     3023DECL_INLINE_THROW(uint32_t) iemNativeEmitJmpToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget)
     3024{
     3025#ifdef RT_ARCH_AMD64
     3026    /* jmp rel8 or rel32 */
     3027    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
     3028    int32_t         offDisp   = offTarget - (off + 2);
     3029    if (offDisp < 128 && offDisp >= -128)
     3030    {
     3031        pbCodeBuf[off++] = 0xeb;
     3032        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
     3033    }
     3034    else
     3035    {
     3036        offDisp -= 3;
     3037        pbCodeBuf[off++] = 0xe9;
     3038        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
     3039        pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
     3040        pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
     3041        pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
     3042    }
     3043
     3044#elif defined(RT_ARCH_ARM64)
     3045    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
      3046    pu32CodeBuf[off++] = Armv8A64MkInstrB((int32_t)(offTarget - off));
     3047
     3048#else
     3049# error "Port me!"
     3050#endif
     3051    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     3052    return off;
     3053}
     3054
     3055
     3056/**
    30143057 * Fixes up a conditional jump to a fixed label.
    3015  * @see  iemNativeEmitJnzToFixed, iemNativeEmitJzToFixed, ...
    3016  */
    3017 DECLINLINE(void) iemNativeFixupFixedJump(PIEMRECOMPILERSTATE pReNative, uint32_t offFixup, uint32_t offTarget)
    3018 {
    3019 # if defined(RT_ARCH_AMD64)
     3058 * @see  iemNativeEmitJmpToFixed, iemNativeEmitJnzToFixed,
     3059 *       iemNativeEmitJzToFixed, ...
     3060 */
     3061DECL_INLINE_THROW(void) iemNativeFixupFixedJump(PIEMRECOMPILERSTATE pReNative, uint32_t offFixup, uint32_t offTarget)
     3062{
     3063#ifdef RT_ARCH_AMD64
    30203064    uint8_t * const pbCodeBuf = pReNative->pInstrBuf;
    3021     if (pbCodeBuf[offFixup] != 0x0f)
    3022     {
    3023         Assert((uint8_t)(pbCodeBuf[offFixup] - 0x70) <= 0x10);
     3065    uint8_t const   bOpcode   = pbCodeBuf[offFixup];
     3066    if ((uint8_t)(bOpcode - 0x70) < (uint8_t)0x10 || bOpcode == 0xeb)
     3067    {
    30243068        pbCodeBuf[offFixup + 1] = (uint8_t)(offTarget - (offFixup + 2));
    3025         Assert(pbCodeBuf[offFixup + 1] == offTarget - (offFixup + 2));
    3026     }
    3027     else
    3028     {
    3029         Assert((uint8_t)(pbCodeBuf[offFixup + 1] - 0x80) <= 0x10);
    3030         uint32_t const offRel32 = offTarget - (offFixup + 6);
    3031         pbCodeBuf[offFixup + 2] = RT_BYTE1(offRel32);
    3032         pbCodeBuf[offFixup + 3] = RT_BYTE2(offRel32);
    3033         pbCodeBuf[offFixup + 4] = RT_BYTE3(offRel32);
    3034         pbCodeBuf[offFixup + 5] = RT_BYTE4(offRel32);
    3035     }
    3036 
    3037 # elif defined(RT_ARCH_ARM64)
     3069        AssertStmt(pbCodeBuf[offFixup + 1] == offTarget - (offFixup + 2),
     3070                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_EMIT_FIXED_JUMP_OUT_OF_RANGE));
     3071    }
     3072    else
     3073    {
     3074        if (bOpcode != 0x0f)
     3075            Assert(bOpcode == 0xe9);
     3076        else
     3077        {
     3078            offFixup += 1;
     3079            Assert((uint8_t)(pbCodeBuf[offFixup] - 0x80) <= 0x10);
     3080        }
     3081        uint32_t const offRel32 = offTarget - (offFixup + 5);
     3082        pbCodeBuf[offFixup + 1] = RT_BYTE1(offRel32);
     3083        pbCodeBuf[offFixup + 2] = RT_BYTE2(offRel32);
     3084        pbCodeBuf[offFixup + 3] = RT_BYTE3(offRel32);
     3085        pbCodeBuf[offFixup + 4] = RT_BYTE4(offRel32);
     3086    }
     3087
     3088#elif defined(RT_ARCH_ARM64)
    30383089    uint32_t * const pu32CodeBuf = pReNative->pInstrBuf;
    3039 
    3040     int32_t const offDisp = offTarget - offFixup;
    3041     Assert(offDisp >= -262144 && offDisp < 262144);
    3042     Assert((pu32CodeBuf[offFixup] & UINT32_C(0xff000000)) == UINT32_C(0x54000000)); /* B.COND + BC.COND */
    3043 
    3044     pu32CodeBuf[offFixup] = (pu32CodeBuf[offFixup] & UINT32_C(0xff00001f))
    3045                           | (((uint32_t)offDisp    & UINT32_C(0x0007ffff)) << 5);
    3046 
    3047 # endif
     3090    if ((pu32CodeBuf[offFixup] & UINT32_C(0xff000000)) == UINT32_C(0x54000000))
     3091    {
     3092        /* B.COND + BC.COND */
     3093        int32_t const offDisp = offTarget - offFixup;
     3094        Assert(offDisp >= -262144 && offDisp < 262144);
     3095        pu32CodeBuf[offFixup] = (pu32CodeBuf[offFixup] & UINT32_C(0xff00001f))
     3096                              | (((uint32_t)offDisp    & UINT32_C(0x0007ffff)) << 5);
     3097    }
     3098    else
     3099    {
     3100        /* B imm26 */
     3101        Assert((pu32CodeBuf[offFixup] & UINT32_C(0xfc000000)) == UINT32_C(0x14000000));
     3102        int32_t const offDisp = offTarget - offFixup;
     3103        Assert(offDisp >= -33554432 && offDisp < 33554432);
     3104        pu32CodeBuf[offFixup] = (pu32CodeBuf[offFixup] & UINT32_C(0xfc000000))
     3105                              | ((uint32_t)offDisp     & UINT32_C(0x03ffffff));
     3106    }
     3107
     3108#else
     3109# error "Port me!"
     3110#endif
    30483111}
    30493112
     
    33273390
    33283391/**
     3392 * Emits code that jumps to @a idxLabel if @a iGprSrc is not zero.
     3393 *
     3394 * The operand size is given by @a f64Bit.
     3395 */
     3396DECL_INLINE_THROW(uint32_t) iemNativeEmitTestIfGprIsNotZeroAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     3397                                                                         uint8_t iGprSrc, bool f64Bit, uint32_t idxLabel)
     3398{
     3399    Assert(idxLabel < pReNative->cLabels);
     3400
     3401#ifdef RT_ARCH_AMD64
     3402    /* test reg32,reg32  / test reg64,reg64 */
     3403    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
     3404    if (f64Bit)
     3405        pbCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_R | X86_OP_REX_B);
     3406    else if (iGprSrc >= 8)
     3407        pbCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B;
     3408    pbCodeBuf[off++] = 0x85;
     3409    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprSrc & 7, iGprSrc & 7);
     3410    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     3411
     3412    /* jnz idxLabel  */
     3413    off = iemNativeEmitJnzToLabel(pReNative, off, idxLabel);
     3414
     3415#elif defined(RT_ARCH_ARM64)
     3416    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     3417    iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5);
     3418    pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(true /*fJmpIfNotZero*/, 0, iGprSrc, f64Bit);
     3419    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     3420
     3421#else
     3422# error "Port me!"
     3423#endif
     3424    return off;
     3425}
     3426
     3427
     3428/**
     3429 * Emits code that jumps to a new label if @a iGprSrc is not zero.
     3430 *
     3431 * The operand size is given by @a f64Bit.
     3432 */
     3433DECL_INLINE_THROW(uint32_t)
     3434iemNativeEmitTestIfGprIsNotZeroAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, bool f64Bit,
     3435                                               IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
     3436{
     3437    uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
     3438    return iemNativeEmitTestIfGprIsNotZeroAndJmpToLabel(pReNative, off, iGprSrc, f64Bit, idxLabel);
     3439}
     3440
     3441
     3442/**
    33293443 * Emits code that jumps to the given label if @a iGprLeft and @a iGprRight
    33303444 * differs.
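
With the fixed-jump emitters now taking an absolute instruction offset (and the x86 side picking rel8 vs rel32 from the distance), the safe cross-platform pattern is the one this changeset switches the strict-build EFLAGS checks in IEMAllN8veRecompiler.cpp to: emit the jump with a provisional target, then patch it with iemNativeFixupFixedJump() once the real target offset is known. A usage sketch (fragment, taken from that EFLAGS check above):

    /* Emit a forward jump with a placeholder target, then fix it up. */
    uint32_t const offFixup = off;
    off = iemNativeEmitJnzToFixed(pReNative, off, off);          /* provisional target       */
    off = iemNativeEmitBrk(pReNative, off, UINT32_C(0x2001));    /* code the jump skips      */
    iemNativeFixupFixedJump(pReNative, offFixup, off);           /* patch in the real target */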