VirtualBox

Changeset 105853 in vbox


Ignore:
Timestamp:
Aug 23, 2024 8:36:08 PM (8 months ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
164572
Message:

VMM/IEM: Don't force PC updating before branches, nor flushing of dirty guest shadowed registers either. Both need more work before todo 4 in bugref:10720 can be marked as resolved. bugref:10373 bugref:10629

Location:
trunk/src/VBox/VMM
Files:
8 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py

    r105664 r105853  
    18731873        self.asParams = asParams;
    18741874        self.oUser    = None;
     1875
     1876    def __eq__(self, oOther):
     1877        if self.sName != oOther.sName:
     1878            return False;
     1879        if len(self.asParams) != len(oOther.asParams):
     1880            return False;
     1881        for iParam, sMyParam in enumerate(self.asParams):
     1882            if sMyParam != oOther.asParams[iParam]:
     1883                return False;
     1884        return True;
    18751885
    18761886    def renderCode(self, cchIndent = 0):
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py

    r105768 r105853  
    402402                aoStmts[iStmt] = oStmt;
    403403
     404                fHadEmptyElseBranch = len(oStmt.aoElseBranch) == 0;
     405
    404406                # Check the two branches for final references. Both branches must
    405407                # start processing with the same dVars set, fortunately as shallow
    406408                # copy suffices.
    407409                dFreedInIfBranch   = self.__analyzeVariableLiveness(oStmt.aoIfBranch, dict(dVars), iDepth + 1);
    408                 dFreedInElseBranch = self.__analyzeVariableLiveness(oStmt.aoElseBranch, dVars, iDepth + 1);
     410                dFreedInElseBranch = self.__analyzeVariableLiveness(oStmt.aoElseBranch, dVars,     iDepth + 1);
    409411
    410412                # Add free statements to the start of the IF-branch for variables use
     
    434436                        oStmt.aoIfBranch.insert(0, oFreeStmt);
    435437                        oStmt.aoElseBranch.insert(0, oFreeStmt);
     438
     439                #
     440                # HACK ALERT!
     441                #
     442                # This is a bit backwards, but if the else branch was empty, just zap
     443                # it so we don't create a bunch of unnecessary jumps as well as a
     444                # potential troublesome dirty guest shadowed register flushing for the
     445                # if-branch.  The IEM_MC_ENDIF code is forgiving here and will
     446                # automatically free the lost variables when merging the states.
     447                #
     448                # (In fact this behaviour caused trouble if we moved the IEM_MC_FREE_LOCAL
     449                # statements out of the branches and put them after the IF/ELSE blocks
     450                # to try avoid the unnecessary jump troubles, as the variable would be
     451                # assigned a host register and thus differ in an incompatible way, causing the
     452                # endif code to just free the register and variable both, with the result
     453                # that the IEM_MC_FREE_LOCAL following the IF/ELSE blocks would assert
     454                # since the variable was already freed.)
     455                #
     456                # See iemNativeRecompFunc_cmovne_Gv_Ev__greg64_nn_64 and
     457                # the other cmovcc functions for examples.
     458                #
     459                if fHadEmptyElseBranch:
     460                    oStmt.aoElseBranch = [];
     461                #while (    oStmt.aoIfBranch
     462                #       and oStmt.aoElseBranch
     463                #       and oStmt.aoIfBranch[-1] == oStmt.aoElseBranch[-1]):
     464                #    aoStmts.insert(iStmt + 1, oStmt.aoIfBranch[-1]);
     465                #    del oStmt.aoIfBranch[-1];
     466                #    del oStmt.aoElseBranch[-1];
    436467
    437468            elif not oStmt.isCppStmt():
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h

    r105818 r105853  
    8989
    9090#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    91 # if defined(IEMNATIVE_REG_FIXED_PC_DBG)
     91
     92# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     93/**
     94 * Updates IEMCPU::uPcUpdatingDebug.
     95 */
     96DECL_INLINE_THROW(uint32_t) iemNativeEmitPcDebugAdd(PIEMRECOMPILERSTATE pReNative, uint32_t off, int64_t offDisp, uint8_t cBits)
     97{
     98# ifdef RT_ARCH_AMD64
     99    if (pReNative->Core.fDebugPcInitialized && cBits >= 32)
     100    {
     101        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
     102        if ((int32_t)offDisp == offDisp || cBits != 64)
     103        {
     104            /* add [q]word [pVCpu->iem.s.uPcUpdatingDebug], imm32/imm8 */
     105            if (cBits == 64)
     106                pCodeBuf[off++] = X86_OP_REX_W;
     107            pCodeBuf[off++] = (int8_t)offDisp == offDisp ? 0x83 : 0x81;
     108            off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 0, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
     109            if ((int8_t)offDisp == offDisp)
     110                pCodeBuf[off++] = (int8_t)offDisp;
     111            else
     112            {
     113                *(int32_t *)&pCodeBuf[off] = (int32_t)offDisp;
     114                off += sizeof(int32_t);
     115            }
     116        }
     117        else
     118        {
     119            /* mov tmp0, imm64 */
     120            off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, IEMNATIVE_REG_FIXED_TMP0, offDisp);
     121
     122            /* add [pVCpu->iem.s.uPcUpdatingDebug], tmp0 */
     123            if (cBits == 64)
     124                pCodeBuf[off++] = X86_OP_REX_W | (IEMNATIVE_REG_FIXED_TMP0 >= 8 ? X86_OP_REX_R : 0);
     125            else if (IEMNATIVE_REG_FIXED_TMP0 >= 8)
     126                pCodeBuf[off++] = X86_OP_REX_R;
     127            pCodeBuf[off++] = 0x01;
     128            off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, IEMNATIVE_REG_FIXED_TMP0 & 7,
     129                                             RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
     130        }
     131        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     132        return off;
     133    }
     134# endif
     135
     136    uint8_t const         idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
     137    PIEMNATIVEINSTR const pCodeBuf  = iemNativeInstrBufEnsure(pReNative, off, RT_ARCH_VAL == RT_ARCH_VAL_AMD64 ? 32 : 12);
     138
     139    if (pReNative->Core.fDebugPcInitialized)
     140    {
     141        Log4(("uPcUpdatingDebug+=%ld cBits=%d off=%#x\n", offDisp, cBits, off));
     142        off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, idxTmpReg, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
     143    }
     144    else
     145    {
     146        Log4(("uPcUpdatingDebug=rip+%ld cBits=%d off=%#x\n", offDisp, cBits, off));
     147        pReNative->Core.fDebugPcInitialized = true;
     148        off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, idxTmpReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     149    }
     150
     151    if (cBits == 64)
     152        off = iemNativeEmitAddGprImmEx(pCodeBuf, off, idxTmpReg, offDisp, IEMNATIVE_REG_FIXED_TMP0);
     153    else
     154    {
     155        off = iemNativeEmitAddGpr32ImmEx(pCodeBuf, off, idxTmpReg, (int32_t)offDisp, IEMNATIVE_REG_FIXED_TMP0);
     156        if (cBits == 16)
     157            off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxTmpReg, UINT16_MAX);
     158    }
     159
     160    off = iemNativeEmitStoreGprToVCpuU64Ex(pCodeBuf, off, idxTmpReg, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug),
     161                                           IEMNATIVE_REG_FIXED_TMP0);
     162
     163    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     164    iemNativeRegFreeTmp(pReNative, idxTmpReg);
     165    return off;
     166}
     167
     168
     169# elif defined(IEMNATIVE_REG_FIXED_PC_DBG)
    92170DECL_INLINE_THROW(uint32_t) iemNativePcAdjustCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off)
    93171{
     
    99177}
    100178# endif
     179
    101180#endif /* IEMNATIVE_WITH_DELAYED_PC_UPDATING  */
    102181
     
    391470         * can disregard its state when we hit the IEM_MC_ENDIF.
    392471         */
    393         uint8_t idxCondDepth = pReNative->cCondDepth;
    394         if (idxCondDepth)
    395         {
    396             idxCondDepth--;
    397             if (pReNative->aCondStack[idxCondDepth].fInElse)
    398                 pReNative->aCondStack[idxCondDepth].fElseExitTb = true;
    399             else
    400                 pReNative->aCondStack[idxCondDepth].fIfExitTb   = true;
    401         }
     472        iemNativeMarkCurCondBranchAsExiting(pReNative);
    402473
    403474        /*
     
    510581
    511582    pReNative->Core.offPc += cbInstr;
    512 # if defined(IEMNATIVE_REG_FIXED_PC_DBG)
     583    Log4(("offPc=%x cbInstr=%#x off=%#x\n", pReNative->Core.offPc, cbInstr, off));
     584# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     585    off = iemNativeEmitPcDebugAdd(pReNative, off, cbInstr, 64);
     586    off = iemNativeEmitPcDebugCheck(pReNative, off);
     587# elif defined(IEMNATIVE_REG_FIXED_PC_DBG)
    513588    off = iemNativePcAdjustCheck(pReNative, off);
    514589# endif
     590
    515591    if (pReNative->cCondDepth)
    516592        off = iemNativeEmitPcWriteback(pReNative, off);
    517593    else
    518594        pReNative->Core.cInstrPcUpdateSkipped++;
     595
    519596#endif
    520597
     
    535612DECL_INLINE_THROW(uint32_t)
    536613iemNativeEmitAddToEip32AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
     614{
     615#if !defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) || defined(IEMNATIVE_REG_FIXED_PC_DBG)
     616# ifdef IEMNATIVE_REG_FIXED_PC_DBG
     617    if (!pReNative->Core.offPc)
     618        off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, IEMNATIVE_REG_FIXED_PC_DBG, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     619# endif
     620
     621    /* Allocate a temporary PC register. */
     622    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
     623
     624    /* Perform the addition and store the result. */
     625    off = iemNativeEmitAddGpr32Imm8(pReNative, off, idxPcReg, cbInstr);
     626    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     627
     628    /* Free but don't flush the PC register. */
     629    iemNativeRegFreeTmp(pReNative, idxPcReg);
     630#endif
     631
     632#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     633    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
     634
     635    pReNative->Core.offPc += cbInstr;
     636    Log4(("offPc=%x cbInstr=%#x off=%#x\n", pReNative->Core.offPc, cbInstr, off));
     637# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     638    off = iemNativeEmitPcDebugAdd(pReNative, off, cbInstr, 32);
     639    off = iemNativeEmitPcDebugCheck(pReNative, off);
     640# elif defined(IEMNATIVE_REG_FIXED_PC_DBG)
     641    off = iemNativePcAdjustCheck(pReNative, off);
     642# endif
     643
     644    if (pReNative->cCondDepth)
     645        off = iemNativeEmitPcWriteback(pReNative, off);
     646    else
     647        pReNative->Core.cInstrPcUpdateSkipped++;
     648#endif
     649
     650    return off;
     651}
     652
     653
     654#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr, a_rcNormal) \
     655    off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
     656    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, false /*a_fIsJump*/>(pReNative, off, pCallEntry, 0)
     657
     658#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbInstr, a_rcNormal) \
     659    off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
     660    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
     661    off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, false /*a_fIsJump*/>(pReNative, off, pCallEntry, 0)
     662
     663/** Same as iemRegAddToIp16AndFinishingNoFlags. */
     664DECL_INLINE_THROW(uint32_t)
     665iemNativeEmitAddToIp16AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
    537666{
    538667#if !defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) || defined(IEMNATIVE_REG_FIXED_PC_DBG)
     
    547676    /* Perform the addition and store the result. */
    548677    off = iemNativeEmitAddGpr32Imm8(pReNative, off, idxPcReg, cbInstr);
     678    off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
    549679    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
    550680
     
    557687
    558688    pReNative->Core.offPc += cbInstr;
    559 # if defined(IEMNATIVE_REG_FIXED_PC_DBG)
     689    Log4(("offPc=%x cbInstr=%#x off=%#x\n", pReNative->Core.offPc, cbInstr, off));
     690# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     691    off = iemNativeEmitPcDebugAdd(pReNative, off, cbInstr, 16);
     692    off = iemNativeEmitPcDebugCheck(pReNative, off);
     693# elif defined(IEMNATIVE_REG_FIXED_PC_DBG)
    560694    off = iemNativePcAdjustCheck(pReNative, off);
    561695# endif
    562     if (pReNative->cCondDepth)
    563         off = iemNativeEmitPcWriteback(pReNative, off);
    564     else
    565         pReNative->Core.cInstrPcUpdateSkipped++;
    566 #endif
    567 
    568     return off;
    569 }
    570 
    571 
    572 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr, a_rcNormal) \
    573     off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
    574     off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, false /*a_fIsJump*/>(pReNative, off, pCallEntry, 0)
    575 
    576 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbInstr, a_rcNormal) \
    577     off = iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr)); \
    578     off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off); \
    579     off = iemNativeEmitFinishInstructionWithStatus<a_rcNormal, false /*a_fIsJump*/>(pReNative, off, pCallEntry, 0)
    580 
    581 /** Same as iemRegAddToIp16AndFinishingNoFlags. */
    582 DECL_INLINE_THROW(uint32_t)
    583 iemNativeEmitAddToIp16AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
    584 {
    585 #if !defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) || defined(IEMNATIVE_REG_FIXED_PC_DBG)
    586 # if defined(IEMNATIVE_REG_FIXED_PC_DBG)
    587     if (!pReNative->Core.offPc)
    588         off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, IEMNATIVE_REG_FIXED_PC_DBG, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
    589 # endif
    590 
    591     /* Allocate a temporary PC register. */
    592     uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
    593 
    594     /* Perform the addition and store the result. */
    595     off = iemNativeEmitAddGpr32Imm8(pReNative, off, idxPcReg, cbInstr);
    596     off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
    597     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
    598 
    599     /* Free but don't flush the PC register. */
    600     iemNativeRegFreeTmp(pReNative, idxPcReg);
    601 #endif
    602 
    603 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    604     STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
    605 
    606     pReNative->Core.offPc += cbInstr;
    607 # if defined(IEMNATIVE_REG_FIXED_PC_DBG)
    608     off = iemNativePcAdjustCheck(pReNative, off);
    609 # endif
     696
    610697    if (pReNative->cCondDepth)
    611698        off = iemNativeEmitPcWriteback(pReNative, off);
     
    701788
    702789    /* We speculatively modify PC and may raise #GP(0), so make sure the right values are in CPUMCTX. */
     790/** @todo relax this one, we won't raise \#GP when a_fWithinPage is true. */
    703791    off = iemNativeRegFlushPendingWrites(pReNative, off);
    704792
    705793#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    706794    Assert(pReNative->Core.offPc == 0);
    707 
    708795    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
    709796#endif
     
    728815        off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
    729816    }
     817#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     818    off = iemNativeEmitPcDebugAdd(pReNative, off, (int64_t)offDisp + cbInstr, enmEffOpSize == IEMMODE_64BIT ? 64 : 16);
     819    off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
     820#endif
     821
    730822    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
    731823
     
    819911#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    820912    Assert(pReNative->Core.offPc == 0);
    821 
    822913    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
    823914#endif
     
    836927    if (!a_fFlat)
    837928        off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
     929
     930#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     931    off = iemNativeEmitPcDebugAdd(pReNative, off, offDisp + cbInstr, enmEffOpSize == IEMMODE_32BIT ? 32 : 16);
     932    off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
     933#endif
    838934
    839935    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     
    883979#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    884980    Assert(pReNative->Core.offPc == 0);
    885 
    886981    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
    887982#endif
     
    894989    off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
    895990    off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
     991#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     992    off = iemNativeEmitPcDebugAdd(pReNative, off, offDisp + cbInstr, 16);
     993    off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
     994#endif
    896995    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
    897996
     
    9911090#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    9921091    Assert(pReNative->Core.offPc == 0);
    993 
    9941092    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
    9951093#endif
     
    10091107    /* Store the result. */
    10101108    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     1109
     1110#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     1111    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
     1112    pReNative->Core.fDebugPcInitialized = true;
     1113    Log4(("uPcUpdatingDebug=rip off=%#x\n", off));
     1114#endif
    10111115
    10121116    iemNativeVarRegisterRelease(pReNative, idxVarPc);
     
    14011505#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    14021506    Assert(pReNative->Core.offPc == 0);
    1403 
    14041507    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
    14051508#endif
     
    14581561    /* Store the result. */
    14591562    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxNewPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     1563#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     1564    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxNewPcReg, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
     1565    pReNative->Core.fDebugPcInitialized = true;
     1566    Log4(("uPcUpdatingDebug=rip/indirect-call off=%#x\n", off));
     1567#endif
    14601568
    14611569#if 1
     
    15231631#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    15241632    Assert(pReNative->Core.offPc == 0);
    1525 
    15261633    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
    15271634#endif
     
    15541661    /* Store the result. */
    15551662    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     1663#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     1664    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
     1665    pReNative->Core.fDebugPcInitialized = true;
     1666    Log4(("uPcUpdatingDebug=rip/rel-call-16 off=%#x offDisp=%d\n", off, offDisp));
     1667#endif
    15561668
    15571669    /* Need to transfer the shadow information to the new RIP register. */
     
    15891701#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    15901702    Assert(pReNative->Core.offPc == 0);
    1591 
    15921703    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
    15931704#endif
     
    16171728    /* Store the result. */
    16181729    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     1730#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     1731    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
     1732    pReNative->Core.fDebugPcInitialized = true;
     1733    Log4(("uPcUpdatingDebug=eip/rel-call-32 off=%#x offDisp=%d\n", off, offDisp));
     1734#endif
    16191735
    16201736    /* Need to transfer the shadow information to the new RIP register. */
     
    16521768#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    16531769    Assert(pReNative->Core.offPc == 0);
    1654 
    16551770    STAM_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativePcUpdateTotal);
    16561771#endif
     
    16781793    /* Store the result. */
    16791794    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     1795#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     1796    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcRegNew, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
     1797    pReNative->Core.fDebugPcInitialized = true;
     1798    Log4(("uPcUpdatingDebug=rip/rel-call-64 off=%#x offDisp=%ld\n", off, offDisp));
     1799#endif
    16801800
    16811801    /* Need to transfer the shadow information to the new RIP register. */
     
    17021822    off = iemNativeEmitAddGpr16ImmEx(pCodeBuf, off, idxRegRsp, cbPopAdd); /* ASSUMES this does NOT modify bits [63:16]! */
    17031823    RT_NOREF(idxRegTmp);
     1824
    17041825#elif defined(RT_ARCH_ARM64)
    17051826    /* ubfiz regeff, regrsp, #0, #16 - copies bits 15:0 from RSP to EffSp bits 15:0, zeroing bits 63:16. */
     
    17161837    /* bfi regrsp, regeff, #0, #16 - moves bits 15:0 from tmp to RSP bits 15:0, keeping the other RSP bits as is. */
    17171838    pCodeBuf[off++] = Armv8A64MkInstrBfi(idxRegRsp, idxRegTmp, 0, 16, false /*f64Bit*/);
     1839
    17181840#else
    17191841# error "Port me"
     
    19982120
    19992121    /* Commit the result and clear any current guest shadows for RIP. */
    2000     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegRsp, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rsp));
     2122    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegRsp,       RT_UOFFSETOF(VMCPU, cpum.GstCtx.rsp));
    20012123    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegMemResult, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
    2002     iemNativeRegClearAndMarkAsGstRegShadow(pReNative, idxRegMemResult,  kIemNativeGstReg_Pc, off);
     2124    iemNativeRegClearAndMarkAsGstRegShadow(pReNative, idxRegMemResult, kIemNativeGstReg_Pc, off);
     2125#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     2126    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxRegMemResult, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
     2127    pReNative->Core.fDebugPcInitialized = true;
     2128    Log4(("uPcUpdatingDebug=rip/ret off=%#x\n", off));
     2129#endif
    20032130
    20042131    /* Need to transfer the shadowing information to the host register containing the updated value now. */
     
    24642591 *          (too many nestings)
    24652592 */
    2466 DECL_INLINE_THROW(PIEMNATIVECOND) iemNativeCondPushIf(PIEMRECOMPILERSTATE pReNative, uint32_t *poff)
    2467 {
    2468 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    2469     *poff = iemNativeRegFlushPendingWrites(pReNative, *poff);
    2470 #endif
    2471 
     2593DECL_INLINE_THROW(PIEMNATIVECOND) iemNativeCondPushIf(PIEMRECOMPILERSTATE pReNative)
     2594{
    24722595    uint32_t const idxStack = pReNative->cCondDepth;
    24732596    AssertStmt(idxStack < RT_ELEMENTS(pReNative->aCondStack), IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_COND_TOO_DEEPLY_NESTED));
     
    25082631#endif
    25092632
    2510 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    2511     Assert(pReNative->Core.offPc == 0);
    2512 #endif
    2513 
    25142633    /* Copy the initial state so we can restore it in the 'else' block. */
    25152634    pEntry->InitialState = pReNative->Core;
     
    25302649    Assert(!pEntry->fInElse);
    25312650
    2532 #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    2533     /* Writeback any dirty shadow registers. */
    2534     /** @todo r=aeichner Possible optimization is to only writeback guest registers which became dirty
    2535      *                   in one of the branches and leave guest registers already dirty before the start of the if
    2536      *                   block alone. */
    2537     off = iemNativeRegFlushDirtyGuest(pReNative, off);
    2538 #endif
    2539 
    2540     /* Jump to the endif */
    2541     off = iemNativeEmitJmpToLabel(pReNative, off, pEntry->idxLabelEndIf);
     2651    /* We can skip the dirty register flushing and the endif jump if
     2652       the branch already jumped to a TB exit. */
     2653    if (!pEntry->fIfExitTb)
     2654    {
     2655#if defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK) && 0
     2656        /* Writeback any dirty shadow registers. */
     2657        /** @todo r=aeichner Possible optimization is to only writeback guest registers which became dirty
     2658         *                   in one of the branches and leave guest registers already dirty before the start of the if
     2659         *                   block alone. */
     2660        off = iemNativeRegFlushDirtyGuest(pReNative, off);
     2661#endif
     2662
     2663        /* Jump to the endif. */
     2664        off = iemNativeEmitJmpToLabel(pReNative, off, pEntry->idxLabelEndIf);
     2665    }
     2666# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     2667    else
     2668        Assert(pReNative->Core.offPc == 0);
     2669# endif
    25422670
    25432671    /* Define the else label and enter the else part of the condition. */
    25442672    iemNativeLabelDefine(pReNative, pEntry->idxLabelElse, off);
    25452673    pEntry->fInElse = true;
    2546 
    2547 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    2548     Assert(pReNative->Core.offPc == 0);
    2549 #endif
    25502674
    25512675    /* Snapshot the core state so we can do a merge at the endif and restore
     
    25682692    Assert(pReNative->cCondDepth > 0 && pReNative->cCondDepth <= RT_ELEMENTS(pReNative->aCondStack));
    25692693    PIEMNATIVECOND const pEntry = &pReNative->aCondStack[pReNative->cCondDepth - 1];
    2570 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    2571     Assert(pReNative->Core.offPc == 0);
     2694
     2695#if defined(IEMNATIVE_WITH_DELAYED_PC_UPDATING) && 0
     2696    off = iemNativeRegFlushDirtyGuest(pReNative, off);
    25722697#endif
    25732698
     
    25762701     * other branch and skip all the merging headache.
    25772702     */
     2703    bool fDefinedLabels = false;
    25782704    if (pEntry->fElseExitTb || pEntry->fIfExitTb)
    25792705    {
     
    26002726    else
    26012727    {
    2602 #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    2603         /* Writeback any dirty shadow registers (else branch). */
    2604         /** @todo r=aeichner Possible optimization is to only writeback guest registers which became dirty
    2605          *                   in one of the branches and leave guest registers already dirty before the start of the if
    2606          *                   block alone. */
    2607         off = iemNativeRegFlushDirtyGuest(pReNative, off);
    2608 #endif
    2609 
    26102728        /*
    26112729         * Now we have find common group with the core state at the end of the
     
    26172735         *        But we'd need more info about future for that to be worth the effort. */
    26182736        PCIEMNATIVECORESTATE const pOther = pEntry->fInElse ? &pEntry->IfFinalState : &pEntry->InitialState;
    2619 #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    2620         Assert(   pOther->bmGstRegShadowDirty == 0
    2621                && pReNative->Core.bmGstRegShadowDirty == 0);
     2737#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     2738        AssertMsgStmt(pReNative->Core.offPc == pOther->offPc,
     2739                      ("Core.offPc=%#x pOther->offPc=%#x\n", pReNative->Core.offPc, pOther->offPc),
     2740                      IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_COND_ENDIF_RECONCILIATION_FAILED));
    26222741#endif
    26232742
    26242743        if (memcmp(&pReNative->Core, pOther, sizeof(*pOther)) != 0)
    26252744        {
    2626             /* shadow guest stuff first. */
     2745#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
     2746            /*
     2747             * If the branch has differences in dirty shadow registers, we will flush
     2748             * the register only dirty in the current branch and dirty any that's only
     2749             * dirty in the other one.
     2750             */
     2751            uint64_t const fGstRegDirtyOther = pOther->bmGstRegShadowDirty;
     2752            uint64_t const fGstRegDirtyThis  = pReNative->Core.bmGstRegShadowDirty;
     2753            uint64_t const fGstRegDirtyDiff  = fGstRegDirtyOther ^ fGstRegDirtyThis;
     2754            uint64_t const fGstRegDirtyHead  = fGstRegDirtyThis  & fGstRegDirtyDiff;
     2755            uint64_t       fGstRegDirtyTail  = fGstRegDirtyOther & fGstRegDirtyDiff;
     2756            if (!fGstRegDirtyDiff)
     2757            { /* likely */ }
     2758            else
     2759            {
     2760                //uint64_t const fGstRegDirtyHead = pReNative->Core.bmGstRegShadowDirty & fGstRegDirtyDiff;
     2761                if (fGstRegDirtyHead)
     2762                {
     2763                    Log12(("iemNativeEmitEndIf: flushing dirty guest registers in current branch: %RX64\n", fGstRegDirtyHead));
     2764                    off = iemNativeRegFlushDirtyGuest(pReNative, off, fGstRegDirtyHead);
     2765                }
     2766            }
     2767#endif
     2768
     2769            /*
     2770             * Shadowed guest registers.
     2771             *
     2772             * We drop any shadows where the two states disagree about where
     2773             * things are kept.  We may end up flushing dirty more registers
     2774             * here, if the two branches keeps things in different registers.
     2775             */
    26272776            uint64_t fGstRegs = pReNative->Core.bmGstRegShadows;
    26282777            if (fGstRegs)
     
    26342783                    fGstRegs &= ~RT_BIT_64(idxGstReg);
    26352784
    2636                     uint8_t const idxHstReg = pReNative->Core.aidxGstRegShadows[idxGstReg];
    2637                     if (  !(pOther->bmGstRegShadows & RT_BIT_64(idxGstReg))
    2638                         || idxHstReg != pOther->aidxGstRegShadows[idxGstReg])
     2785                    uint8_t const idxCurHstReg   = pReNative->Core.aidxGstRegShadows[idxGstReg];
     2786                    uint8_t const idxOtherHstReg = pOther->aidxGstRegShadows[idxGstReg];
     2787                    if (   idxCurHstReg != idxOtherHstReg
     2788                        || !(pOther->bmGstRegShadows & RT_BIT_64(idxGstReg)))
    26392789                    {
    2640                         Log12(("iemNativeEmitEndIf: dropping gst %s from hst %s\n",
    2641                                g_aGstShadowInfo[idxGstReg].pszName, g_apszIemNativeHstRegNames[idxHstReg]));
    2642 
    2643 #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    2644                         /* Writeback any dirty shadow registers we are about to unshadow. */
    2645                         off = iemNativeRegFlushDirtyGuestByHostRegShadow(pReNative, off, idxHstReg);
    2646 #endif
    2647                         iemNativeRegClearGstRegShadowing(pReNative, idxHstReg, off);
     2790#ifndef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
     2791                        Log12(("iemNativeEmitEndIf: dropping gst %s (%d) from hst %s (other %d/%#RX64)\n",
     2792                               g_aGstShadowInfo[idxGstReg].pszName, idxGstReg, g_apszIemNativeHstRegNames[idxCurHstReg],
     2793                               idxOtherHstReg, pOther->bmGstRegShadows));
     2794#else
     2795                        Log12(("iemNativeEmitEndIf: dropping %s gst %s (%d) from hst %s (other %d/%#RX64/%s)\n",
     2796                               pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(idxGstReg) ? "_dirty_" : "clean",
     2797                               g_aGstShadowInfo[idxGstReg].pszName, idxGstReg, g_apszIemNativeHstRegNames[idxCurHstReg],
     2798                               idxOtherHstReg, pOther->bmGstRegShadows,
     2799                               pOther->bmGstRegShadowDirty & RT_BIT_64(idxGstReg) ? "dirty" : "clean"));
     2800                        if (pOther->bmGstRegShadowDirty & RT_BIT_64(idxGstReg))
     2801                            fGstRegDirtyTail |= RT_BIT_64(idxGstReg);
     2802                        if (pReNative->Core.bmGstRegShadowDirty & RT_BIT_64(idxGstReg))
     2803                            off = iemNativeRegFlushPendingWrite(pReNative, off, (IEMNATIVEGSTREG)idxGstReg);
     2804#endif
     2805                        iemNativeRegClearGstRegShadowingOne(pReNative, idxCurHstReg, (IEMNATIVEGSTREG)idxGstReg, off);
    26482806                    }
    26492807                } while (fGstRegs);
    26502808            }
    26512809            else
     2810                Assert(pReNative->Core.bmHstRegsWithGstShadow == 0);
     2811
     2812#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
     2813            /*
     2814             * Generate jumpy code for flushing dirty registers from the other
     2815             * branch that aren't dirty in the current one.
     2816             */
     2817            if (!fGstRegDirtyTail)
     2818            { /* likely */ }
     2819            else
    26522820            {
    2653                 Assert(pReNative->Core.bmHstRegsWithGstShadow == 0);
    2654 #ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    2655                 Assert(pReNative->Core.bmGstRegShadowDirty == 0);
    2656 #endif
     2821                STAM_REL_COUNTER_INC(&pReNative->pVCpu->iem.s.StatNativeEndIfOtherBranchDirty);
     2822                Log12(("iemNativeEmitEndIf: Dirty register only in the other branch: %#RX64 - BAD!\n", fGstRegDirtyTail));
     2823
     2824                /* First the current branch has to jump over the dirty flushing from the other branch. */
     2825                uint32_t const offFixup1 = off;
     2826                off = iemNativeEmitJmpToFixed(pReNative, off, off + 10);
     2827
     2828                /* Put the endif and maybe else label here so the other branch ends up here. */
     2829                if (!pEntry->fInElse)
     2830                    iemNativeLabelDefine(pReNative, pEntry->idxLabelElse, off);
     2831                else
     2832                    Assert(pReNative->paLabels[pEntry->idxLabelElse].off <= off);
     2833                iemNativeLabelDefine(pReNative, pEntry->idxLabelEndIf, off);
     2834                fDefinedLabels = true;
     2835
     2836                /* Flush the dirty guest registers from the other branch. */
     2837                while (fGstRegDirtyTail)
     2838                {
     2839                    unsigned idxGstReg = ASMBitFirstSetU64(fGstRegDirtyTail) - 1;
     2840                    fGstRegDirtyTail &= ~RT_BIT_64(idxGstReg);
     2841                    Log12(("iemNativeEmitEndIf: tail flushing %s (%d) from other branch %d (cur %d/%#RX64)\n",
     2842                           g_aGstShadowInfo[idxGstReg].pszName, idxGstReg, pOther->aidxGstRegShadows[idxGstReg],
     2843                           pReNative->Core.aidxGstRegShadows[idxGstReg], pReNative->Core.bmGstRegShadows));
     2844
     2845                    off = iemNativeRegFlushPendingWriteEx(pReNative, off, (PIEMNATIVECORESTATE)pOther, (IEMNATIVEGSTREG)idxGstReg);
     2846
     2847                    /* Mismatching shadowing should've been dropped in the previous step already. */
     2848                    Assert(   !(pReNative->Core.bmGstRegShadows & RT_BIT_64(idxGstReg))
     2849                           || pReNative->Core.aidxGstRegShadows[idxGstReg] == pOther->aidxGstRegShadows[idxGstReg]);
     2850                }
     2851
     2852                /* Here is the actual endif label, fixup the above jump to land here. */
     2853                iemNativeFixupFixedJump(pReNative, offFixup1, off);
    26572854            }
    2658 
    2659             /* Check variables next. For now we must require them to be identical
    2660                or stuff we can recreate. */
     2855#endif
     2856
     2857            /*
     2858             * Check variables next. For now we must require them to be identical
     2859             * or stuff we can recreate. (No code is emitted here.)
     2860             */
    26612861            Assert(pReNative->Core.u64ArgVars == pOther->u64ArgVars);
    2662             uint32_t fVars = pReNative->Core.bmVars | pOther->bmVars;
     2862#ifdef VBOX_STRICT
     2863            uint32_t const offAssert = off;
     2864#endif
     2865            uint32_t       fVars     = pReNative->Core.bmVars | pOther->bmVars;
    26632866            if (fVars)
    26642867            {
     
    27022905                } while (fVars);
    27032906            }
    2704 
    2705             /* Finally, check that the host register allocations matches. */
    2706             AssertMsgStmt(pReNative->Core.bmHstRegs == pOther->bmHstRegs,
     2907            Assert(off == offAssert);
     2908
     2909            /*
     2910             * Finally, check that the host register allocations matches.
     2911             */
     2912            AssertMsgStmt((pReNative->Core.bmHstRegs & (pReNative->Core.bmHstRegs ^ pOther->bmHstRegs)) == 0,
    27072913                          ("Core.bmHstRegs=%#x pOther->bmHstRegs=%#x - %#x\n",
    27082914                           pReNative->Core.bmHstRegs, pOther->bmHstRegs, pReNative->Core.bmHstRegs ^ pOther->bmHstRegs),
     
    27142920     * Define the endif label and maybe the else one if we're still in the 'if' part.
    27152921     */
    2716     if (!pEntry->fInElse)
    2717         iemNativeLabelDefine(pReNative, pEntry->idxLabelElse, off);
    2718     else
    2719         Assert(pReNative->paLabels[pEntry->idxLabelElse].off <= off);
    2720     iemNativeLabelDefine(pReNative, pEntry->idxLabelEndIf, off);
     2922    if (!fDefinedLabels)
     2923    {
     2924        if (!pEntry->fInElse)
     2925            iemNativeLabelDefine(pReNative, pEntry->idxLabelElse, off);
     2926        else
     2927            Assert(pReNative->paLabels[pEntry->idxLabelElse].off <= off);
     2928        iemNativeLabelDefine(pReNative, pEntry->idxLabelEndIf, off);
     2929    }
    27212930
    27222931    /* Pop the conditional stack.*/
     
    27352944{
    27362945    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitsInEfl);
    2737     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     2946    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    27382947
    27392948    /* Get the eflags. */
     
    27622971{
    27632972    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitsInEfl);
    2764     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     2973    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    27652974
    27662975    /* Get the eflags. */
     
    27892998{
    27902999    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
    2791     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3000    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    27923001
    27933002    /* Get the eflags. */
     
    28193028{
    28203029    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
    2821     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3030    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    28223031
    28233032    /* Get the eflags. */
     
    28553064{
    28563065    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBit1InEfl | fBit2InEfl);
    2857     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3066    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    28583067
    28593068    /* Get the eflags. */
     
    29293138{
    29303139    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl | fBit1InEfl | fBit2InEfl);
    2931     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3140    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    29323141
    29333142    /* We need an if-block label for the non-inverted variant. */
     
    30153224DECL_INLINE_THROW(uint32_t) iemNativeEmitIfCxIsNotZero(PIEMRECOMPILERSTATE pReNative, uint32_t off)
    30163225{
    3017     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3226    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    30183227
    30193228    uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
     
    30383247DECL_INLINE_THROW(uint32_t) iemNativeEmitIfRcxEcxIsNotZero(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool f64Bit)
    30393248{
    3040     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3249    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    30413250
    30423251    uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
     
    30573266DECL_INLINE_THROW(uint32_t) iemNativeEmitIfCxIsNotOne(PIEMRECOMPILERSTATE pReNative, uint32_t off)
    30583267{
    3059     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3268    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    30603269
    30613270    uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
     
    30863295DECL_INLINE_THROW(uint32_t) iemNativeEmitIfRcxEcxIsNotOne(PIEMRECOMPILERSTATE pReNative, uint32_t off, bool f64Bit)
    30873296{
    3088     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3297    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    30893298
    30903299    uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_GPR(X86_GREG_xCX),
     
    31153324{
    31163325    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
    3117     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3326    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    31183327
    31193328    /* We have to load both RCX and EFLAGS before we can start branching,
     
    31773386{
    31783387    IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, fBitInEfl);
    3179     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3388    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    31803389
    31813390    /* We have to load both RCX and EFLAGS before we can start branching,
     
    32193428iemNativeEmitIfLocalIsZ(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVarLocal)
    32203429{
    3221     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3430    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    32223431
    32233432    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVarLocal);
     
    32453454iemNativeEmitIfGregBitSet(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t iBitNo)
    32463455{
    3247     PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative, &off);
     3456    PIEMNATIVECOND const pEntry = iemNativeCondPushIf(pReNative);
    32483457    Assert(iGReg < 16);
    32493458
     
    35273736        fGstShwFlush |= RT_BIT_64(kIemNativeGstReg_EFlags);
    35283737    iemNativeRegFlushGuestShadows(pReNative, fGstShwFlush);
     3738
     3739#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     3740    pReNative->Core.fDebugPcInitialized = false;
     3741    Log4(("fDebugPcInitialized=false cimpl off=%#x (v1)\n", off));
     3742#endif
    35293743
    35303744    return iemNativeEmitCheckCallRetAndPassUp(pReNative, off, idxInstr);
     
    67036917         */
    67046918        /* Allocate a temporary PC register. */
    6705         uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
     6919/** @todo r=bird: This would technically need to be done up front as it's a register allocation. */
     6920        uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
     6921                                                                 kIemNativeGstRegUse_ForUpdate);
    67066922
    67076923        /* Perform the addition and store the result. */
    67086924        off = iemNativeEmitAddGprImm(pReNative, off, idxPcReg, pReNative->Core.offPc);
    67096925        off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     6926# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     6927        off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
     6928# endif
    67106929
    67116930        /* Free and flush the PC register. */
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r105739 r105853  
    88 *      - Level 2  (Log2) : Details calls as they're recompiled.
    99 *      - Level 3  (Log3) : Disassemble native code after recompiling.
    10  *      - Level 4  (Log4) : ...
     10 *      - Level 4  (Log4) : Delayed PC updating.
    1111 *      - Level 5  (Log5) : ...
    1212 *      - Level 6  (Log6) : ...
     
    20722072    pReNative->Core.offPc                  = 0;
    20732073    pReNative->Core.cInstrPcUpdateSkipped  = 0;
     2074# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     2075    pReNative->Core.fDebugPcInitialized    = false;
     2076# endif
    20742077#endif
    20752078#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
     
    29772980#endif /* unused */
    29782981
    2979 
    29802982#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
     2983
    29812984/**
    29822985 * Stores the host reg @a idxHstReg into guest shadow register @a enmGstReg.
     
    30023005        case sizeof(uint16_t):
    30033006            return iemNativeEmitStoreGprToVCpuU16(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
    3004 #if 0 /* not present in the table. */
     3007# if 0 /* not present in the table. */
    30053008        case sizeof(uint8_t):
    30063009            return iemNativeEmitStoreGprToVCpuU8(pReNative, off, idxHstReg, g_aGstShadowInfo[enmGstReg].off);
    3007 #endif
     3010# endif
    30083011        default:
    30093012            AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IPE_NOT_REACHED_DEFAULT_CASE));
     
    30133016
    30143017/**
    3015  * Emits code to flush a pending write of the given guest register if any.
     3018 * Emits code to flush a pending write of the given guest register,
     3019 * version with alternative core state.
     3020 *
     3021 * @returns New code buffer offset.
     3022 * @param   pReNative       The native recompile state.
     3023 * @param   off             Current code buffer position.
     3024 * @param   pCore           Alternative core state.
     3025 * @param   enmGstReg       The guest register to flush.
     3026 */
     3027DECL_HIDDEN_THROW(uint32_t)
     3028iemNativeRegFlushPendingWriteEx(PIEMRECOMPILERSTATE pReNative, uint32_t off, PIEMNATIVECORESTATE pCore, IEMNATIVEGSTREG enmGstReg)
     3029{
     3030    uint8_t const idxHstReg = pCore->aidxGstRegShadows[enmGstReg];
     3031
     3032    Assert(   (   enmGstReg >= kIemNativeGstReg_GprFirst
     3033               && enmGstReg <= kIemNativeGstReg_GprLast)
     3034           || enmGstReg == kIemNativeGstReg_MxCsr);
     3035    Assert(   idxHstReg != UINT8_MAX
     3036           && pCore->bmGstRegShadowDirty & RT_BIT_64(enmGstReg));
     3037    Log12(("iemNativeRegFlushPendingWriteEx: Clearing guest register %s shadowed by host %s (off=%#x)\n",
     3038           g_aGstShadowInfo[enmGstReg].pszName, g_apszIemNativeHstRegNames[idxHstReg], off));
     3039
     3040    off = iemNativeEmitStoreGprWithGstShadowReg(pReNative, off, enmGstReg, idxHstReg);
     3041
     3042    pCore->bmGstRegShadowDirty &= ~RT_BIT_64(enmGstReg);
     3043    return off;
     3044}
     3045
     3046
     3047/**
     3048 * Emits code to flush a pending write of the given guest register.
    30163049 *
    30173050 * @returns New code buffer offset.
     
    30913124        iemNativeDbgInfoAddGuestRegWriteback(pReNative, false /*fSimdReg*/, pReNative->Core.bmGstRegShadowDirty & fGstRegShadows);
    30923125# endif
    3093         /** @todo r=bird: This is a crap way of enumerating a bitmask where we're
    3094          *        likely to only have a single bit set. It'll be in the 0..15 range,
    3095          *        but still it's 15 unnecessary loops for the last guest register.  */
    3096 
    30973126        uint64_t bmGstRegShadowDirty = pReNative->Core.bmGstRegShadowDirty & fGstRegShadows;
    30983127        do
     
    31073136    return off;
    31083137}
    3109 #endif
     3138
     3139#endif /* IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK */
    31103140
    31113141
     
    56675697}
    56685698
    5669 
    56705699#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     5700
     5701# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     5702
/**
 * Checks if the value in @a idxPcReg matches IEMCPU::uPcUpdatingDebug.
 *
 * Emits a compare of @a idxPcReg against the uPcUpdatingDebug field in the
 * VMCPU structure, followed by a breakpoint/trap that is jumped over when
 * the two values are equal.  Debug aid for delayed PC updating.
 *
 * @returns New code buffer offset.
 * @param   pReNative       The native recompile state.
 * @param   off             Current code buffer position.
 * @param   idxPcReg        Host register holding the PC value to check; must
 *                          not be the fixed temporary (used on ARM64 below).
 */
DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcDebugCheckWithReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxPcReg)
{
    Assert(idxPcReg != IEMNATIVE_REG_FIXED_TMP0);
    Assert(pReNative->Core.fDebugPcInitialized);

    /* cmp [pVCpu->iem.s.uPcUpdatingDebug], pcreg */
#  ifdef RT_ARCH_AMD64
    /* REX.W (+ REX.R for r8..r15) and opcode 0x3b; ModRM/disp via helper. */
    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 32);
    pCodeBuf[off++] = X86_OP_REX_W | (idxPcReg >= 8 ? X86_OP_REX_R : 0);
    pCodeBuf[off++] = 0x3b;
    off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, idxPcReg & 7, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
#  else
    /* No reg-vs-memory compare on ARM64: load the debug value into the fixed
       temporary first, then do a register-register compare. */
    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, IEMNATIVE_REG_FIXED_TMP0, RT_UOFFSETOF(VMCPU, iem.s.uPcUpdatingDebug));
    off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, IEMNATIVE_REG_FIXED_TMP0, idxPcReg);
#  endif

    /* On equality jump past the trap instruction; the jump target is fixed
       up once the trap has been emitted. */
    uint32_t offFixup = off;
    off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 1, kIemNativeInstrCond_e);
    off = iemNativeEmitBrkEx(pCodeBuf, off, UINT32_C(0x2200));
    iemNativeFixupFixedJump(pReNative, offFixup, off);

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}
     5731
     5732
/**
 * Checks that the current RIP+offPc matches IEMCPU::uPcUpdatingDebug.
 *
 * No-op until the debug PC shadow has been initialized.  When there is a
 * pending (delayed) PC offset, the check is performed on PC + offPc computed
 * into a scratch register, leaving the shadowed PC register untouched.
 *
 * @returns New code buffer offset.
 * @param   pReNative       The native recompile state.
 * @param   off             Current code buffer position.
 */
DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcDebugCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off)
{
    if (pReNative->Core.fDebugPcInitialized)
    {
        uint8_t const idxPcReg  = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc);
        if (pReNative->Core.offPc)
        {
            /* Materialize PC + offPc in a temporary and check that instead. */
            uint8_t const         idxTmpReg = iemNativeRegAllocTmp(pReNative, &off);
            PIEMNATIVEINSTR const pCodeBuf  = iemNativeInstrBufEnsure(pReNative, off, RT_ARCH_VAL == RT_ARCH_VAL_AMD64 ? 32 : 8);
            off = iemNativeEmitGprEqGprPlusImmEx(pCodeBuf, off, idxTmpReg, idxPcReg, pReNative->Core.offPc);
            IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
            off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxTmpReg);
            iemNativeRegFreeTmp(pReNative, idxTmpReg);
        }
        else
            off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
        iemNativeRegFreeTmp(pReNative, idxPcReg);
    }
    return off;
}
     5756
     5757# endif /* IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG */
     5758
    56715759/**
    56725760 * Emits code to update the guest RIP value by adding the current offset since the start of the last RIP update.
     
    56755763{
    56765764    Assert(pReNative->Core.offPc);
     5765    Log4(("offPc=%#x -> 0; off=%#x\n", pReNative->Core.offPc, off));
    56775766# ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
    56785767    iemNativeDbgInfoAddNativeOffset(pReNative, off);
     
    56875776    off = iemNativeEmitAddGprImm(pReNative, off, idxPcReg, pReNative->Core.offPc);
    56885777    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
     5778# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     5779    off = iemNativeEmitPcDebugCheckWithReg(pReNative, off, idxPcReg);
     5780# endif
    56895781
    56905782    /* Free but don't flush the PC register. */
     
    57025794    return off;
    57035795}
     5796
    57045797#endif /* IEMNATIVE_WITH_DELAYED_PC_UPDATING */
    57055798
     
    64316524#endif
    64326525
     6526#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     6527    pReNative->Core.fDebugPcInitialized = false;
     6528    Log4(("fDebugPcInitialized=false cimpl off=%#x (v2)\n", off));
     6529#endif
     6530
    64336531    /*
    64346532     * Check the status code.
     
    65166614#else
    65176615# error "port me"
     6616#endif
     6617
     6618#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     6619    pReNative->Core.fDebugPcInitialized = false;
     6620    Log4(("fDebugPcInitialized=false todo off=%#x (v2)\n", off));
    65186621#endif
    65196622
     
    87468849        ENTRY(iem.s.idxTbCurInstr),
    87478850        ENTRY(iem.s.fSkippingEFlags),
     8851#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     8852        ENTRY(iem.s.uPcUpdatingDebug),
     8853#endif
    87488854#ifdef VBOX_WITH_STATISTICS
    87498855        ENTRY(iem.s.StatNativeTlbHitsForFetch),
     
    1009010196             * Actual work.
    1009110197             */
    10092             Log2(("%u[%u]: %s%s\n", idxCurCall, pCallEntry->idxInstr, g_apszIemThreadedFunctions[pCallEntry->enmFunction],
    10093                   pfnRecom ? "(recompiled)" : "(todo)"));
     10198            Log2(("%u[%u]: %s%s (off=%#x)\n", idxCurCall, pCallEntry->idxInstr,
     10199                  g_apszIemThreadedFunctions[pCallEntry->enmFunction], pfnRecom ? "(recompiled)" : "(todo)", off));
    1009410200            if (pfnRecom) /** @todo stats on this.   */
    1009510201            {
  • trunk/src/VBox/VMM/VMMR3/IEMR3.cpp

    r105805 r105853  
    809809        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal,   STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates",   "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
    810810        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
    811 
     811#  endif /* VBOX_WITH_STATISTICS */
     812#  ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
     813        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEndIfOtherBranchDirty, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
     814                        "IEM_MC_ENDIF flushing dirty shadow registers for other branch (not good).",
     815                        "/IEM/CPU%u/re/NativeEndIfOtherBranchDirty", idCpu);
     816#  endif
     817#  ifdef VBOX_WITH_STATISTICS
    812818#   ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    813819        STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r105805 r105853  
    112112#if defined(DOXYGEN_RUNNING) || 1
    113113# define IEMNATIVE_WITH_DELAYED_PC_UPDATING
     114#endif
     115/** @def IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
 * Enables delayed PC updating debugging code.
     117 * This is an alternative to the ARM64-only IEMNATIVE_REG_FIXED_PC_DBG. */
     118#if defined(DOXYGEN_RUNNING) || 0
     119# define IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
    114120#endif
    115121
     
    22702276    /** Dummy entry for ppTbLookupEntryR3. */
    22712277    R3PTRTYPE(PIEMTB)       pTbLookupEntryDummyR3;
     2278#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     2279    /** The debug code advances this register as if it was CPUMCTX::rip and we
     2280     * didn't do delayed PC updating.  When CPUMCTX::rip is finally updated,
     2281     * the result is compared with this value. */
     2282    uint64_t                uPcUpdatingDebug;
     2283#else
     2284    uint64_t                u64Placeholder;
     2285#endif
    22722286    /** @} */
    22732287
     
    23942408    STAMCOUNTER             StatNativePcUpdateDelayed;
    23952409
     2410    /** Native recompiler: Number of time we had complicated dirty shadow
     2411     *  register situations with the other branch in IEM_MC_ENDIF. */
     2412    STAMCOUNTER             StatNativeEndIfOtherBranchDirty;
     2413
    23962414//#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    23972415    /** Native recompiler: Number of calls to iemNativeSimdRegAllocFindFree. */
     
    25192537
    25202538#ifdef IEM_WITH_TLB_TRACE
     2539    uint64_t                au64Padding[4];
     2540#else
    25212541    uint64_t                au64Padding[6];
    2522 #else
    2523     //uint64_t                au64Padding[0];
    25242542#endif
    25252543
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

    r105818 r105853  
    12461246    uint32_t                    offPc;
    12471247    /** Number of instructions where we could skip the updating. */
    1248     uint32_t                    cInstrPcUpdateSkipped;
     1248    uint8_t                     cInstrPcUpdateSkipped;
     1249# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     1250    /** Set after we've loaded PC into uPcUpdatingDebug at the first update. */
     1251    bool                        fDebugPcInitialized;
     1252    uint8_t                     abPadding[2];
     1253# else
     1254    uint8_t                     abPadding[3];
     1255# endif
    12491256#endif
    12501257    /** Allocation bitmap for aHstRegs. */
     
    13861393    /** Set if we're in the "else" part, clear if we're in the "if" before it. */
    13871394    bool                        fInElse;
    1388     /** Set if the if-block unconditionally exited the TB. */
    1389     bool                        fIfExitTb;
    1390     /** Set if the else-block unconditionally exited the TB. */
    1391     bool                        fElseExitTb;
     1395    union
     1396    {
     1397        struct
     1398        {
     1399            /** Set if the if-block unconditionally exited the TB. */
     1400            bool                fIfExitTb;
     1401            /** Set if the else-block unconditionally exited the TB. */
     1402            bool                fElseExitTb;
     1403        };
     1404        /** Indexed by fInElse. */
     1405        bool                    afExitTb[2];
     1406    };
    13921407    bool                        afPadding[5];
    13931408    /** The label for the IEM_MC_ELSE. */
     
    17121727                                                               uint64_t fGstSimdShwExcept);
    17131728#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
     1729# ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
     1730DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcDebugCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off);
     1731DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcDebugCheckWithReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxPcReg);
     1732# endif
    17141733DECL_HIDDEN_THROW(uint32_t) iemNativeEmitPcWritebackSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off);
    17151734#endif
    17161735#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    17171736DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushPendingWrite(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEGSTREG enmGstReg);
    1718 DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushDirtyGuest(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t fFlushGstReg = UINT64_MAX);
    1719 DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushDirtyGuestByHostRegShadow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxHstReg);
     1737DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushPendingWriteEx(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1738                                                            PIEMNATIVECORESTATE pCore, IEMNATIVEGSTREG enmGstReg);
     1739DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushDirtyGuest(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1740                                                        uint64_t fFlushGstReg = UINT64_MAX);
     1741DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushDirtyGuestByHostRegShadow(PIEMRECOMPILERSTATE pReNative,
     1742                                                                       uint32_t off, uint8_t idxHstReg);
    17201743#endif
    17211744
     
    23262349    Assert(pReNative->Core.bmHstRegsWithGstShadow             & RT_BIT_32(idxHstReg));
    23272350#ifdef IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    2328     Assert(!(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & pReNative->Core.bmGstRegShadowDirty));
     2351    Assert(!(pReNative->Core.bmGstRegShadowDirty              & RT_BIT_64(enmGstReg)));
    23292352#endif
    23302353
  • trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h

    r105673 r105853  
    43604360 */
    43614361DECL_FORCE_INLINE_THROW(uint32_t)
    4362 iemNativeEmitAddGpr32ImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int32_t iAddend)
     4362iemNativeEmitAddGpr32ImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int32_t iAddend, uint8_t iGprTmp = UINT8_MAX)
    43634363{
    43644364#if defined(RT_ARCH_AMD64)
     
    43754375    pCodeBuf[off++] = RT_BYTE3((uint32_t)iAddend);
    43764376    pCodeBuf[off++] = RT_BYTE4((uint32_t)iAddend);
     4377    RT_NOREF(iGprTmp);
    43774378
    43784379#elif defined(RT_ARCH_ARM64)
     
    43864387        if (uAbsAddend & 0xfffU)
    43874388            pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(fSub, iGprDst, iGprDst, uAbsAddend & 0xfff, false /*f64Bit*/);
     4389    }
     4390    else if (iGprTmp != UINT8_MAX)
     4391    {
     4392        off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, iGprTmp, iAddend);
     4393        pCodeBuf[off++] = Armv8A64MkInstrAddReg(iGprDst, iGprDst, iGprTmp, false /*f64Bit*/);
    43884394    }
    43894395    else
     
    82668272
    82678273/**
     8274 * Helper for marking the current conditional branch as exiting the TB.
     8275 *
     8276 * This simplifies the state consolidation later when we reach the IEM_MC_ENDIF.
     8277 */
     8278DECL_FORCE_INLINE(void) iemNativeMarkCurCondBranchAsExiting(PIEMRECOMPILERSTATE pReNative)
     8279{
     8280    uint8_t idxCondDepth = pReNative->cCondDepth;
     8281    if (idxCondDepth)
     8282    {
     8283        idxCondDepth--;
     8284        pReNative->aCondStack[idxCondDepth].afExitTb[pReNative->aCondStack[idxCondDepth].fInElse] = true;
     8285    }
     8286}
     8287
     8288
     8289/**
    82688290 * Emits a Jcc rel32 / B.cc imm19 to the given label (ASSUMED requiring fixup).
    82698291 */
     
    82738295{
    82748296    Assert(IEMNATIVELABELTYPE_IS_EXIT_REASON(enmExitReason));
     8297
    82758298#if defined(IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE) && defined(RT_ARCH_AMD64)
    82768299    /* jcc rel32 */
     
    83808403{
    83818404    Assert(IEMNATIVELABELTYPE_IS_EXIT_REASON(enmExitReason));
     8405
     8406    iemNativeMarkCurCondBranchAsExiting(pReNative);
     8407
    83828408#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
    83838409# ifdef RT_ARCH_AMD64
     
    84098435{
    84108436    Assert(IEMNATIVELABELTYPE_IS_EXIT_REASON(enmExitReason));
     8437
     8438    iemNativeMarkCurCondBranchAsExiting(pReNative);
     8439
    84118440#ifdef IEMNATIVE_WITH_RECOMPILER_PER_CHUNK_TAIL_CODE
    84128441# ifdef RT_ARCH_AMD64
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette