VirtualBox

Changeset 100183 in vbox for trunk


Timestamp:
Jun 15, 2023 9:04:04 PM
Author:
vboxsync
Message:

VMM/IEM: More recompilation code. bugref:10369

Location:
trunk
Files:
3 edited

Legend:

  ' '  Unmodified
  '+'  Added
  '-'  Removed
  • trunk/include/VBox/err.h (r100144 → r100183)

  *  VERR, VERW).  This is not used outside the instruction implementations. */
 #define VINF_IEM_SELECTOR_NOT_OK                    (5305)
+/** Recompiler: Translation block allocation failed. */
+#define VERR_IEM_TB_ALLOC_FAILED                    (-5309)
+/** Recompiled execution: Stop executing the TB - mode (fExec) changed. */
+#define VINF_IEM_REEXEC_MODE_CHANGED                (5310)
 /** Restart the current instruction. For testing only. */
 #define VERR_IEM_RESTART_INSTRUCTION                (-5389)
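Of the two new status codes, only VERR_IEM_TB_ALLOC_FAILED is consumed in this changeset (by iemThreadedCompile() in IEMAllThreadedRecompiler.cpp below); VINF_IEM_REEXEC_MODE_CHANGED is defined for later use. A minimal sketch of how a caller might dispatch on them, assuming a loop shape that is not part of this changeset:

    /* Hypothetical caller-side handling; only the status codes come from this changeset. */
    VBOXSTRICTRC rcStrict = iemThreadedCompile(pVM, pVCpu, GCPhysPc, fExtraFlags);
    if (rcStrict == VINF_IEM_REEXEC_MODE_CHANGED)
        rcStrict = VINF_SUCCESS;      /* fExec changed: go pick or compile another TB */
    else if (rcStrict == VERR_IEM_TB_ALLOC_FAILED)
        rcStrict = IEMExecOne(pVCpu); /* out of TB memory: fall back to interpretation */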
  • trunk/src/VBox/VMM/VMMAll/IEMAllThreadedRecompiler.cpp (r100151 → r100183)

 #include <iprt/asm-math.h>
 #include <iprt/assert.h>
+#include <iprt/mem.h>
 #include <iprt/string.h>
 #include <iprt/x86.h>
…
      * @{ */
     RTGCPHYS            GCPhysPc;
-    uint64_t            uPc;
+    /** IEMTB_F_XXX (i.e. IEM_F_XXX ++). */
     uint32_t            fFlags;
     union
…
         struct
         {
+            /** @todo Do we actually need BASE, LIM and CS?  If we don't tie a TB to a RIP
+             * range, because that's bad for PIC/PIE code on unix with address space
+             * randomization enabled, the assumption is that anything involving PC
+             * (RIP/EIP/IP, maybe + CS.BASE) will be done by reading current register
+             * values and not embedding presumed values into the code.  Thus the uCsBase
+             * member here shouldn't be needed.  For the same reason, uCsLimit isn't
+             * helpful either, as RIP/EIP/IP may differ between address spaces.  So,
+             * before TB execution we'd need to check CS.LIM against RIP+cbPC (ditto for
+             * 64-bit canonicality).
+             *
+             * We could bake instruction limit / canonicality checks into the generated
+             * code if we find ourselves close to the limit and expect to run into it by
+             * the end of the translation block.  That would just use a very simple
+             * threshold distance and a special IEMTB_F_XXX flag so we can figure it out
+             * when picking the TB.
+             *
+             * The CS value is likewise useless, as we'll always be using the actual CS
+             * register value whenever it is relevant (mainly pushing to the stack in a
+             * call, trap, whatever).
+             *
+             * The segment attributes should be handled via the IEM_F_MODE_XXX and
+             * IEM_F_X86_CPL_MASK portions of fFlags, so we could skip those too, I think.
+             * All the places where they matter, we would be in CIMPL code which would
+             * consult the actual CS.ATTR and not depend on the recompiled code block.
+             */
             /** The CS base. */
             uint32_t uCsBase;
…
             /** The CS selector value. */
             uint16_t CS;
-            /**< Relevant X86DESCATTR_XXX bits. */
+            /**< Relevant CS X86DESCATTR_XXX bits. */
             uint16_t fAttr;
         } x86;
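iemThreadedCompile() below already performs the entry check this @todo describes, and iemGetTbFlagsForCurrentPc() requests per-instruction checks near the limit. As a standalone sketch of the pre-execution test (the helper name is hypothetical, and using cbPC as the covered byte range is an assumption):

    /* Sketch: validate the PC range before running a TB that was compiled
       without embedded RIP checks. */
    DECLINLINE(bool) iemTbIsPcRangeOk(PVMCPUCC pVCpu, PIEMTB pTb)
    {
        if (IEM_IS_64BIT_CODE(pVCpu))
            return IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.rip)
                && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.rip + pTb->cbPC);
        return (uint64_t)pVCpu->cpum.GstCtx.eip + pTb->cbPC <= pVCpu->cpum.GstCtx.cs.u32Limit;
    }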
     
…
         {
             /** Number of calls in paCalls. */
-            uint32_t            cCalls;
+            uint16_t            cCalls;
             /** Number of calls allocated. */
-            uint32_t            cAllocated;
+            uint16_t            cAllocated;
             /** The call sequence table. */
             PIEMTHRDEDCALLENTRY paCalls;
         } Thrd;
     };
-
-
 } IEMTB;
+
+
+/*********************************************************************************************************************************
+*   Internal Functions                                                                                                           *
+*********************************************************************************************************************************/
+static VBOXSTRICTRC iemThreadedTbExec(PVMCPUCC pVCpu, PIEMTB pTb);
 
 
…
 #include "IEMThreadedInstructions.cpp.h"
 
+/*
+ * Translation block management.
+ */
+
+/**
+ * Allocate a translation block for threaded recompilation.
+ *
+ * @returns Pointer to the translation block on success, NULL on failure.
+ * @param   pVM         The cross context virtual machine structure.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling
+ *                      thread.
+ * @param   GCPhysPc    The physical address corresponding to RIP + CS.BASE.
+ * @param   fExtraFlags Extra flags (IEMTB_F_XXX).
+ */
+static PIEMTB iemThreadedTbAlloc(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPc, uint32_t fExtraFlags)
+{
+    /*
+     * Just using the heap for now.  Will make this more efficient and
+     * complicated later, don't worry. :-)
+     */
+    PIEMTB pTb = (PIEMTB)RTMemAlloc(sizeof(IEMTB));
+    if (pTb)
+    {
+        pTb->Thrd.paCalls = (PIEMTHRDEDCALLENTRY)RTMemAlloc(sizeof(IEMTHRDEDCALLENTRY) * 128);
+        if (pTb->Thrd.paCalls)
+        {
+            pTb->Thrd.cAllocated = 128;
+            pTb->Thrd.cCalls     = 0;
+            pTb->pNext           = NULL;
+            RTListInit(&pTb->LocalList);
+            pTb->cbPC            = 0;
+            pTb->GCPhysPc        = GCPhysPc;
+            pTb->x86.uCsBase     = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
+            pTb->x86.uCsLimit    = (uint32_t)pVCpu->cpum.GstCtx.cs.u32Limit;
+            pTb->x86.CS          = (uint32_t)pVCpu->cpum.GstCtx.cs.Sel;
+            pTb->x86.fAttr       = (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u;
+            pTb->fFlags          = (pVCpu->iem.s.fExec & IEMTB_F_IEM_F_MASK) | fExtraFlags;
+            pVCpu->iem.s.cTbAllocs++;
+            return pTb;
+        }
+        RTMemFree(pTb);
+    }
+    RT_NOREF(pVM);
+    return NULL;
+}
+
+
+/**
+ * Frees pTb.
+ *
+ * @param   pVM     The cross context virtual machine structure.
+ * @param   pVCpu   The cross context virtual CPU structure of the calling
+ *                  thread.
+ * @param   pTb     The translation block to free.
+ */
+static void iemThreadedTbFree(PVMCC pVM, PVMCPUCC pVCpu, PIEMTB pTb)
+{
+    RT_NOREF(pVM);
+    AssertPtr(pTb);
+
+    AssertCompile((IEMTB_F_STATE_OBSOLETE >> IEMTB_F_STATE_SHIFT) == (IEMTB_F_STATE_MASK >> IEMTB_F_STATE_SHIFT));
+    pTb->fFlags |= IEMTB_F_STATE_OBSOLETE; /* works, both bits set */
+
+    RTMemFree(pTb->Thrd.paCalls);
+    pTb->Thrd.paCalls = NULL;
+
+    RTMemFree(pTb);
+    pVCpu->iem.s.cTbFrees++;
+}
+
+
+static PIEMTB iemThreadedTbLookup(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPc, uint64_t uPc, uint32_t fExtraFlags)
+{
+    RT_NOREF(pVM, pVCpu, GCPhysPc, uPc, fExtraFlags);
+    return NULL;
+}
 
 
…
  */
 
-static VBOXSTRICTRC iemThreadedCompile(PVMCC pVM, PVMCPUCC pVCpu)
-{
-    RT_NOREF(pVM, pVCpu);
-    return VERR_NOT_IMPLEMENTED;
-}
-
-
 static VBOXSTRICTRC iemThreadedCompileLongJumped(PVMCC pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
 {
…
 
 
-static PIEMTB iemThreadedTbLookup(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPC, uint64_t uPc)
-{
-    RT_NOREF(pVM, pVCpu, GCPhysPC, uPc);
-    return NULL;
+/**
+ * Initializes the decoder state when compiling TBs.
+ *
+ * This presumes that fExec has already been initialized.
+ *
+ * This is very similar to iemInitDecoder() and iemReInitDecoder(), so fixes
+ * here may need to be applied there as well.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure of the calling
+ *                  thread.
+ * @param   fReInit Clear for the first call for a TB, set for subsequent calls
+ *                  from inside the compile loop where we can skip a couple of
+ *                  things.
+ */
+DECL_FORCE_INLINE(void) iemThreadedCompileInitDecoder(PVMCPUCC pVCpu, bool const fReInit)
+{
+    /* ASSUMES: That iemInitExec was already called and that anyone changing
+       CPU state affecting the fExec bits since then will have updated fExec!  */
+    AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
+              ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
+
+    IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
+
+    /* Decoder state: */
+    pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
+    pVCpu->iem.s.enmEffAddrMode     = enmMode;
+    if (enmMode != IEMMODE_64BIT)
+    {
+        pVCpu->iem.s.enmDefOpSize   = enmMode;  /** @todo check if this is correct... */
+        pVCpu->iem.s.enmEffOpSize   = enmMode;
+    }
+    else
+    {
+        pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
+        pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
+    }
+    pVCpu->iem.s.fPrefixes          = 0;
+    pVCpu->iem.s.uRexReg            = 0;
+    pVCpu->iem.s.uRexB              = 0;
+    pVCpu->iem.s.uRexIndex          = 0;
+    pVCpu->iem.s.idxPrefix          = 0;
+    pVCpu->iem.s.uVex3rdReg         = 0;
+    pVCpu->iem.s.uVexLength         = 0;
+    pVCpu->iem.s.fEvexStuff         = 0;
+    pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
+    pVCpu->iem.s.offModRm           = 0;
+    pVCpu->iem.s.iNextMapping       = 0;
+
+    if (!fReInit)
+    {
+        pVCpu->iem.s.cActiveMappings        = 0;
+        pVCpu->iem.s.rcPassUp               = VINF_SUCCESS;
+        pVCpu->iem.s.fEndTb                 = false;
+    }
+    else
+    {
+        Assert(pVCpu->iem.s.cActiveMappings == 0);
+        Assert(pVCpu->iem.s.rcPassUp        == VINF_SUCCESS);
+        Assert(pVCpu->iem.s.fEndTb          == false);
+    }
+
+#ifdef DBGFTRACE_ENABLED
+    switch (IEM_GET_CPU_MODE(pVCpu))
+    {
+        case IEMMODE_64BIT:
+            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
+            break;
+        case IEMMODE_32BIT:
+            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
+            break;
+        case IEMMODE_16BIT:
+            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
+            break;
+    }
+#endif
+}
+
+
+/**
+ * Initializes the opcode fetcher when starting the compilation.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure of the calling
+ *                  thread.
+ */
+DECL_FORCE_INLINE(void) iemThreadedCompileInitOpcodeFetching(PVMCPUCC pVCpu)
+{
+// // // // // // // // // // figure this out // // // //
+    pVCpu->iem.s.pbInstrBuf         = NULL;
+    pVCpu->iem.s.offInstrNextByte   = 0;
+    pVCpu->iem.s.offCurInstrStart   = 0;
+#ifdef VBOX_STRICT
+    pVCpu->iem.s.GCPhysInstrBuf     = NIL_RTGCPHYS;
+    pVCpu->iem.s.cbInstrBuf         = UINT16_MAX;
+    pVCpu->iem.s.cbInstrBufTotal    = UINT16_MAX;
+    pVCpu->iem.s.uInstrBufPc        = UINT64_C(0xc0ffc0ffcff0c0ff);
+#endif
+// // // // // // // // // // // // // // // // // // //
+}
+
+
+/**
+ * Re-initializes the opcode fetcher between instructions while compiling.
+ *
+ * @param   pVCpu   The cross context virtual CPU structure of the calling
+ *                  thread.
+ */
+DECL_FORCE_INLINE(void) iemThreadedCompileReInitOpcodeFetching(PVMCPUCC pVCpu)
+{
+    if (pVCpu->iem.s.pbInstrBuf)
+    {
+        uint64_t off = pVCpu->cpum.GstCtx.rip;
+        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu));
+        off += pVCpu->cpum.GstCtx.cs.u64Base;
+        off -= pVCpu->iem.s.uInstrBufPc;
+        if (off < pVCpu->iem.s.cbInstrBufTotal)
+        {
+            pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
+            pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
+            if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
+                pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
+            else
+                pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
+        }
+        else
+        {
+            pVCpu->iem.s.pbInstrBuf       = NULL;
+            pVCpu->iem.s.offInstrNextByte = 0;
+            pVCpu->iem.s.offCurInstrStart = 0;
+            pVCpu->iem.s.cbInstrBuf       = 0;
+            pVCpu->iem.s.cbInstrBufTotal  = 0;
+            pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
+        }
+    }
+    else
+    {
+        pVCpu->iem.s.offInstrNextByte = 0;
+        pVCpu->iem.s.offCurInstrStart = 0;
+        pVCpu->iem.s.cbInstrBuf       = 0;
+        pVCpu->iem.s.cbInstrBufTotal  = 0;
+#ifdef VBOX_STRICT
+        pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
+#endif
+    }
+}
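To put concrete numbers on the fast path above (values invented for illustration): with uInstrBufPc = 0xf000, CS.BASE = 0, cbInstrBufTotal = 0x1000 and RIP = 0xf010, off computes to 0x10, so offInstrNextByte/offCurInstrStart become 0x10 and cbInstrBuf is set to 0x10 + 15 = 0x1f, keeping the 15-byte maximum x86 instruction length within the mapped buffer. Once off lands outside cbInstrBufTotal, the buffer pointer is dropped and the next opcode fetch has to repopulate it.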
+
+
+/**
+ * Compiles a new TB and executes it.
+ *
+ * We combine compilation and execution here, as it makes for simpler code
+ * flow in the main loop and allows interpreting while compiling if we want
+ * to explore that option.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVM         The cross context virtual machine structure.
+ * @param   pVCpu       The cross context virtual CPU structure of the calling
+ *                      thread.
+ * @param   GCPhysPc    The physical address corresponding to RIP + CS.BASE.
+ * @param   fExtraFlags Extra translation block flags: IEMTB_F_TYPE_THREADED and
+ *                      maybe IEMTB_F_RIP_CHECKS.
+ */
+static VBOXSTRICTRC iemThreadedCompile(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPc, uint32_t fExtraFlags)
+{
+    /*
+     * Allocate a new translation block.
+     */
+    if (!(fExtraFlags & IEMTB_F_RIP_CHECKS))
+    { /* likely */ }
+    else if (  !IEM_IS_64BIT_CODE(pVCpu)
+             ? pVCpu->cpum.GstCtx.eip <= pVCpu->cpum.GstCtx.cs.u32Limit
+             : IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.rip))
+    { /* likely */ }
+    else
+        return IEMExecOne(pVCpu);
+    fExtraFlags |= IEMTB_F_STATE_COMPILING;
+
+    PIEMTB pTb = iemThreadedTbAlloc(pVM, pVCpu, GCPhysPc, fExtraFlags);
+    AssertReturn(pTb, VERR_IEM_TB_ALLOC_FAILED);
+
+    /* Set the current TB so iemThreadedCompileLongJumped and the CIMPL
+       functions may get at it. */
+    pVCpu->iem.s.pCurTbR3 = pTb;
+
+    /*
+     * Now for the recompilation.  (This mimics IEMExecLots in many ways.)
+     */
+    iemThreadedCompileInitDecoder(pVCpu, false /*fReInit*/);
+    iemThreadedCompileInitOpcodeFetching(pVCpu);
+    VBOXSTRICTRC rcStrict;
+    for (;;)
+    {
+        /* Process the next instruction. */
+        uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
+        uint16_t const cCallsPrev = pTb->Thrd.cCalls;
+        rcStrict = FNIEMOP_CALL(g_apfnIemThreadedRecompilerOneByteMap[b]);
+        if (   rcStrict == VINF_SUCCESS
+            && !pVCpu->iem.s.fEndTb)
+        {
+            Assert(pTb->Thrd.cCalls > cCallsPrev);
+            Assert(cCallsPrev - pTb->Thrd.cCalls < 5);
+        }
+        else if (pTb->Thrd.cCalls > 0)
+        {
+            break;
+        }
+        else
+        {
+            pVCpu->iem.s.pCurTbR3 = NULL;
+            iemThreadedTbFree(pVM, pVCpu, pTb);
+            return rcStrict;
+        }
+
+        /* Still space in the TB? */
+        if (pTb->Thrd.cCalls + 5 < pTb->Thrd.cAllocated)
+            iemThreadedCompileInitDecoder(pVCpu, true /*fReInit*/);
+        else
+            break;
+        iemThreadedCompileReInitOpcodeFetching(pVCpu);
+    }
+
+    /*
+     * Complete the TB and link it.
+     */
+
+#ifdef IEM_COMPILE_ONLY_MODE
+    /*
+     * Execute the translation block.
+     */
+#endif
+
+    return rcStrict;
 }
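The cCalls + 5 headroom test mirrors the assertion earlier in the loop that a single instruction expands to fewer than five threaded calls, so admitting one more instruction can never overrun the 128-entry call table set up by iemThreadedTbAlloc().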
 
…
 
 /** @todo need private inline decl for throw/nothrow matching IEM_WITH_SETJMP? */
-DECL_INLINE_THROW(uint64_t) iemGetPcWithPhysAndCode(PVMCPUCC pVCpu, PRTGCPHYS pPhys)
-{
+DECL_FORCE_INLINE_THROW(uint64_t) iemGetPcWithPhysAndCode(PVMCPUCC pVCpu, PRTGCPHYS pPhys)
+{
+    /* Set uCurTbStartPc to RIP and calc the effective PC. */
+    uint64_t uPc = pVCpu->cpum.GstCtx.rip;
+    pVCpu->iem.s.uCurTbStartPc = uPc;
     Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu));
-    uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
+    uPc += pVCpu->cpum.GstCtx.cs.u64Base;
+
     if (pVCpu->iem.s.pbInstrBuf)
     {
…
     }
     return iemGetPcWithPhysAndCodeMissed(pVCpu, uPc, pPhys);
+}
+
+
+/**
+ * Determines the extra IEMTB_F_XXX flags.
+ *
+ * @returns IEMTB_F_TYPE_THREADED and maybe IEMTB_F_RIP_CHECKS.
+ * @param   pVCpu   The cross context virtual CPU structure of the calling
+ *                  thread.
+ */
+DECL_FORCE_INLINE(uint32_t) iemGetTbFlagsForCurrentPc(PVMCPUCC pVCpu)
+{
+    /*
+     * Return IEMTB_F_RIP_CHECKS if the current PC is invalid or if it is
+     * likely to go invalid before the end of the translation block.
+     */
+    if (IEM_IS_64BIT_CODE(pVCpu))
+    {
+        if (RT_LIKELY(   IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.rip)
+                      && IEM_IS_CANONICAL(pVCpu->cpum.GstCtx.rip + 256)))
+            return IEMTB_F_TYPE_THREADED;
+    }
+    else
+    {
+        if (RT_LIKELY(   pVCpu->cpum.GstCtx.eip < pVCpu->cpum.GstCtx.cs.u32Limit
+                      && (uint64_t)256 + pVCpu->cpum.GstCtx.eip < pVCpu->cpum.GstCtx.cs.u32Limit))
+            return IEMTB_F_TYPE_THREADED;
+    }
+    return IEMTB_F_RIP_CHECKS | IEMTB_F_TYPE_THREADED;
 }
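A worked example for the 32-bit branch (numbers invented): with CS.LIM = 0xffff and EIP = 0xff80, EIP itself is within the limit but 256 + EIP = 0x10080 is not, so the TB is compiled with IEMTB_F_RIP_CHECKS; at EIP = 0x1000 both tests pass and a plain IEMTB_F_TYPE_THREADED TB can be used. The 256 bytes are the "very simple threshold distance" anticipated by the @todo in the IEMTB structure above.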
 
…
                 /* Translate PC to physical address, we'll need this for both lookup and compilation. */
                 RTGCPHYS       GCPhysPc;
-                uint64_t const uPc = iemGetPcWithPhysAndCode(pVCpu, &GCPhysPc);
-
-                pTb = iemThreadedTbLookup(pVM, pVCpu, GCPhysPc, uPc);
+                uint64_t const uPc         = iemGetPcWithPhysAndCode(pVCpu, &GCPhysPc);
+                uint32_t const fExtraFlags = iemGetTbFlagsForCurrentPc(pVCpu);
+
+                pTb = iemThreadedTbLookup(pVM, pVCpu, GCPhysPc, uPc, fExtraFlags);
                 if (pTb)
                     rcStrict = iemThreadedTbExec(pVCpu, pTb);
                 else
-                    rcStrict = iemThreadedCompile(pVM, pVCpu /*, GCPhysPc, uPc*/);
+                    rcStrict = iemThreadedCompile(pVM, pVCpu, GCPhysPc, fExtraFlags);
                 if (rcStrict == VINF_SUCCESS)
                 { /* likely */ }
  • trunk/src/VBox/VMM/include/IEMInternal.h (r100072 → r100183)

 /** State mask.  */
 #define IEMTB_F_STATE_MASK              UINT32_C(0x0c000000)
+/** State shift count.  */
+#define IEMTB_F_STATE_SHIFT             26
 /** State: Compiling. */
 #define IEMTB_F_STATE_COMPILING         UINT32_C(0x04000000)
…
 #define IEMTB_F_STATE_OBSOLETE          UINT32_C(0x0c000000)
 
+/** Checks that EIP/IP is within CS.LIM and that RIP is canonical before each
+ *  instruction.  Used when we're close to the limit before starting a TB, as
+ *  determined by iemGetTbFlagsForCurrentPc(). */
+#define IEMTB_F_RIP_CHECKS              UINT32_C(0x10000000)
 /** Mask of the IEMTB_F_XXX flags that are part of the TB lookup key.
- * @note We don't   */
-#define IEMTB_F_KEY_MASK                ((UINT32_C(0xffffffff) & ~IEM_F_X86_CTX_MASK) | IEM_F_X86_CTX_SMM)
+ * @note We skip the CPL as we don't currently generate ring-specific code,
+ *       that's all handled in CIMPL functions.
+ *
+ *       For the same reasons, we skip all of IEM_F_X86_CTX_MASK, with the
+ *       exception of SMM (which we don't implement). */
+#define IEMTB_F_KEY_MASK                ((UINT32_C(0xffffffff) & ~(IEM_F_X86_CTX_MASK | IEM_F_X86_CPL_MASK)) | IEM_F_X86_CTX_SMM)
 /** @} */
 
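iemThreadedTbLookup() is still a stub in this changeset, so the following is only an assumption about how the key mask is meant to be applied when matching a TB:

    /* Hypothetical lookup predicate: same guest-physical PC and matching key flags. */
    uint32_t const fKey = ((pVCpu->iem.s.fExec & IEMTB_F_IEM_F_MASK) | fExtraFlags) & IEMTB_F_KEY_MASK;
    bool const fMatch = pTb->GCPhysPc == GCPhysPc
                     && (pTb->fFlags & IEMTB_F_KEY_MASK) == fKey;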
     
…
      * This can either be one being executed or one being compiled. */
     R3PTRTYPE(PIEMTB)       pCurTbR3;
+    /** The PC (RIP) at the start of pCurTbR3/pCurTbR0.
+     * The TBs are based on physical addresses, so this is needed to correlate
+     * RIP to the opcode bytes stored in the TB (AMD-V / VT-x). */
+    uint64_t                uCurTbStartPc;
+    /** Statistics: Number of TB allocation calls. */
+    uint64_t                cTbAllocs;
+    /** Statistics: Number of TB free calls. */
+    uint64_t                cTbFrees;
+    /** Whether to end the current TB. */
+    bool                    fEndTb;
     /** Space reserved for recompiler data / alignment. */
-    uint64_t                auRecompilerStuff[7];
+    bool                    afRecompilerStuff1[7];
+    /** Space reserved for recompiler data / alignment. */
+    uint64_t                auRecompilerStuff2[3];
     /** @} */
 
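Because TBs are keyed on guest-physical addresses, uCurTbStartPc (stored by iemGetPcWithPhysAndCode() before CS.BASE is added) is what allows later code to relate the current flat RIP back to the TB; a hypothetical use:

    /* Hypothetical: byte offset of the current RIP within the TB's code range. */
    uint64_t const offInTb = pVCpu->cpum.GstCtx.rip - pVCpu->iem.s.uCurTbStartPc;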