VirtualBox

Changeset 102847 in vbox for trunk/src/VBox/VMM/VMMAll


Ignore:
Timestamp:
Jan 11, 2024 2:41:51 PM (14 months ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
161046
Message:

VMM/IEM: Moved the TLB lookup emitter to a common header file so it can be shared with the IEMAllN8veRecompBltIn.cpp code for code TLB lookups. bugref:10371

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r102846 r102847  
    100100#include "IEMN8veRecompiler.h"
    101101#include "IEMN8veRecompilerEmit.h"
     102#include "IEMN8veRecompilerTlbLookup.h"
    102103#include "IEMNativeFunctions.h"
    103 
    104 
    105 /*
    106  * TLB Lookup config.
    107  */
    108 #if (defined(RT_ARCH_AMD64) && 1) || (defined(RT_ARCH_ARM64) && 1)
    109 # define IEMNATIVE_WITH_TLB_LOOKUP
    110 #endif
    111 #ifdef IEMNATIVE_WITH_TLB_LOOKUP
    112 # define IEMNATIVE_WITH_TLB_LOOKUP_FETCH
    113 #endif
    114 #ifdef IEMNATIVE_WITH_TLB_LOOKUP
    115 # define IEMNATIVE_WITH_TLB_LOOKUP_STORE
    116 #endif
    117 #ifdef IEMNATIVE_WITH_TLB_LOOKUP
    118 # define IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
    119 #endif
    120 #ifdef IEMNATIVE_WITH_TLB_LOOKUP
    121 # define IEMNATIVE_WITH_TLB_LOOKUP_PUSH
    122 #endif
    123 #ifdef IEMNATIVE_WITH_TLB_LOOKUP
    124 # define IEMNATIVE_WITH_TLB_LOOKUP_POP
    125 #endif
    126104
    127105
     
    71327110
    71337111/**
    7134  * Releases the variable's register.
    7135  *
    7136  * The register must have been previously acquired calling
    7137  * iemNativeVarRegisterAcquire(), iemNativeVarRegisterAcquireForGuestReg() or
    7138  * iemNativeVarRegisterSetAndAcquire().
    7139  */
    7140 DECL_INLINE_THROW(void) iemNativeVarRegisterRelease(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
    7141 {
    7142     IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
    7143     Assert(pReNative->Core.aVars[idxVar].fRegAcquired);
    7144     pReNative->Core.aVars[idxVar].fRegAcquired = false;
    7145 }
    7146 
    7147 
    7148 /**
    71497112 * Makes sure variable @a idxVar has a register assigned to it and that it stays
    71507113 * fixed till we call iemNativeVarRegisterRelease.
     
    71627125 */
    71637126DECL_HIDDEN_THROW(uint8_t) iemNativeVarRegisterAcquire(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar, uint32_t *poff,
    7164                                                        bool fInitialized = false, uint8_t idxRegPref = UINT8_MAX)
     7127                                                       bool fInitialized /*= false*/, uint8_t idxRegPref /*= UINT8_MAX*/)
    71657128{
    71667129    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
     
    1034410307
    1034510308/**
    10346  * This must be instantiated *before* branching off to the lookup code,
    10347  * so that register spilling and whatnot happens for everyone.
    10348  */
    10349 typedef struct IEMNATIVEEMITTLBSTATE
    10350 {
    10351     bool const      fSkip;
    10352     uint8_t const   idxRegPtrHlp;   /**< We don't support immediate variables with register assignment, so this a tmp reg alloc. */
    10353     uint8_t const   idxRegPtr;
    10354     uint8_t const   idxRegSegBase;
    10355     uint8_t const   idxRegSegLimit;
    10356     uint8_t const   idxRegSegAttrib;
    10357     uint8_t const   idxReg1;
    10358     uint8_t const   idxReg2;
    10359 #if defined(RT_ARCH_ARM64)
    10360     uint8_t const   idxReg3;
    10361 #endif
    10362     uint64_t const  uAbsPtr;
    10363 
    10364     IEMNATIVEEMITTLBSTATE(PIEMRECOMPILERSTATE a_pReNative, uint32_t *a_poff, uint8_t a_idxVarGCPtrMem,
    10365                           uint8_t a_iSegReg, uint8_t a_cbMem, uint8_t a_offDisp = 0)
    10366 #ifdef IEMNATIVE_WITH_TLB_LOOKUP
    10367         /* 32-bit and 64-bit wraparound will require special handling, so skip these for absolute addresses. */
    10368         :           fSkip(   a_pReNative->Core.aVars[a_idxVarGCPtrMem].enmKind == kIemNativeVarKind_Immediate
    10369                           &&   (  (a_pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT
    10370                                 ? (uint64_t)(UINT32_MAX - a_cbMem - a_offDisp)
    10371                                 : (uint64_t)(UINT64_MAX - a_cbMem - a_offDisp))
    10372                              < a_pReNative->Core.aVars[a_idxVarGCPtrMem].u.uValue)
    10373 #else
    10374         :           fSkip(true)
    10375 #endif
    10376 #if defined(RT_ARCH_AMD64) /* got good immediate encoding, otherwise we just load the address in a reg immediately. */
    10377         ,    idxRegPtrHlp(UINT8_MAX)
    10378 #else
    10379         ,    idxRegPtrHlp(   a_pReNative->Core.aVars[a_idxVarGCPtrMem].enmKind != kIemNativeVarKind_Immediate
    10380                           || fSkip
    10381                           ? UINT8_MAX
    10382                           : iemNativeRegAllocTmpImm(a_pReNative, a_poff, a_pReNative->Core.aVars[a_idxVarGCPtrMem].u.uValue) )
    10383 #endif
    10384         ,       idxRegPtr(a_pReNative->Core.aVars[a_idxVarGCPtrMem].enmKind != kIemNativeVarKind_Immediate && !fSkip
    10385                           ? iemNativeVarRegisterAcquire(a_pReNative, a_idxVarGCPtrMem, a_poff,
    10386                                                         true /*fInitialized*/, IEMNATIVE_CALL_ARG2_GREG)
    10387                           : idxRegPtrHlp)
    10388         ,   idxRegSegBase(a_iSegReg == UINT8_MAX || fSkip
    10389                           ? UINT8_MAX
    10390                           : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_BASE(a_iSegReg)))
    10391         ,  idxRegSegLimit((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT) || fSkip
    10392                           ? UINT8_MAX
    10393                           : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_LIMIT(a_iSegReg)))
    10394         , idxRegSegAttrib((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT) || fSkip
    10395                           ? UINT8_MAX
    10396                           : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_ATTRIB(a_iSegReg)))
    10397         ,         idxReg1(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
    10398         ,         idxReg2(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
    10399 #if defined(RT_ARCH_ARM64)
    10400         ,         idxReg3(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
    10401 #endif
    10402         ,         uAbsPtr(  a_pReNative->Core.aVars[a_idxVarGCPtrMem].enmKind != kIemNativeVarKind_Immediate || fSkip
    10403                           ? UINT64_MAX
    10404                           : a_pReNative->Core.aVars[a_idxVarGCPtrMem].u.uValue)
    10405 
    10406     {
    10407         RT_NOREF(a_cbMem, a_offDisp);
    10408     }
    10409 
    10410     /* Alternative constructor for PUSH and POP where we don't have a GCPtrMem
    10411        variable, only a register derived from the guest RSP. */
    10412     IEMNATIVEEMITTLBSTATE(PIEMRECOMPILERSTATE a_pReNative, uint8_t a_idxRegPtr, uint32_t *a_poff,
    10413                           uint8_t a_iSegReg, uint8_t a_cbMem)
    10414 #ifdef IEMNATIVE_WITH_TLB_LOOKUP
    10415         :           fSkip(false)
    10416 #else
    10417         :           fSkip(true)
    10418 #endif
    10419         ,    idxRegPtrHlp(UINT8_MAX)
    10420         ,       idxRegPtr(a_idxRegPtr)
    10421         ,   idxRegSegBase(a_iSegReg == UINT8_MAX || fSkip
    10422                           ? UINT8_MAX
    10423                           : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_BASE(a_iSegReg)))
    10424         ,  idxRegSegLimit((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT) || fSkip
    10425                           ? UINT8_MAX
    10426                           : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_LIMIT(a_iSegReg)))
    10427         , idxRegSegAttrib((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT) || fSkip
    10428                           ? UINT8_MAX
    10429                           : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_ATTRIB(a_iSegReg)))
    10430         ,         idxReg1(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
    10431         ,         idxReg2(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
    10432 #if defined(RT_ARCH_ARM64)
    10433         ,         idxReg3(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
    10434 #endif
    10435         ,         uAbsPtr(UINT64_MAX)
    10436 
    10437     {
    10438         RT_NOREF_PV(a_cbMem);
    10439     }
    10440 
    10441     void freeRegsAndReleaseVars(PIEMRECOMPILERSTATE a_pReNative, uint8_t idxVarGCPtrMem = UINT8_MAX) const
    10442     {
    10443         if (idxRegPtr != UINT8_MAX)
    10444         {
    10445             if (idxRegPtrHlp == UINT8_MAX)
    10446             {
    10447                 if (idxVarGCPtrMem != UINT8_MAX)
    10448                     iemNativeVarRegisterRelease(a_pReNative, idxVarGCPtrMem);
    10449             }
    10450             else
    10451             {
    10452                 Assert(idxRegPtrHlp == idxRegPtr);
    10453                 iemNativeRegFreeTmpImm(a_pReNative, idxRegPtrHlp);
    10454             }
    10455         }
    10456         else
    10457             Assert(idxRegPtrHlp == UINT8_MAX);
    10458         if (idxRegSegBase != UINT8_MAX)
    10459             iemNativeRegFreeTmp(a_pReNative, idxRegSegBase);
    10460         if (idxRegSegLimit != UINT8_MAX)
    10461         {
    10462             iemNativeRegFreeTmp(a_pReNative, idxRegSegLimit);
    10463             iemNativeRegFreeTmp(a_pReNative, idxRegSegAttrib);
    10464         }
    10465         else
    10466             Assert(idxRegSegAttrib == UINT8_MAX);
    10467 #if defined(RT_ARCH_ARM64)
    10468         iemNativeRegFreeTmp(a_pReNative, idxReg3);
    10469 #endif
    10470         iemNativeRegFreeTmp(a_pReNative, idxReg2);
    10471         iemNativeRegFreeTmp(a_pReNative, idxReg1);
    10472 
    10473     }
    10474 
    10475     uint32_t getRegsNotToSave() const
    10476     {
    10477         if (!fSkip)
    10478             return RT_BIT_32(idxReg1)
    10479                  | RT_BIT_32(idxReg2)
    10480 #if defined(RT_ARCH_ARM64)
    10481                  | RT_BIT_32(idxReg3)
    10482 #endif
    10483                  ;
    10484         return 0;
    10485     }
    10486 
    10487     /** This is only for avoiding assertions. */
    10488     uint32_t getActiveRegsWithShadows() const
    10489     {
    10490 #ifdef VBOX_STRICT
    10491         if (!fSkip)
    10492             return RT_BIT_32(idxRegSegBase) | RT_BIT_32(idxRegSegLimit) | RT_BIT_32(idxRegSegAttrib);
    10493 #endif
    10494         return 0;
    10495     }
    10496 } IEMNATIVEEMITTLBSTATE;
    10497 
    10498 
    10499 /**
    1050010309 * This is called via iemNativeHlpAsmSafeWrapCheckTlbLookup.
    1050110310 */
     
    1054410353    RT_BREAKPOINT();
    1054510354}
    10546 DECLASM(void) iemNativeHlpAsmSafeWrapCheckTlbLookup(void);
    10547 
    10548 
    10549 #ifdef IEMNATIVE_WITH_TLB_LOOKUP
    10550 DECL_INLINE_THROW(uint32_t)
    10551 iemNativeEmitTlbLookup(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEEMITTLBSTATE const * const pTlbState,
    10552                        uint8_t iSegReg, uint8_t cbMem, uint8_t fAlignMask, uint32_t fAccess,
    10553                        uint32_t idxLabelTlbLookup, uint32_t idxLabelTlbMiss, uint8_t idxRegMemResult,
    10554                        uint8_t offDisp = 0)
    10555 {
    10556     Assert(!pTlbState->fSkip);
    10557 # if defined(RT_ARCH_AMD64)
    10558     uint8_t * const  pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 512);
    10559 # elif defined(RT_ARCH_ARM64)
    10560     uint32_t * const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 64);
    10561 # endif
    10562 
    10563     /*
    10564      * The expand down check isn't used all that much, so we emit it here to keep
    10565      * the lookup straighter.
    10566      */
    10567     /* check_expand_down: ; complicated! */
    10568     uint32_t const offCheckExpandDown = off;
    10569     uint32_t       offFixupLimitDone  = 0;
    10570     if (iSegReg != UINT8_MAX && (pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
    10571     {
    10572 off = iemNativeEmitBrkEx(pCodeBuf, off, 1); /** @todo this needs testing */
    10573         /* cmp  seglim, regptr */
    10574         if (pTlbState->idxRegPtr != UINT8_MAX && offDisp == 0)
    10575             off = iemNativeEmitCmpGpr32WithGprEx(pCodeBuf, off, pTlbState->idxRegSegLimit, pTlbState->idxRegPtr);
    10576         else if (pTlbState->idxRegPtr == UINT8_MAX)
    10577             off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxRegSegLimit,
    10578                                                  (uint32_t)(pTlbState->uAbsPtr + offDisp));
    10579         else if (cbMem == 1)
    10580             off = iemNativeEmitCmpGpr32WithGprEx(pCodeBuf, off, pTlbState->idxRegSegLimit, pTlbState->idxReg2);
    10581         else
    10582         {   /* use idxRegMemResult to calc the displaced address. */
    10583             off = iemNativeEmitGpr32EqGprPlusImmEx(pCodeBuf, off, idxRegMemResult, pTlbState->idxRegPtr, offDisp);
    10584             off = iemNativeEmitCmpGpr32WithGprEx(pCodeBuf, off, pTlbState->idxRegSegLimit, idxRegMemResult);
    10585         }
    10586         /* ja  tlbmiss */
    10587         off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
    10588 
    10589         /* reg1 = segattr & X86DESCATTR_D (0x4000) */
    10590         off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxRegSegAttrib, X86DESCATTR_D);
    10591         /* xor  reg1, X86DESCATTR_D */
    10592         off = iemNativeEmitXorGpr32ByImmEx(pCodeBuf, off, pTlbState->idxReg1, X86DESCATTR_D);
    10593         /* shl  reg1, 2 (16 - 14) */
    10594         AssertCompile((X86DESCATTR_D << 2) == UINT32_C(0x10000));
    10595         off = iemNativeEmitShiftGpr32LeftEx(pCodeBuf, off, pTlbState->idxReg1, 2);
    10596         /* dec  reg1 (=> 0xffff if D=0; 0xffffffff if D=1) */
    10597         off = iemNativeEmitSubGpr32ImmEx(pCodeBuf, off, pTlbState->idxReg1, 1);
    10598         /* cmp  reg1, reg2 (64-bit) / imm (32-bit) */
    10599         if (pTlbState->idxRegPtr != UINT8_MAX)
    10600             off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxReg1,
    10601                                                cbMem > 1 || offDisp != 0 ? pTlbState->idxReg2 : pTlbState->idxRegPtr);
    10602         else
    10603             off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1,
    10604                                                  (uint32_t)(pTlbState->uAbsPtr + offDisp + cbMem - 1)); /* fSkip=true on overflow. */
    10605         /* jbe  tlbmiss */
    10606         off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_be);
    10607         /* jmp  limitdone */
    10608         offFixupLimitDone = off;
    10609         off = iemNativeEmitJmpToFixedEx(pCodeBuf, off, off /* ASSUME short jump suffices */);
    10610     }
    10611 
    10612     /*
    10613      * tlblookup:
    10614      */
    10615     iemNativeLabelDefine(pReNative, idxLabelTlbLookup, off);
    10616 # if defined(RT_ARCH_ARM64) && 0
    10617     off = iemNativeEmitBrkEx(pCodeBuf, off, 0);
    10618 # endif
    10619 
    10620     /*
    10621      * 1. Segmentation.
    10622      *
    10623      * 1a. Check segment limit and attributes if non-flat 32-bit code.  This is complicated.
    10624      */
    10625     if (iSegReg != UINT8_MAX && (pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
    10626     {
    10627         /* Check that we've got a segment loaded and that it allows the access.
    10628            For write access this means a writable data segment.
    10629            For read-only accesses this means a readable code segment or any data segment. */
    10630         if (fAccess & IEM_ACCESS_TYPE_WRITE)
    10631         {
    10632             uint32_t const fMustBe1 = X86DESCATTR_P        | X86DESCATTR_DT    | X86_SEL_TYPE_WRITE;
    10633             uint32_t const fMustBe0 = X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE;
    10634             /* reg1 = segattrs & (must1|must0) */
    10635             off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1,
    10636                                                   pTlbState->idxRegSegAttrib, fMustBe1 | fMustBe0);
    10637             /* cmp reg1, must1 */
    10638             AssertCompile(fMustBe1 <= UINT16_MAX);
    10639             off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, fMustBe1);
    10640             /* jne tlbmiss */
    10641             off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
    10642         }
    10643         else
    10644         {
    10645             /*  U  | !P |!DT |!CD | RW |
    10646                 16 |  8 |  4 |  3 |  1 |
    10647               -------------------------------
    10648                 0  |  0 |  0 |  0 |  0 | execute-only code segment. - must be excluded
    10649                 0  |  0 |  0 |  0 |  1 | execute-read code segment.
    10650                 0  |  0 |  0 |  1 |  0 | read-only data segment.
    10651                 0  |  0 |  0 |  1 |  1 | read-write data segment.   - last valid combination
    10652             */
    10653             /* reg1 = segattrs & (relevant attributes) */
    10654             off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxRegSegAttrib,
    10655                                                     X86DESCATTR_UNUSABLE | X86DESCATTR_P | X86DESCATTR_DT
    10656                                                   | X86_SEL_TYPE_CODE    | X86_SEL_TYPE_WRITE);
    10657             /* xor reg1, X86DESCATTR_P | X86DESCATTR_DT | X86_SEL_TYPE_CODE ; place C=1 RW=0 at the bottom & limit the range.
    10658                                             ; EO-code=0,  ER-code=2, RO-data=8, RW-data=10 */
    10659 #ifdef RT_ARCH_ARM64
    10660             off = iemNativeEmitXorGpr32ByImmEx(pCodeBuf, off, pTlbState->idxReg1, X86DESCATTR_DT | X86_SEL_TYPE_CODE);
    10661             off = iemNativeEmitXorGpr32ByImmEx(pCodeBuf, off, pTlbState->idxReg1, X86DESCATTR_P);
    10662 #else
    10663             off = iemNativeEmitXorGpr32ByImmEx(pCodeBuf, off, pTlbState->idxReg1,
    10664                                                X86DESCATTR_P | X86DESCATTR_DT | X86_SEL_TYPE_CODE);
    10665 #endif
    10666             /* sub reg1, X86_SEL_TYPE_WRITE ; EO-code=-2, ER-code=0, RO-data=6, RW-data=8 */
    10667             off = iemNativeEmitSubGpr32ImmEx(pCodeBuf, off, pTlbState->idxReg1, X86_SEL_TYPE_WRITE /* ER-code */);
    10668             /* cmp reg1, X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE */
    10669             AssertCompile(X86_SEL_TYPE_CODE == 8);
    10670             off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, X86_SEL_TYPE_CODE);
    10671             /* ja  tlbmiss */
    10672             off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
    10673         }
    10674 
    10675         /* If we're accessing more than one byte or if we're working with a non-zero offDisp,
    10676            put the last address we'll be accessing in idxReg2 (64-bit). */
    10677         if ((cbMem > 1 || offDisp != 0) && pTlbState->idxRegPtr != UINT8_MAX)
    10678         {
    10679             if (!offDisp)
    10680                 /* reg2 = regptr + cbMem - 1; 64-bit result so we can fend off wraparounds/overflows. */
    10681                 off = iemNativeEmitGprEqGprPlusImmEx(pCodeBuf, off, pTlbState->idxReg2,/*=*/ pTlbState->idxRegPtr,/*+*/ cbMem - 1);
    10682             else
    10683             {
    10684                 /* reg2 = (uint32_t)(regptr + offDisp) + cbMem - 1;. */
    10685                 off = iemNativeEmitGpr32EqGprPlusImmEx(pCodeBuf, off,
    10686                                                        pTlbState->idxReg2,/*=*/ pTlbState->idxRegPtr,/*+*/ + offDisp);
    10687                 off = iemNativeEmitAddGprImmEx(pCodeBuf, off, pTlbState->idxReg2, cbMem - 1);
    10688             }
    10689         }
    10690 
    10691         /*
    10692          * Check the limit.  If this is a write access, we know that it's a
    10693          * data segment and includes the expand_down bit.  For read-only accesses
    10694          * we need to check that code/data=0 and expanddown=1 before continuing.
    10695          */
    10696         if (fAccess & IEM_ACCESS_TYPE_WRITE)
    10697         {
    10698             /* test segattrs, X86_SEL_TYPE_DOWN */
    10699             AssertCompile(X86_SEL_TYPE_DOWN < 128);
    10700             off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, pTlbState->idxRegSegAttrib, X86_SEL_TYPE_DOWN);
    10701             /* jnz  check_expand_down */
    10702             off = iemNativeEmitJccToFixedEx(pCodeBuf, off, offCheckExpandDown, kIemNativeInstrCond_ne);
    10703         }
    10704         else
    10705         {
    10706             /* reg1 = segattr & (code | down) */
    10707             off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1,
    10708                                                   pTlbState->idxRegSegAttrib, X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN);
    10709             /* cmp reg1, down */
    10710             off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, X86_SEL_TYPE_DOWN);
    10711             /* je check_expand_down */
    10712             off = iemNativeEmitJccToFixedEx(pCodeBuf, off, offCheckExpandDown, kIemNativeInstrCond_e);
    10713         }
    10714 
    10715         /* expand_up:
    10716            cmp  seglim, regptr/reg2/imm */
    10717         if (pTlbState->idxRegPtr != UINT8_MAX)
    10718             off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxRegSegLimit,
    10719                                                cbMem > 1 || offDisp != 0 ? pTlbState->idxReg2 : pTlbState->idxRegPtr);
    10720         else
    10721             off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxRegSegLimit,
    10722                                                  (uint32_t)pTlbState->uAbsPtr + offDisp + cbMem - 1U); /* fSkip=true on overflow. */
    10723         /* jbe  tlbmiss */
    10724         off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_be);
    10725 
    10726         /* limitdone: */
    10727         iemNativeFixupFixedJump(pReNative, offFixupLimitDone, off);
    10728     }
    10729 
    10730     /* 1b. Add the segment base.  We use idxRegMemResult for the ptr register if
    10731            this step is required or if the address is a constant (simplicity) or
    10732            if offDisp is non-zero. */
    10733     uint8_t const idxRegFlatPtr = iSegReg != UINT8_MAX || pTlbState->idxRegPtr == UINT8_MAX || offDisp != 0
    10734                                 ? idxRegMemResult : pTlbState->idxRegPtr;
    10735     if (iSegReg != UINT8_MAX)
    10736     {
    10737         Assert(idxRegFlatPtr != pTlbState->idxRegPtr);
    10738         /* regflat = segbase + regptr/imm */
    10739         if ((pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT)
    10740         {
    10741             Assert(iSegReg >= X86_SREG_FS);
    10742             if (pTlbState->idxRegPtr != UINT8_MAX)
    10743             {
    10744                 off = iemNativeEmitGprEqGprPlusGprEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->idxRegSegBase, pTlbState->idxRegPtr);
    10745                 if (offDisp != 0)
    10746                     off = iemNativeEmitAddGprImmEx(pCodeBuf, off, idxRegFlatPtr, offDisp);
    10747             }
    10748             else
    10749                 off = iemNativeEmitGprEqGprPlusImmEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->idxRegSegBase,
    10750                                                      pTlbState->uAbsPtr + offDisp);
    10751         }
    10752         else if (pTlbState->idxRegPtr != UINT8_MAX)
    10753         {
    10754             off = iemNativeEmitGpr32EqGprPlusGprEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->idxRegSegBase, pTlbState->idxRegPtr);
    10755             if (offDisp != 0)
    10756                 off = iemNativeEmitAddGpr32ImmEx(pCodeBuf, off, idxRegFlatPtr, offDisp);
    10757         }
    10758         else
    10759             off = iemNativeEmitGpr32EqGprPlusImmEx(pCodeBuf, off, idxRegFlatPtr,
    10760                                                    pTlbState->idxRegSegBase, (uint32_t)pTlbState->uAbsPtr + offDisp);
    10761     }
    10762     else if (pTlbState->idxRegPtr == UINT8_MAX)
    10763     {
    10764         if ((pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT)
    10765             off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->uAbsPtr + offDisp);
    10766         else
    10767             off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, idxRegFlatPtr, (uint32_t)pTlbState->uAbsPtr + offDisp);
    10768     }
    10769     else if (offDisp != 0)
    10770     {
    10771         Assert(idxRegFlatPtr != pTlbState->idxRegPtr);
    10772         if ((pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT)
    10773             off = iemNativeEmitGprEqGprPlusImmEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->idxRegPtr, offDisp);
    10774         else
    10775             off = iemNativeEmitGpr32EqGprPlusImmEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->idxRegPtr, offDisp);
    10776     }
    10777     else
    10778         Assert(idxRegFlatPtr == pTlbState->idxRegPtr);
    10779 
    10780     /*
    10781      * 2. Check that the address doesn't cross a page boundary and doesn't have alignment issues.
    10782      *
    10783      * 2a. Alignment check using fAlignMask.
    10784      */
    10785     if (fAlignMask)
    10786     {
    10787         Assert(RT_IS_POWER_OF_TWO(fAlignMask + 1));
    10788         Assert(fAlignMask < 128);
    10789         /* test regflat, fAlignMask */
    10790         off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, idxRegFlatPtr, fAlignMask);
    10791         /* jnz tlbmiss */
    10792         off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
    10793     }
    10794 
    10795     /*
    10796      * 2b. Check that it's not crossing a page boundary. This is implicit in
    10797      *     the previous test if the alignment is same or larger than the type.
    10798      */
    10799     if (cbMem > fAlignMask + 1)
    10800     {
    10801         /* reg1 = regflat & 0xfff */
    10802         off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1,/*=*/ idxRegFlatPtr,/*&*/ GUEST_PAGE_OFFSET_MASK);
    10803         /* cmp reg1, GUEST_PAGE_SIZE - cbMem */
    10804         off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, GUEST_PAGE_SIZE);
    10805         /* ja  tlbmiss */
    10806         off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
    10807     }
    10808 
    10809     /*
    10810      * 3. TLB lookup.
    10811      *
    10812      * 3a. Calculate the TLB tag value (IEMTLB_CALC_TAG).
    10813      *     In 64-bit mode we will also check for non-canonical addresses here.
    10814      */
    10815     if ((pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT)
    10816     {
    10817 # if defined(RT_ARCH_AMD64)
    10818         /* mov reg1, regflat */
    10819         off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, pTlbState->idxReg1, idxRegFlatPtr);
    10820         /* rol reg1, 16 */
    10821         off = iemNativeEmitRotateGprLeftEx(pCodeBuf, off, pTlbState->idxReg1, 16);
    10822         /** @todo Would 'movsx reg2, word reg1' and working on reg2 in dwords be faster? */
    10823         /* inc word reg1 */
    10824         pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    10825         if (pTlbState->idxReg1 >= 8)
    10826             pCodeBuf[off++] = X86_OP_REX_B;
    10827         pCodeBuf[off++] = 0xff;
    10828         pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, pTlbState->idxReg1 & 7);
    10829         /* cmp word reg1, 1 */
    10830         pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    10831         if (pTlbState->idxReg1 >= 8)
    10832             pCodeBuf[off++] = X86_OP_REX_B;
    10833         pCodeBuf[off++] = 0x83;
    10834         pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, pTlbState->idxReg1 & 7);
    10835         pCodeBuf[off++] = 1;
    10836         /* ja  tlbmiss */
    10837         off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
    10838         /* shr reg1, 16 + GUEST_PAGE_SHIFT */
    10839         off = iemNativeEmitShiftGprRightEx(pCodeBuf, off, pTlbState->idxReg1, 16 + GUEST_PAGE_SHIFT);
    10840 
    10841 # elif defined(RT_ARCH_ARM64)
    10842         /* lsr  reg1, regflat, #48 */
    10843         pCodeBuf[off++] = Armv8A64MkInstrLslImm(pTlbState->idxReg1, idxRegFlatPtr, 4);
    10844         /* add  reg1, reg1, #1 */
    10845         pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(pTlbState->idxReg1, pTlbState->idxReg1, 1, false /*f64Bit*/);
    10846         /* tst  reg1, #0xfffe */
    10847         Assert(Armv8A64ConvertImmRImmS2Mask32(14, 31) == 0xfffe);
    10848         pCodeBuf[off++] = Armv8A64MkInstrTstImm(pTlbState->idxReg1, 14, 31,  false /*f64Bit*/);
    10849         /* b.ne tlbmiss */
    10850         off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
    10851 
    10852         /* ubfx reg1, regflat, #12, #36 */
    10853         pCodeBuf[off++] = Armv8A64MkInstrUbfx(pTlbState->idxReg1, idxRegFlatPtr, GUEST_PAGE_SHIFT, 48 - GUEST_PAGE_SHIFT);
    10854 # else
    10855 #  error "Port me"
    10856 # endif
    10857     }
    10858     else
    10859     {
    10860         /* reg1 = (uint32_t)(regflat >> 12) */
    10861         off = iemNativeEmitGpr32EqGprShiftRightImmEx(pCodeBuf, off, pTlbState->idxReg1, idxRegFlatPtr, GUEST_PAGE_SHIFT);
    10862     }
    10863     /* or  reg1, [qword pVCpu->iem.s.DataTlb.uTlbRevision] */
    10864 # if defined(RT_ARCH_AMD64)
    10865     pCodeBuf[off++] = pTlbState->idxReg1 < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R;
    10866     pCodeBuf[off++] = 0x0b; /* OR r64,r/m64 */
    10867     off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, pTlbState->idxReg1, RT_UOFFSETOF(VMCPUCC, iem.s.DataTlb.uTlbRevision));
    10868 # else
    10869     off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, pTlbState->idxReg3, RT_UOFFSETOF(VMCPUCC, iem.s.DataTlb.uTlbRevision));
    10870     off = iemNativeEmitOrGprByGprEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg3);
    10871 # endif
    10872 
    10873     /*
    10874      * 3b. Calc pTlbe.
    10875      */
    10876 # if defined(RT_ARCH_AMD64)
    10877     /* movzx reg2, byte reg1 */
    10878     off = iemNativeEmitLoadGprFromGpr8Ex(pCodeBuf, off, pTlbState->idxReg2, pTlbState->idxReg1);
    10879     /* shl   reg2, 5 ; reg2 *= sizeof(IEMTLBENTRY) */
    10880     AssertCompileSize(IEMTLBENTRY, 32);
    10881     off = iemNativeEmitShiftGprLeftEx(pCodeBuf, off, pTlbState->idxReg2, 5);
    10882     /* lea   reg2, [pVCpu->iem.s.DataTlb.aEntries + reg2] */
    10883     AssertCompile(IEMNATIVE_REG_FIXED_PVMCPU < 8);
    10884     pCodeBuf[off++] = pTlbState->idxReg2 < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_X | X86_OP_REX_R;
    10885     pCodeBuf[off++] = 0x8d;
    10886     pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, pTlbState->idxReg2 & 7, 4 /*SIB*/);
    10887     pCodeBuf[off++] = X86_SIB_MAKE(IEMNATIVE_REG_FIXED_PVMCPU & 7, pTlbState->idxReg2 & 7, 0);
    10888     pCodeBuf[off++] = RT_BYTE1(RT_UOFFSETOF(VMCPUCC,  iem.s.DataTlb.aEntries));
    10889     pCodeBuf[off++] = RT_BYTE2(RT_UOFFSETOF(VMCPUCC,  iem.s.DataTlb.aEntries));
    10890     pCodeBuf[off++] = RT_BYTE3(RT_UOFFSETOF(VMCPUCC,  iem.s.DataTlb.aEntries));
    10891     pCodeBuf[off++] = RT_BYTE4(RT_UOFFSETOF(VMCPUCC,  iem.s.DataTlb.aEntries));
    10892 
    10893 # elif defined(RT_ARCH_ARM64)
    10894     /* reg2 = (reg1 & 0xff) << 5 */
    10895     pCodeBuf[off++] = Armv8A64MkInstrUbfiz(pTlbState->idxReg2, pTlbState->idxReg1, 5, 8);
    10896     /* reg2 += offsetof(VMCPUCC, iem.s.DataTlb.aEntries) */
    10897     off = iemNativeEmitAddGprImmEx(pCodeBuf, off, pTlbState->idxReg2, RT_UOFFSETOF(VMCPUCC, iem.s.DataTlb.aEntries),
    10898                                    pTlbState->idxReg3 /*iGprTmp*/);
    10899     /* reg2 += pVCpu */
    10900     off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, pTlbState->idxReg2, IEMNATIVE_REG_FIXED_PVMCPU);
    10901 # else
    10902 #  error "Port me"
    10903 # endif
    10904 
    10905     /*
    10906      * 3c. Compare the TLBE.uTag with the one from 2a (reg1).
    10907      */
    10908 # if defined(RT_ARCH_AMD64)
    10909     /* cmp reg1, [reg2] */
    10910     pCodeBuf[off++] = X86_OP_REX_W | (pTlbState->idxReg1 < 8 ? 0 : X86_OP_REX_R) | (pTlbState->idxReg2 < 8 ? 0 : X86_OP_REX_B);
    10911     pCodeBuf[off++] = 0x3b;
    10912     off = iemNativeEmitGprByGprDisp(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2, RT_UOFFSETOF(IEMTLBENTRY, uTag));
    10913 # elif defined(RT_ARCH_ARM64)
    10914     off = iemNativeEmitLoadGprByGprU64Ex(pCodeBuf, off, pTlbState->idxReg3, pTlbState->idxReg2, RT_UOFFSETOF(IEMTLBENTRY, uTag));
    10915     off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg3);
    10916 # else
    10917 #  error "Port me"
    10918 # endif
    10919     /* jne tlbmiss */
    10920     off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
    10921 
    10922     /*
    10923      * 4. Check TLB page table level access flags and physical page revision #.
    10924      */
    10925     /* mov reg1, mask */
    10926     AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
    10927     uint64_t const fNoUser = (((pReNative->fExec >> IEM_F_X86_CPL_SHIFT) & IEM_F_X86_CPL_SMASK) + 1) & IEMTLBE_F_PT_NO_USER;
    10928     uint64_t       fTlbe   = IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PT_NO_ACCESSED
    10929                            | fNoUser;
    10930     if (fAccess & IEM_ACCESS_TYPE_READ)
    10931         fTlbe |= IEMTLBE_F_PG_NO_READ;
    10932     if (fAccess & IEM_ACCESS_TYPE_WRITE)
    10933         fTlbe |= IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY;
    10934     off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, pTlbState->idxReg1, fTlbe);
    10935 # if defined(RT_ARCH_AMD64)
    10936     /* and reg1, [reg2->fFlagsAndPhysRev] */
    10937     pCodeBuf[off++] = X86_OP_REX_W | (pTlbState->idxReg1 < 8 ? 0 : X86_OP_REX_R) | (pTlbState->idxReg2 < 8 ? 0 : X86_OP_REX_B);
    10938     pCodeBuf[off++] = 0x23;
    10939     off = iemNativeEmitGprByGprDisp(pCodeBuf, off, pTlbState->idxReg1,
    10940                                     pTlbState->idxReg2, RT_UOFFSETOF(IEMTLBENTRY, fFlagsAndPhysRev));
    10941 
    10942     /* cmp reg1, [pVCpu->iem.s.DataTlb.uTlbPhysRev] */
    10943     pCodeBuf[off++] = X86_OP_REX_W | (pTlbState->idxReg1 < 8 ? 0 : X86_OP_REX_R);
    10944     pCodeBuf[off++] = 0x3b;
    10945     off = iemNativeEmitGprByGprDisp(pCodeBuf, off, pTlbState->idxReg1, IEMNATIVE_REG_FIXED_PVMCPU,
    10946                                     RT_UOFFSETOF(VMCPUCC, iem.s.DataTlb.uTlbPhysRev));
    10947 # elif defined(RT_ARCH_ARM64)
    10948     off = iemNativeEmitLoadGprByGprU64Ex(pCodeBuf, off, pTlbState->idxReg3, pTlbState->idxReg2,
    10949                                          RT_UOFFSETOF(IEMTLBENTRY, fFlagsAndPhysRev));
    10950     pCodeBuf[off++] = Armv8A64MkInstrAnd(pTlbState->idxReg1, pTlbState->idxReg1, pTlbState->idxReg3);
    10951     off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, pTlbState->idxReg3, RT_UOFFSETOF(VMCPUCC, iem.s.DataTlb.uTlbPhysRev));
    10952     off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg3);
    10953 # else
    10954 #  error "Port me"
    10955 # endif
    10956     /* jne tlbmiss */
    10957     off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
    10958 
    10959     /*
    10960      * 5. Check that pbMappingR3 isn't NULL (paranoia) and calculate the
    10961      *    resulting pointer.
    10962      */
    10963     /* mov  reg1, [reg2->pbMappingR3] */
    10964     off = iemNativeEmitLoadGprByGprU64Ex(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
    10965                                          RT_UOFFSETOF(IEMTLBENTRY, pbMappingR3));
    10966     /* if (!reg1) goto tlbmiss; */
    10967     /** @todo eliminate the need for this test? */
    10968     off = iemNativeEmitTestIfGprIsZeroAndJmpToLabelEx(pReNative, pCodeBuf, off, pTlbState->idxReg1,
    10969                                                       true /*f64Bit*/, idxLabelTlbMiss);
    10970 
    10971     if (idxRegFlatPtr == idxRegMemResult) /* See step 1b. */
    10972     {
    10973         /* and result, 0xfff */
    10974         off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxRegMemResult, GUEST_PAGE_OFFSET_MASK);
    10975     }
    10976     else
    10977     {
    10978         Assert(idxRegFlatPtr == pTlbState->idxRegPtr);
    10979         /* result = regflat & 0xfff */
    10980         off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, idxRegMemResult, idxRegFlatPtr, GUEST_PAGE_OFFSET_MASK);
    10981     }
    10982     /* add result, reg1 */
    10983     off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, idxRegMemResult, pTlbState->idxReg1);
    10984 
    10985 # if 0
    10986     /*
    10987      * To verify the result we call a helper function.
    10988      *
    10989      * It's like the state logging, so parameters are passed on the stack.
    10990      * iemNativeHlpAsmSafeWrapCheckTlbLookup(pVCpu, result, addr, seg | (cbMem << 8) | (fAccess << 16))
    10991      */
    10992 #  ifdef RT_ARCH_AMD64
    10993     /* push     seg | (cbMem << 8) | (fAccess << 16) */
    10994     pCodeBuf[off++] = 0x68;
    10995     pCodeBuf[off++] = iSegReg;
    10996     pCodeBuf[off++] = cbMem;
    10997     pCodeBuf[off++] = RT_BYTE1(fAccess);
    10998     pCodeBuf[off++] = RT_BYTE2(fAccess);
    10999     /* push     pTlbState->idxRegPtr / immediate address. */
    11000     if (pTlbState->idxRegPtr != UINT8_MAX)
    11001     {
    11002         if (pTlbState->idxRegPtr >= 8)
    11003             pCodeBuf[off++] = X86_OP_REX_B;
    11004         pCodeBuf[off++] = 0x50 + (pTlbState->idxRegPtr & 7);
    11005     }
    11006     else
    11007     {
    11008         off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->uAbsPtr);
    11009         if (pTlbState->idxReg1 >= 8)
    11010             pCodeBuf[off++] = X86_OP_REX_B;
    11011         pCodeBuf[off++] = 0x50 + (pTlbState->idxReg1 & 7);
    11012     }
    11013     /* push     idxRegMemResult */
    11014     if (idxRegMemResult >= 8)
    11015         pCodeBuf[off++] = X86_OP_REX_B;
    11016     pCodeBuf[off++] = 0x50 + (idxRegMemResult & 7);
    11017     /* push     pVCpu */
    11018     pCodeBuf[off++] = 0x50 + IEMNATIVE_REG_FIXED_PVMCPU;
    11019     /* mov      reg1, helper */
    11020     off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, pTlbState->idxReg1, (uintptr_t)iemNativeHlpAsmSafeWrapCheckTlbLookup);
    11021     /* call     [reg1] */
    11022     pCodeBuf[off++] = X86_OP_REX_W | (pTlbState->idxReg1 < 8 ? 0 : X86_OP_REX_B);
    11023     pCodeBuf[off++] = 0xff;
    11024     pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, pTlbState->idxReg1 & 7);
    11025     /* The stack is cleaned up by helper function. */
    11026 
    11027 #  else
    11028 #   error "Port me"
    11029 #  endif
    11030 # endif
    11031 
    11032     IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    11033 
    11034     return off;
    11035 }
    11036 #endif /* IEMNATIVE_WITH_TLB_LOOKUP */
     10355
     10356/* The rest of the code is in IEMN8veRecompilerTlbLookup.h. */
    1103710357
    1103810358
     
    1130010620         * TlbLookup:
    1130110621         */
    11302         off = iemNativeEmitTlbLookup(pReNative, off, &TlbState, iSegReg, cbMem, fAlignMask,
    11303                                      enmOp == kIemNativeEmitMemOp_Store ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ,
    11304                                      idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult, offDisp);
     10622        off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, fAlignMask,
     10623                                           enmOp == kIemNativeEmitMemOp_Store ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ,
     10624                                           idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult, offDisp);
    1130510625
    1130610626        /*
     
    1202311343         * TlbLookup:
    1202411344         */
    12025         off = iemNativeEmitTlbLookup(pReNative, off, &TlbState, iSegReg, cbMemAccess, cbMemAccess - 1, IEM_ACCESS_TYPE_WRITE,
    12026                                      idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
     11345        off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMemAccess, cbMemAccess - 1,
     11346                                           IEM_ACCESS_TYPE_WRITE, idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
    1202711347
    1202811348        /*
     
    1237111691         * TlbLookup:
    1237211692         */
    12373         off = iemNativeEmitTlbLookup(pReNative, off, &TlbState, iSegReg, cbMem, cbMem - 1, IEM_ACCESS_TYPE_READ,
    12374                                      idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
     11693        off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, cbMem - 1, IEM_ACCESS_TYPE_READ,
     11694                                           idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
    1237511695
    1237611696        /*
     
    1288912209         * TlbLookup:
    1289012210         */
    12891         off = iemNativeEmitTlbLookup(pReNative, off, &TlbState, iSegReg, cbMem, fAlignMask, fAccess,
    12892                                      idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
     12211        off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem, fAlignMask, fAccess,
     12212                                           idxLabelTlbLookup, idxLabelTlbMiss, idxRegMemResult);
    1289312213# ifdef VBOX_WITH_STATISTICS
    1289412214        off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, TlbState.idxReg1, TlbState.idxReg2,
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette