VirtualBox

Changeset 108278 in vbox


Timestamp: Feb 18, 2025 3:46:53 PM (2 months ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 167608
Message:

VMM/IEM: Removed the #ifndef IEM_WITH_SETJMP code. We've had IEM_WITH_SETJMP defined unconditionally since 7.0 and the code probably doesn't even compile w/o it, so best remove the unused code. jiraref:VBP-1531
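
For context, the pattern removed throughout these files looked roughly like the sketch below. The macro and table names (IEM_TRY_SETJMP, IEM_CATCH_LONGJMP_END, IEM_OPCODE_GET_FIRST_U8, FNIEMOP_CALL, g_apfnIemInterpretOnlyOneByteMap) are taken from the IEMAll.cpp hunks in this changeset; the surrounding code is abbreviated and not the exact VirtualBox source.

    /* Before: each decode step carried two bodies selected by IEM_WITH_SETJMP. */
    #ifdef IEM_WITH_SETJMP
        VBOXSTRICTRC rcStrict;
        IEM_TRY_SETJMP(pVCpu, rcStrict)
        {
            uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
            rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
        }
        IEM_CATCH_LONGJMP_END(pVCpu);
    #else
        uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
        VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    #endif

    /* After: IEM_WITH_SETJMP is assumed to be always defined (unconditional
       since 7.0), so only the setjmp/longjmp body is kept and one level of
       '# ' nesting drops out of the surrounding preprocessor blocks. */
    VBOXSTRICTRC rcStrict;
    IEM_TRY_SETJMP(pVCpu, rcStrict)
    {
        uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
        rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    }
    IEM_CATCH_LONGJMP_END(pVCpu);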

Location: trunk/src/VBox/VMM
Files: 20 edited

  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r108260 r108278  
    543543    RT_NOREF_PV(pszFunction);
    544544
    545 #ifdef IEM_WITH_SETJMP
    546545    VBOXSTRICTRC rcStrict;
    547546    IEM_TRY_SETJMP(pVCpu, rcStrict)
     
    555554    }
    556555    IEM_CATCH_LONGJMP_END(pVCpu);
    557 #else
    558     uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    559     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    560 #endif
    561556    if (rcStrict == VINF_SUCCESS)
    562557        pVCpu->iem.s.cInstructions++;
     
    604599            iemLogCurInstr(pVCpu, false, pszFunction);
    605600#endif
    606 #ifdef IEM_WITH_SETJMP
    607601            IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
    608602            {
     
    615609            }
    616610            IEM_CATCH_LONGJMP_END(pVCpu);
    617 #else
    618             IEM_OPCODE_GET_FIRST_U8(&b);
    619             rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    620 #endif
    621611            if (rcStrict == VINF_SUCCESS)
    622612            {
     
    874864    if (rcStrict == VINF_SUCCESS)
    875865    {
    876 #ifdef IEM_WITH_SETJMP
    877866        pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
    878867        IEM_TRY_SETJMP(pVCpu, rcStrict)
    879 #endif
    880868        {
    881869            /*
     
    960948            }
    961949        }
    962 #ifdef IEM_WITH_SETJMP
    963950        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    964951        {
    965952            if (pVCpu->iem.s.cActiveMappings > 0)
    966953                iemMemRollback(pVCpu);
    967 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
     954#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    968955            rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    969 # endif
     956#endif
    970957            pVCpu->iem.s.cLongJumps++;
    971958        }
    972959        IEM_CATCH_LONGJMP_END(pVCpu);
    973 #endif
    974960
    975961        /*
     
    10371023    if (rcStrict == VINF_SUCCESS)
    10381024    {
    1039 #ifdef IEM_WITH_SETJMP
    10401025        pVCpu->iem.s.cActiveMappings     = 0; /** @todo wtf?!? */
    10411026        IEM_TRY_SETJMP(pVCpu, rcStrict)
    1042 #endif
    10431027        {
    10441028#ifdef IN_RING0
     
    11471131            }
    11481132        }
    1149 #ifdef IEM_WITH_SETJMP
    11501133        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    11511134        {
     
    11551138        }
    11561139        IEM_CATCH_LONGJMP_END(pVCpu);
    1157 #endif
    11581140
    11591141        /*
  • trunk/src/VBox/VMM/VMMAll/IEMAllMem.cpp

    r108260 r108278  
    600600}
    601601
    602 #ifdef IEM_WITH_SETJMP
    603602
    604603/**
     
    679678}
    680679
    681 #endif /* IEM_WITH_SETJMP */
    682680
    683681#ifndef IN_RING3
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r107209 r108278  
    9393#ifndef IEM_WITH_DATA_TLB
    9494# error The data TLB must be enabled for the recompiler.
    95 #endif
    96 
    97 #ifndef IEM_WITH_SETJMP
    98 # error The setjmp approach must be enabled for the recompiler.
    9995#endif
    10096
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdRecompiler.cpp

    r108260 r108278  
    9999# error The data TLB must be enabled for the recompiler.
    100100#endif
    101 
    102 #ifndef IEM_WITH_SETJMP
    103 # error The setjmp approach must be enabled for the recompiler.
    104 #endif
    105 
    106101
    107102
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllMem-x86.cpp

    r108260 r108278  
    662662}
    663663
    664 #ifdef IEM_WITH_SETJMP
    665664
    666665/**
     
    11021101}
    11031102
    1104 #endif /* IEM_WITH_SETJMP */
    11051103
    11061104
     
    13741372
    13751373
    1376 #ifdef IEM_WITH_SETJMP
    13771374/**
    13781375 * Stores a data dqword, SSE aligned.
     
    13971394    Log5(("IEM WR dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
    13981395}
    1399 #endif
    14001396
    14011397
     
    14301426
    14311427
    1432 #ifdef IEM_WITH_SETJMP
    14331428/**
    14341429 * Stores a data dqword, longjmp on error.
     
    14531448    Log5(("IEM WR qqword %d|%RGv: %.32Rhxs\n", iSegReg, GCPtrMem, pu256Dst));
    14541449}
    1455 #endif
    14561450
    14571451
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllMemRWTmpl-x86.cpp.h

    r108226 r108278  
    5151 * Standard fetch function.
    5252 *
    53  * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
    54  * is defined.
     53 * This is used by CImpl code.
    5554 */
    5655VBOXSTRICTRC RT_CONCAT(iemMemFetchData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puDst,
     
    7271
    7372
    74 #ifdef IEM_WITH_SETJMP
    7573/**
    7674 * Safe/fallback fetch function that longjmps on error.
    7775 */
    78 # ifdef TMPL_MEM_BY_REF
     76#ifdef TMPL_MEM_BY_REF
    7977void
    8078RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pDst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    8179{
    82 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     80# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    8381    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
    84 #  endif
     82# endif
    8583    uint8_t              bUnmapInfo;
    8684    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
     
    9088    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pDst));
    9189}
    92 # else /* !TMPL_MEM_BY_REF */
     90#else /* !TMPL_MEM_BY_REF */
    9391TMPL_MEM_TYPE
    9492RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    9593{
    96 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     94# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    9795    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
    98 #  endif
     96# endif
    9997    uint8_t              bUnmapInfo;
    10098    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapSafeJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
     
    105103    return uRet;
    106104}
    107 # endif /* !TMPL_MEM_BY_REF */
    108 #endif /* IEM_WITH_SETJMP */
     105#endif /* !TMPL_MEM_BY_REF */
    109106
    110107
     
    113110 * Standard store function.
    114111 *
    115  * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
    116  * is defined.
     112 * This is used by CImpl code.
    117113 */
    118114VBOXSTRICTRC RT_CONCAT(iemMemStoreData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
     
    146142
    147143
    148 #ifdef IEM_WITH_SETJMP
    149144/**
    150145 * Stores a data byte, longjmp on error.
     
    163158#endif
    164159{
    165 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    166     pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
    167 # endif
     160#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     161    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
     162#endif
    168163#ifdef TMPL_MEM_BY_REF
    169164    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
     
    181176    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    182177}
    183 #endif /* IEM_WITH_SETJMP */
    184 
    185 
    186 #ifdef IEM_WITH_SETJMP
     178
    187179
    188180/**
     
    200192                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    201193{
    202 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    203     pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
    204 # endif
     194#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     195    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
     196#endif
    205197    Log8(("IEM AT/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    206198    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
     
    224216                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    225217{
    226 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    227     pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
    228 # endif
     218#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     219    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
     220#endif
    229221    Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    230222    *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
     
    248240                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    249241{
    250 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    251     pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
    252 # endif
     242#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     243    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
     244#endif
    253245    Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    254246    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */
     
    272264                                                     uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    273265{
    274 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    275     pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
    276 # endif
     266#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     267    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
     268#endif
    277269    Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
    278270    *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
     
    280272                                             IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
    281273}
    282 
    283 #endif /* IEM_WITH_SETJMP */
    284274
    285275
     
    483473
    484474
    485 # ifdef IEM_WITH_SETJMP
    486 
    487475/**
    488476 * Safe/fallback stack store function that longjmps on error.
     
    491479                                                           TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    492480{
    493 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    494     pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
    495 #  endif
     481# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     482    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
     483# endif
    496484
    497485    uint8_t        bUnmapInfo;
     
    505493
    506494
    507 #  ifdef TMPL_WITH_PUSH_SREG
     495# ifdef TMPL_WITH_PUSH_SREG
    508496/**
    509497 * Safe/fallback stack SREG store function that longjmps on error.
     
    512500                                                               TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    513501{
    514 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    515     pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
    516 # endif
     502#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     503    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
     504#  endif
    517505
    518506    /* bs3-cpu-weird-1 explores this instruction. AMD 3990X does it by the book,
     
    558546    }
    559547}
    560 #  endif /* TMPL_WITH_PUSH_SREG */
     548# endif /* TMPL_WITH_PUSH_SREG */
    561549
    562550
     
    640628}
    641629
    642 #  ifdef TMPL_WITH_PUSH_SREG
     630# ifdef TMPL_WITH_PUSH_SREG
    643631/**
    644632 * Safe/fallback stack push function that longjmps on error.
     
    646634void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    647635{
    648 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    649     pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
    650 # endif
     636#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     637    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
     638#  endif
    651639
    652640    /* Decrement the stack pointer (prep). */
     
    676664    pVCpu->cpum.GstCtx.rsp = uNewRsp;
    677665}
    678 #  endif /* TMPL_WITH_PUSH_SREG */
    679 
    680 # endif /* IEM_WITH_SETJMP */
     666# endif /* TMPL_WITH_PUSH_SREG */
    681667
    682668#endif /* TMPL_MEM_WITH_STACK */
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllMemRWTmplInline-x86.cpp.h

    r108262 r108278  
    7878
    7979
    80 #ifdef IEM_WITH_SETJMP
    81 
    82 
    8380/*********************************************************************************************************************************
    8481*   Fetches                                                                                                                      *
     
    9996{
    10097    AssertCompile(sizeof(TMPL_MEM_TYPE) == TMPL_MEM_TYPE_SIZE);
    101 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     98#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    10299    /*
    103100     * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
    104101     */
    105102    RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
    106 #  if TMPL_MEM_TYPE_SIZE > 1
     103# if TMPL_MEM_TYPE_SIZE > 1
    107104    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
    108 #  endif
     105# endif
    109106    {
    110107        /*
     
    128125                 * Fetch and return the data.
    129126                 */
    130 #  ifdef IEM_WITH_TLB_STATISTICS
    131                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    132 #  endif
    133                 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    134                 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
    135 #  ifdef TMPL_MEM_BY_REF
     127# ifdef IEM_WITH_TLB_STATISTICS
     128                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     129# endif
     130                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
     131                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     132# ifdef TMPL_MEM_BY_REF
    136133                *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
    137134                LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
    138135                                         iSegReg, GCPtrMem, GCPtrEff, pValue));
    139136                return;
    140 #  else
     137# else
    141138                TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
    142139                LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE "\n",
    143140                                         iSegReg, GCPtrMem, GCPtrEff, uRet));
    144141                return uRet;
    145 #  endif
     142# endif
    146143            }
    147144        }
     
    151148       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    152149    LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
    153 # endif
    154 # ifdef TMPL_MEM_BY_REF
     150#endif
     151#ifdef TMPL_MEM_BY_REF
    155152    RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, iSegReg, GCPtrMem);
    156 # else
     153#else
    157154    return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem);
    158 # endif
     155#endif
    159156}
    160157
     
    163160 * Inlined flat addressing fetch function that longjumps on error.
    164161 */
    165 # ifdef TMPL_MEM_BY_REF
     162#ifdef TMPL_MEM_BY_REF
    166163DECL_INLINE_THROW(void)
    167164RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pValue, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    168 # else
     165#else
    169166DECL_INLINE_THROW(TMPL_MEM_TYPE)
    170167RT_CONCAT3(iemMemFlatFetchData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
     168#endif
     169{
     170    AssertMsg(   (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
     171              || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
     172              || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
     173#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     174    /*
     175     * Check that it doesn't cross a page boundrary.
     176     */
     177# if TMPL_MEM_TYPE_SIZE > 1
     178    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
     179# endif
     180    {
     181        /*
     182         * TLB lookup.
     183         */
     184        uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
     185        PCIEMTLBENTRY  pTlbe     = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
     186        if (RT_LIKELY(   pTlbe->uTag               == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
     187                      || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
     188        {
     189            /*
     190             * Check TLB page table level access flags.
     191             */
     192            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
     193            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
     194            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
     195                                                         | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3  | fNoUser))
     196                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
     197            {
     198                /*
     199                 * Fetch and return the dword
     200                 */
     201# ifdef IEM_WITH_TLB_STATISTICS
     202                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     203# endif
     204                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
     205                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     206# ifdef TMPL_MEM_BY_REF
     207                *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
     208                LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
     209                                         GCPtrMem, pValue));
     210                return;
     211# else
     212                TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
     213                LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uRet));
     214                return uRet;
     215# endif
     216            }
     217        }
     218    }
     219
     220    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
     221       outdated page pointer, or other troubles.  (This will do a TLB load.) */
     222    LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
     223#endif
     224#ifdef TMPL_MEM_BY_REF
     225    RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, UINT8_MAX, GCPtrMem);
     226#else
     227    return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem);
     228#endif
     229}
     230
     231
     232/*********************************************************************************************************************************
     233*   Stores                                                                                                                       *
     234*********************************************************************************************************************************/
     235#ifndef TMPL_MEM_NO_STORE
     236
     237/**
     238 * Inlined store function that longjumps on error.
     239 *
     240 * @note The @a iSegRef is not allowed to be UINT8_MAX!
     241 */
     242DECL_INLINE_THROW(void)
     243RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
     244# ifdef TMPL_MEM_BY_REF
     245                                                 TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
     246# else
     247                                                 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
     248# endif
     249{
     250# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     251    /*
     252     * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
     253     */
     254    RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
     255# if TMPL_MEM_TYPE_SIZE > 1
     256    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
     257# endif
     258    {
     259        /*
     260         * TLB lookup.
     261         */
     262        uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
     263        PCIEMTLBENTRY  pTlbe     = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
     264        if (RT_LIKELY(   pTlbe->uTag               == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
     265                      || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
     266        {
     267            /*
     268             * Check TLB page table level access flags.
     269             */
     270            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
     271            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
     272            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
     273                                                         | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY   | IEMTLBE_F_PT_NO_WRITE
     274                                                         | IEMTLBE_F_NO_MAPPINGR3   | fNoUser))
     275                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
     276            {
     277                /*
     278                 * Store the value and return.
     279                 */
     280#  ifdef IEM_WITH_TLB_STATISTICS
     281                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     282#  endif
     283                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
     284                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     285#  ifdef TMPL_MEM_BY_REF
     286                *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = *pValue;
     287                Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs (%04x:%RX64)\n",
     288                                          iSegReg, GCPtrMem, GCPtrEff, pValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
     289#  else
     290                *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
     291                Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE " (%04x:%RX64)\n",
     292                                          iSegReg, GCPtrMem, GCPtrEff, uValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
     293#  endif
     294                return;
     295            }
     296        }
     297    }
     298
     299    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
     300       outdated page pointer, or other troubles.  (This will do a TLB load.) */
     301    Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
     302# endif
     303# ifdef TMPL_MEM_BY_REF
     304    RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, pValue);
     305# else
     306    RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, uValue);
     307# endif
     308}
     309
     310
     311/**
     312 * Inlined flat addressing store function that longjumps on error.
     313 */
     314DECL_INLINE_THROW(void)
     315RT_CONCAT3(iemMemFlatStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
     316# ifdef TMPL_MEM_BY_REF
     317                                                    TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
     318# else
     319                                                    TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    171320# endif
    172321{
     
    195344            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
    196345            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
    197             if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
    198                                                          | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3  | fNoUser))
    199                           == pVCpu->iem.s.DataTlb.uTlbPhysRev))
    200             {
    201                 /*
    202                  * Fetch and return the dword
    203                  */
    204 #  ifdef IEM_WITH_TLB_STATISTICS
    205                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    206 #  endif
    207                 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    208                 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
    209 #  ifdef TMPL_MEM_BY_REF
    210                 *pValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
    211                 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
    212                                          GCPtrMem, pValue));
    213                 return;
    214 #  else
    215                 TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
    216                 LogEx(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uRet));
    217                 return uRet;
    218 #  endif
    219             }
    220         }
    221     }
    222 
    223     /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
    224        outdated page pointer, or other troubles.  (This will do a TLB load.) */
    225     LogEx(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
    226 # endif
    227 # ifdef TMPL_MEM_BY_REF
    228     RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, pValue, UINT8_MAX, GCPtrMem);
    229 # else
    230     return RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem);
    231 # endif
    232 }
    233 
    234 
    235 /*********************************************************************************************************************************
    236 *   Stores                                                                                                                       *
    237 *********************************************************************************************************************************/
    238 # ifndef TMPL_MEM_NO_STORE
    239 
    240 /**
    241  * Inlined store function that longjumps on error.
    242  *
    243  * @note The @a iSegRef is not allowed to be UINT8_MAX!
    244  */
    245 DECL_INLINE_THROW(void)
    246 RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
    247 #  ifdef TMPL_MEM_BY_REF
    248                                                  TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
    249 #  else
    250                                                  TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    251 #  endif
    252 {
    253 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    254     /*
    255      * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
    256      */
    257     RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
    258 #  if TMPL_MEM_TYPE_SIZE > 1
    259     if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
    260 #  endif
    261     {
    262         /*
    263          * TLB lookup.
    264          */
    265         uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrEff);
    266         PCIEMTLBENTRY  pTlbe     = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
    267         if (RT_LIKELY(   pTlbe->uTag               == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
    268                       || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
    269         {
    270             /*
    271              * Check TLB page table level access flags.
    272              */
    273             AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
    274             uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
    275346            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
    276347                                                         | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY   | IEMTLBE_F_PT_NO_WRITE
     
    281352                 * Store the value and return.
    282353                 */
    283 #   ifdef IEM_WITH_TLB_STATISTICS
    284                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    285 #   endif
    286                 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    287                 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
    288 #   ifdef TMPL_MEM_BY_REF
    289                 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = *pValue;
    290                 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs (%04x:%RX64)\n",
    291                                           iSegReg, GCPtrMem, GCPtrEff, pValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    292 #   else
    293                 *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
    294                 Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv=%RGv: " TMPL_MEM_FMT_TYPE " (%04x:%RX64)\n",
    295                                           iSegReg, GCPtrMem, GCPtrEff, uValue, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    296 #   endif
    297                 return;
    298             }
    299         }
    300     }
    301 
    302     /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
    303        outdated page pointer, or other troubles.  (This will do a TLB load.) */
    304     Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
    305 #  endif
     354#  ifdef IEM_WITH_TLB_STATISTICS
     355                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     356#  endif
     357                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
     358                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
    306359#  ifdef TMPL_MEM_BY_REF
    307     RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, pValue);
    308 #  else
    309     RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iSegReg, GCPtrMem, uValue);
    310 #  endif
    311 }
    312 
    313 
    314 /**
    315  * Inlined flat addressing store function that longjumps on error.
    316  */
    317 DECL_INLINE_THROW(void)
    318 RT_CONCAT3(iemMemFlatStoreData,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
    319 #  ifdef TMPL_MEM_BY_REF
    320                                                     TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
    321 #  else
    322                                                     TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    323 #  endif
    324 {
    325     AssertMsg(   (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT
    326               || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT
    327               || (pVCpu->iem.s.fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT, ("%#x\n", pVCpu->iem.s.fExec));
    328 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    329     /*
    330      * Check that it doesn't cross a page boundrary.
    331      */
    332 #  if TMPL_MEM_TYPE_SIZE > 1
    333     if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
    334 #  endif
    335     {
    336         /*
    337          * TLB lookup.
    338          */
    339         uint64_t const uTagNoRev = IEMTLB_CALC_TAG_NO_REV(GCPtrMem);
    340         PCIEMTLBENTRY  pTlbe     = IEMTLB_TAG_TO_EVEN_ENTRY(&pVCpu->iem.s.DataTlb, uTagNoRev);
    341         if (RT_LIKELY(   pTlbe->uTag               == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)
    342                       || (pTlbe = pTlbe + 1)->uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)))
    343         {
    344             /*
    345              * Check TLB page table level access flags.
    346              */
    347             AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
    348             uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
    349             if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
    350                                                          | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY   | IEMTLBE_F_PT_NO_WRITE
    351                                                          | IEMTLBE_F_NO_MAPPINGR3   | fNoUser))
    352                           == pVCpu->iem.s.DataTlb.uTlbPhysRev))
    353             {
    354                 /*
    355                  * Store the value and return.
    356                  */
    357 #   ifdef IEM_WITH_TLB_STATISTICS
    358                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    359 #   endif
    360                 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    361                 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
    362 #   ifdef TMPL_MEM_BY_REF
    363360                *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = *pValue;
    364361                Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: %." RT_XSTR(TMPL_MEM_TYPE_SIZE) "Rhxs\n",
    365362                                          GCPtrMem, pValue));
    366 #   else
     363#  else
    367364                *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
    368365                Log5Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " %RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
    369 #   endif
     366#  endif
    370367                return;
    371368            }
     
    376373       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    377374    Log6Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
    378 #  endif
    379 #  ifdef TMPL_MEM_BY_REF
     375# endif
     376# ifdef TMPL_MEM_BY_REF
    380377    RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, pValue);
    381 #  else
     378# else
    382379    RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, UINT8_MAX, GCPtrMem, uValue);
    383 #  endif
    384 }
    385 
    386 # endif /* !TMPL_MEM_NO_STORE */
     380# endif
     381}
     382
     383#endif /* !TMPL_MEM_NO_STORE */
    387384
    388385
     
    390387*   Mapping / Direct Memory Access                                                                                               *
    391388*********************************************************************************************************************************/
    392 # ifndef TMPL_MEM_NO_MAPPING
     389#ifndef TMPL_MEM_NO_MAPPING
    393390
    394391/**
     
    401398                                                 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    402399{
    403 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     400# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    404401    /*
    405402     * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
     
    432429                 * Return the address.
    433430                 */
    434 #   ifdef IEM_WITH_TLB_STATISTICS
    435                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    436 #   endif
     431#  ifdef IEM_WITH_TLB_STATISTICS
     432                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     433#  endif
    437434                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    438435                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    448445       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    449446    Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
    450 #  endif
     447# endif
    451448    return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
    452449}
     
    462459                                                     RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    463460{
    464 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     461# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    465462    /*
    466463     * Check that the address doesn't cross a page boundrary.
     
    492489                 * Return the address.
    493490                 */
    494 #   ifdef IEM_WITH_TLB_STATISTICS
    495                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    496 #   endif
     491#  ifdef IEM_WITH_TLB_STATISTICS
     492                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     493#  endif
    497494                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    498495                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    508505       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    509506    Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
    510 #  endif
     507# endif
    511508    return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
    512509}
    513510
    514 #  ifdef TMPL_MEM_WITH_ATOMIC_MAPPING
     511# ifdef TMPL_MEM_WITH_ATOMIC_MAPPING
    515512
    516513/**
     
    528525     */
    529526    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
    530 #    if TMPL_MEM_TYPE_SIZE > 1
     527#   if TMPL_MEM_TYPE_SIZE > 1
    531528    if (RT_LIKELY(!(GCPtrEff & TMPL_MEM_TYPE_ALIGN))) /* strictly aligned, otherwise do fall back which knows th details. */
    532 #    endif
     529#   endif
    533530    {
    534531        /*
     
    588585     * Check that the address doesn't cross a page boundrary.
    589586     */
    590 #    if TMPL_MEM_TYPE_SIZE > 1
     587#   if TMPL_MEM_TYPE_SIZE > 1
    591588    if (RT_LIKELY(!(GCPtrMem & TMPL_MEM_TYPE_ALIGN))) /* strictly aligned, otherwise do fall back which knows th details. */
    592 #    endif
     589#   endif
    593590    {
    594591        /*
     
    634631}
    635632
    636 #  endif /* TMPL_MEM_WITH_ATOMIC_MAPPING */
     633# endif /* TMPL_MEM_WITH_ATOMIC_MAPPING */
    637634
    638635/**
     
    643640                                                 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    644641{
    645 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     642# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    646643    /*
    647644     * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
     
    674671                 * Return the address.
    675672                 */
    676 #   ifdef IEM_WITH_TLB_STATISTICS
    677                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    678 #   endif
     673#  ifdef IEM_WITH_TLB_STATISTICS
     674                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     675#  endif
    679676                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    680677                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    690687       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    691688    Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
    692 #  endif
     689# endif
    693690    return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
    694691}
     
    702699                                                     RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    703700{
    704 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     701# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    705702    /*
    706703     * Check that the address doesn't cross a page boundrary.
     
    732729                 * Return the address.
    733730                 */
    734 #   ifdef IEM_WITH_TLB_STATISTICS
    735                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    736 #   endif
     731#  ifdef IEM_WITH_TLB_STATISTICS
     732                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     733#  endif
    737734                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    738735                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    748745       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    749746    Log8Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
    750 #  endif
     747# endif
    751748    return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
    752749}
     
    760757                                                 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    761758{
    762 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     759# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    763760    /*
    764761     * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
     
    767764#  if TMPL_MEM_TYPE_SIZE > 1
    768765    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
    769 #endif
     766#  endif
    770767    {
    771768        /*
     
    790787                 * Return the address.
    791788                 */
    792 #   ifdef IEM_WITH_TLB_STATISTICS
    793                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    794 #   endif
     789#  ifdef IEM_WITH_TLB_STATISTICS
     790                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     791#  endif
    795792                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    796793                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    806803       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    807804    Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %u:%RGv falling back\n", LOG_FN_NAME, iSegReg, GCPtrMem));
    808 #  endif
     805# endif
    809806    return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
    810807}
     
    818815                                                     RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    819816{
    820 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     817# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    821818    /*
    822819     * Check that the address doesn't cross a page boundrary.
     
    863860       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    864861    Log4Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
    865 #  endif
     862# endif
    866863    return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
    867864}
    868865
    869 # endif /* !TMPL_MEM_NO_MAPPING */
     866#endif /* !TMPL_MEM_NO_MAPPING */
    870867
    871868
     
    873870*   Stack Access                                                                                                                 *
    874871*********************************************************************************************************************************/
    875 # ifdef TMPL_MEM_WITH_STACK
    876 #  if TMPL_MEM_TYPE_SIZE > 8
    877 #   error "Stack not supported for this type size - please #undef TMPL_MEM_WITH_STACK"
    878 #  endif
    879 #  if TMPL_MEM_TYPE_SIZE > 1 && TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
    880 #   error "Stack not supported for this alignment size - please #undef TMPL_MEM_WITH_STACK"
    881 #  endif
    882 #  ifdef IEM_WITH_SETJMP
     872#ifdef TMPL_MEM_WITH_STACK
     873# if TMPL_MEM_TYPE_SIZE > 8
     874#  error "Stack not supported for this type size - please #undef TMPL_MEM_WITH_STACK"
     875# endif
     876# if TMPL_MEM_TYPE_SIZE > 1 && TMPL_MEM_TYPE_ALIGN + 1 < TMPL_MEM_TYPE_SIZE
     877#  error "Stack not supported for this alignment size - please #undef TMPL_MEM_WITH_STACK"
     878# endif
    883879
    884880/**
     
    888884RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    889885{
    890 #   if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     886# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    891887    /*
    892888     * Apply segmentation and check that the item doesn't cross a page boundrary.
    893889     */
    894890    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
    895 #    if TMPL_MEM_TYPE_SIZE > 1
     891#  if TMPL_MEM_TYPE_SIZE > 1
    896892    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
    897 #    endif
     893#  endif
    898894    {
    899895        /*
     
    919915                 * Do the store and return.
    920916                 */
    921 #    ifdef IEM_WITH_TLB_STATISTICS
    922                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    923 #    endif
     917#  ifdef IEM_WITH_TLB_STATISTICS
     918                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     919#  endif
    924920                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    925921                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    934930       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    935931    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
    936 #   endif
     932# endif
    937933    RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
    938934}
    939935
    940936
    941 #   ifdef TMPL_WITH_PUSH_SREG
     937# ifdef TMPL_WITH_PUSH_SREG
    942938/**
    943939 * Stack segment store function that longjmps on error.
     
    950946                                                      TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    951947{
    952 #    if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     948#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    953949    /*
    954950     * Apply segmentation to the address and check that the item doesn't cross
     
    956952     */
    957953    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
    958 #     if TMPL_MEM_TYPE_SIZE > 1
     954#   if TMPL_MEM_TYPE_SIZE > 1
    959955    if (RT_LIKELY(   !(GCPtrEff & (sizeof(uint16_t) - 1U))
    960956                  || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
    961 #     endif
     957#   endif
    962958    {
    963959        /*
     
    983979                 * Do the push and return.
    984980                 */
    985 #      ifdef IEM_WITH_TLB_STATISTICS
    986                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    987 #      endif
     981#   ifdef IEM_WITH_TLB_STATISTICS
     982                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     983#   endif
    988984                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    989985                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    998994       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    999995    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
    1000 #    endif
     996#  endif
    1001997    RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
    1002998}
    1003 #   endif /* TMPL_WITH_PUSH_SREG */
     999# endif /* TMPL_WITH_PUSH_SREG */
    10041000
    10051001
     
    10171013                && pVCpu->cpum.GstCtx.ss.u64Base == 0));
    10181014
    1019 #   if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     1015# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    10201016    /*
    10211017     * Check that the item doesn't cross a page boundrary.
    10221018     */
    1023 #    if TMPL_MEM_TYPE_SIZE > 1
     1019#  if TMPL_MEM_TYPE_SIZE > 1
    10241020    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
    1025 #    endif
     1021#  endif
    10261022    {
    10271023        /*
     
    10471043                 * Do the push and return.
    10481044                 */
    1049 #    ifdef IEM_WITH_TLB_STATISTICS
    1050                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    1051 #    endif
     1045#  ifdef IEM_WITH_TLB_STATISTICS
     1046                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1047#  endif
    10521048                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    10531049                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    10631059       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    10641060    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
    1065 #   endif
     1061# endif
    10661062    RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
    10671063}
    10681064
    1069 #   ifdef TMPL_WITH_PUSH_SREG
     1065# ifdef TMPL_WITH_PUSH_SREG
    10701066/**
    10711067 * Flat stack segment store function that longjmps on error.
     
    10781074                                                          TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    10791075{
    1080 #    if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     1076#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    10811077    /*
    10821078     * Check that the item doesn't cross a page boundrary.
     
    11071103                 * Do the push and return.
    11081104                 */
    1109 #     ifdef IEM_WITH_TLB_STATISTICS
    1110                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    1111 #     endif
     1105#   ifdef IEM_WITH_TLB_STATISTICS
     1106                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1107#   endif
    11121108                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    11131109                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    11231119       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    11241120    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
    1125 #    endif
     1121#  endif
    11261122    RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
    11271123}
    1128 #   endif /* TMPL_WITH_PUSH_SREG */
     1124# endif /* TMPL_WITH_PUSH_SREG */
    11291125
    11301126
     
    11351131RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    11361132{
    1137 #   if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     1133# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    11381134    /*
    11391135     * Apply segmentation to the address and check that the item doesn't cross
     
    11411137     */
    11421138    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
    1143 #    if TMPL_MEM_TYPE_SIZE > 1
     1139#  if TMPL_MEM_TYPE_SIZE > 1
    11441140    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
    1145 #    endif
     1141#  endif
    11461142    {
    11471143        /*
     
    11661162                 * Do the pop.
    11671163                 */
    1168 #    ifdef IEM_WITH_TLB_STATISTICS
    1169                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    1170 #    endif
     1164#  ifdef IEM_WITH_TLB_STATISTICS
     1165                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1166#  endif
    11711167                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    11721168                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    11811177       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    11821178    Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
    1183 #   endif
     1179# endif
    11841180    return RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem);
    11851181}
     
    11921188RT_CONCAT3(iemMemFlatFetchStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    11931189{
    1194 #   if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     1190# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    11951191    /*
    11961192     * Check that the item doesn't cross a page boundrary.
    11971193     */
    1198 #    if TMPL_MEM_TYPE_SIZE > 1
     1194#  if TMPL_MEM_TYPE_SIZE > 1
    11991195    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
    1200 #    endif
     1196#  endif
    12011197    {
    12021198        /*
     
    12211217                 * Do the pop.
    12221218                 */
    1223 #    ifdef IEM_WITH_TLB_STATISTICS
    1224                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    1225 #    endif
     1219#  ifdef IEM_WITH_TLB_STATISTICS
     1220                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1221#  endif
    12261222                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    12271223                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    12361232       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    12371233    Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
    1238 #   endif
     1234# endif
    12391235    return RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem);
    12401236}
     
    12471243RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    12481244{
    1249 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     1245# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    12501246    /*
    12511247     * Decrement the stack pointer (prep), apply segmentation and check that
     
    12811277                 * Do the push and return.
    12821278                 */
     1283 #   ifdef IEM_WITH_TLB_STATISTICS
    1284                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1285 #   endif
      1279#  ifdef IEM_WITH_TLB_STATISTICS
     1280                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
      1281#  endif
    12861282                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    12871283                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    12981294       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    12991295    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
    1300 #  endif
     1296# endif
    13011297    RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
    13021298}
     
    13111307    Assert(iGReg < 16);
    13121308
    1313 #  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     1309# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    13141310    /*
    13151311     * Increment the stack pointer (prep), apply segmentation and check that
     
    13681364       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    13691365    Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
    1370 #  endif
     1366# endif
    13711367    RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
    13721368}
    13731369
    1374 #   ifdef TMPL_WITH_PUSH_SREG
     1370# ifdef TMPL_WITH_PUSH_SREG
    13751371/**
    13761372 * Stack segment push function that longjmps on error.
     
    13821378RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    13831379{
     1384 #    if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
      1380#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    13851381    /* See fallback for details on this weirdness: */
    13861382    bool const    fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
     
    13941390    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
    13951391    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, cbAccess, GCPtrTop);
    1396 #     if TMPL_MEM_TYPE_SIZE > 1
     1392#   if TMPL_MEM_TYPE_SIZE > 1
    13971393    if (RT_LIKELY(   !(GCPtrEff & (cbAccess - 1U))
    13981394                  || (   cbAccess == sizeof(uint16_t)
    13991395                      ? TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t)
    14001396                      : TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ) ))
    1401 #    endif
     1397#   endif
    14021398    {
    14031399        /*
     
    14231419                 * Do the push and return.
    14241420                 */
    1425 #    ifdef IEM_WITH_TLB_STATISTICS
    1426                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    1427 #    endif
     1421#   ifdef IEM_WITH_TLB_STATISTICS
     1422                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1423#   endif
    14281424                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    14291425                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    14581454       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    14591455    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
     1460 #    endif
      1456#  endif
    14611457    RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
    14621458}
    1463 #   endif /* TMPL_WITH_PUSH_SREG */
    1464 
    1465 #   if TMPL_MEM_TYPE_SIZE != 8
     1459# endif /* TMPL_WITH_PUSH_SREG */
     1460
     1461# if TMPL_MEM_TYPE_SIZE != 8
    14661462
    14671463/**
     
    14751471           && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
    14761472           && pVCpu->cpum.GstCtx.ss.u64Base == 0);
     1477 #    if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
      1473#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    14781474    /*
    14791475     * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
    14801476     */
    14811477    uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
    1482 #     if TMPL_MEM_TYPE_SIZE > 1
     1478#   if TMPL_MEM_TYPE_SIZE > 1
    14831479    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewEsp)))
    1484 #     endif
     1480#   endif
    14851481    {
    14861482        /*
     
    15061502                 * Do the push and return.
    15071503                 */
    1508 #     ifdef IEM_WITH_TLB_STATISTICS
    1509                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    1510 #     endif
     1504#   ifdef IEM_WITH_TLB_STATISTICS
     1505                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1506#   endif
    15111507                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    15121508                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    15231519       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    15241520    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
     1525 #    endif
      1521#  endif
    15261522    RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
    15271523}
     
    15351531{
    15361532    Assert(iGReg < 16);
     1537 #    if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
      1533#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    15381534    /*
    15391535     * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
    15401536     */
    15411537    uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
    1542 #     if TMPL_MEM_TYPE_SIZE > 1
     1538#   if TMPL_MEM_TYPE_SIZE > 1
    15431539    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldEsp)))
    1544 #     endif
     1540#   endif
    15451541    {
    15461542        /*
     
    15651561                 * Do the pop and update the register values.
    15661562                 */
    1567 #     ifdef IEM_WITH_TLB_STATISTICS
    1568                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    1569 #     endif
     1563#   ifdef IEM_WITH_TLB_STATISTICS
     1564                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1565#   endif
    15701566                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    15711567                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
    15721568                TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
    15731569                pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE); /* must be first for 16-bit */
    1574 #     if TMPL_MEM_TYPE_SIZE == 2
     1570#   if TMPL_MEM_TYPE_SIZE == 2
    15751571                pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
    1576 #     elif TMPL_MEM_TYPE_SIZE == 4
     1572#   elif TMPL_MEM_TYPE_SIZE == 4
    15771573                pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = uValue;
    1578 #     else
    1579 #      error "TMPL_MEM_TYPE_SIZE"
    1580 #     endif
     1574#   else
     1575#    error "TMPL_MEM_TYPE_SIZE"
     1576#   endif
    15811577                Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE " (r%u)\n",
    15821578                                          uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
     
    15891585       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    15901586    Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uOldEsp));
     1591 #    endif
      1587#  endif
    15921588    RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
    15931589}
    15941590
    1595 #   endif /* TMPL_MEM_TYPE_SIZE != 8*/
    1596 
    1597 #   ifdef TMPL_WITH_PUSH_SREG
     1591# endif /* TMPL_MEM_TYPE_SIZE != 8*/
     1592
     1593# ifdef TMPL_WITH_PUSH_SREG
    15981594/**
    15991595 * 32-bit flat stack segment push function that longjmps on error.
     
    16051601RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    16061602{
     1607 #    if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
      1603#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    16081604    /* See fallback for details on this weirdness: */
    16091605    bool const    fIsIntel = IEM_IS_GUEST_CPU_INTEL(pVCpu);
     
    16411637                 * Do the push and return.
    16421638                 */
    1643 #     ifdef IEM_WITH_TLB_STATISTICS
    1644                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    1645 #     endif
     1639#   ifdef IEM_WITH_TLB_STATISTICS
     1640                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1641#   endif
    16461642                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    16471643                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    16761672       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    16771673    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX32 falling back\n", LOG_FN_NAME, uNewEsp));
     1678 #    endif
      1674#  endif
    16791675    RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
    16801676}
    1681 #   endif /* TMPL_WITH_PUSH_SREG */
    1682 
    1683 #   if TMPL_MEM_TYPE_SIZE != 4
     1677# endif /* TMPL_WITH_PUSH_SREG */
     1678
     1679# if TMPL_MEM_TYPE_SIZE != 4
    16841680
    16851681/**
     
    16891685RT_CONCAT3(iemMemFlat64StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
    16901686{
     1691 #    if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
      1687#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    16921688    /*
    16931689     * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
    16941690     */
    16951691    uint64_t const uNewRsp = pVCpu->cpum.GstCtx.rsp - sizeof(TMPL_MEM_TYPE);
    1696 #     if TMPL_MEM_TYPE_SIZE > 1
     1692#   if TMPL_MEM_TYPE_SIZE > 1
    16971693    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uNewRsp)))
    1698 #     endif
     1694#   endif
    16991695    {
    17001696        /*
     
    17201716                 * Do the push and return.
    17211717                 */
    1722 #     ifdef IEM_WITH_TLB_STATISTICS
    1723                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    1724 #     endif
     1718#   ifdef IEM_WITH_TLB_STATISTICS
     1719                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1720#   endif
    17251721                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    17261722                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     
    17371733       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    17381734    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uNewRsp));
     1739 #    endif
      1735#  endif
    17401736    RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
    17411737}
     
    17491745{
    17501746    Assert(iGReg < 16);
     1751 #    if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
      1747#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
    17521748    /*
    17531749     * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
    17541750     */
    17551751    uint64_t const uOldRsp = pVCpu->cpum.GstCtx.rsp;
    1756 #     if TMPL_MEM_TYPE_SIZE > 1
     1752#   if TMPL_MEM_TYPE_SIZE > 1
    17571753    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(uOldRsp)))
    1758 #     endif
     1754#   endif
    17591755    {
    17601756        /*
     
    17791775                 * Do the push and return.
    17801776                 */
    1781 #     ifdef IEM_WITH_TLB_STATISTICS
    1782                 pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
    1783 #     endif
     1777#   ifdef IEM_WITH_TLB_STATISTICS
     1778                pVCpu->iem.s.DataTlb.cTlbInlineCodeHits++;
     1779#   endif
    17841780                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
    17851781                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
    17861782                TMPL_MEM_TYPE const uValue = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldRsp & GUEST_PAGE_OFFSET_MASK];
    17871783                pVCpu->cpum.GstCtx.rsp = uOldRsp + sizeof(TMPL_MEM_TYPE);  /* must be first for 16-bit */
    1788 #     if TMPL_MEM_TYPE_SIZE == 2
     1784#   if TMPL_MEM_TYPE_SIZE == 2
    17891785                pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
    1790 #     elif TMPL_MEM_TYPE_SIZE == 8
     1786#   elif TMPL_MEM_TYPE_SIZE == 8
    17911787                pVCpu->cpum.GstCtx.aGRegs[iGReg].u   = uValue;
    1792 #     else
    1793 #      error "TMPL_MEM_TYPE_SIZE"
    1794 #     endif
     1788#   else
     1789#    error "TMPL_MEM_TYPE_SIZE"
     1790#   endif
    17951791                Log9Ex(LOG_GROUP_IEM_MEM,("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX64 (->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
    17961792                                          uOldRsp, uOldRsp + sizeof(TMPL_MEM_TYPE), uValue, iGReg));
     
    18031799       outdated page pointer, or other troubles.  (This will do a TLB load.) */
    18041800    Log10Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RX64 falling back\n", LOG_FN_NAME, uOldRsp));
     1805 #    endif
      1801#  endif
    18061802    RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, iGReg);
    18071803}
    18081804
    1809 #   endif /* TMPL_MEM_TYPE_SIZE != 4 */
    1810 
    1811 #  endif /* IEM_WITH_SETJMP */
    1812 # endif /* TMPL_MEM_WITH_STACK */
    1813 
    1814 
    1815 #endif /* IEM_WITH_SETJMP */
     1805# endif /* TMPL_MEM_TYPE_SIZE != 4 */
     1806
     1807#endif /* TMPL_MEM_WITH_STACK */
     1808
    18161809
    18171810#undef TMPL_MEM_TYPE
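
All of the inline stack helpers reindented above share one shape: apply the alignment and segmentation checks, probe the data TLB, and take the direct read or write through the host mapping only on a clean hit; anything else drops to the RT_CONCAT3(...,SafeJmp) fallback, which does the full translation and may longjmp. A minimal standalone sketch of that shape, using hypothetical names and types rather than the real IEM structures:

#include <stdint.h>
#include <stdlib.h>

#define PAGE_OFFSET_MASK 0xfffu                   /* assuming 4 KiB guest pages */

typedef struct TLBENTRY
{
    uint64_t uTagAndFlags;                        /* tag with valid/permission bits folded in */
    uint8_t *pbMapping;                           /* host mapping of the guest page, or NULL */
} TLBENTRY;

/* Stand-in for the SafeJmp fallback: page walk, access checks, TLB load and
 * possibly a longjmp on #PF.  Here it simply aborts. */
static uint32_t FetchU32Slow(uint64_t GCPtr)
{
    (void)GCPtr;
    abort();
}

/* Fast path: an aligned item cannot cross the page, the TLB entry matches the
 * expected tag/rights and the page is mapped, so read through the mapping. */
static uint32_t FetchU32(TLBENTRY const *pTlbe, uint64_t uExpectedTag, uint64_t GCPtr)
{
    if (   !(GCPtr & (sizeof(uint32_t) - 1))
        && pTlbe->uTagAndFlags == uExpectedTag
        && pTlbe->pbMapping != NULL)
        return *(uint32_t const *)&pTlbe->pbMapping[GCPtr & PAGE_OFFSET_MASK];
    return FetchU32Slow(GCPtr);
}

The real helpers additionally bump DataTlb.cTlbInlineCodeHits when IEM_WITH_TLB_STATISTICS is defined and keep the whole fast path ring-3 only via the IN_RING3 check.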
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veRecompFuncs-x86.h

    r108269 r108278  
    7777#ifndef IEM_WITH_DATA_TLB
    7878# error The data TLB must be enabled for the recompiler.
    79 #endif
    80 
    81 #ifndef IEM_WITH_SETJMP
    82 # error The setjmp approach must be enabled for the recompiler.
    8379#endif
    8480
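
The hunk above drops the recompiler's compile-time insistence on IEM_WITH_SETJMP, matching the removal of the non-setjmp code paths elsewhere in this changeset. For reference, setjmp-based error propagation of the kind the IEM_TRY_SETJMP / IEM_DO_LONGJMP macros expose can be sketched in plain C as follows (illustrative names only, not the actual macro expansion):

#include <setjmp.h>
#include <stdio.h>

static jmp_buf g_JmpBuf;

/* Reports failure by unwinding to the setjmp site instead of returning a status code. */
static int FetchByte(int fFail)
{
    if (fFail)
        longjmp(g_JmpBuf, -22 /* some negative, VBox-style status value */);
    return 0x90; /* NOP */
}

int main(void)
{
    int rc = setjmp(g_JmpBuf);        /* 0 on the initial pass, the longjmp value afterwards */
    if (rc == 0)
        printf("fetched %#x\n", FetchByte(1 /* force the error path */));
    else
        printf("unwound with status %d\n", rc);
    return 0;
}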
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllOpHlp-x86.cpp

    r108260 r108278  
    5959 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
    6060 *
    61  * @return  Strict VBox status code.
    62  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    63  * @param   bRm                 The ModRM byte.
    64  * @param   cbImmAndRspOffset   - First byte: The size of any immediate
    65  *                                following the effective address opcode bytes
    66  *                                (only for RIP relative addressing).
    67  *                              - Second byte: RSP displacement (for POP [ESP]).
    68  * @param   pGCPtrEff           Where to return the effective address.
    69  */
    70 VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
    71 {
    72     Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
    73 # define SET_SS_DEF() \
    74     do \
    75     { \
    76         if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
    77             pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
    78     } while (0)
    79 
    80     if (!IEM_IS_64BIT_CODE(pVCpu))
    81     {
    82 /** @todo Check the effective address size crap! */
    83         if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
    84         {
    85             uint16_t u16EffAddr;
    86 
    87             /* Handle the disp16 form with no registers first. */
    88             if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
    89                 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
    90             else
    91             {
    92                 /* Get the displacment. */
    93                 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    94                 {
    95                     case 0:  u16EffAddr = 0;                             break;
    96                     case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
    97                     case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
    98                     default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
    99                 }
    100 
    101                 /* Add the base and index registers to the disp. */
    102                 switch (bRm & X86_MODRM_RM_MASK)
    103                 {
    104                     case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
    105                     case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
    106                     case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
    107                     case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
    108                     case 4: u16EffAddr += pVCpu->cpum.GstCtx.si;            break;
    109                     case 5: u16EffAddr += pVCpu->cpum.GstCtx.di;            break;
    110                     case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp;            SET_SS_DEF(); break;
    111                     case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx;            break;
    112                 }
    113             }
    114 
    115             *pGCPtrEff = u16EffAddr;
    116         }
    117         else
    118         {
    119             Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    120             uint32_t u32EffAddr;
    121 
    122             /* Handle the disp32 form with no registers first. */
    123             if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    124                 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
    125             else
    126             {
    127                 /* Get the register (or SIB) value. */
    128                 switch ((bRm & X86_MODRM_RM_MASK))
    129                 {
    130                     case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
    131                     case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
    132                     case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
    133                     case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
    134                     case 4: /* SIB */
    135                     {
    136                         uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
    137 
    138                         /* Get the index and scale it. */
    139                         switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
    140                         {
    141                             case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
    142                             case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
    143                             case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
    144                             case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
    145                             case 4: u32EffAddr = 0; /*none */ break;
    146                             case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
    147                             case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
    148                             case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    149                             IEM_NOT_REACHED_DEFAULT_CASE_RET();
    150                         }
    151                         u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    152 
    153                         /* add base */
    154                         switch (bSib & X86_SIB_BASE_MASK)
    155                         {
    156                             case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
    157                             case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
    158                             case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
    159                             case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
    160                             case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
    161                             case 5:
    162                                 if ((bRm & X86_MODRM_MOD_MASK) != 0)
    163                                 {
    164                                     u32EffAddr += pVCpu->cpum.GstCtx.ebp;
    165                                     SET_SS_DEF();
    166                                 }
    167                                 else
    168                                 {
    169                                     uint32_t u32Disp;
    170                                     IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    171                                     u32EffAddr += u32Disp;
    172                                 }
    173                                 break;
    174                             case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
    175                             case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
    176                             IEM_NOT_REACHED_DEFAULT_CASE_RET();
    177                         }
    178                         break;
    179                     }
    180                     case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
    181                     case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
    182                     case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    183                     IEM_NOT_REACHED_DEFAULT_CASE_RET();
    184                 }
    185 
    186                 /* Get and add the displacement. */
    187                 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    188                 {
    189                     case 0:
    190                         break;
    191                     case 1:
    192                     {
    193                         int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
    194                         u32EffAddr += i8Disp;
    195                         break;
    196                     }
    197                     case 2:
    198                     {
    199                         uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    200                         u32EffAddr += u32Disp;
    201                         break;
    202                     }
    203                     default:
    204                         AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
    205                 }
    206 
    207             }
    208             Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    209             *pGCPtrEff = u32EffAddr;
    210         }
    211     }
    212     else
    213     {
    214         uint64_t u64EffAddr;
    215 
    216         /* Handle the rip+disp32 form with no registers first. */
    217         if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    218         {
    219             IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
    220             u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
    221         }
    222         else
    223         {
    224             /* Get the register (or SIB) value. */
    225             switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
    226             {
    227                 case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
    228                 case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
    229                 case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
    230                 case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
    231                 case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
    232                 case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
    233                 case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
    234                 case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
    235                 case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
    236                 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
    237                 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
    238                 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
    239                 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
    240                 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
    241                 /* SIB */
    242                 case 4:
    243                 case 12:
    244                 {
    245                     uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
    246 
    247                     /* Get the index and scale it. */
    248                     switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
    249                     {
    250                         case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
    251                         case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
    252                         case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
    253                         case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
    254                         case  4: u64EffAddr = 0; /*none */ break;
    255                         case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
    256                         case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
    257                         case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
    258                         case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
    259                         case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
    260                         case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
    261                         case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
    262                         case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
    263                         case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
    264                         case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
    265                         case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
    266                         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    267                     }
    268                     u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    269 
    270                     /* add base */
    271                     switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
    272                     {
    273                         case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
    274                         case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
    275                         case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
    276                         case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
    277                         case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
    278                         case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
    279                         case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
    280                         case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
    281                         case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
    282                         case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
    283                         case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
    284                         case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
    285                         case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
    286                         case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
    287                         /* complicated encodings */
    288                         case 5:
    289                         case 13:
    290                             if ((bRm & X86_MODRM_MOD_MASK) != 0)
    291                             {
    292                                 if (!pVCpu->iem.s.uRexB)
    293                                 {
    294                                     u64EffAddr += pVCpu->cpum.GstCtx.rbp;
    295                                     SET_SS_DEF();
    296                                 }
    297                                 else
    298                                     u64EffAddr += pVCpu->cpum.GstCtx.r13;
    299                             }
    300                             else
    301                             {
    302                                 uint32_t u32Disp;
    303                                 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    304                                 u64EffAddr += (int32_t)u32Disp;
    305                             }
    306                             break;
    307                         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    308                     }
    309                     break;
    310                 }
    311                 IEM_NOT_REACHED_DEFAULT_CASE_RET();
    312             }
    313 
    314             /* Get and add the displacement. */
    315             switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    316             {
    317                 case 0:
    318                     break;
    319                 case 1:
    320                 {
    321                     int8_t i8Disp;
    322                     IEM_OPCODE_GET_NEXT_S8(&i8Disp);
    323                     u64EffAddr += i8Disp;
    324                     break;
    325                 }
    326                 case 2:
    327                 {
    328                     uint32_t u32Disp;
    329                     IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    330                     u64EffAddr += (int32_t)u32Disp;
    331                     break;
    332                 }
    333                 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
    334             }
    335 
    336         }
    337 
    338         if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
    339             *pGCPtrEff = u64EffAddr;
    340         else
    341         {
    342             Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    343             *pGCPtrEff = u64EffAddr & UINT32_MAX;
    344         }
    345     }
    346 
    347     Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
    348     return VINF_SUCCESS;
    349 }
    350 
    351 
    352 #ifdef IEM_WITH_SETJMP
    353 /**
    354  * Calculates the effective address of a ModR/M memory operand.
    355  *
    356  * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
    357  *
    35861 * May longjmp on internal error.
    35962 *
     
    36972{
    37073    Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
    371 # define SET_SS_DEF() \
     74#define SET_SS_DEF() \
    37275    do \
    37376    { \
     
    642345    return u64EffAddr & UINT32_MAX;
    643346}
    644 #endif /* IEM_WITH_SETJMP */
    645 
    646 
    647 /**
    648  * Calculates the effective address of a ModR/M memory operand, extended version
    649  * for use in the recompilers.
    650  *
    651  * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
    652  *
    653  * @return  Strict VBox status code.
    654  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    655  * @param   bRm                 The ModRM byte.
    656  * @param   cbImmAndRspOffset   - First byte: The size of any immediate
    657  *                                following the effective address opcode bytes
    658  *                                (only for RIP relative addressing).
    659  *                              - Second byte: RSP displacement (for POP [ESP]).
    660  * @param   pGCPtrEff           Where to return the effective address.
    661  * @param   puInfo              Extra info: 32-bit displacement (bits 31:0) and
    662  *                              SIB byte (bits 39:32).
    663  */
    664 VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT
    665 {
    666     Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
    667 # define SET_SS_DEF() \
    668     do \
    669     { \
    670         if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
    671             pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
    672     } while (0)
    673 
    674     uint64_t uInfo;
    675     if (!IEM_IS_64BIT_CODE(pVCpu))
    676     {
    677 /** @todo Check the effective address size crap! */
    678         if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
    679         {
    680             uint16_t u16EffAddr;
    681 
    682             /* Handle the disp16 form with no registers first. */
    683             if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
    684             {
    685                 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
    686                 uInfo = u16EffAddr;
    687             }
    688             else
    689             {
    690                 /* Get the displacment. */
    691                 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    692                 {
    693                     case 0:  u16EffAddr = 0;                             break;
    694                     case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
    695                     case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
    696                     default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
    697                 }
    698                 uInfo = u16EffAddr;
    699 
    700                 /* Add the base and index registers to the disp. */
    701                 switch (bRm & X86_MODRM_RM_MASK)
    702                 {
    703                     case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
    704                     case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
    705                     case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
    706                     case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
    707                     case 4: u16EffAddr += pVCpu->cpum.GstCtx.si;            break;
    708                     case 5: u16EffAddr += pVCpu->cpum.GstCtx.di;            break;
    709                     case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp;            SET_SS_DEF(); break;
    710                     case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx;            break;
    711                 }
    712             }
    713 
    714             *pGCPtrEff = u16EffAddr;
    715         }
    716         else
    717         {
    718             Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    719             uint32_t u32EffAddr;
    720 
    721             /* Handle the disp32 form with no registers first. */
    722             if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    723             {
    724                 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
    725                 uInfo = u32EffAddr;
    726             }
    727             else
    728             {
    729                 /* Get the register (or SIB) value. */
    730                 uInfo = 0;
    731                 switch ((bRm & X86_MODRM_RM_MASK))
    732                 {
    733                     case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
    734                     case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
    735                     case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
    736                     case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
    737                     case 4: /* SIB */
    738                     {
    739                         uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
    740                         uInfo = (uint64_t)bSib << 32;
    741 
    742                         /* Get the index and scale it. */
    743                         switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
    744                         {
    745                             case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
    746                             case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
    747                             case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
    748                             case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
    749                             case 4: u32EffAddr = 0; /*none */ break;
    750                             case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
    751                             case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
    752                             case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    753                             IEM_NOT_REACHED_DEFAULT_CASE_RET();
    754                         }
    755                         u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    756 
    757                         /* add base */
    758                         switch (bSib & X86_SIB_BASE_MASK)
    759                         {
    760                             case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
    761                             case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
    762                             case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
    763                             case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
    764                             case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
    765                             case 5:
    766                                 if ((bRm & X86_MODRM_MOD_MASK) != 0)
    767                                 {
    768                                     u32EffAddr += pVCpu->cpum.GstCtx.ebp;
    769                                     SET_SS_DEF();
    770                                 }
    771                                 else
    772                                 {
    773                                     uint32_t u32Disp;
    774                                     IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    775                                     u32EffAddr += u32Disp;
    776                                     uInfo      |= u32Disp;
    777                                 }
    778                                 break;
    779                             case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
    780                             case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
    781                             IEM_NOT_REACHED_DEFAULT_CASE_RET();
    782                         }
    783                         break;
    784                     }
    785                     case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
    786                     case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
    787                     case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    788                     IEM_NOT_REACHED_DEFAULT_CASE_RET();
    789                 }
    790 
    791                 /* Get and add the displacement. */
    792                 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    793                 {
    794                     case 0:
    795                         break;
    796                     case 1:
    797                     {
    798                         int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
    799                         u32EffAddr += i8Disp;
    800                         uInfo |= (uint32_t)(int32_t)i8Disp;
    801                         break;
    802                     }
    803                     case 2:
    804                     {
    805                         uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    806                         u32EffAddr += u32Disp;
    807                         uInfo      |= (uint32_t)u32Disp;
    808                         break;
    809                     }
    810                     default:
    811                         AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
    812                 }
    813 
    814             }
    815             Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    816             *pGCPtrEff = u32EffAddr;
    817         }
    818     }
    819     else
    820     {
    821         uint64_t u64EffAddr;
    822 
    823         /* Handle the rip+disp32 form with no registers first. */
    824         if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    825         {
    826             IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
    827             uInfo = (uint32_t)u64EffAddr;
    828             u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
    829         }
    830         else
    831         {
    832             /* Get the register (or SIB) value. */
    833             uInfo = 0;
    834             switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
    835             {
    836                 case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
    837                 case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
    838                 case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
    839                 case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
    840                 case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
    841                 case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
    842                 case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
    843                 case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
    844                 case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
    845                 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
    846                 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
    847                 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
    848                 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
    849                 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
    850                 /* SIB */
    851                 case 4:
    852                 case 12:
    853                 {
    854                     uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
    855                     uInfo = (uint64_t)bSib << 32;
    856 
    857                     /* Get the index and scale it. */
    858                     switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
    859                     {
    860                         case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
    861                         case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
    862                         case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
    863                         case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
    864                         case  4: u64EffAddr = 0; /*none */ break;
    865                         case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
    866                         case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
    867                         case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
    868                         case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
    869                         case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
    870                         case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
    871                         case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
    872                         case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
    873                         case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
    874                         case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
    875                         case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
    876                         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    877                     }
    878                     u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    879 
    880                     /* add base */
    881                     switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
    882                     {
    883                         case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
    884                         case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
    885                         case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
    886                         case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
    887                         case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
    888                         case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
    889                         case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
    890                         case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
    891                         case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
    892                         case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
    893                         case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
    894                         case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
    895                         case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
    896                         case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
    897                         /* complicated encodings */
    898                         case 5:
    899                         case 13:
    900                             if ((bRm & X86_MODRM_MOD_MASK) != 0)
    901                             {
    902                                 if (!pVCpu->iem.s.uRexB)
    903                                 {
    904                                     u64EffAddr += pVCpu->cpum.GstCtx.rbp;
    905                                     SET_SS_DEF();
    906                                 }
    907                                 else
    908                                     u64EffAddr += pVCpu->cpum.GstCtx.r13;
    909                             }
    910                             else
    911                             {
    912                                 uint32_t u32Disp;
    913                                 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    914                                 u64EffAddr += (int32_t)u32Disp;
    915                                 uInfo      |= u32Disp;
    916                             }
    917                             break;
    918                         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    919                     }
    920                     break;
    921                 }
    922                 IEM_NOT_REACHED_DEFAULT_CASE_RET();
    923             }
    924 
    925             /* Get and add the displacement. */
    926             switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    927             {
    928                 case 0:
    929                     break;
    930                 case 1:
    931                 {
    932                     int8_t i8Disp;
    933                     IEM_OPCODE_GET_NEXT_S8(&i8Disp);
    934                     u64EffAddr += i8Disp;
    935                     uInfo      |= (uint32_t)(int32_t)i8Disp;
    936                     break;
    937                 }
    938                 case 2:
    939                 {
    940                     uint32_t u32Disp;
    941                     IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    942                     u64EffAddr += (int32_t)u32Disp;
    943                     uInfo      |= u32Disp;
    944                     break;
    945                 }
    946                 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
    947             }
    948 
    949         }
    950 
    951         if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
    952             *pGCPtrEff = u64EffAddr;
    953         else
    954         {
    955             Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    956             *pGCPtrEff = u64EffAddr & UINT32_MAX;
    957         }
    958     }
    959     *puInfo = uInfo;
    960 
    961     Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv uInfo=%RX64\n", *pGCPtrEff, uInfo));
    962     return VINF_SUCCESS;
    963 }
    964347
    965348/** @}  */
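
The status-code variants removed above implement the standard ModR/M effective-address decoding; the Jmp flavour that remains does the same work but longjmps on opcode-fetch errors. The 16-bit addressing-mode half boils down to a small base/index table plus an optional displacement. A standalone sketch with hypothetical types, taking the register values and the already-fetched displacement as plain parameters instead of reading the guest context:

#include <stdint.h>

typedef struct X86REGS16
{
    uint16_t bx, bp, si, di;
} X86REGS16;

/* 16-bit ModR/M effective address: mod=0,rm=6 is the [disp16] form; otherwise a
 * base/index pair from the table below plus a disp8 (sign-extended) or disp16. */
static uint16_t CalcEffAddr16(uint8_t bMod, uint8_t bRm, int16_t iDisp, X86REGS16 const *pRegs)
{
    if (bMod == 0 && (bRm & 7) == 6)
        return (uint16_t)iDisp;                              /* [disp16] */

    uint16_t uEff;
    switch (bRm & 7)
    {
        case 0:  uEff = pRegs->bx + pRegs->si; break;        /* [BX+SI] */
        case 1:  uEff = pRegs->bx + pRegs->di; break;        /* [BX+DI] */
        case 2:  uEff = pRegs->bp + pRegs->si; break;        /* [BP+SI], SS is the default segment */
        case 3:  uEff = pRegs->bp + pRegs->di; break;        /* [BP+DI], SS is the default segment */
        case 4:  uEff = pRegs->si;             break;        /* [SI] */
        case 5:  uEff = pRegs->di;             break;        /* [DI] */
        case 6:  uEff = pRegs->bp;             break;        /* [BP],   SS is the default segment */
        default: uEff = pRegs->bx;             break;        /* [BX] */
    }
    /* mod=1 and mod=2 add the displacement; mod=0 (rm != 6) has none. */
    return (uint16_t)(uEff + (bMod != 0 ? (uint16_t)iDisp : 0));
}

The 32-bit and 64-bit paths of the removed helper follow the same pattern with the SIB byte, RIP-relative addressing and the REX.B/REX.X extensions layered on top.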
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp

    r108260 r108278  
    736736
    737737#endif /* !IEM_WITH_CODE_TLB */
    738 #ifndef IEM_WITH_SETJMP
    739 
    740 /**
    741  * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
    742  *
    743  * @returns Strict VBox status code.
    744  * @param   pVCpu               The cross context virtual CPU structure of the
    745  *                              calling thread.
    746  * @param   pb                  Where to return the opcode byte.
    747  */
    748 VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
    749 {
    750     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
    751     if (rcStrict == VINF_SUCCESS)
    752     {
    753         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    754         *pb = pVCpu->iem.s.abOpcode[offOpcode];
    755         pVCpu->iem.s.offOpcode = offOpcode + 1;
    756     }
    757     else
    758         *pb = 0;
    759     return rcStrict;
    760 }
    761 
    762 #else  /* IEM_WITH_SETJMP */
    763738
    764739/**
     
    770745uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    771746{
    772 # ifdef IEM_WITH_CODE_TLB
     747#ifdef IEM_WITH_CODE_TLB
    773748    uint8_t u8;
    774749    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
    775750    return u8;
    776 # else
     751#else
    777752    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
    778753    if (rcStrict == VINF_SUCCESS)
    779754        return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
    780755    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    781 # endif
     756#endif
    782757}
    783758
    784 #endif /* IEM_WITH_SETJMP */
    785 
    786 #ifndef IEM_WITH_SETJMP
    787 
    788 /**
    789  * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
    790  *
    791  * @returns Strict VBox status code.
    792  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    793  * @param   pu16                Where to return the opcode dword.
    794  */
    795 VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
    796 {
    797     uint8_t      u8;
    798     VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    799     if (rcStrict == VINF_SUCCESS)
    800         *pu16 = (int8_t)u8;
    801     return rcStrict;
    802 }
    803 
    804 
    805 /**
    806  * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
    807  *
    808  * @returns Strict VBox status code.
    809  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    810  * @param   pu32                Where to return the opcode dword.
    811  */
    812 VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
    813 {
    814     uint8_t      u8;
    815     VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    816     if (rcStrict == VINF_SUCCESS)
    817         *pu32 = (int8_t)u8;
    818     return rcStrict;
    819 }
    820 
    821 
    822 /**
    823  * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
    824  *
    825  * @returns Strict VBox status code.
    826  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    827  * @param   pu64                Where to return the opcode qword.
    828  */
    829 VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    830 {
    831     uint8_t      u8;
    832     VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    833     if (rcStrict == VINF_SUCCESS)
    834         *pu64 = (int8_t)u8;
    835     return rcStrict;
    836 }
    837 
    838 #endif /* !IEM_WITH_SETJMP */
    839 
    840 
    841 #ifndef IEM_WITH_SETJMP
    842 
    843 /**
    844  * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
    845  *
    846  * @returns Strict VBox status code.
    847  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    848  * @param   pu16                Where to return the opcode word.
    849  */
    850 VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
    851 {
    852     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    853     if (rcStrict == VINF_SUCCESS)
    854     {
    855         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    856 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    857         *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    858 # else
    859         *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    860 # endif
    861         pVCpu->iem.s.offOpcode = offOpcode + 2;
    862     }
    863     else
    864         *pu16 = 0;
    865     return rcStrict;
    866 }
    867 
    868 #else  /* IEM_WITH_SETJMP */
    869759
    870760/**
     
    876766uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    877767{
    878 # ifdef IEM_WITH_CODE_TLB
     768#ifdef IEM_WITH_CODE_TLB
    879769    uint16_t u16;
    880770    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
    881771    return u16;
    882 # else
     772#else
    883773    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    884774    if (rcStrict == VINF_SUCCESS)
     
    886776        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    887777        pVCpu->iem.s.offOpcode += 2;
    888 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     778# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    889779        return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    890 #  else
     780# else
    891781        return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    892 #  endif
     782# endif
    893783    }
    894784    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    895 # endif
     785#endif
    896786}
    897787
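
In its non-code-TLB path, the fetcher kept just above reads the two opcode bytes either with a single (possibly unaligned) load when IEM_USE_UNALIGNED_DATA_ACCESS is defined, or by composing them with RT_MAKE_U16. A standalone sketch of those two options, plus a memcpy variant as the strictly portable third way (not something this hunk uses):

#include <stdint.h>
#include <string.h>

/* Direct load: relies on the target tolerating unaligned, type-punned reads. */
static inline uint16_t ReadU16Direct(uint8_t const *pb)
{
    return *(uint16_t const *)pb;
}

/* Byte-wise composition: yields the little-endian guest value on any host,
 * which is what RT_MAKE_U16(pb[0], pb[1]) does in the code above. */
static inline uint16_t ReadU16Bytes(uint8_t const *pb)
{
    return (uint16_t)(pb[0] | ((uint16_t)pb[1] << 8));
}

/* memcpy form: portable with respect to alignment; compilers typically turn it
 * into a single load (host byte order, like the direct variant). */
static inline uint16_t ReadU16Memcpy(uint8_t const *pb)
{
    uint16_t u16;
    memcpy(&u16, pb, sizeof(u16));
    return u16;
}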
    898 #endif /* IEM_WITH_SETJMP */
    899 
    900 #ifndef IEM_WITH_SETJMP
    901 
    902 /**
    903  * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
    904  *
    905  * @returns Strict VBox status code.
    906  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    907  * @param   pu32                Where to return the opcode double word.
    908  */
    909 VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
    910 {
    911     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    912     if (rcStrict == VINF_SUCCESS)
    913     {
    914         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    915         *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    916         pVCpu->iem.s.offOpcode = offOpcode + 2;
    917     }
    918     else
    919         *pu32 = 0;
    920     return rcStrict;
    921 }
    922 
    923 
    924 /**
    925  * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
    926  *
    927  * @returns Strict VBox status code.
    928  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    929  * @param   pu64                Where to return the opcode quad word.
    930  */
    931 VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    932 {
    933     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    934     if (rcStrict == VINF_SUCCESS)
    935     {
    936         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    937         *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    938         pVCpu->iem.s.offOpcode = offOpcode + 2;
    939     }
    940     else
    941         *pu64 = 0;
    942     return rcStrict;
    943 }
    944 
    945 #endif /* !IEM_WITH_SETJMP */
    946 
    947 #ifndef IEM_WITH_SETJMP
    948 
    949 /**
    950  * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
    951  *
    952  * @returns Strict VBox status code.
    953  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    954  * @param   pu32                Where to return the opcode dword.
    955  */
    956 VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
    957 {
    958     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    959     if (rcStrict == VINF_SUCCESS)
    960     {
    961         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    962 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    963         *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    964 # else
    965         *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    966                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    967                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    968                                     pVCpu->iem.s.abOpcode[offOpcode + 3]);
    969 # endif
    970         pVCpu->iem.s.offOpcode = offOpcode + 4;
    971     }
    972     else
    973         *pu32 = 0;
    974     return rcStrict;
    975 }
    976 
    977 #else  /* IEM_WITH_SETJMP */
    978788
    979789/**
     
    985795uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    986796{
    987 # ifdef IEM_WITH_CODE_TLB
     797#ifdef IEM_WITH_CODE_TLB
    988798    uint32_t u32;
    989799    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
    990800    return u32;
    991 # else
     801#else
    992802    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    993803    if (rcStrict == VINF_SUCCESS)
     
    995805        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    996806        pVCpu->iem.s.offOpcode = offOpcode + 4;
    997 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     807# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    998808        return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    999 #  else
     809# else
    1000810        return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    1001811                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
    1002812                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
    1003813                                   pVCpu->iem.s.abOpcode[offOpcode + 3]);
    1004 #  endif
     814# endif
    1005815    }
    1006816    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    1007 # endif
     817#endif
    1008818}
    1009819
    1010 #endif /* IEM_WITH_SETJMP */
    1011 
    1012 #ifndef IEM_WITH_SETJMP
    1013 
    1014 /**
    1015  * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
    1016  *
    1017  * @returns Strict VBox status code.
    1018  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1019  * @param   pu64                Where to return the opcode dword.
    1020  */
    1021 VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    1022 {
    1023     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    1024     if (rcStrict == VINF_SUCCESS)
    1025     {
    1026         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1027         *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    1028                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    1029                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    1030                                     pVCpu->iem.s.abOpcode[offOpcode + 3]);
    1031         pVCpu->iem.s.offOpcode = offOpcode + 4;
    1032     }
    1033     else
    1034         *pu64 = 0;
    1035     return rcStrict;
    1036 }
    1037 
    1038 
    1039 /**
    1040  * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
    1041  *
    1042  * @returns Strict VBox status code.
    1043  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1044  * @param   pu64                Where to return the opcode qword.
    1045  */
    1046 VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    1047 {
    1048     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    1049     if (rcStrict == VINF_SUCCESS)
    1050     {
    1051         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1052         *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    1053                                              pVCpu->iem.s.abOpcode[offOpcode + 1],
    1054                                              pVCpu->iem.s.abOpcode[offOpcode + 2],
    1055                                              pVCpu->iem.s.abOpcode[offOpcode + 3]);
    1056         pVCpu->iem.s.offOpcode = offOpcode + 4;
    1057     }
    1058     else
    1059         *pu64 = 0;
    1060     return rcStrict;
    1061 }
    1062 
    1063 #endif /* !IEM_WITH_SETJMP */
    1064 
    1065 #ifndef IEM_WITH_SETJMP
    1066 
    1067 /**
    1068  * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
    1069  *
    1070  * @returns Strict VBox status code.
    1071  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1072  * @param   pu64                Where to return the opcode qword.
    1073  */
    1074 VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    1075 {
    1076     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
    1077     if (rcStrict == VINF_SUCCESS)
    1078     {
    1079         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1080 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    1081         *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    1082 # else
    1083         *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    1084                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    1085                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    1086                                     pVCpu->iem.s.abOpcode[offOpcode + 3],
    1087                                     pVCpu->iem.s.abOpcode[offOpcode + 4],
    1088                                     pVCpu->iem.s.abOpcode[offOpcode + 5],
    1089                                     pVCpu->iem.s.abOpcode[offOpcode + 6],
    1090                                     pVCpu->iem.s.abOpcode[offOpcode + 7]);
    1091 # endif
    1092         pVCpu->iem.s.offOpcode = offOpcode + 8;
    1093     }
    1094     else
    1095         *pu64 = 0;
    1096     return rcStrict;
    1097 }
    1098 
    1099 #else  /* IEM_WITH_SETJMP */
    1100820
    1101821/**
     
    1107827uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    1108828{
    1109 # ifdef IEM_WITH_CODE_TLB
     829#ifdef IEM_WITH_CODE_TLB
    1110830    uint64_t u64;
    1111831    iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
    1112832    return u64;
    1113 # else
     833#else
    1114834    VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
    1115835    if (rcStrict == VINF_SUCCESS)
     
    1117837        uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    1118838        pVCpu->iem.s.offOpcode = offOpcode + 8;
    1119 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     839# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    1120840        return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    1121 #  else
     841# else
    1122842        return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    1123843                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
     
    1128848                                   pVCpu->iem.s.abOpcode[offOpcode + 6],
    1129849                                   pVCpu->iem.s.abOpcode[offOpcode + 7]);
    1130 #  endif
     850# endif
    1131851    }
    1132852    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    1133 # endif
     853#endif
    1134854}
    1135855
    1136 #endif /* IEM_WITH_SETJMP */
    1137 
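
For context, a minimal sketch of the two calling conventions involved here, assuming the usual VBox/IEM types and a pVCpu in scope (illustrative fragments, not code taken from the tree):

    /* Status-code style (the variant removed above): every fetch is checked and propagated. */
    uint16_t     u16;
    VBOXSTRICTRC rcStrict = iemOpcodeGetNextU16(pVCpu, &u16);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Setjmp style (the variant that remains): the value comes back directly and a failed
       fetch long-jumps to the setjmp frame established further up the call stack. */
    uint16_t u16Val = iemOpcodeGetNextU16Jmp(pVCpu);
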
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllThrdTables-x86.h

    r108260 r108278  
    104104#endif
    105105
    106 #ifndef IEM_WITH_SETJMP
    107 # error The setjmp approach must be enabled for the recompiler.
    108 #endif
    109 
    110106
    111107/*********************************************************************************************************************************
     
    133129 */
    134130#undef IEM_MC_CALC_RM_EFF_ADDR
    135 #ifndef IEM_WITH_SETJMP
    136 # define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    137     uint64_t uEffAddrInfo; \
    138     IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff), &uEffAddrInfo))
    139 #else
    140 # define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
     131#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    141132    uint64_t uEffAddrInfo; \
    142133    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &uEffAddrInfo))
    143 #endif
    144134
    145135/*
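
With the non-setjmp branch gone, IEM_MC_CALC_RM_EFF_ADDR in the threaded-function tables has a single expansion. Roughly, as an illustrative rewrite of the macro body shown above with the invocation arguments spelled out:

    /* IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0) now always expands to: */
    uint64_t uEffAddrInfo;
    ((GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (bRm), (0), &uEffAddrInfo));
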
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllXcpt-x86.cpp

    r108260 r108278  
    26012601}
    26022602
    2603 #ifdef IEM_WITH_SETJMP
    26042603/**
    26052604 * See iemRaiseXcptOrInt.  Will not return.
     
    26162615    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    26172616}
    2618 #endif
    26192617
    26202618
     
    26352633
    26362634
    2637 #ifdef IEM_WITH_SETJMP
    26382635/** \#DE - 00.  */
    26392636DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
     
    26412638    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    26422639}
    2643 #endif
    26442640
    26452641
     
    26682664
    26692665
    2670 #ifdef IEM_WITH_SETJMP
    26712666/** \#UD - 06.  */
    26722667DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
     
    26742669    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    26752670}
    2676 #endif
    26772671
    26782672
     
    26842678
    26852679
    2686 #ifdef IEM_WITH_SETJMP
    26872680/** \#NM - 07.  */
    26882681DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
     
    26902683    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    26912684}
    2692 #endif
    26932685
    26942686
     
    27752767}
    27762768
    2777 #ifdef IEM_WITH_SETJMP
     2769
    27782770/** \#GP(0) - 0d.  */
    27792771DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
     
    27822774    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    27832775}
    2784 #endif
    27852776
    27862777
     
    28132804}
    28142805
    2815 #ifdef IEM_WITH_SETJMP
     2806
    28162807/** \#GP(sel) - 0d, longjmp.  */
    28172808DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
     
    28232814                         IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    28242815}
    2825 #endif
     2816
    28262817
    28272818/** \#GP(sel) - 0d.  */
     
    28342825}
    28352826
    2836 #ifdef IEM_WITH_SETJMP
     2827
    28372828/** \#GP(sel) - 0d, longjmp.  */
    28382829DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
     
    28432834    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    28442835}
    2845 #endif
    28462836
    28472837
     
    28552845}
    28562846
    2857 #ifdef IEM_WITH_SETJMP
     2847
    28582848/** \#GP(sel) - 0d, longjmp.  */
    28592849DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
     
    28622852    iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    28632853}
    2864 #endif
    28652854
    28662855
     
    29302919}
    29312920
    2932 #ifdef IEM_WITH_SETJMP
     2921
    29332922/** \#PF(n) - 0e, longjmp.  */
    29342923DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
     
    29372926    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
    29382927}
    2939 #endif
    29402928
    29412929
     
    29512939}
    29522940
    2953 #ifdef IEM_WITH_SETJMP
     2941
    29542942/** \#MF(0) - 10, longjmp.  */
    29552943DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
     
    29572945    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
    29582946}
    2959 #endif
    29602947
    29612948
     
    29662953}
    29672954
    2968 #ifdef IEM_WITH_SETJMP
     2955
    29692956/** \#AC(0) - 11, longjmp.  */
    29702957DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
     
    29722959    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
    29732960}
    2974 #endif
    29752961
    29762962
     
    29822968
    29832969
    2984 #ifdef IEM_WITH_SETJMP
    29852970/** \#XF(0)/\#XM(0) - 19s, longjmp.  */
    29862971DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
     
    29882973    IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
    29892974}
    2990 #endif
    29912975
    29922976
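
The ...Jmp raisers kept here are all DECL_NO_RETURN, so an instruction body can raise without threading a VBOXSTRICTRC back up, while the status-code raisers remain for callers that do return one. A minimal sketch, with a hypothetical guard condition:

    if (fCheckFailed)                                /* hypothetical condition */
        iemRaiseGeneralProtectionFault0Jmp(pVCpu);   /* never returns: long-jumps with the #GP status */
    /* Execution only continues here when the check passed. */

    /* The status-code flavour is still available where a return value is wanted: */
    if (fCheckFailed)
        return iemRaiseGeneralProtectionFault0(pVCpu);
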
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMInlineDecode-x86.h

    r108260 r108278  
    3636
    3737#ifndef IEM_WITH_OPAQUE_DECODER_STATE
    38 
    39 # ifndef IEM_WITH_SETJMP
    40 
    41 /**
    42  * Fetches the first opcode byte.
    43  *
    44  * @returns Strict VBox status code.
    45  * @param   pVCpu               The cross context virtual CPU structure of the
    46  *                              calling thread.
    47  * @param   pu8                 Where to return the opcode byte.
    48  */
    49 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
    50 {
    51     /*
    52      * Check for hardware instruction breakpoints.
    53      * Note! Guest breakpoints are only checked after POP SS or MOV SS on AMD CPUs.
    54      */
    55     if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
    56     { /* likely */ }
    57     else
    58     {
    59         VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
    60                                                        pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,
    61                                                           !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)
    62                                                        || IEM_IS_GUEST_CPU_AMD(pVCpu));
    63         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    64         { /* likely */ }
    65         else
    66         {
    67             *pu8 = 0xff; /* shut up gcc. sigh */
    68             if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
    69                 return iemRaiseDebugException(pVCpu);
    70             return rcStrict;
    71         }
    72     }
    73 
    74     /*
    75      * Fetch the first opcode byte.
    76      */
    77     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    78     if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    79     {
    80         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
    81         *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
    82         return VINF_SUCCESS;
    83     }
    84     return iemOpcodeGetNextU8Slow(pVCpu, pu8);
    85 }
    86 
    87 # else  /* IEM_WITH_SETJMP */
    8838
    8939/**
     
    12070     * Fetch the first opcode byte.
    12171     */
    122 #  ifdef IEM_WITH_CODE_TLB
     72# ifdef IEM_WITH_CODE_TLB
    12373    uint8_t         bRet;
    12474    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
     
    13282    else
    13383        bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
     134 #   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
      84#  ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
     13585    Assert(pVCpu->iem.s.offOpcode == 0);
     13686    pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
     137 #   endif
      87#  endif
    13888    return bRet;
    13989
    140 #  else /* !IEM_WITH_CODE_TLB */
     90# else /* !IEM_WITH_CODE_TLB */
    14191    uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
    14292    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
     
    14696    }
    14797    return iemOpcodeGetNextU8SlowJmp(pVCpu);
    148 #  endif
    149 }
    150 
    151 # endif /* IEM_WITH_SETJMP */
     98# endif
     99}
    152100
    153101/**
     
    157105 * @remark Implicitly references pVCpu.
    158106 */
    159 # ifndef IEM_WITH_SETJMP
    160 #  define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
    161     do \
    162     { \
    163         VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
    164         if (rcStrict2 == VINF_SUCCESS) \
    165         { /* likely */ } \
    166         else \
    167             return rcStrict2; \
    168     } while (0)
    169 # else
    170 #  define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
    171 # endif /* IEM_WITH_SETJMP */
    172 
    173 
    174 # ifndef IEM_WITH_SETJMP
    175 
    176 /**
    177  * Fetches the next opcode byte.
    178  *
    179  * @returns Strict VBox status code.
    180  * @param   pVCpu               The cross context virtual CPU structure of the
    181  *                              calling thread.
    182  * @param   pu8                 Where to return the opcode byte.
    183  */
    184 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
    185 {
    186     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    187     if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    188     {
    189         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
    190         *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
    191         return VINF_SUCCESS;
    192     }
    193     return iemOpcodeGetNextU8Slow(pVCpu, pu8);
    194 }
    195 
    196 # else  /* IEM_WITH_SETJMP */
     107# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
     108
    197109
    198110/**
     
    204116DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    205117{
    206 #  ifdef IEM_WITH_CODE_TLB
     118# ifdef IEM_WITH_CODE_TLB
    207119    uint8_t         bRet;
    208120    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
     
    216128    else
    217129        bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
     218 #   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
      130#  ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
     219131    Assert(pVCpu->iem.s.offOpcode < sizeof(pVCpu->iem.s.abOpcode));
     220132    pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
     221 #   endif
      133#  endif
    222134    return bRet;
    223135
    224 #  else /* !IEM_WITH_CODE_TLB */
     136# else /* !IEM_WITH_CODE_TLB */
    225137    uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
    226138    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
     
    230142    }
    231143    return iemOpcodeGetNextU8SlowJmp(pVCpu);
    232 #  endif
    233 }
    234 
    235 # endif /* IEM_WITH_SETJMP */
     144# endif
     145}
    236146
    237147/**
     
    241151 * @remark Implicitly references pVCpu.
    242152 */
    243 # ifndef IEM_WITH_SETJMP
    244 #  define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
    245     do \
    246     { \
    247         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
    248         if (rcStrict2 == VINF_SUCCESS) \
    249         { /* likely */ } \
    250         else \
    251             return rcStrict2; \
    252     } while (0)
    253 # else
    254 #  define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
    255 # endif /* IEM_WITH_SETJMP */
    256 
    257 
    258 # ifndef IEM_WITH_SETJMP
    259 /**
    260  * Fetches the next signed byte from the opcode stream.
    261  *
    262  * @returns Strict VBox status code.
    263  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    264  * @param   pi8                 Where to return the signed byte.
    265  */
    266 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
    267 {
    268     return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
    269 }
    270 # endif /* !IEM_WITH_SETJMP */
    271 
     153# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
    272154
    273155/**
     
    278160 * @remark Implicitly references pVCpu.
    279161 */
    280 # ifndef IEM_WITH_SETJMP
    281 #  define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
    282     do \
    283     { \
    284         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
    285         if (rcStrict2 != VINF_SUCCESS) \
    286             return rcStrict2; \
    287     } while (0)
    288 # else /* IEM_WITH_SETJMP */
    289 #  define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
    290 
    291 # endif /* IEM_WITH_SETJMP */
    292 
    293 
    294 # ifndef IEM_WITH_SETJMP
    295 /**
    296  * Fetches the next signed byte from the opcode stream, extending it to
    297  * unsigned 16-bit.
    298  *
    299  * @returns Strict VBox status code.
    300  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    301  * @param   pu16                Where to return the unsigned word.
    302  */
    303 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
    304 {
    305     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    306     if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
    307         return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
    308 
    309     *pu16 = (uint16_t)(int16_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    310     pVCpu->iem.s.offOpcode = offOpcode + 1;
    311     return VINF_SUCCESS;
    312 }
    313 # endif /* !IEM_WITH_SETJMP */
     162# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
    314163
    315164/**
     
    320169 * @remark Implicitly references pVCpu.
    321170 */
    322 # ifndef IEM_WITH_SETJMP
    323 #  define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
    324     do \
    325     { \
    326         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
    327         if (rcStrict2 != VINF_SUCCESS) \
    328             return rcStrict2; \
    329     } while (0)
    330 # else
    331 #  define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
    332 # endif
    333 
    334 # ifndef IEM_WITH_SETJMP
    335 /**
    336  * Fetches the next signed byte from the opcode stream, extending it to
    337  * unsigned 32-bit.
    338  *
    339  * @returns Strict VBox status code.
    340  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    341  * @param   pu32                Where to return the unsigned dword.
    342  */
    343 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
    344 {
    345     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    346     if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
    347         return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
    348 
    349     *pu32 = (uint32_t)(int32_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    350     pVCpu->iem.s.offOpcode = offOpcode + 1;
    351     return VINF_SUCCESS;
    352 }
    353 # endif /* !IEM_WITH_SETJMP */
     171# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
    354172
    355173/**
     
    360178 * @remark Implicitly references pVCpu.
    361179 */
    362 # ifndef IEM_WITH_SETJMP
    363 #  define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
    364     do \
    365     { \
    366         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
    367         if (rcStrict2 != VINF_SUCCESS) \
    368             return rcStrict2; \
    369     } while (0)
    370 # else
    371 #  define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
    372 # endif
    373 
    374 
    375 # ifndef IEM_WITH_SETJMP
    376 /**
    377  * Fetches the next signed byte from the opcode stream, extending it to
    378  * unsigned 64-bit.
    379  *
    380  * @returns Strict VBox status code.
    381  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    382  * @param   pu64                Where to return the unsigned qword.
    383  */
    384 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    385 {
    386     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    387     if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
    388         return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
    389 
    390     *pu64 = (uint64_t)(int64_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    391     pVCpu->iem.s.offOpcode = offOpcode + 1;
    392     return VINF_SUCCESS;
    393 }
    394 # endif /* !IEM_WITH_SETJMP */
     180# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
    395181
    396182/**
     
    401187 * @remark Implicitly references pVCpu.
    402188 */
    403 # ifndef IEM_WITH_SETJMP
    404 #  define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
    405     do \
    406     { \
    407         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
    408         if (rcStrict2 != VINF_SUCCESS) \
    409             return rcStrict2; \
    410     } while (0)
    411 # else
    412 #  define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
    413 # endif
    414 
    415 
    416 # ifndef IEM_WITH_SETJMP
    417 
    418 /**
    419  * Fetches the next opcode word.
    420  *
    421  * @returns Strict VBox status code.
    422  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    423  * @param   pu16                Where to return the opcode word.
    424  */
    425 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
    426 {
    427     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    428     if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
    429     {
    430         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
    431 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    432         *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    433 #  else
    434         *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    435 #  endif
    436         return VINF_SUCCESS;
    437     }
    438     return iemOpcodeGetNextU16Slow(pVCpu, pu16);
    439 }
    440 
    441 # else  /* IEM_WITH_SETJMP */
     189# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
     190
    442191
    443192/**
     
    449198DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    450199{
    451 #  ifdef IEM_WITH_CODE_TLB
     200# ifdef IEM_WITH_CODE_TLB
    452201    uint16_t        u16Ret;
    453202    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
     
    457206    {
    458207        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
     459 #   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
      208#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     460209        u16Ret = *(uint16_t const *)&pbBuf[offBuf];
     461 #   else
      210#  else
     462211        u16Ret = RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
     463 #   endif
      212#  endif
    464213    }
    465214    else
    466215        u16Ret = iemOpcodeGetNextU16SlowJmp(pVCpu);
    467216
     468 #   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
      217#  ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    469218    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    470219    Assert(offOpcode + 1 < sizeof(pVCpu->iem.s.abOpcode));
    471 #    ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     220#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    472221    *(uint16_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u16Ret;
    473 #    else
     222#   else
    474223    pVCpu->iem.s.abOpcode[offOpcode]     = RT_LO_U8(u16Ret);
    475224    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_HI_U8(u16Ret);
    476 #    endif
     225#   endif
    477226    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)2;
     478 #   endif
      227#  endif
    479228
    480229    return u16Ret;
    481230
    482 #  else /* !IEM_WITH_CODE_TLB */
     231# else /* !IEM_WITH_CODE_TLB */
    483232    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    484233    if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
    485234    {
    486235        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
     487 #   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
      236#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     488237        return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
     489 #   else
      238#  else
     490239        return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
     491 #   endif
      240#  endif
    492241    }
    493242    return iemOpcodeGetNextU16SlowJmp(pVCpu);
    494 #  endif /* !IEM_WITH_CODE_TLB */
    495 }
    496 
    497 # endif /* IEM_WITH_SETJMP */
     243# endif /* !IEM_WITH_CODE_TLB */
     244}
    498245
    499246/**
     
    503250 * @remark Implicitly references pVCpu.
    504251 */
    505 # ifndef IEM_WITH_SETJMP
    506 #  define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
    507     do \
    508     { \
    509         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
    510         if (rcStrict2 != VINF_SUCCESS) \
    511             return rcStrict2; \
    512     } while (0)
    513 # else
    514 #  define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
    515 # endif
    516 
    517 # ifndef IEM_WITH_SETJMP
    518 /**
    519  * Fetches the next opcode word, zero extending it to a double word.
    520  *
    521  * @returns Strict VBox status code.
    522  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    523  * @param   pu32                Where to return the opcode double word.
    524  */
    525 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
    526 {
    527     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    528     if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
    529         return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
    530 
    531     *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    532     pVCpu->iem.s.offOpcode = offOpcode + 2;
    533     return VINF_SUCCESS;
    534 }
    535 # endif /* !IEM_WITH_SETJMP */
     252# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
    536253
    537254/**
     
    542259 * @remark Implicitly references pVCpu.
    543260 */
    544 # ifndef IEM_WITH_SETJMP
    545 #  define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
    546     do \
    547     { \
    548         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
    549         if (rcStrict2 != VINF_SUCCESS) \
    550             return rcStrict2; \
    551     } while (0)
    552 # else
    553 #  define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
    554 # endif
    555 
    556 # ifndef IEM_WITH_SETJMP
    557 /**
    558  * Fetches the next opcode word, zero extending it to a quad word.
    559  *
    560  * @returns Strict VBox status code.
    561  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    562  * @param   pu64                Where to return the opcode quad word.
    563  */
    564 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    565 {
    566     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    567     if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
    568         return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
    569 
    570     *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    571     pVCpu->iem.s.offOpcode = offOpcode + 2;
    572     return VINF_SUCCESS;
    573 }
    574 # endif /* !IEM_WITH_SETJMP */
     261# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
    575262
    576263/**
     
    581268 * @remark Implicitly references pVCpu.
    582269 */
    583 # ifndef IEM_WITH_SETJMP
    584 #  define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
    585     do \
    586     { \
    587         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
    588         if (rcStrict2 != VINF_SUCCESS) \
    589             return rcStrict2; \
    590     } while (0)
    591 # else
    592 #  define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64)  (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
    593 # endif
    594 
    595 
    596 # ifndef IEM_WITH_SETJMP
    597 /**
    598  * Fetches the next signed word from the opcode stream.
    599  *
    600  * @returns Strict VBox status code.
    601  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    602  * @param   pi16                Where to return the signed word.
    603  */
    604 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
    605 {
    606     return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
    607 }
    608 # endif /* !IEM_WITH_SETJMP */
    609 
     270# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64)  (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
    610271
    611272/**
     
    616277 * @remark Implicitly references pVCpu.
    617278 */
    618 # ifndef IEM_WITH_SETJMP
    619 #  define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
    620     do \
    621     { \
    622         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
    623         if (rcStrict2 != VINF_SUCCESS) \
    624             return rcStrict2; \
    625     } while (0)
    626 # else
    627 #  define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
    628 # endif
    629 
    630 # ifndef IEM_WITH_SETJMP
    631 
    632 /**
    633  * Fetches the next opcode dword.
    634  *
    635  * @returns Strict VBox status code.
    636  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    637  * @param   pu32                Where to return the opcode double word.
    638  */
    639 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
    640 {
    641     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    642     if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
    643     {
    644         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
    645 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    646         *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    647 #  else
    648         *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    649                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    650                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    651                                     pVCpu->iem.s.abOpcode[offOpcode + 3]);
    652 #  endif
    653         return VINF_SUCCESS;
    654     }
    655     return iemOpcodeGetNextU32Slow(pVCpu, pu32);
    656 }
    657 
    658 # else  /* IEM_WITH_SETJMP */
     279# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
     280
    659281
    660282/**
     
    666288DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    667289{
    668 #  ifdef IEM_WITH_CODE_TLB
     290# ifdef IEM_WITH_CODE_TLB
    669291    uint32_t u32Ret;
    670292    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
     
    674296    {
    675297        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
     676 #   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
      298#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     677299        u32Ret = *(uint32_t const *)&pbBuf[offBuf];
     678 #   else
      300#  else
     679301        u32Ret = RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
     680302                                     pbBuf[offBuf + 1],
     681303                                     pbBuf[offBuf + 2],
     682304                                     pbBuf[offBuf + 3]);
     683 #   endif
      305#  endif
    684306    }
    685307    else
    686308        u32Ret = iemOpcodeGetNextU32SlowJmp(pVCpu);
    687309
     688 #   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
      310#  ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    689311    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    690312    Assert(offOpcode + 3 < sizeof(pVCpu->iem.s.abOpcode));
    691 #    ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     313#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    692314    *(uint32_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u32Ret;
    693 #    else
     315#   else
    694316    pVCpu->iem.s.abOpcode[offOpcode]     = RT_BYTE1(u32Ret);
    695317    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u32Ret);
    696318    pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u32Ret);
    697319    pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u32Ret);
    698 #    endif
     320#   endif
    699321    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)4;
     700 #   endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
      322#  endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
    701323
    702324    return u32Ret;
    703325
    704 #  else  /* !IEM_WITH_CODE_TLB */
     326# else  /* !IEM_WITH_CODE_TLB */
    705327    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    706328    if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
    707329    {
    708330        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
     709 #   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
      331#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     710332        return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
     711 #   else
      333#  else
     712334        return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
     713335                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
     714336                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
     715337                                   pVCpu->iem.s.abOpcode[offOpcode + 3]);
     716 #   endif
      338#  endif
    717339    }
    718340    return iemOpcodeGetNextU32SlowJmp(pVCpu);
    719 #  endif
    720 }
    721 
    722 # endif /* IEM_WITH_SETJMP */
     341# endif
     342}
    723343
    724344/**
     
    728348 * @remark Implicitly references pVCpu.
    729349 */
    730 # ifndef IEM_WITH_SETJMP
    731 #  define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
    732     do \
    733     { \
    734         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
    735         if (rcStrict2 != VINF_SUCCESS) \
    736             return rcStrict2; \
    737     } while (0)
    738 # else
    739 #  define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
    740 # endif
    741 
    742 # ifndef IEM_WITH_SETJMP
    743 /**
    744  * Fetches the next opcode dword, zero extending it to a quad word.
    745  *
    746  * @returns Strict VBox status code.
    747  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    748  * @param   pu64                Where to return the opcode quad word.
    749  */
    750 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    751 {
    752     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    753     if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
    754         return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
    755 
    756     *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    757                                 pVCpu->iem.s.abOpcode[offOpcode + 1],
    758                                 pVCpu->iem.s.abOpcode[offOpcode + 2],
    759                                 pVCpu->iem.s.abOpcode[offOpcode + 3]);
    760     pVCpu->iem.s.offOpcode = offOpcode + 4;
    761     return VINF_SUCCESS;
    762 }
    763 # endif /* !IEM_WITH_SETJMP */
     350# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
    764351
    765352/**
     
    770357 * @remark Implicitly references pVCpu.
    771358 */
    772 # ifndef IEM_WITH_SETJMP
    773 #  define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
    774     do \
    775     { \
    776         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
    777         if (rcStrict2 != VINF_SUCCESS) \
    778             return rcStrict2; \
    779     } while (0)
    780 # else
    781 #  define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
    782 # endif
    783 
    784 
    785 # ifndef IEM_WITH_SETJMP
    786 /**
    787  * Fetches the next signed double word from the opcode stream.
    788  *
    789  * @returns Strict VBox status code.
    790  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    791  * @param   pi32                Where to return the signed double word.
    792  */
    793 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
    794 {
    795     return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
    796 }
    797 # endif
     359# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
    798360
    799361/**
     
    804366 * @remark Implicitly references pVCpu.
    805367 */
    806 # ifndef IEM_WITH_SETJMP
    807 #  define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
    808     do \
    809     { \
    810         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
    811         if (rcStrict2 != VINF_SUCCESS) \
    812             return rcStrict2; \
    813     } while (0)
    814 # else
    815 #  define IEM_OPCODE_GET_NEXT_S32(a_pi32)    (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
    816 # endif
    817 
    818 # ifndef IEM_WITH_SETJMP
    819 /**
    820  * Fetches the next opcode dword, sign extending it into a quad word.
    821  *
    822  * @returns Strict VBox status code.
    823  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    824  * @param   pu64                Where to return the opcode quad word.
    825  */
    826 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    827 {
    828     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    829     if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
    830         return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
    831 
    832     int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    833                                       pVCpu->iem.s.abOpcode[offOpcode + 1],
    834                                       pVCpu->iem.s.abOpcode[offOpcode + 2],
    835                                       pVCpu->iem.s.abOpcode[offOpcode + 3]);
    836     *pu64 = (uint64_t)(int64_t)i32;
    837     pVCpu->iem.s.offOpcode = offOpcode + 4;
    838     return VINF_SUCCESS;
    839 }
    840 # endif /* !IEM_WITH_SETJMP */
     368# define IEM_OPCODE_GET_NEXT_S32(a_pi32)    (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
    841369
    842370/**
     
    847375 * @remark Implicitly references pVCpu.
    848376 */
    849 # ifndef IEM_WITH_SETJMP
    850 #  define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
    851     do \
    852     { \
    853         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
    854         if (rcStrict2 != VINF_SUCCESS) \
    855             return rcStrict2; \
    856     } while (0)
    857 # else
    858 #  define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
    859 # endif
    860 
    861 # ifndef IEM_WITH_SETJMP
    862 
    863 /**
    864  * Fetches the next opcode qword.
    865  *
    866  * @returns Strict VBox status code.
    867  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    868  * @param   pu64                Where to return the opcode qword.
    869  */
    870 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
    871 {
    872     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    873     if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
    874     {
    875 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    876         *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    877 #  else
    878         *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    879                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    880                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    881                                     pVCpu->iem.s.abOpcode[offOpcode + 3],
    882                                     pVCpu->iem.s.abOpcode[offOpcode + 4],
    883                                     pVCpu->iem.s.abOpcode[offOpcode + 5],
    884                                     pVCpu->iem.s.abOpcode[offOpcode + 6],
    885                                     pVCpu->iem.s.abOpcode[offOpcode + 7]);
    886 #  endif
    887         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
    888         return VINF_SUCCESS;
    889     }
    890     return iemOpcodeGetNextU64Slow(pVCpu, pu64);
    891 }
    892 
    893 # else  /* IEM_WITH_SETJMP */
     377# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
     378
    894379
    895380/**
     
    901386DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    902387{
    903 #  ifdef IEM_WITH_CODE_TLB
     388# ifdef IEM_WITH_CODE_TLB
    904389    uint64_t        u64Ret;
    905390    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
     
    909394    {
    910395        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
     911 #   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
      396#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     912397        u64Ret = *(uint64_t const *)&pbBuf[offBuf];
     913 #   else
      398#  else
    914399        u64Ret = RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
    915400                                     pbBuf[offBuf + 1],
     
    920405                                     pbBuf[offBuf + 6],
    921406                                     pbBuf[offBuf + 7]);
     922 #   endif
      407#  endif
    923408    }
    924409    else
    925410        u64Ret = iemOpcodeGetNextU64SlowJmp(pVCpu);
    926411
     927 #   ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
      412#  ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    928413    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    929414    Assert(offOpcode + 7 < sizeof(pVCpu->iem.s.abOpcode));
    930 #    ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     415#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    931416    *(uint64_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u64Ret;
    932 #    else
     417#   else
    933418    pVCpu->iem.s.abOpcode[offOpcode]     = RT_BYTE1(u64Ret);
    934419    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u64Ret);
     
    939424    pVCpu->iem.s.abOpcode[offOpcode + 6] = RT_BYTE7(u64Ret);
    940425    pVCpu->iem.s.abOpcode[offOpcode + 7] = RT_BYTE8(u64Ret);
    941 #    endif
     426#   endif
    942427    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)8;
     943 #   endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
      428#  endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
    944429
    945430    return u64Ret;
    946431
    947 #  else /* !IEM_WITH_CODE_TLB */
     432# else /* !IEM_WITH_CODE_TLB */
    948433    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    949434    if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
    950435    {
    951436        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
     952 #   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
      437#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
     953438        return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
     954 #   else
      439#  else
    955440        return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    956441                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
     
    961446                                   pVCpu->iem.s.abOpcode[offOpcode + 6],
    962447                                   pVCpu->iem.s.abOpcode[offOpcode + 7]);
     963 #   endif
      448#  endif
    964449    }
    965450    return iemOpcodeGetNextU64SlowJmp(pVCpu);
    966 #  endif /* !IEM_WITH_CODE_TLB */
    967 }
    968 
    969 # endif /* IEM_WITH_SETJMP */
     451# endif /* !IEM_WITH_CODE_TLB */
     452}
    970453
    971454/**
     
    975458 * @remark Implicitly references pVCpu.
    976459 */
    977 # ifndef IEM_WITH_SETJMP
    978 #  define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
    979     do \
    980     { \
    981         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
    982         if (rcStrict2 != VINF_SUCCESS) \
    983             return rcStrict2; \
    984     } while (0)
    985 # else
    986 #  define IEM_OPCODE_GET_NEXT_U64(a_pu64)    ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
    987 # endif
     460# define IEM_OPCODE_GET_NEXT_U64(a_pu64)    ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
    988461
    989462/**
     
    997470 *       used instead.  At least for now...
    998471 */
    999 # ifndef IEM_WITH_SETJMP
    1000 #  define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
    1001         RTGCPTR      GCPtrEff; \
    1002         VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff); \
    1003         if (rcStrict != VINF_SUCCESS) \
    1004             return rcStrict; \
    1005     } while (0)
    1006 # else
    1007 #  define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
     472# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
    1008473        (void)iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0); \
    1009474    } while (0)
    1010 # endif
    1011 
    1012 #endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
    1013 
    1014 
    1015 #ifndef IEM_WITH_OPAQUE_DECODER_STATE
     475
     476
     477
    1016478
    1017479/**
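
With only the setjmp forms left, every IEM_OPCODE_GET_* macro above has a single definition backed by the corresponding ...Jmp getter. A short sketch of how a decoder body uses them (the wrapping function is hypothetical; the macros implicitly reference pVCpu, as their remarks note):

    static void iemDecodeExample(PVMCPUCC pVCpu)     /* hypothetical helper */
    {
        uint8_t bRm;
        IEM_OPCODE_GET_NEXT_U8(&bRm);                /* bRm = iemOpcodeGetNextU8Jmp(pVCpu) */
        uint64_t uDisp;
        IEM_OPCODE_GET_NEXT_S8_SX_U64(&uDisp);       /* sign-extended imm8; may long-jump on a failed fetch */
        RT_NOREF(bRm, uDisp);                        /* no VBOXSTRICTRC checks needed here */
    }
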
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMInlineMem-x86.h

    r108262 r108278  
    137137
    138138
    139 #ifdef IEM_WITH_SETJMP
    140139
    141140/** @todo slim this down   */
     
    278277}
    279278
    280 #endif /* IEM_WITH_SETJMP */
    281279
    282280/**
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMInternal-x86.h

    r108244 r108278  
    28462846VBOXSTRICTRC            iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
    28472847                                          uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
    2848 #ifdef IEM_WITH_SETJMP
    28492848DECL_NO_RETURN(void)    iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
    28502849                                             uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
    2851 #endif
    28522850VBOXSTRICTRC            iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
    2853 #ifdef IEM_WITH_SETJMP
    28542851DECL_NO_RETURN(void)    iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    2855 #endif
    28562852VBOXSTRICTRC            iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
    28572853VBOXSTRICTRC            iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT;
    28582854VBOXSTRICTRC            iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
    2859 #ifdef IEM_WITH_SETJMP
    28602855DECL_NO_RETURN(void)    iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    2861 #endif
    28622856VBOXSTRICTRC            iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT;
    2863 #ifdef IEM_WITH_SETJMP
    28642857DECL_NO_RETURN(void)    iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    2865 #endif
    28662858VBOXSTRICTRC            iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
    28672859VBOXSTRICTRC            iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT;
     
    28752867VBOXSTRICTRC            iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
    28762868VBOXSTRICTRC            iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
    2877 #ifdef IEM_WITH_SETJMP
    28782869DECL_NO_RETURN(void)    iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    2879 #endif
    28802870VBOXSTRICTRC            iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
    28812871VBOXSTRICTRC            iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT;
    28822872VBOXSTRICTRC            iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
    2883 #ifdef IEM_WITH_SETJMP
    28842873DECL_NO_RETURN(void)    iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
    2885 #endif
    28862874VBOXSTRICTRC            iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
    2887 #ifdef IEM_WITH_SETJMP
    28882875DECL_NO_RETURN(void)    iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP;
    2889 #endif
    28902876VBOXSTRICTRC            iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
    2891 #ifdef IEM_WITH_SETJMP
    28922877DECL_NO_RETURN(void)    iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
    2893 #endif
    28942878VBOXSTRICTRC            iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
    2895 #ifdef IEM_WITH_SETJMP
    28962879DECL_NO_RETURN(void)    iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
    2897 #endif
    28982880VBOXSTRICTRC            iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
    2899 #ifdef IEM_WITH_SETJMP
    29002881DECL_NO_RETURN(void)    iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    2901 #endif
    29022882VBOXSTRICTRC            iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
    2903 #ifdef IEM_WITH_SETJMP
    29042883DECL_NO_RETURN(void)    iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    2905 #endif
    29062884VBOXSTRICTRC            iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;
    2907 #ifdef IEM_WITH_SETJMP
    29082885DECL_NO_RETURN(void)    iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    2909 #endif
    29102886
    29112887void                    iemLogSyscallRealModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);
     
    30293005VBOXSTRICTRC    iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT;
    30303006#endif
    3031 #ifdef IEM_WITH_SETJMP
    30323007uint8_t         iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    30333008uint16_t        iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    30343009uint32_t        iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    30353010uint64_t        iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    3036 #else
    3037 VBOXSTRICTRC    iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT;
    3038 VBOXSTRICTRC    iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
    3039 VBOXSTRICTRC    iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
    3040 VBOXSTRICTRC    iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
    3041 VBOXSTRICTRC    iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
    3042 VBOXSTRICTRC    iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
    3043 VBOXSTRICTRC    iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
    3044 VBOXSTRICTRC    iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
    3045 VBOXSTRICTRC    iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
    3046 VBOXSTRICTRC    iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
    3047 VBOXSTRICTRC    iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
    3048 #endif
    30493011
    30503012VBOXSTRICTRC    iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
     
    30663028VBOXSTRICTRC    iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
    30673029                                    RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT;
    3068 #ifdef IEM_WITH_SETJMP
    30693030uint8_t         iemMemFetchDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    30703031uint16_t        iemMemFetchDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
     
    30833044void            iemMemFetchDataU256NoAcSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    30843045void            iemMemFetchDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    3085 # if 0 /* these are inlined now */
     3046#if 0 /* these are inlined now */
    30863047uint8_t         iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    30873048uint16_t        iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
     
    30973058void            iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    30983059void            iemMemFetchDataU256AlignedAvxJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    3099 # endif
     3060#endif
    31003061void            iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    3101 #endif
    31023062
    31033063VBOXSTRICTRC    iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
     
    31203080VBOXSTRICTRC    iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
    31213081VBOXSTRICTRC    iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    3122 #ifdef IEM_WITH_SETJMP
    31233082void            iemMemStoreDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
    31243083void            iemMemStoreDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
     
    31333092void            iemMemStoreDataR80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTFLOAT80U pr80Value) IEM_NOEXCEPT_MAY_LONGJMP;
    31343093void            iemMemStoreDataD80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTPBCD80U pd80Value) IEM_NOEXCEPT_MAY_LONGJMP;
    3135 #if 0
     3094#if 0 /* inlined */
    31363095void            iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
    31373096void            iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
     
    31453104void            iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
    31463105void            iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
    3147 #endif
    3148 
    3149 #ifdef IEM_WITH_SETJMP
     3106
    31503107uint8_t        *iemMemMapDataU8RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    31513108uint8_t        *iemMemMapDataU8AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
     
    31743131PRTUINT128U     iemMemMapDataU128WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    31753132PCRTUINT128U    iemMemMapDataU128RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    3176 #endif
    31773133
    31783134VBOXSTRICTRC    iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
     
    31983154VBOXSTRICTRC    iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
    31993155
    3200 #ifdef IEM_WITH_SETJMP
    32013156void            iemMemStackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    32023157void            iemMemStackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
     
    32263181uint32_t        iemMemFetchStackU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    32273182uint64_t        iemMemFetchStackU64SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    3228 
    3229 #endif
    32303183
    32313184/** @} */
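
With the #ifdef guards gone, the longjmp-style raise and fetch helpers declared above are what instruction helpers now call directly. Below is a minimal sketch of that calling convention; the wrapper function name and the error condition are invented for illustration, and only the two callees come from the prototypes kept in IEMInternal-x86.h above.

    /* Illustration only -- not part of the changeset.  The wrapper name and the
     * alignment test are invented; the two callees are declared unconditionally
     * in IEMInternal-x86.h above. */
    static uint16_t iemExampleFetchU16OrRaiseGp0(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
    {
        if (GCPtrMem & 1)                                  /* made-up condition, just to show a raise path */
            iemRaiseGeneralProtectionFault0Jmp(pVCpu);     /* DECL_NO_RETURN: unwinds via IEM_DO_LONGJMP */
        return iemMemFetchDataU16SafeJmp(pVCpu, iSegReg, GCPtrMem); /* itself longjmps/throws on e.g. a page fault */
    }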
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMOpHlp-x86.h

    r108261 r108278  
    654654      == (pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT))
    655655
    656 VBOXSTRICTRC    iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT;
    657 VBOXSTRICTRC    iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff, uint64_t *puInfo) RT_NOEXCEPT;
    658 #ifdef IEM_WITH_SETJMP
    659 RTGCPTR         iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP;
    660 RTGCPTR         iemOpHlpCalcRmEffAddrJmpEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, uint64_t *puInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    661 #endif
     656RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP;
     657RTGCPTR iemOpHlpCalcRmEffAddrJmpEx(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, uint64_t *puInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    662658
    663659/** @}  */
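
iemOpHlpCalcRmEffAddrJmp and its Ex variant are now the only declared forms of the ModR/M effective-address helpers. A hedged usage fragment follows; the surrounding function is a placeholder, and only the two callees are taken from the prototypes above.

    /* Illustration only -- not from the changeset.  The surrounding function is a
     * placeholder; the two callees are the prototypes kept above. */
    static void iemExampleDecodeModRmMem(PVMCPUCC pVCpu, uint8_t bRm) IEM_NOEXCEPT_MAY_LONGJMP
    {
        /* Plain form: returns the effective address; may longjmp/throw instead of
           returning a status code. */
        RTGCPTR GCPtrEff  = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0 /*cbImmAndRspOffset*/);

        /* Ex form: additionally fills *puInfo with details about the addressing form. */
        uint64_t uInfo;
        RTGCPTR GCPtrEff2 = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, bRm, 0 /*cbImmAndRspOffset*/, &uInfo);

        RT_NOREF(GCPtrEff, GCPtrEff2, uInfo);
    }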
  • trunk/src/VBox/VMM/include/IEMInline.h

    r108260 r108278  
    351351 */
    352352
    353 #ifdef IEM_WITH_SETJMP
    354 
    355353DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    356354{
    357 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     355#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    358356    if (RT_LIKELY(bMapInfo == 0))
    359357        return;
    360 # endif
     358#endif
    361359    iemMemCommitAndUnmapRwSafeJmp(pVCpu, bMapInfo);
    362360}
     
    365363DECL_INLINE_THROW(void) iemMemCommitAndUnmapAtJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    366364{
    367 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     365#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    368366    if (RT_LIKELY(bMapInfo == 0))
    369367        return;
    370 # endif
     368#endif
    371369    iemMemCommitAndUnmapAtSafeJmp(pVCpu, bMapInfo);
    372370}
     
    375373DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    376374{
    377 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     375#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    378376    if (RT_LIKELY(bMapInfo == 0))
    379377        return;
    380 # endif
     378#endif
    381379    iemMemCommitAndUnmapWoSafeJmp(pVCpu, bMapInfo);
    382380}
     
    385383DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    386384{
    387 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     385#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    388386    if (RT_LIKELY(bMapInfo == 0))
    389387        return;
    390 # endif
     388#endif
    391389    iemMemCommitAndUnmapRoSafeJmp(pVCpu, bMapInfo);
    392390}
     
    394392DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, uint8_t bMapInfo) RT_NOEXCEPT
    395393{
    396 # if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     394#if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    397395    if (RT_LIKELY(bMapInfo == 0))
    398396        return;
    399 # endif
     397#endif
    400398    iemMemRollbackAndUnmapWoSafe(pVCpu, bMapInfo);
    401399}
    402 
    403 #endif /* IEM_WITH_SETJMP */
    404 
    405400
    406401/** @} */
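
The commit-and-unmap wrappers above keep their ring-3 data-TLB fast path (bMapInfo == 0) and otherwise defer to the *SafeJmp workers. A small sketch of the map/modify/commit pattern they serve; everything except the two IEM helpers (whose prototypes appear elsewhere in this changeset) is made up.

    /* Illustration only -- not part of the changeset.  Map a guest byte read/write,
     * modify it, then commit and unmap. */
    uint8_t  bUnmapInfo;
    uint8_t *pb = iemMemMapDataU8RwSafeJmp(pVCpu, &bUnmapInfo, iSegReg, GCPtrMem); /* may longjmp/throw on a fault */
    *pb ^= 0xff;                                    /* arbitrary guest-memory update */
    iemMemCommitAndUnmapRwJmp(pVCpu, bUnmapInfo);   /* fast path when bUnmapInfo == 0 (ring-3 + data TLB) */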
  • trunk/src/VBox/VMM/include/IEMInternal-armv8.h

    r108186 r108278  
    5656#endif
    5757
    58 /** @def IEM_WITH_SETJMP
    59  * Enables alternative status code handling using setjmps.
    60  *
    61  * This adds a bit of expense via the setjmp() call since it saves all the
    62  * non-volatile registers.  However, it eliminates return code checks and allows
    63  * for more optimal return value passing (return regs instead of stack buffer).
    64  */
    65 #if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
    66 # define IEM_WITH_SETJMP
    67 #endif
    68 
    6958/** @def IEM_WITH_THROW_CATCH
    7059 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
    71  * mode code when IEM_WITH_SETJMP is in effect.
     60 * mode code.
    7261 *
    7362 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
     
    8069 * Linux, but it should be quite a bit faster for normal code.
    8170 */
    82 #if (defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
     71#if (defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
    8372 || defined(DOXYGEN_RUNNING)
    8473# define IEM_WITH_THROW_CATCH
     
    9281 * @param   a_rc        The status code jump back with / throw.
    9382 */
    94 #if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
    95 # ifdef IEM_WITH_THROW_CATCH
    96 #  define IEM_DO_LONGJMP(a_pVCpu, a_rc)  throw int(a_rc)
    97 # else
    98 #  define IEM_DO_LONGJMP(a_pVCpu, a_rc)  longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
    99 # endif
     83#ifdef IEM_WITH_THROW_CATCH
     84# define IEM_DO_LONGJMP(a_pVCpu, a_rc)  throw int(a_rc)
     85#else
     86# define IEM_DO_LONGJMP(a_pVCpu, a_rc)  longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
    10087#endif
    10188
     
    130117 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
    131118 */
    132 #if defined(IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))
     119#if defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH)
    133120# define IEM_NOEXCEPT_MAY_LONGJMP   RT_NOEXCEPT_EX(false)
    134121#else
     
    12221209VBOXSTRICTRC            iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
    12231210                                          uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
    1224 #ifdef IEM_WITH_SETJMP
    12251211DECL_NO_RETURN(void)    iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
    12261212                                             uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
    1227 #endif
    12281213VBOXSTRICTRC            iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
    12291214VBOXSTRICTRC            iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
    12301215VBOXSTRICTRC            iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
    12311216VBOXSTRICTRC            iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
    1232 #ifdef IEM_WITH_SETJMP
    12331217DECL_NO_RETURN(void)    iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
    1234 #endif
    12351218VBOXSTRICTRC            iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
    12361219VBOXSTRICTRC            iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
    1237 #ifdef IEM_WITH_SETJMP
    12381220DECL_NO_RETURN(void)    iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    1239 #endif
    12401221VBOXSTRICTRC            iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;
    12411222
     
    12871268VBOXSTRICTRC    iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    12881269VBOXSTRICTRC    iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    1289 #ifdef IEM_WITH_SETJMP
    12901270uint8_t         iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    12911271uint16_t        iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
     
    12961276void            iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    12971277void            iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    1298 #endif
    12991278
    13001279VBOXSTRICTRC    iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
     
    13091288VBOXSTRICTRC    iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
    13101289VBOXSTRICTRC    iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
    1311 #ifdef IEM_WITH_SETJMP
    13121290void            iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
    13131291void            iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
     
    13161294void            iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
    13171295void            iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
    1318 #endif
    13191296
    13201297VBOXSTRICTRC    iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
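
For readers unfamiliar with the two expansions of IEM_DO_LONGJMP above (C++ throw in ring-3 builds with IEM_WITH_THROW_CATCH, classic longjmp otherwise), here is a standalone toy, not VBox code, contrasting the two non-local-return mechanisms the macro selects between.

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf g_JmpBuf;

    static void failViaLongjmp(int rc) { longjmp(g_JmpBuf, rc); }  /* does not return */
    static void failViaThrow(int rc)   { throw rc; }               /* does not return */

    int main()
    {
        int rc = setjmp(g_JmpBuf);      /* returns 0 first, then the longjmp value */
        if (rc == 0)
            failViaLongjmp(7);
        printf("longjmp delivered rc=%d\n", rc);

        try
        {
            failViaThrow(9);
        }
        catch (int rcThrown)
        {
            printf("throw delivered rc=%d\n", rcThrown);
        }
        return 0;
    }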
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r108226 r108278  
    6565#endif
    6666
    67 /** @def IEM_WITH_SETJMP
    68  * Enables alternative status code handling using setjmps.
    69  *
    70  * This adds a bit of expense via the setjmp() call since it saves all the
    71  * non-volatile registers.  However, it eliminates return code checks and allows
    72  * for more optimal return value passing (return regs instead of stack buffer).
    73  */
    74 #if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
    75 # define IEM_WITH_SETJMP
    76 #endif
    77 
    7867/** @def IEM_WITH_THROW_CATCH
    7968 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
    80  * mode code when IEM_WITH_SETJMP is in effect.
     69 * mode code.
    8170 *
    8271 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
     
    8978 * Linux, but it should be quite a bit faster for normal code.
    9079 */
    91 #if defined(__cplusplus) && defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER)) /* ASM-NOINC-START */
     80#if defined(__cplusplus) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER)) /* ASM-NOINC-START */
    9281# define IEM_WITH_THROW_CATCH
    9382#endif /*ASM-NOINC-END*/
     
    183172 * @param   a_rc        The status code jump back with / throw.
    184173 */
    185 #if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
    186 # ifdef IEM_WITH_THROW_CATCH
    187 #  ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
    188 #   define IEM_DO_LONGJMP(a_pVCpu, a_rc) do { \
     174#ifdef IEM_WITH_THROW_CATCH
     175# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
     176#  define IEM_DO_LONGJMP(a_pVCpu, a_rc) do { \
    189177            if ((a_pVCpu)->iem.s.pvTbFramePointerR3) \
    190178                iemNativeTbLongJmp((a_pVCpu)->iem.s.pvTbFramePointerR3, (a_rc)); \
    191179            throw int(a_rc); \
    192180        } while (0)
    193 #  else
    194 #   define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
    195 #  endif
    196181# else
    197 #  define IEM_DO_LONGJMP(a_pVCpu, a_rc)  longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
     182#  define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
    198183# endif
     184#else
     185# define IEM_DO_LONGJMP(a_pVCpu, a_rc)  longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
    199186#endif
    200187
     
    229216 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
    230217 */
    231 #if defined(IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))
     218#if defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH)
    232219# define IEM_NOEXCEPT_MAY_LONGJMP   RT_NOEXCEPT_EX(false)
    233220#else
     
    25372524 * @param   a_pVCpu     The cross context virtual CPU structure of the calling EMT.
    25382525 */
    2539 #if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
    2540 # ifdef IEM_WITH_THROW_CATCH
    2541 #  define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
     2526#ifdef IEM_WITH_THROW_CATCH
     2527# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
    25422528        a_rcTarget = VINF_SUCCESS; \
    25432529        try
    2544 #  define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
     2530# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
    25452531        IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
    2546 #  define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
     2532# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
    25472533        catch (int rcThrown) \
    25482534        { \
    25492535            a_rcTarget = rcThrown
    2550 #  define IEM_CATCH_LONGJMP_END(a_pVCpu) \
     2536# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
    25512537        } \
    25522538        ((void)0)
    2553 # else  /* !IEM_WITH_THROW_CATCH */
    2554 #  define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
     2539#else  /* !IEM_WITH_THROW_CATCH */
     2540# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
    25552541        jmp_buf  JmpBuf; \
    25562542        jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
    25572543        (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
    25582544        if ((rcStrict = setjmp(JmpBuf)) == 0)
    2559 #  define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
     2545# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
    25602546        pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
    25612547        (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
    25622548        if ((rcStrict = setjmp(JmpBuf)) == 0)
    2563 #  define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
     2549# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
    25642550        else \
    25652551        { \
    25662552            ((void)0)
    2567 #  define IEM_CATCH_LONGJMP_END(a_pVCpu) \
     2553# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
    25682554        } \
    25692555        (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
    2570 # endif /* !IEM_WITH_THROW_CATCH */
    2571 #endif  /* IEM_WITH_SETJMP */
     2556#endif /* !IEM_WITH_THROW_CATCH */
    25722557
    25732558
     
    31713156void            iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
    31723157
    3173 #ifdef IEM_WITH_SETJMP
    31743158void            iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    31753159void            iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
     
    31783162void            iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    31793163void            iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    3180 #endif
    31813164
    31823165void            iemTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu) RT_NOEXCEPT;
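
The IEM_TRY_SETJMP / IEM_CATCH_LONGJMP_* macros kept above are the consumer side of IEM_DO_LONGJMP. A minimal usage sketch, assuming only the macro definitions in this header; the guarded body is a placeholder.

    /* Illustration only -- not from the changeset.  Shows the shape of code using
     * the IEM_TRY_SETJMP/IEM_CATCH_LONGJMP_* macros defined above. */
    VBOXSTRICTRC rcStrict;
    IEM_TRY_SETJMP(pVCpu, rcStrict)
    {
        /* Code that may call iemRaiseXcptOrIntJmp and friends, i.e. may throw or
           longjmp back here with a status code instead of returning one. */
    }
    IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    {
        /* rcStrict holds whatever status the raise path jumped back with. */
    }
    IEM_CATCH_LONGJMP_END(pVCpu);

In the setjmp flavour, IEM_TRY_SETJMP_AGAIN re-arms the same jmp_buf for a subsequent guarded block; in the throw/catch flavour it is simply another IEM_TRY_SETJMP.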
  • trunk/src/VBox/VMM/include/IEMMc.h

    r108269 r108278  
    970970    do { IEM_MC_INT_CLEAR_ZMM_256_UP(a_iYReg); } while (0)
    971971
    972 #ifndef IEM_WITH_SETJMP
    973 # define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
    974     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
    975 # define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
    976     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
    977 # define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
    978     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
    979 #else
    980 # define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
     972#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
    981973    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    982 # define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
     974#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
    983975    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
    984 # define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
     976#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
    985977    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
    986978
    987 # define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \
     979#define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \
    988980    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
    989 # define IEM_MC_FETCH_MEM16_FLAT_U8(a_u8Dst, a_GCPtrMem16) \
     981#define IEM_MC_FETCH_MEM16_FLAT_U8(a_u8Dst, a_GCPtrMem16) \
    990982    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem16)))
    991 # define IEM_MC_FETCH_MEM32_FLAT_U8(a_u8Dst, a_GCPtrMem32) \
     983#define IEM_MC_FETCH_MEM32_FLAT_U8(a_u8Dst, a_GCPtrMem32) \
    992984    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem32)))
    993 #endif
    994 
    995 #ifndef IEM_WITH_SETJMP
    996 # define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    997     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
    998 # define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    999     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
    1000 # define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
    1001     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
    1002 # define IEM_MC_FETCH_MEM_I16_DISP(a_i16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    1003     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
    1004 #else
    1005 # define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
     985
     986#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    1006987    ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1007 # define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
     988#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    1008989    ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
    1009 # define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
     990#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
    1010991    ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1011 # define IEM_MC_FETCH_MEM_I16_DISP(a_i16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
     992#define IEM_MC_FETCH_MEM_I16_DISP(a_i16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    1012993    ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
    1013994
    1014 # define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \
     995#define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \
    1015996    ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
    1016 # define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \
     997#define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \
    1017998    ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
    1018 # define IEM_MC_FETCH_MEM_FLAT_I16(a_i16Dst, a_GCPtrMem) \
     999#define IEM_MC_FETCH_MEM_FLAT_I16(a_i16Dst, a_GCPtrMem) \
    10191000    ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
    1020 # define IEM_MC_FETCH_MEM_FLAT_I16_DISP(a_i16Dst, a_GCPtrMem, a_offDisp) \
     1001#define IEM_MC_FETCH_MEM_FLAT_I16_DISP(a_i16Dst, a_GCPtrMem, a_offDisp) \
    10211002    ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
    1022 #endif
    1023 
    1024 #ifndef IEM_WITH_SETJMP
    1025 # define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    1026     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
    1027 # define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    1028     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
    1029 # define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
    1030     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
    1031 # define IEM_MC_FETCH_MEM_I32_DISP(a_i32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    1032     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
    1033 #else
    1034 # define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
     1003
     1004#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    10351005    ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1036 # define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
     1006#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    10371007    ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
    1038 # define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
     1008#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
    10391009    ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1040 # define IEM_MC_FETCH_MEM_I32_DISP(a_i32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
     1010#define IEM_MC_FETCH_MEM_I32_DISP(a_i32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    10411011    ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
    10421012
    1043 # define IEM_MC_FETCH_MEM_FLAT_U32(a_u32Dst, a_GCPtrMem) \
     1013#define IEM_MC_FETCH_MEM_FLAT_U32(a_u32Dst, a_GCPtrMem) \
    10441014    ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
    1045 # define IEM_MC_FETCH_MEM_FLAT_U32_DISP(a_u32Dst, a_GCPtrMem, a_offDisp) \
     1015#define IEM_MC_FETCH_MEM_FLAT_U32_DISP(a_u32Dst, a_GCPtrMem, a_offDisp) \
    10461016    ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
    1047 # define IEM_MC_FETCH_MEM_FLAT_I32(a_i32Dst, a_GCPtrMem) \
     1017#define IEM_MC_FETCH_MEM_FLAT_I32(a_i32Dst, a_GCPtrMem) \
    10481018    ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
    1049 # define IEM_MC_FETCH_MEM_FLAT_I32_DISP(a_i32Dst, a_GCPtrMem, a_offDisp) \
     1019#define IEM_MC_FETCH_MEM_FLAT_I32_DISP(a_i32Dst, a_GCPtrMem, a_offDisp) \
    10501020    ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
    1051 #endif
    1052 
    1053 #ifndef IEM_WITH_SETJMP
    1054 # define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    1055     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
    1056 # define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    1057     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
    1058 # define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
    1059     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
    1060 # define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
    1061     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
    1062 #else
    1063 # define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     1021
     1022#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    10641023    ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1065 # define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
     1024#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
    10661025    ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
    1067 # define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
     1026#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
    10681027    ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1069 # define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
     1028#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
    10701029    ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    10711030
    1072 # define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \
     1031#define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \
    10731032    ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
    1074 # define IEM_MC_FETCH_MEM_FLAT_U64_DISP(a_u64Dst, a_GCPtrMem, a_offDisp) \
     1033#define IEM_MC_FETCH_MEM_FLAT_U64_DISP(a_u64Dst, a_GCPtrMem, a_offDisp) \
    10751034    ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
    1076 # define IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128(a_u64Dst, a_GCPtrMem) \
     1035#define IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128(a_u64Dst, a_GCPtrMem) \
    10771036    ((a_u64Dst) = iemMemFlatFetchDataU64AlignedU128Jmp(pVCpu, (a_GCPtrMem)))
    1078 # define IEM_MC_FETCH_MEM_FLAT_I64(a_i64Dst, a_GCPtrMem) \
     1037#define IEM_MC_FETCH_MEM_FLAT_I64(a_i64Dst, a_GCPtrMem) \
    10791038    ((a_i64Dst) = (int64_t)iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
    1080 #endif
    1081 
    1082 #ifndef IEM_WITH_SETJMP
    1083 # define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
    1084     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u, (a_iSeg), (a_GCPtrMem)))
    1085 # define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
    1086     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).u, (a_iSeg), (a_GCPtrMem)))
    1087 # define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
    1088     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
    1089 # define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
    1090     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataD80(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem)))
    1091 #else
    1092 # define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
     1039
     1040#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
    10931041    ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1094 # define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
     1042#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
    10951043    ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1096 # define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
     1044#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
    10971045    iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
    1098 # define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
     1046#define IEM_MC_FETCH_MEM_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
    10991047    iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))
    11001048
    1101 # define IEM_MC_FETCH_MEM_FLAT_R32(a_r32Dst, a_GCPtrMem) \
     1049#define IEM_MC_FETCH_MEM_FLAT_R32(a_r32Dst, a_GCPtrMem) \
    11021050    ((a_r32Dst).u = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
    1103 # define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \
     1051#define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \
    11041052    ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
    1105 # define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \
     1053#define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \
    11061054    iemMemFlatFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_GCPtrMem))
    1107 # define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \
     1055#define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \
    11081056    iemMemFlatFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_GCPtrMem))
    1109 #endif
    1110 
    1111 #ifndef IEM_WITH_SETJMP
    1112 # define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
    1113     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
    1114 # define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
    1115     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
    1116 # define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
    1117     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
    1118 
    1119 # define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
    1120     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
    1121 # define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
    1122     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem)))
    1123 
    1124 # define IEM_MC_FETCH_MEM_U128_NO_AC_AND_XREG_U128(a_u128Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    1125         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
    1126         (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1127         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1128     } while (0)
    1129 
    1130 # define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    1131         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2))); \
    1132         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1133         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1134     } while (0)
    1135 
    1136 # define IEM_MC_FETCH_MEM_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    1137         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128NoAc(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2))); \
    1138         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1139         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1140     } while (0)
    1141 
    1142 # define IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do { \
    1143         (a_Dst).uSrc2.uXmm.au64[0] = 0; \
    1144         (a_Dst).uSrc2.uXmm.au64[1] = 0; \
    1145         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_Dst).uSrc2.uXmm.au32[(a_iDWord2)], (a_iSeg2), (a_GCPtrMem2))); \
    1146         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1147         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1148     } while (0)
    1149 
    1150 # define IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do { \
    1151         (a_Dst).uSrc2.uXmm.au64[1] = 0; \
    1152         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_Dst).uSrc2.uXmm.au64[(a_iQWord2)], (a_iSeg2), (a_GCPtrMem2))); \
    1153         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1154         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1155     } while (0)
    1156 
    1157 # define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    1158         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
    1159         (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1160         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1161         (a_Dst).u64Rax        = pVCpu->cpum.GstCtx.rax; \
    1162         (a_Dst).u64Rdx        = pVCpu->cpum.GstCtx.rdx; \
    1163     } while (0)
    1164 # define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    1165         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2))); \
    1166         (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1167         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1168         (a_Dst).u64Rax        = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
    1169         (a_Dst).u64Rdx        = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
    1170     } while (0)
    1171 
    1172 #else
    1173 # define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
     1057
     1058#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
    11741059    iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
    1175 # define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
     1060#define IEM_MC_FETCH_MEM_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
    11761061    iemMemFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
    1177 # define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
     1062#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
    11781063    iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
    11791064
    1180 # define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
     1065#define IEM_MC_FETCH_MEM_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
    11811066    iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
    1182 # define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
     1067#define IEM_MC_FETCH_MEM_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
    11831068    iemMemFetchDataU128NoAcJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
    1184 # define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
     1069#define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
    11851070    iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
    11861071
    1187 # define IEM_MC_FETCH_MEM_FLAT_U128(a_u128Dst, a_GCPtrMem) \
     1072#define IEM_MC_FETCH_MEM_FLAT_U128(a_u128Dst, a_GCPtrMem) \
    11881073    iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
    1189 # define IEM_MC_FETCH_MEM_FLAT_U128_NO_AC(a_u128Dst, a_GCPtrMem) \
     1074#define IEM_MC_FETCH_MEM_FLAT_U128_NO_AC(a_u128Dst, a_GCPtrMem) \
    11901075    iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
    1191 # define IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE(a_u128Dst, a_GCPtrMem) \
     1076#define IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE(a_u128Dst, a_GCPtrMem) \
    11921077    iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
    11931078
    1194 # define IEM_MC_FETCH_MEM_FLAT_XMM(a_XmmDst, a_GCPtrMem) \
     1079#define IEM_MC_FETCH_MEM_FLAT_XMM(a_XmmDst, a_GCPtrMem) \
    11951080    iemMemFlatFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
    1196 # define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC(a_XmmDst, a_GCPtrMem) \
     1081#define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC(a_XmmDst, a_GCPtrMem) \
    11971082    iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
    1198 # define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE(a_XmmDst, a_GCPtrMem) \
     1083#define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE(a_XmmDst, a_GCPtrMem) \
    11991084    iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
    12001085
    1201 # define IEM_MC_FETCH_MEM_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
     1086#define IEM_MC_FETCH_MEM_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    12021087        iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
    12031088        (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    12041089        (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    12051090    } while (0)
    1206 # define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
     1091#define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
    12071092        iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
    12081093        (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
     
    12101095    } while (0)
    12111096
    1212 # define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
     1097#define IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    12131098        iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2)); \
    12141099        (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
     
    12161101    } while (0)
    12171102
    1218 # define IEM_MC_FETCH_MEM_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
     1103#define IEM_MC_FETCH_MEM_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    12191104        iemMemFetchDataU128NoAcJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2)); \
    12201105        (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
     
    12221107    } while (0)
    12231108
    1224 # define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
     1109#define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
    12251110        iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_GCPtrMem2)); \
    12261111        (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
     
    12281113    } while (0)
    12291114
    1230 # define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
     1115#define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
    12311116        iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_GCPtrMem2)); \
    12321117        (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
     
    12341119    } while (0)
    12351120
    1236 # define IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do {  \
     1121#define IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do {  \
    12371122        (a_Dst).uSrc2.uXmm.au64[0] = 0; \
    12381123        (a_Dst).uSrc2.uXmm.au64[1] = 0; \
     
    12411126        (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    12421127    } while (0)
    1243 # define IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_GCPtrMem2) do { \
     1128#define IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_GCPtrMem2) do { \
    12441129        (a_Dst).uSrc2.uXmm.au64[0] = 0; \
    12451130        (a_Dst).uSrc2.uXmm.au64[1] = 0; \
     
    12491134    } while (0)
    12501135
    1251 # define IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do {  \
     1136#define IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do {  \
    12521137        (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
    12531138        (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)]  = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \
     
    12551140        (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    12561141    } while (0)
    1257 # define IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_GCPtrMem2) do {  \
     1142#define IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_GCPtrMem2) do {  \
    12581143        (a_Dst).uSrc2.uXmm.au64[1] = 0; \
    12591144        (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)]  = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem2)); \
     
    12631148
    12641149
    1265 # define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
     1150#define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    12661151        iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
    12671152        (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
     
    12701155        (a_Dst).u64Rdx        = pVCpu->cpum.GstCtx.rdx; \
    12711156    } while (0)
    1272 # define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
     1157#define IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    12731158        iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
    12741159        (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
     
    12781163    } while (0)
    12791164
    1280 # define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
     1165#define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
    12811166        iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
    12821167        (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
     
    12851170        (a_Dst).u64Rdx        = pVCpu->cpum.GstCtx.rdx; \
    12861171    } while (0)
    1287 # define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
     1172#define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
    12881173        iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
    12891174        (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
     
    12931178    } while (0)
    12941179
    1295 #endif
    1296 
    1297 #ifndef IEM_WITH_SETJMP
    1298 # define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
    1299     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
    1300 # define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
    1301     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
    1302 # define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
    1303     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedAvx(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
    1304 
    1305 # define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
    1306     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
    1307 # define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
    1308     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
    1309 # define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
    1310     IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedAvx(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem)))
    1311 
    1312 # define IEM_MC_FETCH_MEM_YMM_NO_AC_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_iSeg2, a_GCPtrMem2) do { \
    1313         uintptr_t const a_iYRegSrc1Tmp = (a_iYRegSrc1); \
    1314         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256NoAc(pVCpu, &(a_uYmmDst).uSrc2.ymm, (a_iSeg2), (a_GCPtrMem2))); \
    1315         (a_uYmmDst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[a_iYRegSrc1Tmp].au64[0]; \
    1316         (a_uYmmDst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[a_iYRegSrc1Tmp].au64[1]; \
    1317         (a_uYmmDst).uSrc1.au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[a_iYRegSrc1Tmp].au64[0]; \
    1318         (a_uYmmDst).uSrc1.au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[a_iYRegSrc1Tmp].au64[1]; \
    1319     } while (0)
    1320 
    1321 #else
    1322 # define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
     1180
     1181#define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
    13231182    iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
    1324 # define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
     1183#define IEM_MC_FETCH_MEM_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
    13251184    iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
    1326 # define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
     1185#define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
    13271186    iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
    13281187
    1329 # define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
     1188#define IEM_MC_FETCH_MEM_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
    13301189    iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
    1331 # define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
     1190#define IEM_MC_FETCH_MEM_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
    13321191    iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
    1333 # define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
     1192#define IEM_MC_FETCH_MEM_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
    13341193    iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
    13351194
    1336 # define IEM_MC_FETCH_MEM_YMM_NO_AC_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_iSeg2, a_GCPtrMem2) do { \
     1195#define IEM_MC_FETCH_MEM_YMM_NO_AC_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_iSeg2, a_GCPtrMem2) do { \
    13371196        uintptr_t const a_iYRegSrc1Tmp = (a_iYRegSrc1); \
    13381197        iemMemFetchDataU256NoAcJmp(pVCpu, &(a_uYmmDst).uSrc2.ymm, (a_iSeg2), (a_GCPtrMem2)); \
     
    13431202    } while (0)
    13441203
    1345 # define IEM_MC_FETCH_MEM_FLAT_U256(a_u256Dst, a_GCPtrMem) \
     1204#define IEM_MC_FETCH_MEM_FLAT_U256(a_u256Dst, a_GCPtrMem) \
    13461205    iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
    1347 # define IEM_MC_FETCH_MEM_FLAT_U256_NO_AC(a_u256Dst, a_GCPtrMem) \
     1206#define IEM_MC_FETCH_MEM_FLAT_U256_NO_AC(a_u256Dst, a_GCPtrMem) \
    13481207    iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
    1349 # define IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \
     1208#define IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \
    13501209    iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
    13511210
    1352 # define IEM_MC_FETCH_MEM_FLAT_YMM(a_YmmDst, a_GCPtrMem) \
     1211#define IEM_MC_FETCH_MEM_FLAT_YMM(a_YmmDst, a_GCPtrMem) \
    13531212    iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
    1354 # define IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC(a_YmmDst, a_GCPtrMem) \
     1213#define IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC(a_YmmDst, a_GCPtrMem) \
    13551214    iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
    1356 # define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX(a_YmmDst, a_GCPtrMem) \
     1215#define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX(a_YmmDst, a_GCPtrMem) \
    13571216    iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
    13581217
    1359 # define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_GCPtrMem2) do { \
     1218#define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_GCPtrMem2) do { \
    13601219        uintptr_t const a_iYRegSrc1Tmp = (a_iYRegSrc1); \
    13611220        iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_uYmmDst).uSrc2.ymm, (a_GCPtrMem2)); \
     
    13661225    } while (0)
    13671226
    1368 #endif
    1369 
    1370 
    1371 
    1372 #ifndef IEM_WITH_SETJMP
    1373 # define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    1374     do { \
    1375         uint8_t u8Tmp; \
    1376         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
    1377         (a_u16Dst) = u8Tmp; \
    1378     } while (0)
    1379 # define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    1380     do { \
    1381         uint8_t u8Tmp; \
    1382         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
    1383         (a_u32Dst) = u8Tmp; \
    1384     } while (0)
    1385 # define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    1386     do { \
    1387         uint8_t u8Tmp; \
    1388         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
    1389         (a_u64Dst) = u8Tmp; \
    1390     } while (0)
    1391 # define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    1392     do { \
    1393         uint16_t u16Tmp; \
    1394         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
    1395         (a_u32Dst) = u16Tmp; \
    1396     } while (0)
    1397 # define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    1398     do { \
    1399         uint16_t u16Tmp; \
    1400         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
    1401         (a_u64Dst) = u16Tmp; \
    1402     } while (0)
    1403 # define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    1404     do { \
    1405         uint32_t u32Tmp; \
    1406         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
    1407         (a_u64Dst) = u32Tmp; \
    1408     } while (0)
    1409 #else  /* IEM_WITH_SETJMP */
    1410 # define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
     1227
     1228
     1229#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    14111230    ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1412 # define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
     1231#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    14131232    ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1414 # define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     1233#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    14151234    ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1416 # define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
     1235#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    14171236    ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1418 # define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     1237#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    14191238    ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1420 # define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     1239#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    14211240    ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    14221241
    1423 # define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \
     1242#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16(a_u16Dst, a_GCPtrMem) \
    14241243    ((a_u16Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
    1425 # define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \
     1244#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32(a_u32Dst, a_GCPtrMem) \
    14261245    ((a_u32Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
    1427 # define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \
     1246#define IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64(a_u64Dst, a_GCPtrMem) \
    14281247    ((a_u64Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
    1429 # define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32(a_u32Dst, a_GCPtrMem) \
     1248#define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32(a_u32Dst, a_GCPtrMem) \
    14301249    ((a_u32Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
    1431 # define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64(a_u64Dst, a_GCPtrMem) \
     1250#define IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64(a_u64Dst, a_GCPtrMem) \
    14321251    ((a_u64Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
    1433 # define IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64(a_u64Dst, a_GCPtrMem) \
     1252#define IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64(a_u64Dst, a_GCPtrMem) \
    14341253    ((a_u64Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
    1435 #endif /* IEM_WITH_SETJMP */
    1436 
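
The hunk above shows the pattern this part of the diff repeats throughout the file: the #ifndef IEM_WITH_SETJMP branch checked a status code after every memory access via IEM_MC_RETURN_ON_FAILURE, while the surviving definitions resolve to the *Jmp helpers, which report failure by longjmp'ing out of the access. Below is a minimal stand-alone sketch of those two error-propagation styles; every name in it is invented for the example and nothing here is VirtualBox code.

/* Illustration only -- not VBox code. */
#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>

#define MY_OK               0
#define MY_ERR_PAGE_FAULT  (-1)

/* Status-code style: the caller must check the return value of every fetch. */
static int fetchU8Rc(uint64_t uAddr, uint8_t *pbValue)
{
    if (uAddr >= 0x1000)            /* pretend anything above 4 KiB faults */
        return MY_ERR_PAGE_FAULT;
    *pbValue = (uint8_t)uAddr;
    return MY_OK;
}

/* longjmp style: a failing fetch unwinds straight back to the setjmp frame. */
static jmp_buf g_JmpBuf;

static uint8_t fetchU8Jmp(uint64_t uAddr)
{
    if (uAddr >= 0x1000)
        longjmp(g_JmpBuf, MY_ERR_PAGE_FAULT);
    return (uint8_t)uAddr;
}

int main(void)
{
    uint8_t bValue;
    if (fetchU8Rc(0x2000, &bValue) != MY_OK)     /* explicit check per access */
        printf("rc style:  fault reported via return code\n");

    int rc = setjmp(g_JmpBuf);                   /* one frame guards many accesses */
    if (rc == 0)
    {
        bValue = fetchU8Jmp(0x2000);             /* faults longjmp back to setjmp */
        printf("never reached: %u\n", bValue);
    }
    else
        printf("jmp style: fault reported via longjmp (%d)\n", rc);
    return 0;
}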
    1437 #ifndef IEM_WITH_SETJMP
    1438 # define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    1439     do { \
    1440         uint8_t u8Tmp; \
    1441         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
    1442         (a_u16Dst) = (int8_t)u8Tmp; \
    1443     } while (0)
    1444 # define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    1445     do { \
    1446         uint8_t u8Tmp; \
    1447         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
    1448         (a_u32Dst) = (int8_t)u8Tmp; \
    1449     } while (0)
    1450 # define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    1451     do { \
    1452         uint8_t u8Tmp; \
    1453         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
    1454         (a_u64Dst) = (int8_t)u8Tmp; \
    1455     } while (0)
    1456 # define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    1457     do { \
    1458         uint16_t u16Tmp; \
    1459         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
    1460         (a_u32Dst) = (int16_t)u16Tmp; \
    1461     } while (0)
    1462 # define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    1463     do { \
    1464         uint16_t u16Tmp; \
    1465         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
    1466         (a_u64Dst) = (int16_t)u16Tmp; \
    1467     } while (0)
    1468 # define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    1469     do { \
    1470         uint32_t u32Tmp; \
    1471         IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
    1472         (a_u64Dst) = (int32_t)u32Tmp; \
    1473     } while (0)
    1474 #else  /* IEM_WITH_SETJMP */
    1475 # define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
     1254
     1255#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
    14761256    ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1477 # define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
     1257#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    14781258    ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1479 # define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     1259#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    14801260    ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1481 # define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
     1261#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
    14821262    ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1483 # define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     1263#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    14841264    ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1485 # define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
     1265#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
    14861266    ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    14871267
    1488 # define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \
     1268#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U16(a_u16Dst, a_GCPtrMem) \
    14891269    ((a_u16Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
    1490 # define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \
     1270#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U32(a_u32Dst, a_GCPtrMem) \
    14911271    ((a_u32Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
    1492 # define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \
     1272#define IEM_MC_FETCH_MEM_FLAT_U8_SX_U64(a_u64Dst, a_GCPtrMem) \
    14931273    ((a_u64Dst) = (int8_t)iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
    1494 # define IEM_MC_FETCH_MEM_FLAT_U16_SX_U32(a_u32Dst, a_GCPtrMem) \
     1274#define IEM_MC_FETCH_MEM_FLAT_U16_SX_U32(a_u32Dst, a_GCPtrMem) \
    14951275    ((a_u32Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
    1496 # define IEM_MC_FETCH_MEM_FLAT_U16_SX_U64(a_u64Dst, a_GCPtrMem) \
     1276#define IEM_MC_FETCH_MEM_FLAT_U16_SX_U64(a_u64Dst, a_GCPtrMem) \
    14971277    ((a_u64Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
    1498 # define IEM_MC_FETCH_MEM_FLAT_U32_SX_U64(a_u64Dst, a_GCPtrMem) \
     1278#define IEM_MC_FETCH_MEM_FLAT_U32_SX_U64(a_u64Dst, a_GCPtrMem) \
    14991279    ((a_u64Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
    1500 #endif /* IEM_WITH_SETJMP */
    1501 
    1502 #ifndef IEM_WITH_SETJMP
    1503 # define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
    1504     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
    1505 # define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
    1506     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
    1507 # define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
    1508     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
    1509 # define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
    1510     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
    1511 #else
    1512 # define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
     1280
     1281#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
    15131282    iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
    1514 # define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
     1283#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
    15151284    iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
    1516 # define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
     1285#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
    15171286    iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
    1518 # define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
     1287#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
    15191288    iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
    15201289
    1521 # define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \
     1290#define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \
    15221291    iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8Value))
    1523 # define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \
     1292#define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \
    15241293    iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16Value))
    1525 # define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \
     1294#define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \
    15261295    iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32Value))
    1527 # define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \
     1296#define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \
    15281297    iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64Value))
    1529 #endif
    1530 
    1531 #ifndef IEM_WITH_SETJMP
    1532 # define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
    1533     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
    1534 # define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
    1535     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
    1536 # define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
    1537     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
    1538 # define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
    1539     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
    1540 #else
    1541 # define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
     1298
     1299#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
    15421300    iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
    1543 # define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
     1301#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
    15441302    iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
    1545 # define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
     1303#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
    15461304    iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
    1547 # define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
     1305#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
    15481306    iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
    15491307
    1550 # define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8C) \
     1308#define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8C) \
    15511309    iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8C))
    1552 # define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16C) \
     1310#define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16C) \
    15531311    iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16C))
    1554 # define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32C) \
     1312#define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32C) \
    15551313    iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32C))
    1556 # define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64C) \
     1314#define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64C) \
    15571315    iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64C))
    1558 #endif
    15591316
    15601317#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst,  a_i8C)     *(a_pi8Dst)  = (a_i8C)
     
    15751332    } while (0)
    15761333
    1577 #ifndef IEM_WITH_SETJMP
    1578 # define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
    1579     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value)))
    1580 # define IEM_MC_STORE_MEM_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
    1581     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128NoAc(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value)))
    1582 # define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
    1583     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
    1584 #else
    1585 # define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
     1334#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
    15861335    iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
    1587 # define IEM_MC_STORE_MEM_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
     1336#define IEM_MC_STORE_MEM_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
    15881337    iemMemStoreDataU128NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
    1589 # define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
     1338#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
    15901339    iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
    15911340
    1592 # define IEM_MC_STORE_MEM_FLAT_U128(a_GCPtrMem, a_u128Value) \
     1341#define IEM_MC_STORE_MEM_FLAT_U128(a_GCPtrMem, a_u128Value) \
    15931342    iemMemFlatStoreDataU128Jmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
    1594 # define IEM_MC_STORE_MEM_FLAT_U128_NO_AC(a_GCPtrMem, a_u128Value) \
     1343#define IEM_MC_STORE_MEM_FLAT_U128_NO_AC(a_GCPtrMem, a_u128Value) \
    15951344    iemMemFlatStoreDataU128NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
    1596 # define IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE(a_GCPtrMem, a_u128Value) \
     1345#define IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE(a_GCPtrMem, a_u128Value) \
    15971346    iemMemStoreDataU128AlignedSseJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u128Value))
    1598 #endif
    1599 
    1600 #ifndef IEM_WITH_SETJMP
    1601 # define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
    1602     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
    1603 # define IEM_MC_STORE_MEM_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
    1604     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256NoAc(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
    1605 # define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
    1606     IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
    1607 #else
    1608 # define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
     1347
     1348#define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
    16091349    iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
    1610 # define IEM_MC_STORE_MEM_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
     1350#define IEM_MC_STORE_MEM_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
    16111351    iemMemStoreDataU256NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
    1612 # define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
     1352#define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
    16131353    iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
    16141354
    1615 # define IEM_MC_STORE_MEM_FLAT_U256(a_GCPtrMem, a_u256Value) \
     1355#define IEM_MC_STORE_MEM_FLAT_U256(a_GCPtrMem, a_u256Value) \
    16161356    iemMemFlatStoreDataU256Jmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
    1617 # define IEM_MC_STORE_MEM_FLAT_U256_NO_AC(a_GCPtrMem, a_u256Value) \
     1357#define IEM_MC_STORE_MEM_FLAT_U256_NO_AC(a_GCPtrMem, a_u256Value) \
    16181358    iemMemFlatStoreDataU256NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
    1619 # define IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX(a_GCPtrMem, a_u256Value) \
     1359#define IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX(a_GCPtrMem, a_u256Value) \
    16201360    iemMemFlatStoreDataU256AlignedAvxJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
    1621 #endif
    16221361
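
One detail visible in the store macros above: values up to 64 bits are handed to the store helpers by value, whereas the 128- and 256-bit variants pass &(a_u128Value) / &(a_u256Value), i.e. by address. The toy sketch below mirrors that calling convention; the types and helpers are invented for the example and are not the VBox ones.

/* Illustration only -- not VBox code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint64_t au64[2]; } MYUINT128;  /* stand-in for a 128-bit value */

static uint8_t g_abMem[64];                      /* toy guest memory */

static void storeU64(uint32_t off, uint64_t uValue)            /* by value */
{
    memcpy(&g_abMem[off], &uValue, sizeof(uValue));
}

static void storeU128(uint32_t off, MYUINT128 const *pValue)   /* by address */
{
    memcpy(&g_abMem[off], pValue, sizeof(*pValue));
}

int main(void)
{
    MYUINT128 const Val = { { UINT64_C(0x1122334455667788), UINT64_C(0x99aabbccddeeff00) } };
    storeU64(0, UINT64_C(0xdeadbeefcafebabe));
    storeU128(16, &Val);
    printf("mem[0]=%#x mem[16]=%#x\n", g_abMem[0], g_abMem[16]);
    return 0;
}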
    16231362/* Regular stack push and pop: */
    1624 #ifndef IEM_WITH_SETJMP
    1625 # define IEM_MC_PUSH_U16(a_u16Value)            IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
    1626 # define IEM_MC_PUSH_U32(a_u32Value)            IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
    1627 # define IEM_MC_PUSH_U32_SREG(a_uSegVal)        IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_uSegVal)))
    1628 # define IEM_MC_PUSH_U64(a_u64Value)            IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
    1629 
    1630 # define IEM_MC_POP_GREG_U16(a_iGReg)           IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
    1631 # define IEM_MC_POP_GREG_U32(a_iGReg)           IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg)))
    1632 # define IEM_MC_POP_GREG_U64(a_iGReg)           IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg)))
    1633 #else
    1634 # define IEM_MC_PUSH_U16(a_u16Value)            iemMemStackPushU16Jmp(pVCpu, (a_u16Value))
    1635 # define IEM_MC_PUSH_U32(a_u32Value)            iemMemStackPushU32Jmp(pVCpu, (a_u32Value))
    1636 # define IEM_MC_PUSH_U32_SREG(a_uSegVal)        iemMemStackPushU32SRegJmp(pVCpu, (a_uSegVal))
    1637 # define IEM_MC_PUSH_U64(a_u64Value)            iemMemStackPushU64Jmp(pVCpu, (a_u64Value))
    1638 
    1639 # define IEM_MC_POP_GREG_U16(a_iGReg)           iemMemStackPopGRegU16Jmp(pVCpu, (a_iGReg))
    1640 # define IEM_MC_POP_GREG_U32(a_iGReg)           iemMemStackPopGRegU32Jmp(pVCpu, (a_iGReg))
    1641 # define IEM_MC_POP_GREG_U64(a_iGReg)           iemMemStackPopGRegU64Jmp(pVCpu, (a_iGReg))
    1642 #endif
     1363#define IEM_MC_PUSH_U16(a_u16Value)             iemMemStackPushU16Jmp(pVCpu, (a_u16Value))
     1364#define IEM_MC_PUSH_U32(a_u32Value)             iemMemStackPushU32Jmp(pVCpu, (a_u32Value))
     1365#define IEM_MC_PUSH_U32_SREG(a_uSegVal)         iemMemStackPushU32SRegJmp(pVCpu, (a_uSegVal))
     1366#define IEM_MC_PUSH_U64(a_u64Value)             iemMemStackPushU64Jmp(pVCpu, (a_u64Value))
     1367
     1368#define IEM_MC_POP_GREG_U16(a_iGReg)            iemMemStackPopGRegU16Jmp(pVCpu, (a_iGReg))
     1369#define IEM_MC_POP_GREG_U32(a_iGReg)            iemMemStackPopGRegU32Jmp(pVCpu, (a_iGReg))
     1370#define IEM_MC_POP_GREG_U64(a_iGReg)            iemMemStackPopGRegU64Jmp(pVCpu, (a_iGReg))
    16431371
    16441372/* 32-bit flat stack push and pop: */
    1645 #ifndef IEM_WITH_SETJMP
    1646 # define IEM_MC_FLAT32_PUSH_U16(a_u16Value)     IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
    1647 # define IEM_MC_FLAT32_PUSH_U32(a_u32Value)     IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
    1648 # define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_uSegVal)))
    1649 
    1650 # define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg)    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
    1651 # define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg)    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU32(pVCpu, (a_iGReg)))
    1652 #else
    1653 # define IEM_MC_FLAT32_PUSH_U16(a_u16Value)     iemMemFlat32StackPushU16Jmp(pVCpu, (a_u16Value))
    1654 # define IEM_MC_FLAT32_PUSH_U32(a_u32Value)     iemMemFlat32StackPushU32Jmp(pVCpu, (a_u32Value))
    1655 # define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal) iemMemFlat32StackPushU32SRegJmp(pVCpu, (a_uSegVal))
    1656 
     1657 # define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg)    iemMemFlat32StackPopGRegU16Jmp(pVCpu, (a_iGReg))
     1658 # define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg)    iemMemFlat32StackPopGRegU32Jmp(pVCpu, (a_iGReg))
    1659 #endif
     1373#define IEM_MC_FLAT32_PUSH_U16(a_u16Value)      iemMemFlat32StackPushU16Jmp(pVCpu, (a_u16Value))
     1374#define IEM_MC_FLAT32_PUSH_U32(a_u32Value)      iemMemFlat32StackPushU32Jmp(pVCpu, (a_u32Value))
     1375#define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal)  iemMemFlat32StackPushU32SRegJmp(pVCpu, (a_uSegVal))
     1376
      1377#define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg)     iemMemFlat32StackPopGRegU16Jmp(pVCpu, (a_iGReg))
      1378#define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg)     iemMemFlat32StackPopGRegU32Jmp(pVCpu, (a_iGReg))
    16601379
    16611380/* 64-bit flat stack push and pop: */
    1662 #ifndef IEM_WITH_SETJMP
    1663 # define IEM_MC_FLAT64_PUSH_U16(a_u16Value)     IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
    1664 # define IEM_MC_FLAT64_PUSH_U64(a_u64Value)     IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
    1665 
    1666 # define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg)    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU16(pVCpu, (a_iGReg)))
    1667 # define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg)    IEM_MC_RETURN_ON_FAILURE(iemMemStackPopGRegU64(pVCpu, (a_iGReg)))
    1668 #else
    1669 # define IEM_MC_FLAT64_PUSH_U16(a_u16Value)     iemMemFlat64StackPushU16Jmp(pVCpu, (a_u16Value))
    1670 # define IEM_MC_FLAT64_PUSH_U64(a_u64Value)     iemMemFlat64StackPushU64Jmp(pVCpu, (a_u64Value))
    1671 
    1672 # define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg)    iemMemFlat64StackPopGRegU16Jmp(pVCpu, (a_iGReg))
    1673 # define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg)    iemMemFlat64StackPopGRegU64Jmp(pVCpu, (a_iGReg))
    1674 #endif
     1381#define IEM_MC_FLAT64_PUSH_U16(a_u16Value)      iemMemFlat64StackPushU16Jmp(pVCpu, (a_u16Value))
     1382#define IEM_MC_FLAT64_PUSH_U64(a_u64Value)      iemMemFlat64StackPushU64Jmp(pVCpu, (a_u64Value))
     1383
     1384#define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg)     iemMemFlat64StackPopGRegU16Jmp(pVCpu, (a_iGReg))
     1385#define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg)     iemMemFlat64StackPopGRegU64Jmp(pVCpu, (a_iGReg))
    16751386
    16761387
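
The stack macros above come in a generic flavour and in FLAT32/FLAT64 flavours that resolve directly to mode-specific helpers. A plausible reading, sketched below with invented structures, is that the flat helpers can skip the stack-segment base arithmetic the generic path has to perform; treat it purely as an illustration, not as the actual iemMemStack* / iemMemFlat*Stack* implementation.

/* Illustration only -- not VBox code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct
{
    uint64_t uRsp;
    uint64_t uSsBase;       /* always 0 when the stack segment is flat */
    int      fFlatMode;     /* pretend CPU mode flag */
    uint8_t  abStack[64];   /* toy stack memory */
} MYCPU;

/* Generic push: has to apply the stack segment base. */
static void pushU64(MYCPU *pCpu, uint64_t uValue)
{
    pCpu->uRsp -= sizeof(uValue);
    memcpy(&pCpu->abStack[pCpu->uSsBase + pCpu->uRsp], &uValue, sizeof(uValue));
}

/* Flat push: the base is known to be zero, so no segment arithmetic. */
static void flatPushU64(MYCPU *pCpu, uint64_t uValue)
{
    pCpu->uRsp -= sizeof(uValue);
    memcpy(&pCpu->abStack[pCpu->uRsp], &uValue, sizeof(uValue));
}

int main(void)
{
    MYCPU Cpu = { 64 /*uRsp*/, 0 /*uSsBase*/, 1 /*fFlatMode*/, { 0 } };
    if (Cpu.fFlatMode)
        flatPushU64(&Cpu, UINT64_C(0x1234));
    else
        pushU64(&Cpu, UINT64_C(0x1234));

    uint64_t uTop;
    memcpy(&uTop, &Cpu.abStack[Cpu.uRsp], sizeof(uTop));
    printf("rsp=%u top=%#llx\n", (unsigned)Cpu.uRsp, (unsigned long long)uTop);
    return 0;
}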
     
    16881399 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    16891400 */
    1690 #ifndef IEM_WITH_SETJMP
    1691 # define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1692     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
    1693                                        (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0))
    1694 #else
    1695 # define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1401#define IEM_MC_MEM_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    16961402    (a_pu8Mem) = iemMemMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1697 #endif
    16981403
    16991404/**
     
    17071412 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    17081413 */
    1709 #ifndef IEM_WITH_SETJMP
    1710 # define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1711     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
    1712                                        (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0))
    1713 #else
    1714 # define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1414#define IEM_MC_MEM_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    17151415    (a_pu8Mem) = iemMemMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1716 #endif
    17171416
    17181417/**
     
    17261425 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    17271426 */
    1728 #ifndef IEM_WITH_SETJMP
    1729 # define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1730     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
    1731                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, 0))
    1732 #else
    1733 # define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1427#define IEM_MC_MEM_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    17341428    (a_pu8Mem) = iemMemMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1735 #endif
    17361429
    17371430/**
     
    17451438 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    17461439 */
    1747 #ifndef IEM_WITH_SETJMP
    1748 # define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1749     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), (a_iSeg), \
    1750                                        (a_GCPtrMem), IEM_ACCESS_DATA_R, 0))
    1751 #else
    1752 # define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1440#define IEM_MC_MEM_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    17531441    (a_pu8Mem) = iemMemMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1754 #endif
    17551442
    17561443/**
     
    17641451 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    17651452 */
    1766 #ifndef IEM_WITH_SETJMP
    1767 # define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    1768     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
    1769                                        (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, 0))
    1770 #else
    1771 # define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
     1453#define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    17721454    (a_pu8Mem) = iemMemFlatMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1773 #endif
    17741455
    17751456/**
     
    17831464 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    17841465 */
    1785 #ifndef IEM_WITH_SETJMP
    1786 # define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    1787     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
    1788                                        (a_GCPtrMem), IEM_ACCESS_DATA_RW, 0))
    1789 #else
    1790 # define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
     1466#define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    17911467    (a_pu8Mem) = iemMemFlatMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1792 #endif
    17931468
    17941469/**
     
    18021477 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    18031478 */
    1804 #ifndef IEM_WITH_SETJMP
    1805 # define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    1806     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
    1807                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, 0))
    1808 #else
    1809 # define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
     1479#define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    18101480    (a_pu8Mem) = iemMemFlatMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1811 #endif
    18121481
    18131482/**
     
    18211490 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    18221491 */
    1823 #ifndef IEM_WITH_SETJMP
    1824 # define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    1825     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu8Mem), &(a_bUnmapInfo), sizeof(uint8_t), UINT8_MAX, \
    1826                                        (a_GCPtrMem), IEM_ACCESS_DATA_R, 0))
    1827 #else
    1828 # define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
     1492#define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
    18291493    (a_pu8Mem) = iemMemFlatMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1830 #endif
    18311494
    18321495
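
The IEM_MC_MEM_MAP_* macros above hand back a usable pointer together with an opaque unmap token (a_bUnmapInfo) that the matching IEM_MC_MEM_COMMIT_AND_UNMAP_* statement consumes later. The toy sketch below mirrors that map / commit-and-unmap pairing with an invented bounce-buffer scheme; it is not how iemMemMapData* is actually implemented.

/* Illustration only -- not VBox code. */
#include <stdint.h>
#include <stdio.h>

static uint8_t g_abGuestMem[32];   /* toy guest memory */
static uint8_t g_abBounce[32];     /* buffer handed out to the "instruction" */

/* Map a byte read/write: copy it in, return a pointer plus an unmap token. */
static uint8_t *mapU8Rw(uint32_t off, uint8_t *pbUnmapInfo)
{
    g_abBounce[off] = g_abGuestMem[off];
    *pbUnmapInfo    = (uint8_t)off;            /* token: which byte to commit */
    return &g_abBounce[off];
}

/* Commit and unmap: write the (possibly modified) byte back via the token. */
static void commitAndUnmap(uint8_t bUnmapInfo)
{
    g_abGuestMem[bUnmapInfo] = g_abBounce[bUnmapInfo];
}

int main(void)
{
    uint8_t  bUnmapInfo;
    uint8_t *pbMem = mapU8Rw(5, &bUnmapInfo);  /* ~ IEM_MC_MEM_MAP_U8_RW */
    *pbMem |= 0x80;                            /* the instruction updates the byte */
    commitAndUnmap(bUnmapInfo);                /* ~ IEM_MC_MEM_COMMIT_AND_UNMAP_RW */
    printf("guest byte 5 = %#x\n", g_abGuestMem[5]);
    return 0;
}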
     
    18431506 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    18441507 */
    1845 #ifndef IEM_WITH_SETJMP
    1846 # define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1847     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
    1848                                        (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1))
    1849 #else
    1850 # define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1508#define IEM_MC_MEM_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    18511509    (a_pu16Mem) = iemMemMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1852 #endif
    18531510
    18541511/**
     
    18621519 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    18631520 */
    1864 #ifndef IEM_WITH_SETJMP
    1865 # define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1866     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
    1867                                        (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1))
    1868 #else
    1869 # define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1521#define IEM_MC_MEM_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    18701522    (a_pu16Mem) = iemMemMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1871 #endif
    18721523
    18731524/**
     
    18811532 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    18821533 */
    1883 #ifndef IEM_WITH_SETJMP
    1884 # define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1885     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
    1886                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1))
    1887 #else
    1888 # define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1534#define IEM_MC_MEM_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    18891535    (a_pu16Mem) = iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1890 #endif
    18911536
    18921537/**
     
    19001545 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    19011546 */
    1902 #ifndef IEM_WITH_SETJMP
    1903 # define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1904     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), (a_iSeg), \
    1905                                        (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1))
    1906 #else
    1907 # define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1547#define IEM_MC_MEM_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    19081548    (a_pu16Mem) = iemMemMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1909 #endif
    19101549
    19111550/**
     
    19191558 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    19201559 */
    1921 #ifndef IEM_WITH_SETJMP
    1922 # define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    1923     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
    1924                                        (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint16_t) - 1))
    1925 #else
    1926 # define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
     1560#define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    19271561    (a_pu16Mem) = iemMemFlatMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1928 #endif
    19291562
    19301563/**
     
    19381571 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    19391572 */
    1940 #ifndef IEM_WITH_SETJMP
    1941 # define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    1942     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
    1943                                        (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint16_t) - 1))
    1944 #else
    1945 # define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
     1573#define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    19461574    (a_pu16Mem) = iemMemFlatMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1947 #endif
    19481575
    19491576/**
     
    19571584 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    19581585 */
    1959 #ifndef IEM_WITH_SETJMP
    1960 # define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    1961     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
    1962                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint16_t) - 1))
    1963 #else
    1964 # define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
     1586#define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    19651587    (a_pu16Mem) = iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1966 #endif
    19671588
    19681589/**
     
    19761597 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    19771598 */
    1978 #ifndef IEM_WITH_SETJMP
    1979 # define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    1980    IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu16Mem), &(a_bUnmapInfo), sizeof(uint16_t), UINT8_MAX, \
    1981                                       (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint16_t) - 1))
    1982 #else
    1983 # define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
     1599#define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
    19841600    (a_pu16Mem) = iemMemFlatMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1985 #endif
    19861601
    19871602/** int16_t alias. */
    1988 #ifndef IEM_WITH_SETJMP
    1989 # define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1990          IEM_MC_MEM_MAP_U16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
    1991 #else
    1992 # define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1603#define IEM_MC_MEM_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    19931604    (a_pi16Mem) = (int16_t *)iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1994 #endif
    19951605
    19961606/** Flat int16_t alias. */
    1997 #ifndef IEM_WITH_SETJMP
    1998 # define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
    1999          IEM_MC_MEM_FLAT_MAP_U16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem)
    2000 #else
    2001 # define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
     1607#define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
    20021608    (a_pi16Mem) = (int16_t *)iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2003 #endif
    20041609
    20051610
     
    20161621 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    20171622 */
    2018 #ifndef IEM_WITH_SETJMP
    2019 # define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2020     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
    2021                                        (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1))
    2022 #else
    2023 # define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1623#define IEM_MC_MEM_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    20241624    (a_pu32Mem) = iemMemMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2025 #endif
    20261625
    20271626/**
     
    20351634 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    20361635 */
    2037 #ifndef IEM_WITH_SETJMP
    2038 # define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2039     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
    2040                                        (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1))
    2041 #else
    2042 # define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1636#define IEM_MC_MEM_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    20431637    (a_pu32Mem) = iemMemMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2044 #endif
    20451638
    20461639/**
     
    20541647 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    20551648 */
    2056 #ifndef IEM_WITH_SETJMP
    2057 # define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2058     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
    2059                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1))
    2060 #else
    2061 # define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1649#define IEM_MC_MEM_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    20621650    (a_pu32Mem) = iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2063 #endif
    20641651
    20651652/**
     
    20731660 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    20741661 */
    2075 #ifndef IEM_WITH_SETJMP
    2076 # define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2077     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), (a_iSeg), \
    2078                                        (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1))
    2079 #else
    2080 # define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1662#define IEM_MC_MEM_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    20811663    (a_pu32Mem) = iemMemMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2082 #endif
    20831664
    20841665/**
     
    20921673 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    20931674 */
    2094 #ifndef IEM_WITH_SETJMP
    2095 # define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    2096     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
    2097                                        (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint32_t) - 1))
    2098 #else
    2099 # define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
     1675#define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    21001676    (a_pu32Mem) = iemMemFlatMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2101 #endif
    21021677
    21031678/**
     
    21111686 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    21121687 */
    2113 #ifndef IEM_WITH_SETJMP
    2114 # define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    2115     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
    2116                                        (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint32_t) - 1))
    2117 #else
    2118 # define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
     1688#define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    21191689    (a_pu32Mem) = iemMemFlatMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2120 #endif
    21211690
    21221691/**
     
    21301699 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    21311700 */
    2132 #ifndef IEM_WITH_SETJMP
    2133 # define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    2134     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
    2135                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint32_t) - 1))
    2136 #else
    2137 # define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
     1701#define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    21381702    (a_pu32Mem) = iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2139 #endif
    21401703
    21411704/**
     
    21491712 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    21501713 */
    2151 #ifndef IEM_WITH_SETJMP
    2152 # define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    2153     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu32Mem), &(a_bUnmapInfo), sizeof(uint32_t), UINT8_MAX, \
    2154                                        (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint32_t) - 1))
    2155 #else
    2156 # define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
     1714#define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
    21571715    (a_pu32Mem) = iemMemFlatMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2158 #endif
    21591716
    21601717/** int32_t alias. */
    2161 #ifndef IEM_WITH_SETJMP
    2162 # define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2163          IEM_MC_MEM_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
    2164 #else
    2165 # define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1718#define IEM_MC_MEM_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    21661719    (a_pi32Mem) = (int32_t *)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2167 #endif
    21681720
    21691721/** Flat int32_t alias. */
    2170 #ifndef IEM_WITH_SETJMP
    2171 # define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
    2172          IEM_MC_MEM_FLAT_MAP_U32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem)
    2173 #else
    2174 # define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
     1722#define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
    21751723    (a_pi32Mem) = (int32_t *)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2176 #endif
    21771724
    21781725/** RTFLOAT32U alias. */
    2179 #ifndef IEM_WITH_SETJMP
    2180 # define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2181          IEM_MC_MEM_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
    2182 #else
    2183 # define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1726#define IEM_MC_MEM_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    21841727    (a_pr32Mem) = (PRTFLOAT32U)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2185 #endif
    21861728
    21871729/** Flat RTFLOAT32U alias. */
    2188 #ifndef IEM_WITH_SETJMP
    2189 # define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
    2190          IEM_MC_MEM_FLAT_MAP_U32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem)
    2191 #else
    2192 # define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
     1730#define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
    21931731    (a_pr32Mem) = (PRTFLOAT32U)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2194 #endif
    21951732
    21961733
     
    22071744 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    22081745 */
    2209 #ifndef IEM_WITH_SETJMP
    2210 # define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2211     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
    2212                                        (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1))
    2213 #else
    2214 # define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1746#define IEM_MC_MEM_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    22151747    (a_pu64Mem) = iemMemMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2216 #endif
    22171748
    22181749/**
     
    22261757 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    22271758 */
    2228 #ifndef IEM_WITH_SETJMP
    2229 # define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2230     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
    2231                                        (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1))
    2232 #else
    2233 # define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1759#define IEM_MC_MEM_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    22341760    (a_pu64Mem) = iemMemMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2235 #endif
    22361761
    22371762/**
     
    22451770 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    22461771 */
    2247 #ifndef IEM_WITH_SETJMP
    2248 # define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2249     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
    2250                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    2251 #else
    2252 # define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1772#define IEM_MC_MEM_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    22531773    (a_pu64Mem) = iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2254 #endif
    22551774
    22561775/**
     
    22641783 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    22651784 */
    2266 #ifndef IEM_WITH_SETJMP
    2267 # define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2268     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), (a_iSeg), \
    2269                                        (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1))
    2270 #else
    2271 # define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1785#define IEM_MC_MEM_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    22721786    (a_pu64Mem) = iemMemMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2273 #endif
    22741787
    22751788/**
     
    22831796 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    22841797 */
    2285 #ifndef IEM_WITH_SETJMP
    2286 # define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    2287     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
    2288                                        (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(uint64_t) - 1))
    2289 #else
    2290 # define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
     1798#define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    22911799    (a_pu64Mem) = iemMemFlatMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2292 #endif
    22931800
    22941801/**
     
    23021809 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    23031810 */
    2304 #ifndef IEM_WITH_SETJMP
    2305 # define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    2306     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
    2307                                        (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(uint64_t) - 1))
    2308 #else
    2309 # define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
     1811#define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    23101812    (a_pu64Mem) = iemMemFlatMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2311 #endif
    23121813
    23131814/**
     
    23211822 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    23221823 */
    2323 #ifndef IEM_WITH_SETJMP
    2324 # define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    2325     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
    2326                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    2327 #else
    2328 # define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
     1824#define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    23291825    (a_pu64Mem) = iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2330 #endif
    23311826
    23321827/**
     
    23401835 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    23411836 */
    2342 #ifndef IEM_WITH_SETJMP
    2343 # define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    2344     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu64Mem), &(a_bUnmapInfo), sizeof(uint64_t), UINT8_MAX, \
    2345                                        (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(uint64_t) - 1))
    2346 #else
    2347 # define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
     1837#define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    23481838    (a_pu64Mem) = iemMemFlatMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2349 #endif
    23501839
    23511840/** int64_t alias. */
    2352 #ifndef IEM_WITH_SETJMP
    2353 # define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2354          IEM_MC_MEM_MAP_U64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
    2355 #else
    2356 # define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1841#define IEM_MC_MEM_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    23571842    (a_pi64Mem) = (int64_t *)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2358 #endif
    23591843
    23601844/** Flat int64_t alias. */
    2361 #ifndef IEM_WITH_SETJMP
    2362 # define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
    2363          IEM_MC_MEM_FLAT_MAP_U64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem)
    2364 #else
    2365 # define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
     1845#define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
    23661846    (a_pi64Mem) = (int64_t *)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2367 #endif
    23681847
    23691848/** RTFLOAT64U alias. */
    2370 #ifndef IEM_WITH_SETJMP
    2371 # define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2372          IEM_MC_MEM_MAP_U64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem)
    2373 #else
    2374 # define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1849#define IEM_MC_MEM_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    23751850    (a_pr64Mem) = (PRTFLOAT64U)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2376 #endif
    23771851
    23781852/** Flat RTFLOAT64U alias. */
    2379 #ifndef IEM_WITH_SETJMP
    2380 # define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
    2381          IEM_MC_MEM_FLAT_MAP_U64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem)
    2382 #else
    2383 # define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
     1853#define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
    23841854    (a_pr64Mem) = (PRTFLOAT64U)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2385 #endif
    23861855
    23871856
     
    23981867 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    23991868 */
    2400 #ifndef IEM_WITH_SETJMP
    2401 # define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2402     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
    2403                                        (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128U) - 1))
    2404 #else
    2405 # define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1869#define IEM_MC_MEM_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    24061870    (a_pu128Mem) = iemMemMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2407 #endif
    24081871
    24091872/**
     
    24171880 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    24181881 */
    2419 #ifndef IEM_WITH_SETJMP
    2420 # define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2421     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128U), (a_iSeg), \
    2422                                        (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128U) - 1))
    2423 #else
    2424 # define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1882#define IEM_MC_MEM_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    24251883    (a_pu128Mem) = iemMemMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2426 #endif
    24271884
    24281885/**
     
    24361893 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    24371894 */
    2438 #ifndef IEM_WITH_SETJMP
    2439 # define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2440     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), (a_iSeg), \
    2441                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1))
    2442 #else
    2443 # define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1895#define IEM_MC_MEM_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    24441896    (a_pu128Mem) = iemMemMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2445 #endif
    24461897
    24471898/**
     
    24551906 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    24561907 */
    2457 #ifndef IEM_WITH_SETJMP
    2458 # define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2459     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), (a_iSeg), \
    2460                                        (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1))
    2461 #else
    2462 # define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1908#define IEM_MC_MEM_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    24631909    (a_pu128Mem) = iemMemMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2464 #endif
    24651910
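All four segmented 128-bit map variants (ATOMIC, RW, WO, RO) now expand straight to their *Jmp helpers instead of choosing between iemMemMap and the helper at compile time. A hedged sketch of the RW case as it might appear inside an instruction implementation; iEffSeg, GCPtrEff, bUnmapInfo and pu128Dst are illustrative:

    uint8_t     bUnmapInfo;
    PRTUINT128U pu128Dst;
    /* IEM_MC_MEM_MAP_U128_RW(pu128Dst, bUnmapInfo, iEffSeg, GCPtrEff) expands to: */
    pu128Dst = iemMemMapDataU128RwJmp(pVCpu, &bUnmapInfo, iEffSeg, GCPtrEff);
    /* ... the instruction body then modifies guest memory through pu128Dst ... */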
    24661911/**
     
    24741919 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    24751920 */
    2476 #ifndef IEM_WITH_SETJMP
    2477 # define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    2478     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \
    2479                                        (a_GCPtrMem), IEM_ACCESS_DATA_ATOMIC, sizeof(RTUINT128) - 1))
    2480 #else
    2481 # define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
     1921#define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    24821922    (a_pu128Mem) = iemMemFlatMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2483 #endif
    24841923
    24851924/**
     
    24931932 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    24941933 */
    2495 #ifndef IEM_WITH_SETJMP
    2496 # define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    2497     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \
    2498                                        (a_GCPtrMem), IEM_ACCESS_DATA_RW, sizeof(RTUINT128) - 1))
    2499 #else
    2500 # define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
     1934#define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    25011935    (a_pu128Mem) = iemMemFlatMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2502 #endif
    25031936
    25041937/**
     
    25121945 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    25131946 */
    2514 #ifndef IEM_WITH_SETJMP
    2515 # define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    2516     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \
    2517                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(RTUINT128) - 1))
    2518 #else
    2519 # define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
     1947#define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    25201948    (a_pu128Mem) = iemMemFlatMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2521 #endif
    25221949
    25231950/**
     
    25311958 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    25321959 */
    2533 #ifndef IEM_WITH_SETJMP
    2534 # define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    2535     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pu128Mem), &(a_bUnmapInfo), sizeof(RTUINT128), UINT8_MAX, \
    2536                                        (a_GCPtrMem), IEM_ACCESS_DATA_R, sizeof(RTUINT128) - 1))
    2537 #else
    2538 # define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
     1960#define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    25391961    (a_pu128Mem) = iemMemFlatMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2540 #endif
    25411962
    25421963
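The flat variants differ only in dropping the segment argument: the removed non-setjmp branch passed UINT8_MAX as the segment to iemMemMap, while the retained path calls the dedicated flat helper directly. Sketch with illustrative names:

    /* IEM_MC_MEM_FLAT_MAP_U128_RW(pu128Dst, bUnmapInfo, GCPtrEff) expands to: */
    pu128Dst = iemMemFlatMapDataU128RwJmp(pVCpu, &bUnmapInfo, GCPtrEff);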
     
    25531974 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    25541975 */
    2555 #ifndef IEM_WITH_SETJMP
    2556 # define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2557     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \
    2558                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    2559 #else
    2560 # define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     1976#define IEM_MC_MEM_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    25611977    (a_pr80Mem) = iemMemMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2562 #endif
    25631978
    25641979/**
     
    25711986 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    25721987 */
    2573 #ifndef IEM_WITH_SETJMP
    2574 # define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
    2575     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pr80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \
    2576                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    2577 #else
    2578 # define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
     1988#define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
    25791989    (a_pr80Mem) = iemMemFlatMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2580 #endif
    25811990
    25821991
     
    25912000 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    25922001 */
    2593 #ifndef IEM_WITH_SETJMP
    2594 # define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    2595     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), (a_iSeg), \
    2596                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    2597 #else
    2598 # define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
     2002#define IEM_MC_MEM_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    25992003    (a_pd80Mem) = iemMemMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    2600 #endif
    26012004
    26022005/**
     
    26092012 * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    26102013 */
    2611 #ifndef IEM_WITH_SETJMP
    2612 # define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
    2613     IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pd80Mem), &(a_bUnmapInfo), sizeof(RTFLOAT80U), UINT8_MAX, \
    2614                                        (a_GCPtrMem), IEM_ACCESS_DATA_W, sizeof(uint64_t) - 1))
    2615 #else
    2616 # define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
     2014#define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
    26172015    (a_pd80Mem) = iemMemFlatMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2618 #endif
    26192016
    26202017
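The 80-bit float and packed-BCD maps are write-only and likewise resolve directly to the *Jmp helpers now. Sketch of the segmented R80 case (pr80Dst, bUnmapInfo, iEffSeg and GCPtrEff are illustrative):

    PRTFLOAT80U pr80Dst;
    uint8_t     bUnmapInfo;
    /* IEM_MC_MEM_MAP_R80_WO(pr80Dst, bUnmapInfo, iEffSeg, GCPtrEff) expands to: */
    pr80Dst = iemMemMapDataR80WoJmp(pVCpu, &bUnmapInfo, iEffSeg, GCPtrEff);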
     
    26262023 * @note        Implictly frees the a_bMapInfo variable.
    26272024 */
    2628 #ifndef IEM_WITH_SETJMP
    2629 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo)         IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
    2630 #else
    2631 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo)         iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
    2632 #endif
     2025#define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo)          iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
    26332026
    26342027/** Commits the memory and unmaps guest memory previously mapped ATOMIC.
     
    26362029 * @note        Implictly frees the a_bMapInfo variable.
    26372030 */
    2638 #ifndef IEM_WITH_SETJMP
    2639 # define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo)     IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
    2640 #else
    2641 # define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo)     iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
    2642 #endif
     2031#define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo)      iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
    26432032
    26442033/** Commits the memory and unmaps guest memory previously mapped W.
     
    26462035 * @note        Implictly frees the a_bMapInfo variable.
    26472036 */
    2648 #ifndef IEM_WITH_SETJMP
    2649 # define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo)         IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
    2650 #else
    2651 # define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo)         iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo))
    2652 #endif
     2037#define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo)          iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo))
    26532038
    26542039/** Commits the memory and unmaps guest memory previously mapped R.
     
    26562041 * @note        Implictly frees the a_bMapInfo variable.
    26572042 */
    2658 #ifndef IEM_WITH_SETJMP
    2659 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo)         IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo))
    2660 #else
    2661 # define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo)         iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo))
    2662 #endif
     2043#define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo)          iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo))
    26632044
    26642045
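Each commit-and-unmap flavour now calls its *Jmp helper unconditionally, with the helper raising a longjmp on failure rather than returning a status code; note that the ATOMIC variant shares iemMemCommitAndUnmapRwJmp with RW. A hedged sketch completing the RW mapping from the earlier example (bUnmapInfo illustrative):

    /* ... guest memory was modified through the mapping obtained above ... */
    iemMemCommitAndUnmapRwJmp(pVCpu, bUnmapInfo);   /* IEM_MC_MEM_COMMIT_AND_UNMAP_RW / _ATOMIC */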
     
    26732054 * @note        Implictly frees both the a_bMapInfo and a_u16FSW variables.
    26742055 */
    2675 #ifndef IEM_WITH_SETJMP
    2676 # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \
    2677         if (   !(a_u16FSW & X86_FSW_ES) \
    2678             || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
    2679                  & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
    2680             IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, a_bMapInfo)); \
    2681         else \
    2682             iemMemRollbackAndUnmap(pVCpu, (a_pvMem), IEM_ACCESS_DATA_W); \
    2683     } while (0)
    2684 #else
    2685 # define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \
     2056#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \
    26862057        if (   !(a_u16FSW & X86_FSW_ES) \
    26872058            || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
     
    26912062            iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo); \
    26922063    } while (0)
    2693 #endif
    26942064
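The FPU-store variant keeps its conditional: as the removed branch above shows, it commits unless FSW flags an exception summary (X86_FSW_ES) together with an unmasked #U, #O or #I condition, in which case the mapping is rolled back instead of committed. A schematic store sequence under those semantics; the names are illustrative and the real status word comes from the FPU operation:

    PRTFLOAT80U pr80Dst;
    uint8_t     bUnmapInfo;
    uint16_t    u16Fsw = 0;  /* status word produced by the 80-bit store (illustrative) */
    pr80Dst = iemMemMapDataR80WoJmp(pVCpu, &bUnmapInfo, iEffSeg, GCPtrEff);  /* IEM_MC_MEM_MAP_R80_WO */
    /* ... perform the 80-bit store into *pr80Dst, yielding u16Fsw ... */
    IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(bUnmapInfo, u16Fsw); /* commit, or roll back on unmasked exception */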
    26952065/** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory.
    26962066 * @note        Implictly frees the a_bMapInfo variable. */
    2697 #ifndef IEM_WITH_SETJMP
    2698 # define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo)       iemMemRollbackAndUnmap(pVCpu, a_bMapInfo)
    2699 #else
    2700 # define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo)       iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo)
    2701 #endif
     2067#define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo)        iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo)
    27022068
    27032069
    27042070
    27052071/** Calculate efficient address from R/M. */
    2706 #ifndef IEM_WITH_SETJMP
    2707 # define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    2708     IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff)))
    2709 #else
    2710 # define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
     2072#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    27112073    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), (a_cbImmAndRspOffset)))
    2712 #endif
    27132074
    27142075
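Effective-address calculation from the ModR/M byte also goes through the Jmp helper only. Sketch of the post-change expansion; bRm is an illustrative ModR/M byte supplied by the decoder, and the combined immediate-size/RSP-offset argument is shown as zero for simplicity:

    RTGCPTR GCPtrEff;
    /* IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0) expands to: */
    GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0 /*cbImmAndRspOffset*/);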