VirtualBox

Changeset 108312 in vbox


Timestamp: Feb 20, 2025 3:34:57 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 167656
Message:

VMM/IEM: Split out the x86 specific MC codes into IEMMc-x86.h. jiraref:VBP-1531

Location: trunk/src/VBox/VMM
Files: 1 edited, 1 copied

Legend: unchanged context lines are prefixed with a space; '-' marks removed lines, '+' added lines; '@@ ... @@' marks elided unchanged spans.
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMMc-x86.h

--- r108310
+++ r108312
 /* $Id$ */
 /** @file
- * IEM - Interpreted Execution Manager - IEM_MC_XXX.
+ * IEM - Interpreted Execution Manager - IEM_MC_XXX, x86 target.
  */

@@ ... @@
  */

-#ifndef VMM_INCLUDED_SRC_include_IEMMc_h
-#define VMM_INCLUDED_SRC_include_IEMMc_h
+#ifndef VMM_INCLUDED_SRC_VMMAll_target_x86_IEMMc_x86_h
+#define VMM_INCLUDED_SRC_VMMAll_target_x86_IEMMc_x86_h
 #ifndef RT_WITHOUT_PRAGMA_ONCE
 # pragma once
@@ ... @@


-/** @name   "Microcode" macros.
- *
- * The idea is that we should be able to use the same code to interpret
- * instructions as well as recompiler instructions.  Thus this obfuscation.
- *
+/** @name   "Microcode" macros, x86 specifics and overrides.
  * @{
  */

-#define IEM_MC_BEGIN(a_fMcFlags, a_fCImplFlags) {
-#define IEM_MC_END()                            }
-
-
-/** Dummy MC that prevents native recompilation. */
-#define IEM_MC_NO_NATIVE_RECOMPILE()                    ((void)0)
-
-/** Advances RIP, finishes the instruction and returns.
- * This may include raising debug exceptions and such. */
+/*
+ * We override all the PC updating MCs:
+ */
+
+#undef  IEM_MC_ADVANCE_PC_AND_FINISH
 #define IEM_MC_ADVANCE_PC_AND_FINISH()                  return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu))

-
-/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+#undef  IEM_MC_REL_JMP_S8_AND_FINISH
 #define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) \
     return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize)
-/** Sets RIP (may trigger \#GP), finishes the instruction and returns.
- * @note only usable in 16-bit op size mode.  */
+
+/** @note X86: only usable in 16-bit op size mode.  */
+#undef  IEM_MC_REL_JMP_S16_AND_FINISH
 #define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) \
     return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
-/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+
+#undef  IEM_MC_REL_JMP_S32_AND_FINISH
 #define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) \
     return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize)
-/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+
+#undef  IEM_MC_IND_JMP_U16_AND_FINISH
 #define IEM_MC_IND_JMP_U16_AND_FINISH(a_u16NewIP) \
     return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), IEM_GET_INSTR_LEN(pVCpu))
-/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+
+#undef  IEM_MC_IND_JMP_U32_AND_FINISH
 #define IEM_MC_IND_JMP_U32_AND_FINISH(a_u32NewIP) \
     return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewIP), IEM_GET_INSTR_LEN(pVCpu))
-/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+
+#undef  IEM_MC_IND_JMP_U64_AND_FINISH
 #define IEM_MC_IND_JMP_U64_AND_FINISH(a_u64NewIP) \
     return iemRegRipJumpU64AndFinishClearingRF((pVCpu), (a_u64NewIP), IEM_GET_INSTR_LEN(pVCpu))

-/** Sets RIP (may trigger \#GP), finishes the instruction and returns.
- * @note only usable in 16-bit op size mode.  */
+/** @note only usable in 16-bit op size mode.  */
+#undef  IEM_MC_REL_CALL_S16_AND_FINISH
 #define IEM_MC_REL_CALL_S16_AND_FINISH(a_i16) \
     return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
-/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+
+#undef  IEM_MC_REL_CALL_S32_AND_FINISH
 #define IEM_MC_REL_CALL_S32_AND_FINISH(a_i32) \
     return iemRegEip32RelativeCallS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32))
-/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+
+#undef  IEM_MC_REL_CALL_S64_AND_FINISH
 #define IEM_MC_REL_CALL_S64_AND_FINISH(a_i64) \
     return iemRegRip64RelativeCallS64AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i64))
-/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+
+#undef  IEM_MC_IND_CALL_U16_AND_FINISH
 #define IEM_MC_IND_CALL_U16_AND_FINISH(a_u16NewIP) \
     return iemRegIp16IndirectCallU16AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u16NewIP))
-/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+
+#undef  IEM_MC_IND_CALL_U32_AND_FINISH
 #define IEM_MC_IND_CALL_U32_AND_FINISH(a_u32NewIP) \
     return iemRegEip32IndirectCallU32AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u32NewIP))
-/** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+
+#undef  IEM_MC_IND_CALL_U64_AND_FINISH
 #define IEM_MC_IND_CALL_U64_AND_FINISH(a_u64NewIP) \
     return iemRegRip64IndirectCallU64AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u64NewIP))
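The hunk above works by override rather than replacement: each re-definition is preceded by an #undef of the definition inherited from the generic header, so only the PC-updating MCs are specialised for x86. A minimal, self-contained sketch of that #undef/#define override pattern (stand-in names, not VirtualBox code):

    #include <stdio.h>

    /* What a generic header would supply as the default expansion... */
    #define MC_ADVANCE_PC()  puts("generic PC update")

    /* ...and how a target-specific header overrides it, as the hunk does: */
    #undef  MC_ADVANCE_PC
    #define MC_ADVANCE_PC()  puts("x86 PC update")

    int main(void)
    {
        MC_ADVANCE_PC();    /* prints "x86 PC update" */
        return 0;
    }

Note also that every override expands to a return statement: each of these MCs both updates the PC and finishes the instruction, so it must be the final statement of an instruction body.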
     
@@ ... @@


-#define IEM_MC_LOCAL(a_Type, a_Name)                    a_Type a_Name
-#define IEM_MC_LOCAL_ASSIGN(a_Type, a_Name, a_Value)    a_Type a_Name = (a_Value)
-#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value)     a_Type const a_Name = (a_Value)
-#define IEM_MC_NOREF(a_Name)                            RT_NOREF_PV(a_Name) /* NOP/liveness hack */
-#define IEM_MC_ARG(a_Type, a_Name, a_iArg)              a_Type a_Name
-#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg)       a_Type const a_Name = (a_Value)
-#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg)   a_Type const a_Name = &(a_Local)
 /** @note IEMAllInstPython.py duplicates the expansion. */
 #define IEM_MC_ARG_EFLAGS(a_Name, a_iArg)               uint32_t const a_Name = pVCpu->cpum.GstCtx.eflags.u
@@ ... @@
 #define IEM_MC_COMMIT_EFLAGS_OPT_EX(a_EFlags, a_fEflInput, a_fEflOutput) IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput)

-/** ASSUMES the source variable not used after this statement. */
-#define IEM_MC_ASSIGN_TO_SMALLER(a_VarDst, a_VarSrcEol) (a_VarDst) = (a_VarSrcEol)
-
-#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg)          (a_u8Dst)  = iemGRegFetchU8(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg)  (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg)  (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg)  (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg)  (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg)  (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg)  (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_I16(a_i16Dst, a_iGReg)        (a_i16Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg)        (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_I32(a_i32Dst, a_iGReg)        (a_i32Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg)        (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg)        (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
-#define IEM_MC_FETCH_GREG_U64_ZX_U64                    IEM_MC_FETCH_GREG_U64
-#define IEM_MC_FETCH_GREG_PAIR_U32(a_u64Dst, a_iGRegLo, a_iGRegHi) do { \
-        (a_u64Dst).s.Lo = iemGRegFetchU32(pVCpu, (a_iGRegLo)); \
-        (a_u64Dst).s.Hi = iemGRegFetchU32(pVCpu, (a_iGRegHi)); \
-    } while(0)
-#define IEM_MC_FETCH_GREG_PAIR_U64(a_u128Dst, a_iGRegLo, a_iGRegHi) do { \
-        (a_u128Dst).s.Lo = iemGRegFetchU64(pVCpu, (a_iGRegLo)); \
-        (a_u128Dst).s.Hi = iemGRegFetchU64(pVCpu, (a_iGRegHi)); \
-    } while(0)
+
 #define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
         IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
@@ ... @@
         (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
     } while (0)
+
 /** @note Not for IOPL or IF testing or modification. */
 #define IEM_MC_FETCH_EFLAGS(a_EFlags)                   (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
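The removed IEM_MC_FETCH_GREG_* family leans on plain C conversion rules for its _ZX_ (zero-extend) and _SX_ (sign-extend) variants: assigning the unsigned fetch result widens with zero bits, while first casting to the signed type of the source width forces sign extension. A self-contained check of those semantics on the two's-complement hosts VirtualBox targets (not VirtualBox code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t  bReg = 0x80;           /* -128 when viewed as signed       */
        uint64_t uZx  = bReg;           /* ..._U8_ZX_U64 style: zero-extend */
        uint64_t uSx  = (int8_t)bReg;   /* ..._U8_SX_U64 style: sign-extend */
        assert(uZx == UINT64_C(0x0000000000000080));
        assert(uSx == UINT64_C(0xffffffffffffff80));
        return 0;
    }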
     
@@ ... @@
 #define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value)        *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
 #define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value)      *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
-#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value)      *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
-#define IEM_MC_STORE_GREG_I32(a_iGReg, a_i32Value)      *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_i32Value) /* clear high bits. */
-#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value)      *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
-#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value)      *iemGRegRefI64(pVCpu, (a_iGReg)) = (a_i64Value)
 #define IEM_MC_STORE_GREG_U8_CONST                      IEM_MC_STORE_GREG_U8
 #define IEM_MC_STORE_GREG_U16_CONST                     IEM_MC_STORE_GREG_U16
-#define IEM_MC_STORE_GREG_U32_CONST                     IEM_MC_STORE_GREG_U32
-#define IEM_MC_STORE_GREG_U64_CONST                     IEM_MC_STORE_GREG_U64
-#define IEM_MC_STORE_GREG_PAIR_U32(a_iGRegLo, a_iGRegHi, a_u64Value) do { \
-        *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint32_t)(a_u64Value).s.Lo; \
-        *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint32_t)(a_u64Value).s.Hi; \
-    } while(0)
-#define IEM_MC_STORE_GREG_PAIR_U64(a_iGRegLo, a_iGRegHi, a_u128Value) do { \
-        *iemGRegRefU64(pVCpu, (a_iGRegLo)) = (uint64_t)(a_u128Value).s.Lo; \
-        *iemGRegRefU64(pVCpu, (a_iGRegHi)) = (uint64_t)(a_u128Value).s.Hi; \
-    } while(0)
-#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg)             *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX

 /** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
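The dropped IEM_MC_STORE_GREG_U32 deliberately writes through the 64-bit register reference with a uint32_t cast ("clear high bits") because on x86-64 any 32-bit register write zeroes bits 63:32 of the full register, whereas 8- and 16-bit writes leave the upper bits intact. A self-contained illustration of that architectural rule (not VirtualBox code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t uRax = UINT64_C(0xdeadbeefcafebabe);
        /* mirrors *iemGRegRefU64(pVCpu, iGReg) = (uint32_t)(a_u32Value): */
        uRax = (uint32_t)UINT32_C(0x11223344);
        assert(uRax == UINT64_C(0x0000000011223344));  /* bits 63:32 cleared */
        return 0;
    }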
     
@@ ... @@
         *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
     } while (0)
+
 #define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
     do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
@@ ... @@
 #define IEM_MC_REF_EFLAGS_EX(a_pEFlags, a_fEflInput, a_fEflOutput) IEM_MC_REF_EFLAGS(a_pEFlags)

+/** x86: preserve upper register bits.   */
 #define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value)        *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
-#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
-    do { \
-        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
-        *pu32Reg += (a_u32Value); \
-        pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
-    } while (0)
-#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value)        *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
-
+
+/** x86: preserve upper register bits.   */
 #define IEM_MC_SUB_GREG_U16(a_iGReg, a_u8Const)         *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u8Const)
-#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u8Const) \
-    do { \
-        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
-        *pu32Reg -= (a_u8Const); \
-        pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
-    } while (0)
-#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u8Const)          *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u8Const)
 #define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const)     do { (a_u16Value) -= a_u16Const; } while (0)

-#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg)    do { (a_u8Value)  += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
-#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg)  do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
-#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg)  do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
-#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg)  do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
-#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
-#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
-#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
-
-#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask)        do { (a_u8Local)  &= (a_u8Mask);  } while (0)
-#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask)     do { (a_u16Local) &= (a_u16Mask); } while (0)
-#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask)     do { (a_u32Local) &= (a_u32Mask); } while (0)
-#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask)     do { (a_u64Local) &= (a_u64Mask); } while (0)
-
-#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask)         do { (a_u16Arg) &= (a_u16Mask); } while (0)
-#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask)         do { (a_u32Arg) &= (a_u32Mask); } while (0)
-#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask)         do { (a_u64Arg) &= (a_u64Mask); } while (0)
-
-#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask)         do { (a_u8Local)  |= (a_u8Mask);  } while (0)
-#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask)      do { (a_u16Local) |= (a_u16Mask); } while (0)
-#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask)      do { (a_u32Local) |= (a_u32Mask); } while (0)
-
-#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift)      do { (a_i16Local) >>= (a_cShift);  } while (0)
-#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift)      do { (a_i32Local) >>= (a_cShift);  } while (0)
-#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift)      do { (a_i64Local) >>= (a_cShift);  } while (0)
-
-#define IEM_MC_SHR_LOCAL_U8(a_u8Local, a_cShift)        do { (a_u8Local)  >>= (a_cShift);  } while (0)
-
-#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift)      do { (a_i16Local) <<= (a_cShift);  } while (0)
-#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift)      do { (a_i32Local) <<= (a_cShift);  } while (0)
-#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift)      do { (a_i64Local) <<= (a_cShift);  } while (0)
-
-#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask)     do { (a_u32Local) &= (a_u32Mask); } while (0)
-
-#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask)      do { (a_u32Local) |= (a_u32Mask); } while (0)
-
+/** x86: preserve upper register bits.   */
 #define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value)          *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
+/** x86: preserve upper register bits.   */
 #define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value)        *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
-#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
-    do { \
-        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
-        *pu32Reg &= (a_u32Value); \
-        pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
-    } while (0)
-#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value)        *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
-
+
+/** x86: preserve upper register bits.   */
 #define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value)           *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
+/** x86: preserve upper register bits.   */
 #define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value)         *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
-#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
-    do { \
-        uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
-        *pu32Reg |= (a_u32Value); \
-        pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
-    } while (0)
-#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value)         *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
-
-#define IEM_MC_BSWAP_LOCAL_U16(a_u16Local)              (a_u16Local) = RT_BSWAP_U16((a_u16Local));
-#define IEM_MC_BSWAP_LOCAL_U32(a_u32Local)              (a_u32Local) = RT_BSWAP_U32((a_u32Local));
-#define IEM_MC_BSWAP_LOCAL_U64(a_u64Local)              (a_u64Local) = RT_BSWAP_U64((a_u64Local));
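The removed 32-bit ADD/SUB/AND/OR_GREG bodies achieve the same zero-extension via pointer arithmetic: iemGRegRefU32 points at the low dword of the 64-bit register, so pu32Reg[1] is the high dword, and the comment's "clear the high bit" is shorthand for clearing that whole high half. A rough stand-alone equivalent, assuming a little-endian host as the real code does (the real code goes through a register union, which sidesteps the strict-aliasing concern this sketch has; not VirtualBox code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t  uReg    = UINT64_C(0xffffffffffffffff);
        uint32_t *pu32Reg = (uint32_t *)&uReg;   /* low dword on little-endian   */
        *pu32Reg  &= UINT32_C(0x0f0f0f0f);       /* the 32-bit operation...      */
        pu32Reg[1] = 0;                          /* ...then clear the high dword */
        assert(uReg == UINT64_C(0x000000000f0f0f0f));
        return 0;
    }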

 /** @note Not for IOPL or IF modification. */
@@ ... @@
     do { IEM_MC_INT_CLEAR_ZMM_256_UP(a_iYReg); } while (0)

-#define IEM_MC_FETCH_MEM_SEG_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
-    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM16_SEG_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
-    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
-#define IEM_MC_FETCH_MEM32_SEG_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
-    ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
-
-#define IEM_MC_FETCH_MEM_FLAT_U8(a_u8Dst, a_GCPtrMem) \
-    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM16_FLAT_U8(a_u8Dst, a_GCPtrMem16) \
-    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem16)))
-#define IEM_MC_FETCH_MEM32_FLAT_U8(a_u8Dst, a_GCPtrMem32) \
-    ((a_u8Dst) = iemMemFlatFetchDataU8Jmp(pVCpu, (a_GCPtrMem32)))
-
-#define IEM_MC_FETCH_MEM_SEG_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
-    ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_SEG_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
-    ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
-#define IEM_MC_FETCH_MEM_SEG_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
-    ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_SEG_I16_DISP(a_i16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
-    ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
-
-#define IEM_MC_FETCH_MEM_FLAT_U16(a_u16Dst, a_GCPtrMem) \
-    ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_FLAT_U16_DISP(a_u16Dst, a_GCPtrMem, a_offDisp) \
-    ((a_u16Dst) = iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
-#define IEM_MC_FETCH_MEM_FLAT_I16(a_i16Dst, a_GCPtrMem) \
-    ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_FLAT_I16_DISP(a_i16Dst, a_GCPtrMem, a_offDisp) \
-    ((a_i16Dst) = (int16_t)iemMemFlatFetchDataU16Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
-
-#define IEM_MC_FETCH_MEM_SEG_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
-    ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_SEG_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
-    ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
-#define IEM_MC_FETCH_MEM_SEG_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
-    ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_SEG_I32_DISP(a_i32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
-    ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
-
-#define IEM_MC_FETCH_MEM_FLAT_U32(a_u32Dst, a_GCPtrMem) \
-    ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_FLAT_U32_DISP(a_u32Dst, a_GCPtrMem, a_offDisp) \
-    ((a_u32Dst) = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
-#define IEM_MC_FETCH_MEM_FLAT_I32(a_i32Dst, a_GCPtrMem) \
-    ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_FLAT_I32_DISP(a_i32Dst, a_GCPtrMem, a_offDisp) \
-    ((a_i32Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
-
-#define IEM_MC_FETCH_MEM_SEG_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
-    ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_SEG_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
-    ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
-#define IEM_MC_FETCH_MEM_SEG_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
-    ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_SEG_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
-    ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
-
-#define IEM_MC_FETCH_MEM_FLAT_U64(a_u64Dst, a_GCPtrMem) \
-    ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_FLAT_U64_DISP(a_u64Dst, a_GCPtrMem, a_offDisp) \
-    ((a_u64Dst) = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem) + (a_offDisp)))
-#define IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128(a_u64Dst, a_GCPtrMem) \
-    ((a_u64Dst) = iemMemFlatFetchDataU64AlignedU128Jmp(pVCpu, (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_FLAT_I64(a_i64Dst, a_GCPtrMem) \
-    ((a_i64Dst) = (int64_t)iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
-
-#define IEM_MC_FETCH_MEM_SEG_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
-    ((a_r32Dst).u = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_SEG_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
-    ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
 #define IEM_MC_FETCH_MEM_SEG_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
     iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
@@ ... @@
     iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))

-#define IEM_MC_FETCH_MEM_FLAT_R32(a_r32Dst, a_GCPtrMem) \
-    ((a_r32Dst).u = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))
-#define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \
-    ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
 #define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \
     iemMemFlatFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_GCPtrMem))
 #define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \
     iemMemFlatFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_GCPtrMem))
-
-#define IEM_MC_FETCH_MEM_SEG_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
-    iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
-#define IEM_MC_FETCH_MEM_SEG_U128_NO_AC(a_u128Dst, a_iSeg, a_GCPtrMem) \
-    iemMemFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
-#define IEM_MC_FETCH_MEM_SEG_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
-    iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))

 #define IEM_MC_FETCH_MEM_SEG_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
@@ ... @@
     iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))

-#define IEM_MC_FETCH_MEM_FLAT_U128(a_u128Dst, a_GCPtrMem) \
-    iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
-#define IEM_MC_FETCH_MEM_FLAT_U128_NO_AC(a_u128Dst, a_GCPtrMem) \
-    iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
-#define IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE(a_u128Dst, a_GCPtrMem) \
-    iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
-
 #define IEM_MC_FETCH_MEM_FLAT_XMM(a_XmmDst, a_GCPtrMem) \
     iemMemFlatFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
@@ ... @@
     } while (0)

-
-#define IEM_MC_FETCH_MEM_SEG_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
-    iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
-#define IEM_MC_FETCH_MEM_SEG_U256_NO_AC(a_u256Dst, a_iSeg, a_GCPtrMem) \
-    iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
-#define IEM_MC_FETCH_MEM_SEG_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
-    iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))

 #define IEM_MC_FETCH_MEM_SEG_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
@@ ... @@
         (a_uYmmDst).uSrc1.au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[a_iYRegSrc1Tmp].au64[1]; \
     } while (0)
-
-#define IEM_MC_FETCH_MEM_FLAT_U256(a_u256Dst, a_GCPtrMem) \
-    iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
-#define IEM_MC_FETCH_MEM_FLAT_U256_NO_AC(a_u256Dst, a_GCPtrMem) \
-    iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
-#define IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \
-    iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))

 #define IEM_MC_FETCH_MEM_FLAT_YMM(a_YmmDst, a_GCPtrMem) \
@@ ... @@
     ((a_u64Dst) = (int32_t)iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem)))

-#define IEM_MC_STORE_MEM_SEG_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
-    iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
-#define IEM_MC_STORE_MEM_SEG_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
-    iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
-#define IEM_MC_STORE_MEM_SEG_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
-    iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
-#define IEM_MC_STORE_MEM_SEG_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
-    iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
-
-#define IEM_MC_STORE_MEM_FLAT_U8(a_GCPtrMem, a_u8Value) \
-    iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8Value))
-#define IEM_MC_STORE_MEM_FLAT_U16(a_GCPtrMem, a_u16Value) \
-    iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16Value))
-#define IEM_MC_STORE_MEM_FLAT_U32(a_GCPtrMem, a_u32Value) \
-    iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32Value))
-#define IEM_MC_STORE_MEM_FLAT_U64(a_GCPtrMem, a_u64Value) \
-    iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64Value))
-
-#define IEM_MC_STORE_MEM_SEG_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
-    iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
-#define IEM_MC_STORE_MEM_SEG_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
-    iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
-#define IEM_MC_STORE_MEM_SEG_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
-    iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
-#define IEM_MC_STORE_MEM_SEG_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
-    iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
-
-#define IEM_MC_STORE_MEM_FLAT_U8_CONST(a_GCPtrMem, a_u8C) \
-    iemMemFlatStoreDataU8Jmp(pVCpu, (a_GCPtrMem), (a_u8C))
-#define IEM_MC_STORE_MEM_FLAT_U16_CONST(a_GCPtrMem, a_u16C) \
-    iemMemFlatStoreDataU16Jmp(pVCpu, (a_GCPtrMem), (a_u16C))
-#define IEM_MC_STORE_MEM_FLAT_U32_CONST(a_GCPtrMem, a_u32C) \
-    iemMemFlatStoreDataU32Jmp(pVCpu, (a_GCPtrMem), (a_u32C))
-#define IEM_MC_STORE_MEM_FLAT_U64_CONST(a_GCPtrMem, a_u64C) \
-    iemMemFlatStoreDataU64Jmp(pVCpu, (a_GCPtrMem), (a_u64C))
-
-#define IEM_MC_STORE_MEM_BY_REF_I8_CONST( a_pi8Dst,  a_i8C)     *(a_pi8Dst)  = (a_i8C)
-#define IEM_MC_STORE_MEM_BY_REF_I16_CONST(a_pi16Dst, a_i16C)    *(a_pi16Dst) = (a_i16C)
-#define IEM_MC_STORE_MEM_BY_REF_I32_CONST(a_pi32Dst, a_i32C)    *(a_pi32Dst) = (a_i32C)
-#define IEM_MC_STORE_MEM_BY_REF_I64_CONST(a_pi64Dst, a_i64C)    *(a_pi64Dst) = (a_i64C)
-#define IEM_MC_STORE_MEM_BY_REF_R32_NEG_QNAN(a_pr32Dst)         (a_pr32Dst)->u = UINT32_C(0xffc00000)
-#define IEM_MC_STORE_MEM_BY_REF_R64_NEG_QNAN(a_pr64Dst)         (a_pr64Dst)->u = UINT64_C(0xfff8000000000000)
 #define IEM_MC_STORE_MEM_BY_REF_R80_NEG_QNAN(a_pr80Dst) \
     do { \
@@ ... @@
     } while (0)

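The _NEG_QNAN stores removed above hard-code the conventional negative quiet-NaN encodings: sign bit set, all-ones exponent, and the top mantissa bit set, giving 0xffc00000 for binary32 and 0xfff8000000000000 for binary64. A quick self-contained decode (not VirtualBox code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint32_t u32 = UINT32_C(0xffc00000);
        float    r32;
        memcpy(&r32, &u32, sizeof(r32));    /* reinterpret the bit pattern  */
        printf("%f\n", r32);                /* prints -nan on typical hosts */
        return 0;
    }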
-#define IEM_MC_STORE_MEM_SEG_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
-    iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
-#define IEM_MC_STORE_MEM_SEG_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
-    iemMemStoreDataU128NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
 #define IEM_MC_STORE_MEM_SEG_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
     iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))

-#define IEM_MC_STORE_MEM_FLAT_U128(a_GCPtrMem, a_u128Value) \
-    iemMemFlatStoreDataU128Jmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
-#define IEM_MC_STORE_MEM_FLAT_U128_NO_AC(a_GCPtrMem, a_u128Value) \
-    iemMemFlatStoreDataU128NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
 #define IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE(a_GCPtrMem, a_u128Value) \
     iemMemStoreDataU128AlignedSseJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u128Value))

-#define IEM_MC_STORE_MEM_SEG_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
-    iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
-#define IEM_MC_STORE_MEM_SEG_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
-    iemMemStoreDataU256NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
 #define IEM_MC_STORE_MEM_SEG_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
     iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))

-#define IEM_MC_STORE_MEM_FLAT_U256(a_GCPtrMem, a_u256Value) \
-    iemMemFlatStoreDataU256Jmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
-#define IEM_MC_STORE_MEM_FLAT_U256_NO_AC(a_GCPtrMem, a_u256Value) \
-    iemMemFlatStoreDataU256NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
 #define IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX(a_GCPtrMem, a_u256Value) \
     iemMemFlatStoreDataU256AlignedAvxJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
+

 /* Regular stack push and pop: */
@@ ... @@
 #define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg)     iemMemFlat64StackPopGRegU16Jmp(pVCpu, (a_iGReg))
 #define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg)     iemMemFlat64StackPopGRegU64Jmp(pVCpu, (a_iGReg))
-
-
-/* 8-bit */
-
-/**
- * Maps guest memory for byte atomic read+write direct (or bounce) buffer
- * acccess, for atomic operations.
- *
- * @param[out] a_pu8Mem     Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
- */
-#define IEM_MC_MEM_SEG_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu8Mem) = iemMemMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for byte read+write direct (or bounce) buffer acccess.
- *
- * @param[out] a_pu8Mem     Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
- */
-#define IEM_MC_MEM_SEG_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu8Mem) = iemMemMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for byte writeonly direct (or bounce) buffer acccess.
- *
- * @param[out] a_pu8Mem     Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
- */
-#define IEM_MC_MEM_SEG_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu8Mem) = iemMemMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for byte readonly direct (or bounce) buffer acccess.
- *
- * @param[out] a_pu8Mem     Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
- */
-#define IEM_MC_MEM_SEG_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu8Mem) = iemMemMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for byte atomic read+write direct (or bounce) buffer
- * acccess, flat address variant.
- *
- * @param[out] a_pu8Mem     Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
- */
-#define IEM_MC_MEM_FLAT_MAP_U8_ATOMIC(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu8Mem) = iemMemFlatMapDataU8AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/**
- * Maps guest memory for byte read+write direct (or bounce) buffer acccess, flat
- * address variant.
- *
- * @param[out] a_pu8Mem     Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
- */
-#define IEM_MC_MEM_FLAT_MAP_U8_RW(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu8Mem) = iemMemFlatMapDataU8RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/**
- * Maps guest memory for byte writeonly direct (or bounce) buffer acccess, flat
- * address variant.
- *
- * @param[out] a_pu8Mem     Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
- */
-#define IEM_MC_MEM_FLAT_MAP_U8_WO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu8Mem) = iemMemFlatMapDataU8WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/**
- * Maps guest memory for byte readonly direct (or bounce) buffer acccess, flat
- * address variant.
- *
- * @param[out] a_pu8Mem     Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
- */
-#define IEM_MC_MEM_FLAT_MAP_U8_RO(a_pu8Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu8Mem) = iemMemFlatMapDataU8RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
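All the mapping MCs in this removed block follow the contract spelled out in their doc comments: they hand back a direct (or bounce) buffer pointer plus an unmapping cookie, and every mapping must later be released through the matching IEM_MC_MEM_COMMIT_AND_UNMAP_* MC named in the @see tag. The 16-, 32-, 64- and 128-bit variants that follow repeat the same pattern at wider sizes. A hypothetical, much-simplified byte read-modify-write body showing the intended pairing (the local names and the direct dereference are invented for illustration; real instruction bodies in IEMAllInst*.cpp.h operate on the mapping through further MCs or helper calls):

    IEM_MC_BEGIN(0, 0);
    IEM_MC_LOCAL(uint8_t,   bUnmapInfo);        /* unmapping cookie             */
    IEM_MC_LOCAL(uint8_t *, pu8Mem);            /* direct/bounce buffer pointer */
    IEM_MC_MEM_SEG_MAP_U8_RW(pu8Mem, bUnmapInfo, X86_SREG_DS, GCPtrEffDst);
    /* ... modify *pu8Mem here ... */
    IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo); /* write back and release       */
    IEM_MC_ADVANCE_PC_AND_FINISH();
    IEM_MC_END();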
-
-
-/* 16-bit */
-
-/**
- * Maps guest memory for word atomic read+write direct (or bounce) buffer acccess.
- *
- * @param[out] a_pu16Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
- */
-#define IEM_MC_MEM_SEG_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu16Mem) = iemMemMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for word read+write direct (or bounce) buffer acccess.
- *
- * @param[out] a_pu16Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
- */
-#define IEM_MC_MEM_SEG_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu16Mem) = iemMemMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for word writeonly direct (or bounce) buffer acccess.
- *
- * @param[out] a_pu16Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
- */
-#define IEM_MC_MEM_SEG_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu16Mem) = iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for word readonly direct (or bounce) buffer acccess.
- *
- * @param[out] a_pu16Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
- */
-#define IEM_MC_MEM_SEG_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu16Mem) = iemMemMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for word atomic read+write direct (or bounce) buffer
- * acccess, flat address variant.
- *
- * @param[out] a_pu16Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
- */
-#define IEM_MC_MEM_FLAT_MAP_U16_ATOMIC(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu16Mem) = iemMemFlatMapDataU16AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/**
- * Maps guest memory for word read+write direct (or bounce) buffer acccess, flat
- * address variant.
- *
- * @param[out] a_pu16Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
- */
-#define IEM_MC_MEM_FLAT_MAP_U16_RW(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu16Mem) = iemMemFlatMapDataU16RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/**
- * Maps guest memory for word writeonly direct (or bounce) buffer acccess, flat
- * address variant.
- *
- * @param[out] a_pu16Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
- */
-#define IEM_MC_MEM_FLAT_MAP_U16_WO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu16Mem) = iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/**
- * Maps guest memory for word readonly direct (or bounce) buffer acccess, flat
- * address variant.
- *
- * @param[out] a_pu16Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
- */
-#define IEM_MC_MEM_FLAT_MAP_U16_RO(a_pu16Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu16Mem) = iemMemFlatMapDataU16RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/** int16_t alias. */
-#define IEM_MC_MEM_SEG_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pi16Mem) = (int16_t *)iemMemMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/** Flat int16_t alias. */
-#define IEM_MC_MEM_FLAT_MAP_I16_WO(a_pi16Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pi16Mem) = (int16_t *)iemMemFlatMapDataU16WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-
-/* 32-bit */
-
-/**
- * Maps guest memory for dword atomic read+write direct (or bounce) buffer acccess.
- *
- * @param[out] a_pu32Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
- */
-#define IEM_MC_MEM_SEG_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu32Mem) = iemMemMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for dword read+write direct (or bounce) buffer acccess.
- *
- * @param[out] a_pu32Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
- */
-#define IEM_MC_MEM_SEG_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu32Mem) = iemMemMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for dword writeonly direct (or bounce) buffer acccess.
- *
- * @param[out] a_pu32Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
- */
-#define IEM_MC_MEM_SEG_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu32Mem) = iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for dword readonly direct (or bounce) buffer acccess.
- *
- * @param[out] a_pu32Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
- */
-#define IEM_MC_MEM_SEG_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pu32Mem) = iemMemMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/**
- * Maps guest memory for dword atomic read+write direct (or bounce) buffer
- * acccess, flat address variant.
- *
- * @param[out] a_pu32Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
- */
-#define IEM_MC_MEM_FLAT_MAP_U32_ATOMIC(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu32Mem) = iemMemFlatMapDataU32AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/**
- * Maps guest memory for dword read+write direct (or bounce) buffer acccess,
- * flat address variant.
- *
- * @param[out] a_pu32Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
- */
-#define IEM_MC_MEM_FLAT_MAP_U32_RW(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu32Mem) = iemMemFlatMapDataU32RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/**
- * Maps guest memory for dword writeonly direct (or bounce) buffer acccess, flat
- * address variant.
- *
- * @param[out] a_pu32Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
- */
-#define IEM_MC_MEM_FLAT_MAP_U32_WO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu32Mem) = iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/**
- * Maps guest memory for dword readonly direct (or bounce) buffer acccess, flat
- * address variant.
- *
- * @param[out] a_pu32Mem    Where to return the pointer to the mapping.
- * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
- * @param[in]  a_GCPtrMem   The memory address.
- * @remarks Will return/long jump on errors.
- * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
- */
-#define IEM_MC_MEM_FLAT_MAP_U32_RO(a_pu32Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pu32Mem) = iemMemFlatMapDataU32RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/** int32_t alias. */
-#define IEM_MC_MEM_SEG_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pi32Mem) = (int32_t *)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/** Flat int32_t alias. */
-#define IEM_MC_MEM_FLAT_MAP_I32_WO(a_pi32Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pi32Mem) = (int32_t *)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
-/** RTFLOAT32U alias. */
-#define IEM_MC_MEM_SEG_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
-    (a_pr32Mem) = (PRTFLOAT32U)iemMemMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
-
-/** Flat RTFLOAT32U alias. */
-#define IEM_MC_MEM_FLAT_MAP_R32_WO(a_pr32Mem, a_bUnmapInfo, a_GCPtrMem) \
-    (a_pr32Mem) = (PRTFLOAT32U)iemMemFlatMapDataU32WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
-
    1722 
    1723 /* 64-bit */
    1724 
    1725 /**
    1726  * Maps guest memory for qword atomic read+write direct (or bounce) buffer acccess.
    1727  *
    1728  * @param[out] a_pu64Mem    Where to return the pointer to the mapping.
    1729  * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
    1730  * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
    1731  * @param[in]  a_GCPtrMem   The memory address.
    1732  * @remarks Will return/long jump on errors.
    1733  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    1734  */
    1735 #define IEM_MC_MEM_SEG_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1736     (a_pu64Mem) = iemMemMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1737 
    1738 /**
    1739  * Maps guest memory for qword read+write direct (or bounce) buffer acccess.
    1740  *
    1741  * @param[out] a_pu64Mem    Where to return the pointer to the mapping.
    1742  * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
    1743  * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
    1744  * @param[in]  a_GCPtrMem   The memory address.
    1745  * @remarks Will return/long jump on errors.
    1746  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    1747  */
    1748 #define IEM_MC_MEM_SEG_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1749     (a_pu64Mem) = iemMemMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1750 
    1751 /**
    1752  * Maps guest memory for qword writeonly direct (or bounce) buffer acccess.
    1753  *
    1754  * @param[out] a_pu64Mem    Where to return the pointer to the mapping.
    1755  * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
    1756  * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
    1757  * @param[in]  a_GCPtrMem   The memory address.
    1758  * @remarks Will return/long jump on errors.
    1759  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    1760  */
    1761 #define IEM_MC_MEM_SEG_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1762     (a_pu64Mem) = iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1763 
    1764 /**
    1765  * Maps guest memory for qword readonly direct (or bounce) buffer acccess.
    1766  *
    1767  * @param[out] a_pu64Mem    Where to return the pointer to the mapping.
    1768  * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
    1769  * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
    1770  * @param[in]  a_GCPtrMem   The memory address.
    1771  * @remarks Will return/long jump on errors.
    1772  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    1773  */
    1774 #define IEM_MC_MEM_SEG_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1775     (a_pu64Mem) = iemMemMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1776 
    1777 /**
    1778  * Maps guest memory for qword atomic read+write direct (or bounce) buffer
    1779  * acccess, flat address variant.
    1780  *
    1781  * @param[out] a_pu64Mem    Where to return the pointer to the mapping.
    1782  * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
    1783  * @param[in]  a_GCPtrMem   The memory address.
    1784  * @remarks Will return/long jump on errors.
    1785  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    1786  */
    1787 #define IEM_MC_MEM_FLAT_MAP_U64_ATOMIC(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    1788     (a_pu64Mem) = iemMemFlatMapDataU64AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1789 
    1790 /**
    1791  * Maps guest memory for qword read+write direct (or bounce) buffer acccess,
    1792  * flat address variant.
    1793  *
    1794  * @param[out] a_pu64Mem    Where to return the pointer to the mapping.
    1795  * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
    1796  * @param[in]  a_GCPtrMem   The memory address.
    1797  * @remarks Will return/long jump on errors.
    1798  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    1799  */
    1800 #define IEM_MC_MEM_FLAT_MAP_U64_RW(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    1801     (a_pu64Mem) = iemMemFlatMapDataU64RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1802 
    1803 /**
    1804  * Maps guest memory for qword writeonly direct (or bounce) buffer acccess, flat
    1805  * address variant.
    1806  *
    1807  * @param[out] a_pu64Mem    Where to return the pointer to the mapping.
    1808  * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
    1809  * @param[in]  a_GCPtrMem   The memory address.
    1810  * @remarks Will return/long jump on errors.
    1811  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    1812  */
    1813 #define IEM_MC_MEM_FLAT_MAP_U64_WO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    1814     (a_pu64Mem) = iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1815 
    1816 /**
    1817  * Maps guest memory for qword readonly direct (or bounce) buffer acccess, flat
    1818  * address variant.
    1819  *
    1820  * @param[out] a_pu64Mem    Where to return the pointer to the mapping.
    1821  * @param[out] a_bUnmapInfo Where to return umapping instructions. uint8_t.
    1822  * @param[in]  a_GCPtrMem   The memory address.
    1823  * @remarks Will return/long jump on errors.
    1824  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    1825  */
    1826 #define IEM_MC_MEM_FLAT_MAP_U64_RO(a_pu64Mem, a_bUnmapInfo, a_GCPtrMem) \
    1827     (a_pu64Mem) = iemMemFlatMapDataU64RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1828 
    1829 /** int64_t alias. */
    1830 #define IEM_MC_MEM_SEG_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1831     (a_pi64Mem) = (int64_t *)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1832 
    1833 /** Flat int64_t alias. */
    1834 #define IEM_MC_MEM_FLAT_MAP_I64_WO(a_pi64Mem, a_bUnmapInfo, a_GCPtrMem) \
    1835     (a_pi64Mem) = (int64_t *)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1836 
    1837 /** RTFLOAT64U alias. */
    1838 #define IEM_MC_MEM_SEG_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1839     (a_pr64Mem) = (PRTFLOAT64U)iemMemMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1840 
    1841 /** Flat RTFLOAT64U alias. */
    1842 #define IEM_MC_MEM_FLAT_MAP_R64_WO(a_pr64Mem, a_bUnmapInfo, a_GCPtrMem) \
    1843     (a_pr64Mem) = (PRTFLOAT64U)iemMemFlatMapDataU64WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1844 
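Taken together, these 64-bit mapping MCs pair with the commit MCs further down (see IEM_MC_MEM_COMMIT_AND_UNMAP_RW and friends). A minimal read-modify-write sketch, assuming the usual IEM_MC_LOCAL declarations and a decoder-provided ModR/M byte bRm; the locals and the modification step are hypothetical, this is not code from this changeset:

    IEM_MC_BEGIN(0, 0);
    IEM_MC_LOCAL(RTGCPTR,    GCPtrEffDst);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);   /* effective address from ModR/M */
    IEM_MC_LOCAL(uint8_t,    bUnmapInfo);
    IEM_MC_LOCAL(uint64_t *, pu64Dst);
    IEM_MC_MEM_SEG_MAP_U64_RW(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
    /* ... modify *pu64Dst via an arithmetic MC or worker call ... */
    IEM_MC_MEM_COMMIT_AND_UNMAP_RW(bUnmapInfo);     /* write back + unmap */
    IEM_MC_ADVANCE_PC_AND_FINISH();
    IEM_MC_END();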
    1845 
    1846 /* 128-bit */
    1847 
    1848 /**
    1849  * Maps guest memory for dqword atomic read+write direct (or bounce) buffer access.
    1850  *
    1851  * @param[out] a_pu128Mem   Where to return the pointer to the mapping.
    1852  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1853  * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
    1854  * @param[in]  a_GCPtrMem   The memory address.
    1855  * @remarks Will return/long jump on errors.
    1856  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    1857  */
    1858 #define IEM_MC_MEM_SEG_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1859     (a_pu128Mem) = iemMemMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1860 
    1861 /**
    1862  * Maps guest memory for dqword read+write direct (or bounce) buffer access.
    1863  *
    1864  * @param[out] a_pu128Mem   Where to return the pointer to the mapping.
    1865  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1866  * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
    1867  * @param[in]  a_GCPtrMem   The memory address.
    1868  * @remarks Will return/long jump on errors.
    1869  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    1870  */
    1871 #define IEM_MC_MEM_SEG_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1872     (a_pu128Mem) = iemMemMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1873 
    1874 /**
    1875  * Maps guest memory for dqword writeonly direct (or bounce) buffer access.
    1876  *
    1877  * @param[out] a_pu128Mem   Where to return the pointer to the mapping.
    1878  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1879  * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
    1880  * @param[in]  a_GCPtrMem   The memory address.
    1881  * @remarks Will return/long jump on errors.
    1882  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    1883  */
    1884 #define IEM_MC_MEM_SEG_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1885     (a_pu128Mem) = iemMemMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1886 
    1887 /**
    1888  * Maps guest memory for dqword readonly direct (or bounce) buffer access.
    1889  *
    1890  * @param[out] a_pu128Mem   Where to return the pointer to the mapping.
    1891  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1892  * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
    1893  * @param[in]  a_GCPtrMem   The memory address.
    1894  * @remarks Will return/long jump on errors.
    1895  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    1896  */
    1897 #define IEM_MC_MEM_SEG_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1898     (a_pu128Mem) = iemMemMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1899 
    1900 /**
    1901  * Maps guest memory for dqword atomic read+write direct (or bounce) buffer
    1902  * access, flat address variant.
    1903  *
    1904  * @param[out] a_pu128Mem   Where to return the pointer to the mapping.
    1905  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1906  * @param[in]  a_GCPtrMem   The memory address.
    1907  * @remarks Will return/long jump on errors.
    1908  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC
    1909  */
    1910 #define IEM_MC_MEM_FLAT_MAP_U128_ATOMIC(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    1911     (a_pu128Mem) = iemMemFlatMapDataU128AtJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1912 
    1913 /**
    1914  * Maps guest memory for dqword read+write direct (or bounce) buffer access,
    1915  * flat address variant.
    1916  *
    1917  * @param[out] a_pu128Mem   Where to return the pointer to the mapping.
    1918  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1919  * @param[in]  a_GCPtrMem   The memory address.
    1920  * @remarks Will return/long jump on errors.
    1921  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RW
    1922  */
    1923 #define IEM_MC_MEM_FLAT_MAP_U128_RW(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    1924     (a_pu128Mem) = iemMemFlatMapDataU128RwJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1925 
    1926 /**
    1927  * Maps guest memory for dqword writeonly direct (or bounce) buffer access,
    1928  * flat address variant.
    1929  *
    1930  * @param[out] a_pu128Mem   Where to return the pointer to the mapping.
    1931  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1932  * @param[in]  a_GCPtrMem   The memory address.
    1933  * @remarks Will return/long jump on errors.
    1934  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    1935  */
    1936 #define IEM_MC_MEM_FLAT_MAP_U128_WO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    1937     (a_pu128Mem) = iemMemFlatMapDataU128WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1938 
    1939 /**
    1940  * Maps guest memory for dqword readonly direct (or bounce) buffer access, flat
    1941  * address variant.
    1942  *
    1943  * @param[out] a_pu128Mem   Where to return the pointer to the mapping.
    1944  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1945  * @param[in]  a_GCPtrMem   The memory address.
    1946  * @remarks Will return/long jump on errors.
    1947  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_RO
    1948  */
    1949 #define IEM_MC_MEM_FLAT_MAP_U128_RO(a_pu128Mem, a_bUnmapInfo, a_GCPtrMem) \
    1950     (a_pu128Mem) = iemMemFlatMapDataU128RoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    19511105
    19521106
     
    20031157#define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
    20041158    (a_pd80Mem) = iemMemFlatMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2005 
    2006 
    2007 
    2008 /* commit + unmap */
    2009 
    2010 /** Commits the memory and unmaps guest memory previously mapped RW.
    2011  * @remarks     May return.
    2012  * @note        Implicitly frees the a_bMapInfo variable.
    2013  */
    2014 #define IEM_MC_MEM_COMMIT_AND_UNMAP_RW(a_bMapInfo)          iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
    2015 
    2016 /** Commits the memory and unmaps guest memory previously mapped ATOMIC.
    2017  * @remarks     May return.
    2018  * @note        Implicitly frees the a_bMapInfo variable.
    2019  */
    2020 #define IEM_MC_MEM_COMMIT_AND_UNMAP_ATOMIC(a_bMapInfo)      iemMemCommitAndUnmapRwJmp(pVCpu, (a_bMapInfo))
    2021 
    2022 /** Commits the memory and unmaps guest memory previously mapped WO.
    2023  * @remarks     May return.
    2024  * @note        Implicitly frees the a_bMapInfo variable.
    2025  */
    2026 #define IEM_MC_MEM_COMMIT_AND_UNMAP_WO(a_bMapInfo)          iemMemCommitAndUnmapWoJmp(pVCpu, (a_bMapInfo))
    2027 
    2028 /** Commits the memory and unmaps guest memory previously mapped RO.
    2029  * @remarks     May return.
    2030  * @note        Implicitly frees the a_bMapInfo variable.
    2031  */
    2032 #define IEM_MC_MEM_COMMIT_AND_UNMAP_RO(a_bMapInfo)          iemMemCommitAndUnmapRoJmp(pVCpu, (a_bMapInfo))
    20331159
    20341160
     
    20521178    } while (0)
    20531179
    2054 /** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory.
    2055  * @note        Implicitly frees the a_bMapInfo variable. */
    2056 #define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo)        iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo)
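The write-only rollback complements the commit MCs above for paths where a mapped store must be abandoned, e.g. when a later check raises instead of storing. A hedged sketch, reusing the hypothetical locals from the mapping example above and a made-up fStoreOk condition:

    IEM_MC_MEM_SEG_MAP_U64_WO(pu64Dst, bUnmapInfo, pVCpu->iem.s.iEffSeg, GCPtrEffDst);
    if (fStoreOk)                                         /* hypothetical condition */
        IEM_MC_MEM_COMMIT_AND_UNMAP_WO(bUnmapInfo);       /* commit the store */
    else
        IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(bUnmapInfo);     /* unmap, nothing written back */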
    2057 
    20581180
    20591181
     
    20611183#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    20621184    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), (a_cbImmAndRspOffset)))
    2063 
    2064 
    2065 /** The @a a_fSupportedHosts mask is made up of ORed-together RT_ARCH_VAL_XXX values. */
    2066 #define IEM_MC_NATIVE_IF(a_fSupportedHosts)                               if (false) {
    2067 #define IEM_MC_NATIVE_ELSE()                                              } else {
    2068 #define IEM_MC_NATIVE_ENDIF()                                             } ((void)0)
    2069 
    2070 #define IEM_MC_NATIVE_EMIT_0(a_fnEmitter)
    2071 #define IEM_MC_NATIVE_EMIT_1(a_fnEmitter, a0)                             (void)(a0)
    2072 #define IEM_MC_NATIVE_EMIT_2(a_fnEmitter, a0, a1)                         (void)(a0), (void)(a1)
    2073 #define IEM_MC_NATIVE_EMIT_2_EX(a_fnEmitter, a0, a1)                      (void)(a0), (void)(a1)
    2074 #define IEM_MC_NATIVE_EMIT_3(a_fnEmitter, a0, a1, a2)                     (void)(a0), (void)(a1), (void)(a2)
    2075 #define IEM_MC_NATIVE_EMIT_4(a_fnEmitter, a0, a1, a2, a3)                 (void)(a0), (void)(a1), (void)(a2), (void)(a3)
    2076 #define IEM_MC_NATIVE_EMIT_5(a_fnEmitter, a0, a1, a2, a3, a4)             (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4)
    2077 #define IEM_MC_NATIVE_EMIT_6(a_fnEmitter, a0, a1, a2, a3, a4, a5)         (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5)
    2078 #define IEM_MC_NATIVE_EMIT_7(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6)     (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6)
    2079 #define IEM_MC_NATIVE_EMIT_8(a_fnEmitter, a0, a1, a2, a3, a4, a5, a6, a7) (void)(a0), (void)(a1), (void)(a2), (void)(a3), (void)(a4), (void)(a5), (void)(a6), (void)(a7)
    2080 
    2081 /** This can be used to direct the register allocator when dealing with
    2082  * x86/AMD64 instructions (like SHL reg,CL) that take fixed registers. */
    2083 #define IEM_MC_NATIVE_SET_AMD64_HOST_REG_FOR_LOCAL(a_VarNm, a_idxHostReg) ((void)0)
    2084 
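In this interpreter header the IF arm is dead code (if (false)) and the emit macros merely consume their arguments, so the ELSE arm always runs; the native recompiler overrides these to emit host code directly. A sketch of the intended shape, with a made-up emitter and fallback worker:

    IEM_MC_NATIVE_IF(RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64)
        IEM_MC_NATIVE_EMIT_2(iemNativeEmit_SomeOp, u64Dst, u64Src);   /* hypothetical emitter */
    IEM_MC_NATIVE_ELSE()
        IEM_MC_CALL_VOID_AIMPL_2(pfnSomeOpFallback, &u64Dst, u64Src); /* hypothetical worker */
    IEM_MC_NATIVE_ENDIF();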
    2085 
    2086 #define IEM_MC_CALL_VOID_AIMPL_0(a_pfn)                   (a_pfn)()
    2087 #define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0)               (a_pfn)((a0))
    2088 #define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1)           (a_pfn)((a0), (a1))
    2089 #define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2)       (a_pfn)((a0), (a1), (a2))
    2090 #define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3)   (a_pfn)((a0), (a1), (a2), (a3))
    2091 #define IEM_MC_CALL_AIMPL_3(a_rcType, a_rc, a_pfn, a0, a1, a2)      a_rcType const a_rc = (a_pfn)((a0), (a1), (a2))
    2092 #define IEM_MC_CALL_AIMPL_4(a_rcType, a_rc, a_pfn, a0, a1, a2, a3)  a_rcType const a_rc = (a_pfn)((a0), (a1), (a2), (a3))
    2093 
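The two AIMPL variants with a return type declare the status as a const local, typically the EFLAGS result of an assembly worker. A hedged sketch; pfnAImpl and all the locals are made up:

    IEM_MC_CALL_AIMPL_3(uint32_t, fEflRet, pfnAImpl, pu64Dst, u64Src, fEflIn);
    IEM_MC_COMMIT_EFLAGS(fEflRet);   /* fEflRet is declared 'uint32_t const' by the macro */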
    2094 
    2095 /** @def IEM_MC_CALL_CIMPL_HLP_RET
    2096  * Helper macro for checking that all important IEM_CIMPL_F_XXX bits are set.
    2097  */
    2098 #ifdef VBOX_STRICT
    2099 # define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) \
    2100     do { \
    2101         uint8_t      const cbInstr     = IEM_GET_INSTR_LEN(pVCpu); /* may be flushed */ \
    2102         uint16_t     const uCsBefore   = pVCpu->cpum.GstCtx.cs.Sel; \
    2103         uint64_t     const uRipBefore  = pVCpu->cpum.GstCtx.rip; \
    2104         uint32_t     const fEflBefore  = pVCpu->cpum.GstCtx.eflags.u; \
    2105         uint32_t     const fExecBefore = pVCpu->iem.s.fExec; \
    2106         VBOXSTRICTRC const rcStrictHlp = a_CallExpr; \
    2107         if (rcStrictHlp == VINF_SUCCESS) \
    2108         { \
    2109             uint64_t const fRipMask = (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT ? UINT64_MAX : UINT32_MAX; \
    2110             AssertMsg(   ((a_fFlags) & IEM_CIMPL_F_BRANCH_ANY) \
    2111                       || (   ((uRipBefore + cbInstr) & fRipMask) == pVCpu->cpum.GstCtx.rip \
    2112                           && uCsBefore  == pVCpu->cpum.GstCtx.cs.Sel) \
    2113                       || (   ((a_fFlags) & IEM_CIMPL_F_REP) \
    2114                           && uRipBefore == pVCpu->cpum.GstCtx.rip \
    2115                           && uCsBefore  == pVCpu->cpum.GstCtx.cs.Sel), \
    2116                       ("CS:RIP=%04x:%08RX64 + %x -> %04x:%08RX64, expected %04x:%08RX64\n", uCsBefore, uRipBefore, cbInstr, \
    2117                        pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uCsBefore, (uRipBefore + cbInstr) & fRipMask)); \
    2118             if ((a_fFlags) & IEM_CIMPL_F_RFLAGS) \
    2119             { /* No need to check fEflBefore */ Assert(!((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS)); } \
    2120             else if ((a_fFlags) & IEM_CIMPL_F_STATUS_FLAGS) \
    2121                 AssertMsg(   (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)) \
    2122                           == (fEflBefore                  & ~(X86_EFL_STATUS_BITS | X86_EFL_RF)), \
    2123                           ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
    2124             else \
    2125                 AssertMsg(   (pVCpu->cpum.GstCtx.eflags.u & ~(X86_EFL_RF)) \
    2126                           == (fEflBefore                  & ~(X86_EFL_RF)), \
    2127                           ("EFL=%#RX32 -> %#RX32\n", fEflBefore, pVCpu->cpum.GstCtx.eflags.u)); \
    2128             if (!((a_fFlags) & IEM_CIMPL_F_MODE)) \
    2129             { \
    2130                 uint32_t fExecRecalc = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS); \
    2131                 AssertMsg(   fExecBefore == fExecRecalc \
    2132                              /* in case ES, DS or SS was external initially (happens a lot with HM): */ \
    2133                           || (   fExecBefore == (fExecRecalc & ~IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK) \
    2134                               && (fExecRecalc & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT), \
    2135                           ("fExec=%#x -> %#x (diff %#x)\n", fExecBefore, fExecRecalc, fExecBefore ^ fExecRecalc)); \
    2136             } \
    2137         } \
    2138         return rcStrictHlp; \
    2139     } while (0)
    2140 #else
    2141 # define IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, a_CallExpr) return a_CallExpr
    2142 #endif
    2143 
    2144 /**
    2145  * Defers the rest of the instruction emulation to a C implementation routine
    2146  * and returns, only taking the standard parameters.
    2147  *
    2148  * @param   a_fFlags        IEM_CIMPL_F_XXX.
    2149  * @param   a_fGstShwFlush  Guest shadow register copies needing to be flushed
    2150  *                          in the native recompiler.
    2151  * @param   a_pfnCImpl      The pointer to the C routine.
    2152  * @sa      IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
    2153  */
    2154 #define IEM_MC_CALL_CIMPL_0(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
    2155     IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
    2156 
    2157 /**
    2158  * Defers the rest of the instruction emulation to a C implementation routine and
    2159  * returns, taking one argument in addition to the standard ones.
    2160  *
    2161  * @param   a_fFlags        IEM_CIMPL_F_XXX.
    2162  * @param   a_fGstShwFlush  Guest shadow register copies needing to be flushed
    2163  *                          in the native recompiler.
    2164  * @param   a_pfnCImpl      The pointer to the C routine.
    2165  * @param   a0              The argument.
    2166  */
    2167 #define IEM_MC_CALL_CIMPL_1(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
    2168     IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
    2169 
    2170 /**
    2171  * Defers the rest of the instruction emulation to a C implementation routine
    2172  * and returns, taking two arguments in addition to the standard ones.
    2173  *
    2174  * @param   a_fFlags        IEM_CIMPL_F_XXX.
    2175  * @param   a_fGstShwFlush  Guest shadow register copies needing to be flushed
    2176  *                          in the native recompiler.
    2177  * @param   a_pfnCImpl      The pointer to the C routine.
    2178  * @param   a0              The first extra argument.
    2179  * @param   a1              The second extra argument.
    2180  */
    2181 #define IEM_MC_CALL_CIMPL_2(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
    2182     IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
    2183 
    2184 /**
    2185  * Defers the rest of the instruction emulation to a C implementation routine
    2186  * and returns, taking three arguments in addition to the standard ones.
    2187  *
    2188  * @param   a_fFlags        IEM_CIMPL_F_XXX.
    2189  * @param   a_fGstShwFlush  Guest shadow register copies needing to be flushed
    2190  *                          in the native recompiler.
    2191  * @param   a_pfnCImpl      The pointer to the C routine.
    2192  * @param   a0              The first extra argument.
    2193  * @param   a1              The second extra argument.
    2194  * @param   a2              The third extra argument.
    2195  */
    2196 #define IEM_MC_CALL_CIMPL_3(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
    2197     IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
    2198 
    2199 /**
    2200  * Defers the rest of the instruction emulation to a C implementation routine
    2201  * and returns, taking four arguments in addition to the standard ones.
    2202  *
    2203  * @param   a_fFlags        IEM_CIMPL_F_XXX.
    2204  * @param   a_fGstShwFlush  Guest shadow register copies needing to be flushed
    2205  *                          in the native recompiler.
    2206  * @param   a_pfnCImpl      The pointer to the C routine.
    2207  * @param   a0              The first extra argument.
    2208  * @param   a1              The second extra argument.
    2209  * @param   a2              The third extra argument.
    2210  * @param   a3              The fourth extra argument.
    2211  */
    2212 #define IEM_MC_CALL_CIMPL_4(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3) \
    2213     IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3))
    2214 
    2215 /**
    2216  * Defers the rest of the instruction emulation to a C implementation routine
    2217  * and returns, taking five arguments in addition to the standard ones.
    2218  *
    2219  * @param   a_fFlags        IEM_CIMPL_F_XXX.
    2220  * @param   a_fGstShwFlush  Guest shadow register copies needing to be flushed
    2221  *                          in the native recompiler.
    2222  * @param   a_pfnCImpl      The pointer to the C routine.
    2223  * @param   a0              The first extra argument.
    2224  * @param   a1              The second extra argument.
    2225  * @param   a2              The third extra argument.
    2226  * @param   a3              The fourth extra argument.
    2227  * @param   a4              The fifth extra argument.
    2228  */
    2229 #define IEM_MC_CALL_CIMPL_5(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2, a3, a4) \
    2230     IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4))
    2231 
    2232 /**
    2233  * Defers the entire instruction emulation to a C implementation routine and
    2234  * returns, only taking the standard parameters.
    2235  *
    2236  * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
    2237  *
    2238  * @param   a_fFlags        IEM_CIMPL_F_XXX.
    2239  * @param   a_fGstShwFlush  Guest shadow register copies needing to be flushed
    2240  *                          in the native recompiler.
    2241  * @param   a_pfnCImpl      The pointer to the C routine.
    2242  * @sa      IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
    2243  */
    2244 #define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
    2245     IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu)))
    2246 
    2247 /**
    2248  * Defers the entire instruction emulation to a C implementation routine and
    2249  * returns, taking one argument in addition to the standard ones.
    2250  *
    2251  * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
    2252  *
    2253  * @param   a_fFlags        IEM_CIMPL_F_XXX.
    2254  * @param   a_fGstShwFlush  Guest shadow register copies needing to be flushed
    2255  *                          in the native recompiler.
    2256  * @param   a_pfnCImpl      The pointer to the C routine.
    2257  * @param   a0              The argument.
    2258  */
    2259 #define IEM_MC_DEFER_TO_CIMPL_1_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0) \
    2260     IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0))
    2261 
    2262 /**
    2263  * Defers the entire instruction emulation to a C implementation routine and
    2264  * returns, taking two arguments in addition to the standard ones.
    2265  *
    2266  * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
    2267  *
    2268  * @param   a_fFlags        IEM_CIMPL_F_XXX.
    2269  * @param   a_fGstShwFlush  Guest shadow register copies needing to be flushed
    2270  *                          in the native recompiler.
    2271  * @param   a_pfnCImpl      The pointer to the C routine.
    2272  * @param   a0              The first extra argument.
    2273  * @param   a1              The second extra argument.
    2274  */
    2275 #define IEM_MC_DEFER_TO_CIMPL_2_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1) \
    2276     IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1))
    2277 
    2278 /**
    2279  * Defers the entire instruction emulation to a C implementation routine and
    2280  * returns, taking three arguments in addition to the standard ones.
    2281  *
    2282  * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
    2283  *
    2284  * @param   a_fFlags        IEM_CIMPL_F_XXX.
    2285  * @param   a_fGstShwFlush  Guest shadow register copies needing to be flushed
    2286  *                          in the native recompiler.
    2287  * @param   a_pfnCImpl      The pointer to the C routine.
    2288  * @param   a0              The first extra argument.
    2289  * @param   a1              The second extra argument.
    2290  * @param   a2              The third extra argument.
    2291  */
    2292 #define IEM_MC_DEFER_TO_CIMPL_3_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl, a0, a1, a2) \
    2293     IEM_MC_CALL_CIMPL_HLP_RET(a_fFlags, (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2))
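A decoder function for an instruction implemented entirely in C thus collapses to a single statement. A hedged sketch; iemCImpl_Hypothetical is made up and the flag set depends on what the routine actually touches:

    IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_MODE, 0 /*a_fGstShwFlush*/, iemCImpl_Hypothetical);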
    22941185
    22951186
     
    26061497    } while (0)
    26071498
    2608 /** @note Not for IOPL or IF testing. */
    2609 #define IEM_MC_IF_EFL_BIT_SET(a_fBit)                   if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
    2610 /** @note Not for IOPL or IF testing. */
    2611 #define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit)               if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
    2612 /** @note Not for IOPL or IF testing. */
    2613 #define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits)             if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
    2614 /** @note Not for IOPL or IF testing. */
    2615 #define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits)              if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
    2616 /** @note Not for IOPL or IF testing. */
    2617 #define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2)         \
    2618     if (   !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
    2619         != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
    2620 /** @note Not for IOPL or IF testing. */
    2621 #define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2)         \
    2622     if (   !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
    2623         == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
    2624 /** @note Not for IOPL or IF testing. */
    2625 #define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
    2626     if (   (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
    2627         ||    !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
    2628            != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
    2629 /** @note Not for IOPL or IF testing. */
    2630 #define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
    2631     if (   !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
    2632         &&    !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
    2633            == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
    26341499#define IEM_MC_IF_CX_IS_NZ()                            if (pVCpu->cpum.GstCtx.cx  != 0) {
    26351500#define IEM_MC_IF_ECX_IS_NZ()                           if (pVCpu->cpum.GstCtx.ecx != 0) {
     
    26621527        if (   pVCpu->cpum.GstCtx.rcx != 1 \
    26631528            && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
    2664 #define IEM_MC_IF_LOCAL_IS_Z(a_Local)                   if ((a_Local) == 0) {
    2665 #define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo)       if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
    26661529
    26671530#define IEM_MC_REF_FPUREG(a_pr80Dst, a_iSt) \
     
    26801543    if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
    26811544
    2682 #define IEM_MC_ELSE()                                   } else {
    2683 #define IEM_MC_ENDIF()                                  } do {} while (0)
    2684 
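These building blocks combine into Jcc-style bodies. A minimal sketch of a conditional jump on ZF, assuming a decoder-fetched i8Imm displacement (illustrative only):

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
        IEM_MC_REL_JMP_S8_AND_FINISH(i8Imm);   /* taken: branch and finish */
    IEM_MC_ELSE()
        IEM_MC_ADVANCE_PC_AND_FINISH();        /* not taken: fall through */
    IEM_MC_ENDIF();
    IEM_MC_END();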
    2685 
    2686 /** Recompiler debugging: Flush guest register shadow copies. */
    2687 #define IEM_MC_HINT_FLUSH_GUEST_SHADOW(g_fGstShwFlush)  ((void)0)
    2688 
    2689 /** Recompiler liveness info: input GPR */
    2690 #define IEM_MC_LIVENESS_GREG_INPUT(a_iGReg)             ((void)0)
    2691 /** Recompiler liveness info: clobbered GPR */
    2692 #define IEM_MC_LIVENESS_GREG_CLOBBER(a_iGReg)           ((void)0)
    2693 /** Recompiler liveness info: modified GPR register (i.e. input & output)  */
    2694 #define IEM_MC_LIVENESS_GREG_MODIFY(a_iGReg)            ((void)0)
    2695 
    2696 /** Recompiler liveness info: input MM register */
    2697 #define IEM_MC_LIVENESS_MREG_INPUT(a_iMReg)             ((void)0)
    2698 /** Recompiler liveness info: clobbered MM register */
    2699 #define IEM_MC_LIVENESS_MREG_CLOBBER(a_iMReg)           ((void)0)
    2700 /** Recompiler liveness info: modified MM register (i.e. input & output)  */
    2701 #define IEM_MC_LIVENESS_MREG_MODIFY(a_iMReg)            ((void)0)
    2702 
    2703 /** Recompiler liveness info: input SSE register */
    2704 #define IEM_MC_LIVENESS_XREG_INPUT(a_iXReg)             ((void)0)
    2705 /** Recompiler liveness info: clobbered SSE register */
    2706 #define IEM_MC_LIVENESS_XREG_CLOBBER(a_iXReg)           ((void)0)
    2707 /** Recompiler liveness info: modified SSE register (i.e. input & output)  */
    2708 #define IEM_MC_LIVENESS_XREG_MODIFY(a_iXReg)            ((void)0)
    2709 
    2710 /** Recompiler liveness info: input MXCSR */
    2711 #define IEM_MC_LIVENESS_MXCSR_INPUT()                   ((void)0)
    2712 /** Recompiler liveness info: clobbered MXCSR */
    2713 #define IEM_MC_LIVENESS_MXCSR_CLOBBER()                 ((void)0)
    2714 /** Recompiler liveness info: modified MXCSR (i.e. input & output)  */
    2715 #define IEM_MC_LIVENESS_MXCSR_MODIFY()                  ((void)0)
    2716 
    2717 /** @todo add more as needed. */
    2718 
    27191545/** @}  */
    27201546
    2721 #endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
    2722 
     1547#endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMMc_x86_h */
     1548
  • trunk/src/VBox/VMM/include/IEMMc.h

    r108299 r108312  
    11/* $Id$ */
    22/** @file
    3  * IEM - Interpreted Execution Manager - IEM_MC_XXX.
     3 * IEM - Interpreted Execution Manager - IEM_MC_XXX, common.
    44 */
    55
     
    3838 * instructions as well as recompiler instructions.  Thus this obfuscation.
    3939 *
      40 * There are target-specific "microcodes" in addition to the ones listed here.
      41 * The target-specific header may also override the definitions here to allow
     42 * for differences.
     43 *
    4044 * @{
    4145 */
     
    5054/** Advances RIP, finishes the instruction and returns.
    5155 * This may include raising debug exceptions and such. */
    52 #define IEM_MC_ADVANCE_PC_AND_FINISH()                  return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
    53 
    54 
    55 /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
    56 #define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) \
    57     return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize)
    58 /** Sets RIP (may trigger \#GP), finishes the instruction and returns.
    59  * @note only usable in 16-bit op size mode.  */
    60 #define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) \
    61     return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
    62 /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
    63 #define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) \
    64     return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize)
    65 /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
    66 #define IEM_MC_IND_JMP_U16_AND_FINISH(a_u16NewIP) \
    67     return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP), IEM_GET_INSTR_LEN(pVCpu))
    68 /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
    69 #define IEM_MC_IND_JMP_U32_AND_FINISH(a_u32NewIP) \
    70     return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewIP), IEM_GET_INSTR_LEN(pVCpu))
    71 /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
    72 #define IEM_MC_IND_JMP_U64_AND_FINISH(a_u64NewIP) \
    73     return iemRegRipJumpU64AndFinishClearingRF((pVCpu), (a_u64NewIP), IEM_GET_INSTR_LEN(pVCpu))
    74 
    75 /** Sets RIP (may trigger \#GP), finishes the instruction and returns.
    76  * @note only usable in 16-bit op size mode.  */
    77 #define IEM_MC_REL_CALL_S16_AND_FINISH(a_i16) \
    78     return iemRegRipRelativeCallS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16))
    79 /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
    80 #define IEM_MC_REL_CALL_S32_AND_FINISH(a_i32) \
    81     return iemRegEip32RelativeCallS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32))
    82 /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
    83 #define IEM_MC_REL_CALL_S64_AND_FINISH(a_i64) \
    84     return iemRegRip64RelativeCallS64AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i64))
    85 /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
    86 #define IEM_MC_IND_CALL_U16_AND_FINISH(a_u16NewIP) \
    87     return iemRegIp16IndirectCallU16AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u16NewIP))
    88 /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
    89 #define IEM_MC_IND_CALL_U32_AND_FINISH(a_u32NewIP) \
    90     return iemRegEip32IndirectCallU32AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u32NewIP))
    91 /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
    92 #define IEM_MC_IND_CALL_U64_AND_FINISH(a_u64NewIP) \
    93     return iemRegRip64IndirectCallU64AndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u64NewIP))
    94 
    95 
    96 /** Fetches the near return address from the stack, sets RIP and RSP (may trigger
    97  * \#GP or \#SS), finishes the instruction and returns. */
    98 #define IEM_MC_RETN_AND_FINISH(a_cbPopArgs) \
    99     return iemRegRipNearReturnAndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_cbPopArgs), pVCpu->iem.s.enmEffOpSize)
    100 
    101 
    102 #define IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(a_uVar) \
    103     do { \
    104         if (RT_LIKELY((a_uVar) != 0)) \
    105         { /* probable */ } \
    106         else return iemRaiseDivideError(pVCpu); \
    107     } while (0)
    108 #define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE()       \
    109     do { \
    110         if (RT_LIKELY(!(pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)))) \
    111         { /* probable */ } \
    112         else return iemRaiseDeviceNotAvailable(pVCpu); \
    113     } while (0)
    114 #define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE()  \
    115     do { \
    116         if (RT_LIKELY(!((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)))) \
    117         { /* probable */ } \
    118         else return iemRaiseDeviceNotAvailable(pVCpu); \
    119     } while (0)
    120 #define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
    121     do { \
    122         if (RT_LIKELY(!(pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES))) \
    123         { /* probable */ } \
    124         else return iemRaiseMathFault(pVCpu); \
    125     } while (0)
    126 #define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
    127     do { \
    128         /* Since none of the bits we compare from XCR0, CR4 and CR0 overlap, it can \
    129            be reduced to a single compare branch in the more probable code path. */ \
    130         if (RT_LIKELY(   (  (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) \
    131                           | (pVCpu->cpum.GstCtx.cr4     & X86_CR4_OSXSAVE) \
    132                           | (pVCpu->cpum.GstCtx.cr0     & X86_CR0_TS)) \
    133                       == (XSAVE_C_YMM | XSAVE_C_SSE | X86_CR4_OSXSAVE))) \
    134         { /* probable */ } \
    135         else if (   (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
    136                  || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE)) \
    137             return iemRaiseUndefinedOpcode(pVCpu); \
    138         else \
    139             return iemRaiseDeviceNotAvailable(pVCpu); \
    140     } while (0)
    141 AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR4_OSXSAVE));
    142 AssertCompile(!((XSAVE_C_YMM | XSAVE_C_SSE) & X86_CR0_TS));
    143 AssertCompile(!(X86_CR4_OSXSAVE & X86_CR0_TS));
    144 #define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
    145     do { \
    146         /* Since the CR4 and CR0 bits don't overlap, it can be reduced to a
    147            single compare branch in the more probable code path. */ \
    148         if (RT_LIKELY(  (  (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
    149                          | (pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
    150                       ==                             X86_CR4_OSFXSR)) \
    151         { /* likely */ } \
    152         else if (   (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
    153                  || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR)) \
    154             return iemRaiseUndefinedOpcode(pVCpu); \
    155         else \
    156             return iemRaiseDeviceNotAvailable(pVCpu); \
    157     } while (0)
    158 AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_CR4_OSFXSR));
    159 #define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
    160     do { \
    161         /* Since the two CR0 bits don't overlap with FSW.ES, this can be reduced to a
    162            single compare branch in the more probable code path. */ \
    163         if (RT_LIKELY(!(  (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
    164                         | (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES)))) \
    165         { /* probable */ } \
    166         else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
    167             return iemRaiseUndefinedOpcode(pVCpu); \
    168         else if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
    169             return iemRaiseDeviceNotAvailable(pVCpu); \
    170         else \
    171             return iemRaiseMathFault(pVCpu); \
    172     } while (0)
    173 AssertCompile(!((X86_CR0_EM | X86_CR0_TS) & X86_FSW_ES));
    174 /** @todo recomp: this one is slightly problematic as the recompiler doesn't
    175  *        count the CPL into the TB key.  However it is safe enough for now, as
    176  *        it calls iemRaiseGeneralProtectionFault0 directly so no calls will be
    177  *        emitted for it. */
    178 #define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
    179     do { \
    180         if (RT_LIKELY(IEM_GET_CPL(pVCpu) == 0)) { /* probable */ } \
    181         else return iemRaiseGeneralProtectionFault0(pVCpu); \
    182     } while (0)
    183 #define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
    184     do { \
    185         if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
    186         else return iemRaiseGeneralProtectionFault0(pVCpu); \
    187     } while (0)
    188 #define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
    189     do { \
    190         if (RT_LIKELY(   ((pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE) | IEM_GET_CPU_MODE(pVCpu)) \
    191                       == (X86_CR4_FSGSBASE | IEMMODE_64BIT))) \
    192         { /* probable */ } \
    193         else return iemRaiseUndefinedOpcode(pVCpu); \
    194     } while (0)
    195 AssertCompile(X86_CR4_FSGSBASE > UINT8_MAX);
    196 #define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
    197     do { \
    198         if (RT_LIKELY(IEM_IS_CANONICAL(a_u64Addr))) { /* likely */ } \
    199         else return iemRaiseGeneralProtectionFault0(pVCpu); \
    200     } while (0)
     56#define IEM_MC_ADVANCE_PC_AND_FINISH()                  return iemRegAddToPcAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
     57
     58
     59/** Sets PC, finishes the instruction and returns. */
     60#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8)              return iemRegPcRelativeJumpS8AndFinishClearingRF(pVCpu, (a_i8))
     61/** Sets PC, finishes the instruction and returns. */
     62#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16)            return iemRegPcRelativeJumpS16AndFinishClearingRF(pVCpu, (a_i16))
     63/** Sets PC, finishes the instruction and returns. */
     64#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32)            return iemRegPcRelativeJumpS32AndFinishClearingRF(pVCpu, (a_i32))
     65/** Sets PC, finishes the instruction and returns. */
     66#define IEM_MC_IND_JMP_U16_AND_FINISH(a_u16NewIP)       return iemRegPcJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP))
     67/** Sets PC, finishes the instruction and returns. */
     68#define IEM_MC_IND_JMP_U32_AND_FINISH(a_u32NewIP)       return iemRegPcJumpU32AndFinishClearingRF((pVCpu), (a_u32NewIP))
     69/** Sets PC, finishes the instruction and returns. */
     70#define IEM_MC_IND_JMP_U64_AND_FINISH(a_u64NewIP)       return iemRegPcJumpU64AndFinishClearingRF((pVCpu), (a_u64NewIP))
     71
     72/** Saves the return address, sets PC, finishes the instruction and returns. */
     73#define IEM_MC_REL_CALL_S16_AND_FINISH(a_i16)           return iemRegPcRelativeCallS16AndFinishClearingRF(pVCpu, (a_i16))
     74/** Saves the return address, sets PC, finishes the instruction and returns. */
     75#define IEM_MC_REL_CALL_S32_AND_FINISH(a_i32)           return iemRegPcRelativeCallS32AndFinishClearingRF(pVCpu, (a_i32))
     76/** Saves the return address, sets PC, finishes the instruction and returns. */
     77#define IEM_MC_REL_CALL_S64_AND_FINISH(a_i64)           return iemRegPcRelativeCallS64AndFinishClearingRF(pVCpu, (a_i64))
     78/** Saves the return address, sets PC, finishes the instruction and returns. */
     79#define IEM_MC_IND_CALL_U16_AND_FINISH(a_u16NewIP)      return iemRegPcIndirectCallU16AndFinishClearingRF((pVCpu), (a_u16NewIP))
     80/** Saves the return address, sets PC, finishes the instruction and returns. */
     81#define IEM_MC_IND_CALL_U32_AND_FINISH(a_u32NewIP)      return iemRegPcIndirectCallU32AndFinishClearingRF((pVCpu), (a_u32NewIP))
     82/** Saves the return address, sets PC, finishes the instruction and returns. */
     83#define IEM_MC_IND_CALL_U64_AND_FINISH(a_u64NewIP)      return iemRegPcIndirectCallU64AndFinishClearingRF((pVCpu), (a_u64NewIP))
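With the RIP-specific handling moved to the x86 header, a target-neutral body only needs these generic PC MCs. A hedged sketch of an indirect 16-bit jump; u16Target and the fetch step are made up:

    IEM_MC_BEGIN(0, 0);
    IEM_MC_LOCAL(uint16_t, u16Target);
    /* ... fetch u16Target from a register or memory MC ... */
    IEM_MC_IND_JMP_U16_AND_FINISH(u16Target);  /* sets PC, finishes, returns */
    IEM_MC_END();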
    20184
    20285
     
    20891#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg)       a_Type const a_Name = (a_Value)
    20992#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg)   a_Type const a_Name = &(a_Local)
    210 /** @note IEMAllInstPython.py duplicates the expansion. */
    211 #define IEM_MC_ARG_EFLAGS(a_Name, a_iArg)               uint32_t const a_Name = pVCpu->cpum.GstCtx.eflags.u
    212 /** @note IEMAllInstPython.py duplicates the expansion. */
    213 #define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
    214     uint32_t  a_Name  = pVCpu->cpum.GstCtx.eflags.u; \
    215     uint32_t *a_pName = &a_Name
    216 /** @note IEMAllInstPython.py duplicates the expansion. */
    217 #define IEM_MC_LOCAL_EFLAGS(a_Name)                     uint32_t a_Name = pVCpu->cpum.GstCtx.eflags.u
    218 #define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
    219    do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
    220 #define IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) do { \
    221         AssertMsg((pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput)) == ((a_EFlags) & ~(a_fEflOutput)),  \
    222                   ("eflags.u=%#x (%#x) vs %s=%#x (%#x) - diff %#x (a_fEflOutput=%#x)\n", \
    223                    pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput), pVCpu->cpum.GstCtx.eflags.u, #a_EFlags, \
    224                    (a_EFlags) & ~(a_fEflOutput), (a_EFlags), \
    225                    (pVCpu->cpum.GstCtx.eflags.u & ~(a_fEflOutput)) ^ ((a_EFlags) & ~(a_fEflOutput)), a_fEflOutput)); \
    226         pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); \
    227         Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); \
    228     } while (0)
    229 #define IEM_MC_COMMIT_EFLAGS_OPT(a_EFlags)                               IEM_MC_COMMIT_EFLAGS(a_EFlags)
    230 #define IEM_MC_COMMIT_EFLAGS_OPT_EX(a_EFlags, a_fEflInput, a_fEflOutput) IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput)
    23193
    23294/** ASSUMES the source variable not used after this statement. */
     
    260122        (a_u128Dst).s.Hi = iemGRegFetchU64(pVCpu, (a_iGRegHi)); \
    261123    } while(0)
    262 #define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
    263         IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
    264         (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
    265     } while (0)
    266 #define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
    267         IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
    268         (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
    269     } while (0)
    270 #define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
    271         IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
    272         (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
    273     } while (0)
    274 /** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
    275 #define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
    276         IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
    277         (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
    278     } while (0)
    279 #define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
    280         IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
    281         (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
    282     } while (0)
    283 /** @note Not for IOPL or IF testing or modification. */
    284 #define IEM_MC_FETCH_EFLAGS(a_EFlags)                   (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
    285 #define IEM_MC_FETCH_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput) IEM_MC_FETCH_EFLAGS(a_EFlags)
    286 #define IEM_MC_FETCH_EFLAGS_U8(a_EFlags)                (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u   /* (only LAHF) */
    287 #define IEM_MC_FETCH_FSW(a_u16Fsw)                      (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
    288 #define IEM_MC_FETCH_FCW(a_u16Fcw)                      (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
    289 
    290 #define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value)        *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
    291 #define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value)      *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
     124
      125/** @todo these zero-extend the result, which can be a bit confusing for
     126 *        IEM_MC_STORE_GREG_I32... */
    292127#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value)      *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
    293128#define IEM_MC_STORE_GREG_I32(a_iGReg, a_i32Value)      *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_i32Value) /* clear high bits. */
    294129#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value)      *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
    295130#define IEM_MC_STORE_GREG_I64(a_iGReg, a_i64Value)      *iemGRegRefI64(pVCpu, (a_iGReg)) = (a_i64Value)
    296 #define IEM_MC_STORE_GREG_U8_CONST                      IEM_MC_STORE_GREG_U8
    297 #define IEM_MC_STORE_GREG_U16_CONST                     IEM_MC_STORE_GREG_U16
    298131#define IEM_MC_STORE_GREG_U32_CONST                     IEM_MC_STORE_GREG_U32
    299132#define IEM_MC_STORE_GREG_U64_CONST                     IEM_MC_STORE_GREG_U64
     
    308141#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg)             *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
    309142
    310 /** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
    311 #define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
    312         IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
    313         *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
    314     } while (0)
    315 #define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
    316         IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
    317         *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
    318     } while (0)
    319 #define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
    320     do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
    321 
    322143
    323144#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg)           (a_pu8Dst)  = iemGRegRefU8( pVCpu, (a_iGReg))
     
    325146#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg)         (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
    326147#define IEM_MC_REF_GREG_U16_CONST(a_pu16Dst, a_iGReg)   (a_pu16Dst) = (uint16_t const *)iemGRegRefU16(pVCpu, (a_iGReg))
    327 /** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
    328  *        Use IEM_MC_CLEAR_HIGH_GREG_U64! */
     148/** @todo X86: User of IEM_MC_REF_GREG_U32 needs to clear the high bits on
     149 *        commit. Use IEM_MC_CLEAR_HIGH_GREG_U64! */
    329150#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg)         (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
    330151#define IEM_MC_REF_GREG_U32_CONST(a_pu32Dst, a_iGReg)   (a_pu32Dst) = (uint32_t const *)iemGRegRefU32(pVCpu, (a_iGReg))
     
    335156#define IEM_MC_REF_GREG_I64(a_pi64Dst, a_iGReg)         (a_pi64Dst) = (int64_t        *)iemGRegRefU64(pVCpu, (a_iGReg))
    336157#define IEM_MC_REF_GREG_I64_CONST(a_pi64Dst, a_iGReg)   (a_pi64Dst) = (int64_t  const *)iemGRegRefU64(pVCpu, (a_iGReg))
    337 /** @note Not for IOPL or IF testing or modification.
    338  * @note Must preserve any undefined bits, see CPUMX86EFLAGS! */
    339 #define IEM_MC_REF_EFLAGS(a_pEFlags)                    (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.uBoth
    340 #define IEM_MC_REF_EFLAGS_EX(a_pEFlags, a_fEflInput, a_fEflOutput) IEM_MC_REF_EFLAGS(a_pEFlags)
    341 
    342 #define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value)        *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
     158
    343159#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
    344     do { \
    345         uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
    346         *pu32Reg += (a_u32Value); \
    347         pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
     160    do { /* Clears the high 32 bits of the register. */ \
     161        uint64_t * const pu64Reg = iemGRegRefU64(pVCpu, (a_iGReg)); \
     162        *pu64Reg = (uint32_t)((uint32_t)*pu64Reg + (a_u32Value)); \
    348163    } while (0)
    349164#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value)        *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
    350165
    351 #define IEM_MC_SUB_GREG_U16(a_iGReg, a_u8Const)         *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u8Const)
    352166#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u8Const) \
    353     do { \
    354         uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
    355         *pu32Reg -= (a_u8Const); \
    356         pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
     167    do { /* Clears the high 32 bits of the register. */ \
     168        uint64_t * const pu64Reg = iemGRegRefU64(pVCpu, (a_iGReg)); \
     169        *pu64Reg = (uint32_t)((uint32_t)*pu64Reg - (a_u8Const)); \
    357170    } while (0)
    358171#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u8Const)          *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u8Const)
     
    394207#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask)      do { (a_u32Local) |= (a_u32Mask); } while (0)
    395208
    396 #define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value)          *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
    397 #define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value)        *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
    398209#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
    399     do { \
    400         uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
    401         *pu32Reg &= (a_u32Value); \
    402         pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
     210    do {  /* Clears the high 32 bits of the register. */ \
     211        uint64_t * const pu64Reg = iemGRegRefU64(pVCpu, (a_iGReg)); \
     212        *pu64Reg = (uint32_t)((uint32_t)*pu64Reg & (a_u32Value)); \
    403213    } while (0)
    404214#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value)        *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
    405215
    406 #define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value)           *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
    407 #define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value)         *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
    408216#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
    409     do { \
    410         uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
    411         *pu32Reg |= (a_u32Value); \
    412         pu32Reg[1] = 0; /* implicitly clear the high bit. */ \
     217    do {  /* Clears the high 32 bits of the register. */ \
     218        uint64_t * const pu64Reg = iemGRegRefU64(pVCpu, (a_iGReg)); \
     219        *pu64Reg = (uint32_t)((uint32_t)*pu64Reg | (a_u32Value)); \
    413220    } while (0)
    414221#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value)         *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
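The rewritten 32-bit ADD/SUB/AND/OR MCs go through the 64-bit register reference so the zero-extension happens in a single store rather than a second write to the high dword. Plain C illustrating the x86-64 semantics they preserve (values are made up):

    uint64_t uReg = UINT64_C(0xfeedfacf00000000) | UINT64_C(0x11223344);
    /* As IEM_MC_OR_GREG_U32 does: operate on the low dword, zero-extend on store. */
    uReg = (uint32_t)((uint32_t)uReg | UINT32_C(0x80000000));
    /* uReg == UINT64_C(0x0000000091223344): the high half is cleared. */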
     
    418225#define IEM_MC_BSWAP_LOCAL_U64(a_u64Local)              (a_u64Local) = RT_BSWAP_U64((a_u64Local));
    419226
    420 /** @note Not for IOPL or IF modification. */
    421 #define IEM_MC_SET_EFL_BIT(a_fBit)                      do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
    422 /** @note Not for IOPL or IF modification. */
    423 #define IEM_MC_CLEAR_EFL_BIT(a_fBit)                    do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
    424 /** @note Not for IOPL or IF modification. */
    425 #define IEM_MC_FLIP_EFL_BIT(a_fBit)                     do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
    426 
    427 #define IEM_MC_CLEAR_FSW_EX()   do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
    428 
    429 /** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
    430 #define IEM_MC_FPU_TO_MMX_MODE() do { \
    431         iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
    432         pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
    433         pVCpu->cpum.GstCtx.XState.x87.FTW  = 0xff; \
    434     } while (0)
    435 
    436 /** Switches the FPU state from MMX mode (FSW.TOS=0, FTW=0xffff). */
    437 #define IEM_MC_FPU_FROM_MMX_MODE() do { \
    438         iemFpuRotateStackSetTop(&pVCpu->cpum.GstCtx.XState.x87, 0); \
    439         pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
    440         pVCpu->cpum.GstCtx.XState.x87.FTW  = 0; \
    441     } while (0)
    442 
    443 #define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
    444     do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
    445 #define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg, a_iDWord) \
    446     do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[a_iDWord]; } while (0)
    447 #define IEM_MC_FETCH_MREG_U16(a_u16Value, a_iMReg, a_iWord) \
    448     do { (a_u16Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au16[a_iWord]; } while (0)
    449 #define IEM_MC_FETCH_MREG_U8(a_u8Value, a_iMReg, a_iByte) \
    450     do { (a_u8Value)  = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au8[a_iByte]; } while (0)
    451 #define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
    452     do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
    453          pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
    454     } while (0)
    455 #define IEM_MC_STORE_MREG_U32(a_iMReg, a_iDword, a_u32Value) \
    456     do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[(a_iDword)] = (a_u32Value); \
    457          pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
    458     } while (0)
    459 #define IEM_MC_STORE_MREG_U16(a_iMReg, a_iWord, a_u16Value) \
    460     do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au16[(a_iWord)] = (a_u16Value); \
    461          pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
    462     } while (0)
    463 #define IEM_MC_STORE_MREG_U8(a_iMReg, a_iByte, a_u8Value) \
    464     do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au8[(a_iByte)] = (a_u8Value); \
    465          pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
    466     } while (0)
    467 #define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
    468     do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
    469          pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
    470     } while (0)
    471 #define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
    472         (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
    473 #define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
    474         (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
    475 #define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
    476         (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
    477 #define IEM_MC_MODIFIED_MREG(a_iMReg) \
    478     do { pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; } while (0)
    479 #define IEM_MC_MODIFIED_MREG_BY_REF(a_pu64Dst) \
    480     do { ((uint32_t *)(a_pu64Dst))[2] = 0xffff; } while (0)
    481 
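The 0xffff written to au32[2] mirrors hardware behaviour: an MMX write sets the exponent/sign field of the aliased 80-bit x87 register to all ones, so the register reads back as a NaN from x87 code. When a register is updated through a reference rather than a store macro, the fixup has to be applied explicitly; a minimal sketch with an assumed register index:

    uint64_t *pu64Dst;
    IEM_MC_REF_MREG_U64(pu64Dst, 0 /*mm0*/);
    *pu64Dst = ~*pu64Dst;                       /* some in-place modification */
    IEM_MC_MODIFIED_MREG_BY_REF(pu64Dst);       /* reapply the 0xffff exponent word */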
    482 #define IEM_MC_CLEAR_XREG_U32_MASK(a_iXReg, a_bMask) \
    483     do { if ((a_bMask) & (1 << 0)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = 0; \
    484          if ((a_bMask) & (1 << 1)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[1] = 0; \
    485          if ((a_bMask) & (1 << 2)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[2] = 0; \
    486          if ((a_bMask) & (1 << 3)) pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[3] = 0; \
    487     } while (0)
    488 #define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
    489     do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
    490          (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
    491     } while (0)
    492 #define IEM_MC_FETCH_XREG_XMM(a_XmmValue, a_iXReg) \
    493     do { (a_XmmValue).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
    494          (a_XmmValue).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
    495     } while (0)
    496 #define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg, a_iQWord) \
    497     do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQWord)]; } while (0)
    498 #define IEM_MC_FETCH_XREG_R64(a_r64Value, a_iXReg, a_iQWord) \
    499     do { (a_r64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[(a_iQWord)]; } while (0)
    500 #define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg, a_iDWord) \
    501     do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDWord)]; } while (0)
    502 #define IEM_MC_FETCH_XREG_R32(a_r32Value, a_iXReg, a_iDWord) \
    503     do { (a_r32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[(a_iDWord)]; } while (0)
    504 #define IEM_MC_FETCH_XREG_U16(a_u16Value, a_iXReg, a_iWord) \
    505     do { (a_u16Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)]; } while (0)
    506 #define IEM_MC_FETCH_XREG_U8( a_u8Value,  a_iXReg, a_iByte) \
    507     do { (a_u8Value)  = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)]; } while (0)
    508 #define IEM_MC_FETCH_XREG_PAIR_U128(a_Dst, a_iXReg1, a_iXReg2) \
    509     do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    510          (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    511          (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
    512          (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
    513     } while (0)
    514 #define IEM_MC_FETCH_XREG_PAIR_XMM(a_Dst, a_iXReg1, a_iXReg2) \
    515     do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    516          (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    517          (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
    518          (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
    519     } while (0)
    520 #define IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iXReg2) \
    521     do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    522          (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    523          (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
    524          (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
    525          (a_Dst).u64Rax        = pVCpu->cpum.GstCtx.rax; \
    526          (a_Dst).u64Rdx        = pVCpu->cpum.GstCtx.rdx; \
    527     } while (0)
    528 #define IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iXReg2) \
    529     do { (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    530          (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    531          (a_Dst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[0]; \
    532          (a_Dst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg2)].au64[1]; \
    533          (a_Dst).u64Rax        = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
    534          (a_Dst).u64Rdx        = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
    535     } while (0)
    536 #define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
    537     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
    538          pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
    539     } while (0)
    540 #define IEM_MC_STORE_XREG_XMM(a_iXReg, a_XmmValue) \
    541     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_XmmValue).au64[0]; \
    542          pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_XmmValue).au64[1]; \
    543     } while (0)
    544 #define IEM_MC_STORE_XREG_XMM_U32(a_iXReg, a_iDword, a_XmmValue) \
    545     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_XmmValue).au32[(a_iDword)]; } while (0)
    546 #define IEM_MC_STORE_XREG_XMM_U64(a_iXReg, a_iQword, a_XmmValue) \
    547     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_XmmValue).au64[(a_iQword)]; } while (0)
    548 #define IEM_MC_STORE_XREG_U64(a_iXReg, a_iQword, a_u64Value) \
    549     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[(a_iQword)] = (a_u64Value); } while (0)
    550 #define IEM_MC_STORE_XREG_U32(a_iXReg, a_iDword, a_u32Value) \
    551     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDword)] = (a_u32Value); } while (0)
    552 #define IEM_MC_STORE_XREG_U16(a_iXReg, a_iWord, a_u16Value) \
    553     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au16[(a_iWord)]  = (a_u16Value); } while (0)
    554 #define IEM_MC_STORE_XREG_U8(a_iXReg,  a_iByte, a_u8Value) \
    555     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au8[(a_iByte)]   = (a_u8Value);  } while (0)
    556 
    557 #define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
    558     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
    559          pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
    560     } while (0)
    561 
    562 #define IEM_MC_STORE_XREG_U32_U128(a_iXReg, a_iDwDst, a_u128Value, a_iDwSrc) \
    563     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[(a_iDwDst)] = (a_u128Value).au32[(a_iDwSrc)]; } while (0)
    564 #define IEM_MC_STORE_XREG_R32(a_iXReg, a_r32Value) \
    565     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0] = (a_r32Value); } while (0)
    566 #define IEM_MC_STORE_XREG_R64(a_iXReg, a_r64Value) \
    567     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0] = (a_r64Value); } while (0)
    568 #define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
    569     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
    570          pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
    571     } while (0)
    572 
    573 #define IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX(a_iXRegDst, a_u8Src) \
    574     do { uintptr_t const iXRegDstTmp    = (a_iXRegDst); \
    575          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[0]        = (a_u8Src); \
    576          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[1]        = (a_u8Src); \
    577          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[2]        = (a_u8Src); \
    578          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[3]        = (a_u8Src); \
    579          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[4]        = (a_u8Src); \
    580          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[5]        = (a_u8Src); \
    581          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[6]        = (a_u8Src); \
    582          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[7]        = (a_u8Src); \
    583          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[8]        = (a_u8Src); \
    584          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[9]        = (a_u8Src); \
    585          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[10]       = (a_u8Src); \
    586          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[11]       = (a_u8Src); \
    587          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[12]       = (a_u8Src); \
    588          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[13]       = (a_u8Src); \
    589          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[14]       = (a_u8Src); \
    590          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au8[15]       = (a_u8Src); \
    591          IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
    592     } while (0)
    593 #define IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX(a_iXRegDst, a_u16Src) \
    594     do { uintptr_t const iXRegDstTmp    = (a_iXRegDst); \
    595          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[0]       = (a_u16Src); \
    596          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[1]       = (a_u16Src); \
    597          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[2]       = (a_u16Src); \
    598          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[3]       = (a_u16Src); \
    599          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[4]       = (a_u16Src); \
    600          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[5]       = (a_u16Src); \
    601          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[6]       = (a_u16Src); \
    602          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au16[7]       = (a_u16Src); \
    603          IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
    604     } while (0)
    605 #define IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX(a_iXRegDst, a_u32Src) \
    606     do { uintptr_t const iXRegDstTmp    = (a_iXRegDst); \
    607          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[0]       = (a_u32Src); \
    608          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[1]       = (a_u32Src); \
    609          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[2]       = (a_u32Src); \
    610          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au32[3]       = (a_u32Src); \
    611          IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
    612     } while (0)
    613 #define IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX(a_iXRegDst, a_u64Src) \
    614     do { uintptr_t const iXRegDstTmp    = (a_iXRegDst); \
    615          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[0]       = (a_u64Src); \
    616          pVCpu->cpum.GstCtx.XState.x87.aXMM[iXRegDstTmp].au64[1]       = (a_u64Src); \
    617          IEM_MC_CLEAR_YREG_128_UP(iXRegDstTmp); \
    618     } while (0)
    619 
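A sketch of how a 128-bit broadcast form could be composed from these macros; the source fetch and register indices are assumptions:

    uint32_t u32Src;
    IEM_MC_FETCH_XREG_U32(u32Src, 1 /*xmm1*/, 0 /*dword 0*/);
    IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX(0 /*xmm0*/, u32Src);  /* replicate and clear bits 255:128 */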
    620 #define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg)       \
    621     (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
    622 #define IEM_MC_REF_XREG_XMM(a_pXmmDst, a_iXReg)       \
    623     (a_pXmmDst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)])
    624 #define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
    625     (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
    626 #define IEM_MC_REF_XREG_XMM_CONST(a_pXmmDst, a_iXReg) \
    627     (a_pXmmDst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)])
    628 #define IEM_MC_REF_XREG_U32_CONST(a_pu32Dst, a_iXReg) \
    629     (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0])
    630 #define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
    631     (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
    632 #define IEM_MC_REF_XREG_R32_CONST(a_pr32Dst, a_iXReg) \
    633     (a_pr32Dst) = ((RTFLOAT32U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar32[0])
    634 #define IEM_MC_REF_XREG_R64_CONST(a_pr64Dst, a_iXReg) \
    635     (a_pr64Dst) = ((RTFLOAT64U const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].ar64[0])
    636 #define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
    637     do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
    638             = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
    639          pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
    640             = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
    641     } while (0)
    642 
    643 #define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
    644     do { uintptr_t const iYRegSrcTmp    = (a_iYRegSrc); \
    645          (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
    646     } while (0)
    647 #define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc, a_iQWord) \
    648     do { uintptr_t const iYRegSrcTmp    = (a_iYRegSrc); \
    649          if ((a_iQWord) < 2) \
    650             (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[(a_iQWord)]; \
    651          else \
    652             (a_u64Dst) = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[(a_iQWord) - 2]; \
    653     } while (0)
    654 #define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc, a_iDQword) \
    655     do { uintptr_t const iYRegSrcTmp    = (a_iYRegSrc); \
    656          if ((a_iDQword) == 0) \
    657          { \
    658             (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegSrcTmp)].au64[0]; \
    659             (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegSrcTmp)].au64[1]; \
    660          } \
    661          else \
    662          { \
    663             (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegSrcTmp)].au64[0]; \
    664             (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegSrcTmp)].au64[1]; \
    665          } \
    666     } while (0)
    667 #define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
    668     do { uintptr_t const iYRegSrcTmp    = (a_iYRegSrc); \
    669          (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
    670          (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
    671          (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
    672          (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
    673     } while (0)
    674 #define IEM_MC_FETCH_YREG_YMM(a_uYmmDst, a_iYRegSrc) \
    675     do { uintptr_t const iYRegSrcTmp    = (a_iYRegSrc); \
    676          (a_uYmmDst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
    677          (a_uYmmDst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
    678          (a_uYmmDst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
    679          (a_uYmmDst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
    680     } while (0)
    681 #define IEM_MC_FETCH_YREG_PAIR_YMM(a_uYmmDst, a_iYRegSrc1, a_iYRegSrc2) \
    682     do { uintptr_t const iYRegSrc1Tmp    = (a_iYRegSrc1); \
    683          uintptr_t const iYRegSrc2Tmp    = (a_iYRegSrc2); \
    684          (a_uYmmDst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc1Tmp].au64[0]; \
    685          (a_uYmmDst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc1Tmp].au64[1]; \
    686          (a_uYmmDst).uSrc1.au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc1Tmp].au64[0]; \
    687          (a_uYmmDst).uSrc1.au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc1Tmp].au64[1]; \
    688          (a_uYmmDst).uSrc2.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc2Tmp].au64[0]; \
    689          (a_uYmmDst).uSrc2.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc2Tmp].au64[1]; \
    690          (a_uYmmDst).uSrc2.au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc2Tmp].au64[0]; \
    691          (a_uYmmDst).uSrc2.au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc2Tmp].au64[1]; \
    692     } while (0)
    693 
    694 #define IEM_MC_STORE_YREG_U128(a_iYRegDst, a_iDQword, a_u128Value) \
    695     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    696          if ((a_iDQword) == 0) \
    697          { \
    698             pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
    699             pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
    700          } \
    701          else \
    702          { \
    703             pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[0] = (a_u128Value).au64[0]; \
    704             pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[1] = (a_u128Value).au64[1]; \
    705          } \
    706     } while (0)
    707 
    708 #define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
    709 #define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
    710     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    711          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0]       = (a_u32Src); \
    712          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1]       = 0; \
    713          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = 0; \
    714          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
    715          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
    716          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    717     } while (0)
    718 #define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
    719     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    720          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u64Src); \
    721          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = 0; \
    722          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
    723          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
    724          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    725     } while (0)
    726 #define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
    727     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    728          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u128Src).au64[0]; \
    729          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = (a_u128Src).au64[1]; \
    730          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
    731          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
    732          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    733     } while (0)
    734 #define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
    735     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    736          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u256Src).au64[0]; \
    737          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = (a_u256Src).au64[1]; \
    738          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
    739          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
    740          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    741     } while (0)
    742 #define IEM_MC_STORE_YREG_YMM_ZX_VLMAX(a_iYRegDst, a_uYmmSrc) \
    743     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    744          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_uYmmSrc).au64[0]; \
    745          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = (a_uYmmSrc).au64[1]; \
    746          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_uYmmSrc).au64[2]; \
    747          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_uYmmSrc).au64[3]; \
    748          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    749     } while (0)
    750 #define IEM_MC_STORE_YREG_U32_U256(a_iYRegDst, a_iDwDst, a_u256Value, a_iDwSrc) \
    751     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    752          if ((a_iDwDst) < 4) \
    753             pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au32[(a_iDwDst)] = (a_u256Value).au32[(a_iDwSrc)]; \
    754          else \
    755             pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au32[(a_iDwDst) - 4] = (a_u256Value).au32[(a_iDwSrc)]; \
    756     } while (0)
    757 #define IEM_MC_STORE_YREG_U64_U256(a_iYRegDst, a_iQwDst, a_u256Value, a_iQwSrc) \
    758     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    759          if ((a_iQwDst) < 2) \
    760             pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[(a_iQwDst)] = (a_u256Value).au64[(a_iQwSrc)]; \
    761          else \
    762             pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[(a_iQwDst) - 2] = (a_u256Value).au64[(a_iQwSrc)]; \
    763     } while (0)
    764 #define IEM_MC_STORE_YREG_U64(a_iYRegDst, a_iQword, a_u64Value) \
    765     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    766          if ((a_iQword) < 2) \
    767             pVCpu->cpum.GstCtx.XState.x87.aXMM[(iYRegDstTmp)].au64[(a_iQword)] = (a_u64Value); \
    768          else \
    769             pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[(iYRegDstTmp)].au64[(a_iQword) - 2] = (a_u64Value); \
    770     } while (0)
    771 
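Note the qword indexing convention shared by IEM_MC_FETCH_YREG_U64 and IEM_MC_STORE_YREG_U64: the 256-bit register is addressed as four qwords, with indices 0..1 stored in x87.aXMM and 2..3 in u.YmmHi.aYmmHi. A sketch with assumed values:

    uint64_t const u64Val = UINT64_C(0x1122334455667788);
    IEM_MC_STORE_YREG_U64(3 /*ymm3*/, 2 /*qword 2, i.e. aYmmHi[3].au64[0]*/, u64Val);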
    772 #define IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX(a_iYRegDst, a_u8Src) \
    773     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    774          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[0]        = (a_u8Src); \
    775          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[1]        = (a_u8Src); \
    776          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[2]        = (a_u8Src); \
    777          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[3]        = (a_u8Src); \
    778          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[4]        = (a_u8Src); \
    779          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[5]        = (a_u8Src); \
    780          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[6]        = (a_u8Src); \
    781          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[7]        = (a_u8Src); \
    782          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[8]        = (a_u8Src); \
    783          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[9]        = (a_u8Src); \
    784          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[10]       = (a_u8Src); \
    785          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[11]       = (a_u8Src); \
    786          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[12]       = (a_u8Src); \
    787          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[13]       = (a_u8Src); \
    788          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[14]       = (a_u8Src); \
    789          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au8[15]       = (a_u8Src); \
    790          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[0]  = (a_u8Src); \
    791          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[1]  = (a_u8Src); \
    792          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[2]  = (a_u8Src); \
    793          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[3]  = (a_u8Src); \
    794          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[4]  = (a_u8Src); \
    795          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[5]  = (a_u8Src); \
    796          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[6]  = (a_u8Src); \
    797          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[7]  = (a_u8Src); \
    798          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[8]  = (a_u8Src); \
    799          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[9]  = (a_u8Src); \
    800          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[10] = (a_u8Src); \
    801          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[11] = (a_u8Src); \
    802          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[12] = (a_u8Src); \
    803          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[13] = (a_u8Src); \
    804          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[14] = (a_u8Src); \
    805          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au8[15] = (a_u8Src); \
    806          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    807     } while (0)
    808 #define IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX(a_iYRegDst, a_u16Src) \
    809     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    810          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[0]       = (a_u16Src); \
    811          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[1]       = (a_u16Src); \
    812          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[2]       = (a_u16Src); \
    813          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[3]       = (a_u16Src); \
    814          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[4]       = (a_u16Src); \
    815          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[5]       = (a_u16Src); \
    816          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[6]       = (a_u16Src); \
    817          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au16[7]       = (a_u16Src); \
    818          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[0] = (a_u16Src); \
    819          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[1] = (a_u16Src); \
    820          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[2] = (a_u16Src); \
    821          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[3] = (a_u16Src); \
    822          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[4] = (a_u16Src); \
    823          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[5] = (a_u16Src); \
    824          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[6] = (a_u16Src); \
    825          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au16[7] = (a_u16Src); \
    826          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    827     } while (0)
    828 #define IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
    829     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    830          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0]       = (a_u32Src); \
    831          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1]       = (a_u32Src); \
    832          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[2]       = (a_u32Src); \
    833          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[3]       = (a_u32Src); \
    834          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[0] = (a_u32Src); \
    835          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[1] = (a_u32Src); \
    836          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[2] = (a_u32Src); \
    837          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au32[3] = (a_u32Src); \
    838          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    839     } while (0)
    840 #define IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
    841     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    842          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u64Src); \
    843          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = (a_u64Src); \
    844          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u64Src); \
    845          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u64Src); \
    846          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    847     } while (0)
    848 #define IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
    849     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    850          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u128Src).au64[0]; \
    851          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = (a_u128Src).au64[1]; \
    852          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
    853          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
    854          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    855     } while (0)
    856 
     857 #define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg)       \
     858     (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iYReg)].uXmm)
     859 #define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
     860     (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iYReg)].uXmm)
     861 #define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
     862     (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iYReg)].au64[0])
    863 #define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
    864     do { uintptr_t const iYRegTmp   = (a_iYReg); \
    865          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
    866          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
    867          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
    868     } while (0)
    869 
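These helpers implement the VEX zero-extension rule: a VEX.128 operation writes the low 128 bits of the destination and zeroes bits 255:128 (IEM_MC_INT_CLEAR_ZMM_256_UP is the placeholder for clearing further up once wider registers are tracked). A sketch of the tail of such an operation, assuming a local RTUINT128U result named uResult:

    IEM_MC_STORE_XREG_U128(2 /*xmm2*/, uResult);    /* low 128 bits */
    IEM_MC_CLEAR_YREG_128_UP(2 /*ymm2*/);           /* ymm2[255:128] = 0 per VEX semantics */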
    870 #define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
    871     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    872          uintptr_t const iYRegSrcTmp    = (a_iYRegSrc); \
    873          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
    874          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
    875          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
    876          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
    877          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    878     } while (0)
    879 #define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
    880     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    881          uintptr_t const iYRegSrcTmp    = (a_iYRegSrc); \
    882          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
    883          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
    884          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
    885          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
    886          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    887     } while (0)
    888 #define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
    889     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    890          uintptr_t const iYRegSrcTmp    = (a_iYRegSrc); \
    891          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
    892          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = 0; \
    893          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
    894          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
    895          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    896     } while (0)
    897 
    898 #define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
    899     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    900          uintptr_t const iYRegSrc32Tmp  = (a_iYRegSrc32); \
    901          uintptr_t const iYRegSrcHxTmp  = (a_iYRegSrcHx); \
    902          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
    903          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
    904          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
    905          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
    906          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
    907          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    908     } while (0)
    909 #define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
    910     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    911          uintptr_t const iYRegSrc64Tmp  = (a_iYRegSrc64); \
    912          uintptr_t const iYRegSrcHxTmp  = (a_iYRegSrcHx); \
    913          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
    914          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
    915          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
    916          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
    917          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    918     } while (0)
    919 #define IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovlhps */ \
    920     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    921          uintptr_t const iYRegSrc64Tmp  = (a_iYRegSrc64); \
    922          uintptr_t const iYRegSrcHxTmp  = (a_iYRegSrcHx); \
    923          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
    924          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
    925          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
    926          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
    927          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    928     } while (0)
    929 #define IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
    930     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    931          uintptr_t const iYRegSrc64Tmp  = (a_iYRegSrc64); \
    932          uintptr_t const iYRegSrcHxTmp  = (a_iYRegSrcHx); \
    933          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
    934          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
    935          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
    936          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
    937          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    938     } while (0)
    939 #define IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX(a_iYRegDst, a_iYRegSrcHx, a_u64Local) \
    940     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    941          uintptr_t const iYRegSrcHxTmp  = (a_iYRegSrcHx); \
    942          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[0]; \
    943          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = (a_u64Local); \
    944          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
    945          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
    946          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    947     } while (0)
    948 #define IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
    949     do { uintptr_t const iYRegDstTmp    = (a_iYRegDst); \
    950          uintptr_t const iYRegSrcHxTmp  = (a_iYRegSrcHx); \
    951          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0]       = (a_u64Local); \
    952          pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1]       = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
    953          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
    954          pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
    955          IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
    956     } while (0)
    957 
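These merge macros bake the operand shuffles of vmovlhps/vmovhlps and friends into a single MC. A sketch of the register form of 'vmovlhps xmm1, xmm2, xmm3', with the operand roles read off the macro body and the decoder glue assumed:

    /* dst.lo = xmm2.lo, dst.hi = xmm3.lo, ymm1[255:128] = 0 */
    IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX(1 /*ymm1*/, 3 /*ymm3*/, 2 /*ymm2*/);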
    958 #define IEM_MC_CLEAR_ZREG_256_UP(a_iYReg) \
    959     do { IEM_MC_INT_CLEAR_ZMM_256_UP(a_iYReg); } while (0)
    960227
    961228#define IEM_MC_FETCH_MEM_SEG_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
     
    1031298#define IEM_MC_FETCH_MEM_SEG_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
    1032299    ((a_r64Dst).u = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
    1033 #define IEM_MC_FETCH_MEM_SEG_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
    1034     iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
    1035 #define IEM_MC_FETCH_MEM_SEG_D80(a_d80Dst, a_iSeg, a_GCPtrMem) \
    1036     iemMemFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_iSeg), (a_GCPtrMem))
    1037300
    1038301#define IEM_MC_FETCH_MEM_FLAT_R32(a_r32Dst, a_GCPtrMem) \
     
    1040303#define IEM_MC_FETCH_MEM_FLAT_R64(a_r64Dst, a_GCPtrMem) \
    1041304    ((a_r64Dst).u = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem)))
    1042 #define IEM_MC_FETCH_MEM_FLAT_R80(a_r80Dst, a_GCPtrMem) \
    1043     iemMemFlatFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_GCPtrMem))
    1044 #define IEM_MC_FETCH_MEM_FLAT_D80(a_d80Dst, a_GCPtrMem) \
    1045     iemMemFlatFetchDataD80Jmp(pVCpu, &(a_d80Dst), (a_GCPtrMem))
    1046305
    1047306#define IEM_MC_FETCH_MEM_SEG_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
     
    1052311    iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
    1053312
    1054 #define IEM_MC_FETCH_MEM_SEG_XMM(a_XmmDst, a_iSeg, a_GCPtrMem) \
    1055     iemMemFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
    1056 #define IEM_MC_FETCH_MEM_SEG_XMM_NO_AC(a_XmmDst, a_iSeg, a_GCPtrMem) \
    1057     iemMemFetchDataU128NoAcJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
    1058 #define IEM_MC_FETCH_MEM_SEG_XMM_ALIGN_SSE(a_XmmDst, a_iSeg, a_GCPtrMem) \
    1059     iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_iSeg), (a_GCPtrMem))
    1060 
    1061313#define IEM_MC_FETCH_MEM_FLAT_U128(a_u128Dst, a_GCPtrMem) \
    1062314    iemMemFlatFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
     
    1066318    iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_GCPtrMem))
    1067319
    1068 #define IEM_MC_FETCH_MEM_FLAT_XMM(a_XmmDst, a_GCPtrMem) \
    1069     iemMemFlatFetchDataU128Jmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
    1070 #define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC(a_XmmDst, a_GCPtrMem) \
    1071     iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
    1072 #define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE(a_XmmDst, a_GCPtrMem) \
    1073     iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_XmmDst).uXmm, (a_GCPtrMem))
    1074 
    1075 #define IEM_MC_FETCH_MEM_SEG_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    1076         iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
    1077         (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1078         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1079     } while (0)
    1080 #define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
    1081         iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
    1082         (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1083         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1084     } while (0)
    1085 
    1086 #define IEM_MC_FETCH_MEM_SEG_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    1087         iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2)); \
    1088         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1089         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1090     } while (0)
    1091 
    1092 #define IEM_MC_FETCH_MEM_SEG_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    1093         iemMemFetchDataU128NoAcJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_iSeg2), (a_GCPtrMem2)); \
    1094         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1095         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1096     } while (0)
    1097 
    1098 #define IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
    1099         iemMemFlatFetchDataU128AlignedSseJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_GCPtrMem2)); \
    1100         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1101         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1102     } while (0)
    1103 
    1104 #define IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC_AND_XREG_XMM(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
    1105         iemMemFlatFetchDataU128NoAcJmp(pVCpu, &(a_Dst).uSrc2.uXmm, (a_GCPtrMem2)); \
    1106         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1107         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1108     } while (0)
    1109 
    1110 #define IEM_MC_FETCH_MEM_SEG_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_iSeg2, a_GCPtrMem2) do {  \
    1111         (a_Dst).uSrc2.uXmm.au64[0] = 0; \
    1112         (a_Dst).uSrc2.uXmm.au64[1] = 0; \
    1113         (a_Dst).uSrc2.uXmm.au32[(a_iDWord2)] = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \
    1114         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1115         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1116     } while (0)
    1117 #define IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM(a_Dst, a_iXReg1, a_iDWord2, a_GCPtrMem2) do { \
    1118         (a_Dst).uSrc2.uXmm.au64[0] = 0; \
    1119         (a_Dst).uSrc2.uXmm.au64[1] = 0; \
    1120         (a_Dst).uSrc2.uXmm.au32[(a_iDWord2)] = iemMemFlatFetchDataU32Jmp(pVCpu, (a_GCPtrMem2)); \
    1121         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1122         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1123     } while (0)
    1124 
    1125 #define IEM_MC_FETCH_MEM_SEG_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_iSeg2, a_GCPtrMem2) do {  \
    1126         (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
    1127         (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)]  = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg2), (a_GCPtrMem2)); \
    1128         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1129         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1130     } while (0)
    1131 #define IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM(a_Dst, a_iXReg1, a_iQWord2, a_GCPtrMem2) do {  \
     1132         (a_Dst).uSrc2.uXmm.au64[!(a_iQWord2)] = 0; \
    1133         (a_Dst).uSrc2.uXmm.au64[(a_iQWord2)]  = iemMemFlatFetchDataU64Jmp(pVCpu, (a_GCPtrMem2)); \
    1134         (a_Dst).uSrc1.uXmm.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1135         (a_Dst).uSrc1.uXmm.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1136     } while (0)
    1137 
    1138 
    1139 #define IEM_MC_FETCH_MEM_SEG_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    1140         iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
    1141         (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1142         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1143         (a_Dst).u64Rax        = pVCpu->cpum.GstCtx.rax; \
    1144         (a_Dst).u64Rdx        = pVCpu->cpum.GstCtx.rdx; \
    1145     } while (0)
    1146 #define IEM_MC_FETCH_MEM_SEG_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_iSeg2, a_GCPtrMem2) do { \
    1147         iemMemFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_iSeg2), (a_GCPtrMem2)); \
    1148         (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1149         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1150         (a_Dst).u64Rax        = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
    1151         (a_Dst).u64Rdx        = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
    1152     } while (0)
    1153 
    1154 #define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
    1155         iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
    1156         (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1157         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1158         (a_Dst).u64Rax        = pVCpu->cpum.GstCtx.rax; \
    1159         (a_Dst).u64Rdx        = pVCpu->cpum.GstCtx.rdx; \
    1160     } while (0)
    1161 #define IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64(a_Dst, a_iXReg1, a_GCPtrMem2) do { \
    1162         iemMemFlatFetchDataU128Jmp(pVCpu, &(a_Dst).uSrc2, (a_GCPtrMem2)); \
    1163         (a_Dst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[0]; \
    1164         (a_Dst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg1)].au64[1]; \
    1165         (a_Dst).u64Rax        = (int64_t)(int32_t)pVCpu->cpum.GstCtx.eax; \
    1166         (a_Dst).u64Rdx        = (int64_t)(int32_t)pVCpu->cpum.GstCtx.edx; \
    1167     } while (0)
    1168 
    1169 
    1170320#define IEM_MC_FETCH_MEM_SEG_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
    1171321    iemMemFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
     
    1175325    iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
    1176326
    1177 #define IEM_MC_FETCH_MEM_SEG_YMM(a_YmmDst, a_iSeg, a_GCPtrMem) \
    1178     iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
    1179 #define IEM_MC_FETCH_MEM_SEG_YMM_NO_AC(a_YmmDst, a_iSeg, a_GCPtrMem) \
    1180     iemMemFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
    1181 #define IEM_MC_FETCH_MEM_SEG_YMM_ALIGN_AVX(a_YmmDst, a_iSeg, a_GCPtrMem) \
    1182     iemMemFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_iSeg), (a_GCPtrMem))
    1183 
    1184 #define IEM_MC_FETCH_MEM_SEG_YMM_NO_AC_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_iSeg2, a_GCPtrMem2) do { \
     1185         uintptr_t const iYRegSrc1Tmp = (a_iYRegSrc1); \
     1186         iemMemFetchDataU256NoAcJmp(pVCpu, &(a_uYmmDst).uSrc2.ymm, (a_iSeg2), (a_GCPtrMem2)); \
     1187         (a_uYmmDst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc1Tmp].au64[0]; \
     1188         (a_uYmmDst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc1Tmp].au64[1]; \
     1189         (a_uYmmDst).uSrc1.au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc1Tmp].au64[0]; \
     1190         (a_uYmmDst).uSrc1.au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc1Tmp].au64[1]; \
    1191     } while (0)
    1192 
    1193327#define IEM_MC_FETCH_MEM_FLAT_U256(a_u256Dst, a_GCPtrMem) \
    1194328    iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
     
    1197331#define IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX(a_u256Dst, a_GCPtrMem) \
    1198332    iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_u256Dst), (a_GCPtrMem))
    1199 
    1200 #define IEM_MC_FETCH_MEM_FLAT_YMM(a_YmmDst, a_GCPtrMem) \
    1201     iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
    1202 #define IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC(a_YmmDst, a_GCPtrMem) \
    1203     iemMemFlatFetchDataU256NoAcJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
    1204 #define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX(a_YmmDst, a_GCPtrMem) \
    1205     iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_YmmDst).ymm, (a_GCPtrMem))
    1206 
    1207 #define IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX_AND_YREG_YMM(a_uYmmDst, a_iYRegSrc1, a_GCPtrMem2) do { \
     1208         uintptr_t const iYRegSrc1Tmp = (a_iYRegSrc1); \
     1209         iemMemFlatFetchDataU256AlignedAvxJmp(pVCpu, &(a_uYmmDst).uSrc2.ymm, (a_GCPtrMem2)); \
     1210         (a_uYmmDst).uSrc1.au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc1Tmp].au64[0]; \
     1211         (a_uYmmDst).uSrc1.au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc1Tmp].au64[1]; \
     1212         (a_uYmmDst).uSrc1.au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc1Tmp].au64[0]; \
     1213         (a_uYmmDst).uSrc1.au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrc1Tmp].au64[1]; \
    1214     } while (0)
    1215 
    1216333
    1217334
     
    1310427#define IEM_MC_STORE_MEM_BY_REF_R32_NEG_QNAN(a_pr32Dst)         (a_pr32Dst)->u = UINT32_C(0xffc00000)
    1311428#define IEM_MC_STORE_MEM_BY_REF_R64_NEG_QNAN(a_pr64Dst)         (a_pr64Dst)->u = UINT64_C(0xfff8000000000000)
    1312 #define IEM_MC_STORE_MEM_BY_REF_R80_NEG_QNAN(a_pr80Dst) \
    1313     do { \
    1314         (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
    1315         (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
    1316     } while (0)
    1317 #define IEM_MC_STORE_MEM_BY_REF_D80_INDEF(a_pd80Dst) \
    1318     do { \
    1319         (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
    1320         (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
    1321     } while (0)
    1322429
    1323430#define IEM_MC_STORE_MEM_SEG_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
     
    1325432#define IEM_MC_STORE_MEM_SEG_U128_NO_AC(a_iSeg, a_GCPtrMem, a_u128Value) \
    1326433    iemMemStoreDataU128NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u128Value))
    1327 #define IEM_MC_STORE_MEM_SEG_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
    1328     iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
    1329434
    1330435#define IEM_MC_STORE_MEM_FLAT_U128(a_GCPtrMem, a_u128Value) \
     
    1332437#define IEM_MC_STORE_MEM_FLAT_U128_NO_AC(a_GCPtrMem, a_u128Value) \
    1333438    iemMemFlatStoreDataU128NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u128Value))
    1334 #define IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE(a_GCPtrMem, a_u128Value) \
    1335     iemMemStoreDataU128AlignedSseJmp(pVCpu, UINT8_MAX, (a_GCPtrMem), (a_u128Value))
    1336439
    1337440#define IEM_MC_STORE_MEM_SEG_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
     
    1339442#define IEM_MC_STORE_MEM_SEG_U256_NO_AC(a_iSeg, a_GCPtrMem, a_u256Value) \
    1340443    iemMemStoreDataU256NoAcJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
    1341 #define IEM_MC_STORE_MEM_SEG_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
    1342     iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
    1343444
    1344445#define IEM_MC_STORE_MEM_FLAT_U256(a_GCPtrMem, a_u256Value) \
     
    1346447#define IEM_MC_STORE_MEM_FLAT_U256_NO_AC(a_GCPtrMem, a_u256Value) \
    1347448    iemMemFlatStoreDataU256NoAcJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
    1348 #define IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX(a_GCPtrMem, a_u256Value) \
    1349     iemMemFlatStoreDataU256AlignedAvxJmp(pVCpu, (a_GCPtrMem), &(a_u256Value))
    1350 
    1351 /* Regular stack push and pop: */
    1352 #define IEM_MC_PUSH_U16(a_u16Value)             iemMemStackPushU16Jmp(pVCpu, (a_u16Value))
    1353 #define IEM_MC_PUSH_U32(a_u32Value)             iemMemStackPushU32Jmp(pVCpu, (a_u32Value))
    1354 #define IEM_MC_PUSH_U32_SREG(a_uSegVal)         iemMemStackPushU32SRegJmp(pVCpu, (a_uSegVal))
    1355 #define IEM_MC_PUSH_U64(a_u64Value)             iemMemStackPushU64Jmp(pVCpu, (a_u64Value))
    1356 
    1357 #define IEM_MC_POP_GREG_U16(a_iGReg)            iemMemStackPopGRegU16Jmp(pVCpu, (a_iGReg))
    1358 #define IEM_MC_POP_GREG_U32(a_iGReg)            iemMemStackPopGRegU32Jmp(pVCpu, (a_iGReg))
    1359 #define IEM_MC_POP_GREG_U64(a_iGReg)            iemMemStackPopGRegU64Jmp(pVCpu, (a_iGReg))
    1360 
    1361 /* 32-bit flat stack push and pop: */
    1362 #define IEM_MC_FLAT32_PUSH_U16(a_u16Value)      iemMemFlat32StackPushU16Jmp(pVCpu, (a_u16Value))
    1363 #define IEM_MC_FLAT32_PUSH_U32(a_u32Value)      iemMemFlat32StackPushU32Jmp(pVCpu, (a_u32Value))
    1364 #define IEM_MC_FLAT32_PUSH_U32_SREG(a_uSegVal)  iemMemFlat32StackPushU32SRegJmp(pVCpu, (a_uSegVal))
    1365 
     1366 #define IEM_MC_FLAT32_POP_GREG_U16(a_iGReg)     iemMemFlat32StackPopGRegU16Jmp(pVCpu, (a_iGReg))
     1367 #define IEM_MC_FLAT32_POP_GREG_U32(a_iGReg)     iemMemFlat32StackPopGRegU32Jmp(pVCpu, (a_iGReg))
    1368 
    1369 /* 64-bit flat stack push and pop: */
    1370 #define IEM_MC_FLAT64_PUSH_U16(a_u16Value)      iemMemFlat64StackPushU16Jmp(pVCpu, (a_u16Value))
    1371 #define IEM_MC_FLAT64_PUSH_U64(a_u64Value)      iemMemFlat64StackPushU64Jmp(pVCpu, (a_u64Value))
    1372 
    1373 #define IEM_MC_FLAT64_POP_GREG_U16(a_iGReg)     iemMemFlat64StackPopGRegU16Jmp(pVCpu, (a_iGReg))
    1374 #define IEM_MC_FLAT64_POP_GREG_U64(a_iGReg)     iemMemFlat64StackPopGRegU64Jmp(pVCpu, (a_iGReg))
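A usage sketch, assuming the caller picks the regular (segmented) variants for a 16-bit context; the choice between the regular and flat helpers is made by the decoder, not by these macros:

    IEM_MC_PUSH_U16(pVCpu->cpum.GstCtx.ax);     /* 'push ax' */
    IEM_MC_POP_GREG_U16(X86_GREG_xAX);          /* 'pop ax' */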
    1375449
    1376450
     
    19511025
    19521026
    1953 /* misc */
    1954 
    1955 /**
     1956  * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer access.
     1957  *
     1958  * @param[out] a_pr80Mem    Where to return the pointer to the mapping.
     1959  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1960  * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
    1961  * @param[in]  a_GCPtrMem   The memory address.
    1962  * @remarks Will return/long jump on errors.
    1963  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    1964  */
    1965 #define IEM_MC_MEM_SEG_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1966     (a_pr80Mem) = iemMemMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1967 
    1968 /**
     1969  * Maps guest memory for 80-bit float writeonly direct (or bounce) buffer access.
     1970  *
     1971  * @param[out] a_pr80Mem    Where to return the pointer to the mapping.
     1972  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1973  * @param[in]  a_GCPtrMem   The memory address.
    1974  * @remarks Will return/long jump on errors.
    1975  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    1976  */
    1977 #define IEM_MC_MEM_FLAT_MAP_R80_WO(a_pr80Mem, a_bUnmapInfo, a_GCPtrMem) \
    1978     (a_pr80Mem) = iemMemFlatMapDataR80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    1979 
    1980 
    1981 /**
     1982  * Maps guest memory for 80-bit BCD writeonly direct (or bounce) buffer access.
     1983  *
     1984  * @param[out] a_pd80Mem    Where to return the pointer to the mapping.
     1985  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1986  * @param[in]  a_iSeg       The segment register to access via. No UINT8_MAX!
    1987  * @param[in]  a_GCPtrMem   The memory address.
    1988  * @remarks Will return/long jump on errors.
    1989  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    1990  */
    1991 #define IEM_MC_MEM_SEG_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_iSeg, a_GCPtrMem) \
    1992     (a_pd80Mem) = iemMemMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_iSeg), (a_GCPtrMem))
    1993 
    1994 /**
     1995  * Maps guest memory for 80-bit BCD writeonly direct (or bounce) buffer access.
     1996  *
     1997  * @param[out] a_pd80Mem    Where to return the pointer to the mapping.
     1998  * @param[out] a_bUnmapInfo Where to return unmapping instructions. uint8_t.
    1999  * @param[in]  a_GCPtrMem   The memory address.
    2000  * @remarks Will return/long jump on errors.
    2001  * @see     IEM_MC_MEM_COMMIT_AND_UNMAP_WO
    2002  */
    2003 #define IEM_MC_MEM_FLAT_MAP_D80_WO(a_pd80Mem, a_bUnmapInfo, a_GCPtrMem) \
    2004     (a_pd80Mem) = iemMemFlatMapDataD80WoJmp(pVCpu, &(a_bUnmapInfo), (a_GCPtrMem))
    2005 
    2006 
    2007 
    20081027/* commit + unmap */
    20091028
     
    20331052
    20341053
    2035 /** Commits the memory and unmaps the guest memory, unless the FPU status word
    2036  * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
    2037  * that would cause FLD not to store.
    2038  *
    2039  * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
    2040  * store, while \#P will not.
    2041  *
    2042  * @remarks     May in theory return - for now.
    2043  * @note        Implicitly frees both the a_bMapInfo and a_u16FSW variables.
    2044  */
    2045 #define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(a_bMapInfo, a_u16FSW) do { \
    2046         if (   !(a_u16FSW & X86_FSW_ES) \
    2047             || !(  (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
    2048                  & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
    2049             iemMemCommitAndUnmapWoJmp(pVCpu, a_bMapInfo); \
    2050         else \
    2051             iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo); \
    2052     } while (0)
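As a sketch, an FIST/FST-style store tail would pair this with a mapped write-only buffer; the worker and variable names here are assumptions:

    uint16_t u16FSW;
    pfnStoreWorker(&pVCpu->cpum.GstCtx.XState.x87, &u16FSW, pi32Dst, pr80Src); /* hypothetical worker */
    IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE_WO(bUnmapInfo, u16FSW);  /* store or roll back per FSW/FCW */
    IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16FSW, iEffSeg, GCPtrEffDst, uFpuOpcode);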
    2053 
    20541054/** Rolls back (conceptually only, assumes no writes) and unmaps the guest memory.
    20551055 * @note        Implicitly frees the a_bMapInfo variable. */
    20561056#define IEM_MC_MEM_ROLLBACK_AND_UNMAP_WO(a_bMapInfo)        iemMemRollbackAndUnmapWo(pVCpu, a_bMapInfo)
    20571057
    2058 
    2059 
    2060 /** Calculate effective address from R/M. */
    2061 #define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    2062     ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (a_bRm), (a_cbImmAndRspOffset)))
    20631058
    20641059
     
    22941289
    22951290
    2296 /**
    2297  * Calls a FPU assembly implementation taking one visible argument.
    2298  *
    2299  * @param   a_pfnAImpl      Pointer to the assembly FPU routine.
    2300  * @param   a0              The first extra argument.
    2301  */
    2302 #define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
    2303     do { \
    2304         a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
    2305     } while (0)
    2306 
    2307 /**
    2308  * Calls a FPU assembly implementation taking two visible arguments.
    2309  *
    2310  * @param   a_pfnAImpl      Pointer to the assembly FPU routine.
    2311  * @param   a0              The first extra argument.
    2312  * @param   a1              The second extra argument.
    2313  */
    2314 #define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
    2315     do { \
    2316         a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
    2317     } while (0)
    2318 
    2319 /**
    2320  * Calls a FPU assembly implementation taking three visible arguments.
    2321  *
    2322  * @param   a_pfnAImpl      Pointer to the assembly FPU routine.
    2323  * @param   a0              The first extra argument.
    2324  * @param   a1              The second extra argument.
    2325  * @param   a2              The third extra argument.
    2326  */
    2327 #define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
    2328     do { \
    2329         a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
    2330     } while (0)
    2331 
    2332 #define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
    2333     do { \
    2334         (a_FpuData).FSW       = (a_FSW); \
    2335         (a_FpuData).r80Result = *(a_pr80Value); \
    2336     } while (0)
    2337 
    2338 /** Pushes FPU result onto the stack. */
    2339 #define IEM_MC_PUSH_FPU_RESULT(a_FpuData, a_uFpuOpcode) \
    2340     iemFpuPushResult(pVCpu, &a_FpuData, a_uFpuOpcode)
    2341 /** Pushes FPU result onto the stack and sets the FPUDP. */
    2342 #define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
    2343     iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
    2344 
    2345 /** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
    2346 #define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo, a_uFpuOpcode) \
    2347     iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo, a_uFpuOpcode)
    2348 
    2349 /** Stores FPU result in a stack register. */
    2350 #define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg, a_uFpuOpcode) \
    2351     iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
    2352 /** Stores FPU result in a stack register and pops the stack. */
    2353 #define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg, a_uFpuOpcode) \
    2354     iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg, a_uFpuOpcode)
    2355 /** Stores FPU result in a stack register and sets the FPUDP. */
    2356 #define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
    2357     iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
    2358 /** Stores FPU result in a stack register, sets the FPUDP, and pops the
    2359  *  stack. */
    2360 #define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
    2361     iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
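The x87 state pointer is supplied as a hidden first argument by the CALL MCs above, so a one-operand worker together with the result MCs typically looks like this sketch (the worker name and the argument wiring are assumptions):

    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);
    /* ... reference st0 into pr80Value ... */
    IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fabs_r80, pFpuRes, pr80Value); /* worker name assumed */
    IEM_MC_STORE_FPU_RESULT(FpuRes, 0 /*st0*/, uFpuOpcode);         /* FSW + value into st0 */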
    2362 
    2363 /** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
    2364 #define IEM_MC_UPDATE_FPU_OPCODE_IP(a_uFpuOpcode) \
    2365     iemFpuUpdateOpcodeAndIp(pVCpu, a_uFpuOpcode)
    2366 /** Free a stack register (for FFREE and FFREEP). */
    2367 #define IEM_MC_FPU_STACK_FREE(a_iStReg) \
    2368     iemFpuStackFree(pVCpu, a_iStReg)
    2369 /** Increment the FPU stack pointer. */
    2370 #define IEM_MC_FPU_STACK_INC_TOP() \
    2371     iemFpuStackIncTop(pVCpu)
    2372 /** Decrement the FPU stack pointer. */
    2373 #define IEM_MC_FPU_STACK_DEC_TOP() \
    2374     iemFpuStackDecTop(pVCpu)
    2375 
    2376 /** Updates the FSW, FOP, FPUIP, and FPUCS. */
    2377 #define IEM_MC_UPDATE_FSW(a_u16FSW, a_uFpuOpcode) \
    2378     iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
    2379 /** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
    2380 #define IEM_MC_UPDATE_FSW_CONST(a_u16FSW, a_uFpuOpcode) \
    2381     iemFpuUpdateFSW(pVCpu, a_u16FSW, a_uFpuOpcode)
    2382 /** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
    2383 #define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
    2384     iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
    2385 /** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
    2386 #define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW, a_uFpuOpcode) \
    2387     iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW, a_uFpuOpcode)
    2388 /** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
    2389  *  stack. */
    2390 #define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
    2391     iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
    2392 /** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
    2393 #define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW, a_uFpuOpcode) \
    2394     iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW, a_uFpuOpcode)
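An FCOM-style compare, for instance, lets the worker compute an FSW and then applies one of these MCs; the worker and locals are assumptions:

    IEM_MC_CALL_FPU_AIMPL_3(pfnCompareWorker, pu16Fsw, pr80Value1, pr80Value2); /* hypothetical worker */
    IEM_MC_UPDATE_FSW(u16Fsw, uFpuOpcode);  /* FCOMP would use IEM_MC_UPDATE_FSW_THEN_POP instead */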
    2395 
    2396 /** Raises a FPU stack underflow exception.  Sets FPUIP, FPUCS and FOP. */
    2397 #define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst, a_uFpuOpcode) \
    2398     iemFpuStackUnderflow(pVCpu, a_iStDst, a_uFpuOpcode)
    2399 /** Raises a FPU stack underflow exception.  Sets FPUIP, FPUCS and FOP. Pops
    2400  *  stack. */
    2401 #define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst, a_uFpuOpcode) \
    2402     iemFpuStackUnderflowThenPop(pVCpu, a_iStDst, a_uFpuOpcode)
    2403 /** Raises a FPU stack underflow exception.  Sets FPUIP, FPUCS, FOP, FPUDP and
    2404  *  FPUDS. */
    2405 #define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
    2406     iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
    2407 /** Raises a FPU stack underflow exception.  Sets FPUIP, FPUCS, FOP, FPUDP and
    2408  *  FPUDS. Pops stack. */
    2409 #define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
    2410     iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
    2411 /** Raises a FPU stack underflow exception.  Sets FPUIP, FPUCS and FOP. Pops
    2412  *  stack twice. */
    2413 #define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP(a_uFpuOpcode) \
    2414     iemFpuStackUnderflowThenPopPop(pVCpu, a_uFpuOpcode)
    2415 /** Raises a FPU stack underflow exception for an instruction pushing a result
    2416  *  value onto the stack. Sets FPUIP, FPUCS and FOP. */
    2417 #define IEM_MC_FPU_STACK_PUSH_UNDERFLOW(a_uFpuOpcode) \
    2418     iemFpuStackPushUnderflow(pVCpu, a_uFpuOpcode)
    2419 /** Raises a FPU stack underflow exception for an instruction pushing a result
    2420  *  value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
    2421 #define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO(a_uFpuOpcode) \
    2422     iemFpuStackPushUnderflowTwo(pVCpu, a_uFpuOpcode)
    2423 
    2424 /** Raises a FPU stack overflow exception as part of a push attempt.  Sets
    2425  *  FPUIP, FPUCS and FOP. */
    2426 #define IEM_MC_FPU_STACK_PUSH_OVERFLOW(a_uFpuOpcode) \
    2427     iemFpuStackPushOverflow(pVCpu, a_uFpuOpcode)
    2428 /** Raises a FPU stack overflow exception as part of a push attempt.  Sets
    2429  *  FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
    2430 #define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff, a_uFpuOpcode) \
    2431     iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff, a_uFpuOpcode)
    2432 /** Prepares for using the FPU state.
    2433  * Ensures that we can use the host FPU in the current context (RC+R0).
    2434  * Ensures the guest FPU state in the CPUMCTX is up to date. */
    2435 #define IEM_MC_PREPARE_FPU_USAGE()              iemFpuPrepareUsage(pVCpu)
    2436 /** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
    2437 #define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ()   iemFpuActualizeStateForRead(pVCpu)
    2438 /** Actualizes the guest FPU state so it can be accessed and modified. */
    2439 #define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
    2440 
    2441 /** Prepares for using the SSE state.
    2442  * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
    2443  * Ensures the guest SSE state in the CPUMCTX is up to date. */
    2444 #define IEM_MC_PREPARE_SSE_USAGE()              iemFpuPrepareUsageSse(pVCpu)
    2445 /** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
    2446 #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ()   iemFpuActualizeSseStateForRead(pVCpu)
    2447 /** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
    2448 #define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
    2449 
    2450 /** Prepares for using the AVX state.
    2451  * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
    2452  * Ensures the guest AVX state in the CPUMCTX is up to date.
    2453  * @note This will include the AVX512 state too when support for it is added
    2454  *       due to the zero-extending feature of VEX instructions.
    2455 #define IEM_MC_PREPARE_AVX_USAGE()              iemFpuPrepareUsageAvx(pVCpu)
    2456 /** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
    2457 #define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ()   iemFpuActualizeAvxStateForRead(pVCpu)
    2458 /** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
    2459 #define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
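The usual ordering in an instruction body is: raise any pending exceptions, then prepare/actualize, then touch the state. A rough sketch (the exception MCs belong to the generic set, not this header):

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();  /* #NM check first */
    IEM_MC_MAYBE_RAISE_FPU_XCPT();              /* then pending #MF */
    IEM_MC_PREPARE_FPU_USAGE();                 /* host FPU usable, guest x87 state loaded */
    /* ... reference registers, call the AIMPL worker, store the result ... */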
    24601291
    24611292/**
     
    26061437    } while (0)
    26071438
    2608 /** @note Not for IOPL or IF testing. */
     1439/*
     1440 * x86: EFL == RFLAGS/EFLAGS for x86.
     1441 * arm: EFL == NZCV.
     1442 */
     1443/** @todo s/IEM_MC_IF_EFL_/IEM_MC_IF_FLAGS_/ */
     1444
     1445/** @note x86: Not for IOPL or IF testing. */
    26091446#define IEM_MC_IF_EFL_BIT_SET(a_fBit)                   if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
    2610 /** @note Not for IOPL or IF testing. */
     1447/** @note x86: Not for IOPL or IF testing. */
    26111448#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit)               if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
    2612 /** @note Not for IOPL or IF testing. */
     1449/** @note x86: Not for IOPL or IF testing. */
    26131450#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits)             if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
    2614 /** @note Not for IOPL or IF testing. */
     1451/** @note x86: Not for IOPL or IF testing. */
    26151452#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits)              if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
    2616 /** @note Not for IOPL or IF testing. */
     1453/** @note x86: Not for IOPL or IF testing. */
    26171454#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2)         \
    26181455    if (   !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
    26191456        != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
    2620 /** @note Not for IOPL or IF testing. */
     1457/** @note x86: Not for IOPL or IF testing. */
    26211458#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2)         \
    26221459    if (   !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
    26231460        == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
    2624 /** @note Not for IOPL or IF testing. */
     1461/** @note x86: Not for IOPL or IF testing. */
    26251462#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
    26261463    if (   (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
    26271464        ||    !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
    26281465           != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
    2629 /** @note Not for IOPL or IF testing. */
     1466/** @note x86: Not for IOPL or IF testing. */
    26301467#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
    26311468    if (   !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
    26321469        &&    !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
    26331470           == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
    2634 #define IEM_MC_IF_CX_IS_NZ()                            if (pVCpu->cpum.GstCtx.cx  != 0) {
    2635 #define IEM_MC_IF_ECX_IS_NZ()                           if (pVCpu->cpum.GstCtx.ecx != 0) {
    2636 #define IEM_MC_IF_RCX_IS_NZ()                           if (pVCpu->cpum.GstCtx.rcx != 0) {
    2637 #define IEM_MC_IF_CX_IS_NOT_ONE()                       if (pVCpu->cpum.GstCtx.cx  != 1) {
    2638 #define IEM_MC_IF_ECX_IS_NOT_ONE()                      if (pVCpu->cpum.GstCtx.ecx != 1) {
    2639 #define IEM_MC_IF_RCX_IS_NOT_ONE()                      if (pVCpu->cpum.GstCtx.rcx != 1) {
    2640 /** @note Not for IOPL or IF testing. */
    2641 #define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
    2642         if (   pVCpu->cpum.GstCtx.cx != 1 \
    2643             && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
    2644 /** @note Not for IOPL or IF testing. */
    2645 #define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
    2646         if (   pVCpu->cpum.GstCtx.ecx != 1 \
    2647             && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
    2648 /** @note Not for IOPL or IF testing. */
    2649 #define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_SET(a_fBit) \
    2650         if (   pVCpu->cpum.GstCtx.rcx != 1 \
    2651             && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
    2652 /** @note Not for IOPL or IF testing. */
    2653 #define IEM_MC_IF_CX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
    2654         if (   pVCpu->cpum.GstCtx.cx != 1 \
    2655             && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
    2656 /** @note Not for IOPL or IF testing. */
    2657 #define IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
    2658         if (   pVCpu->cpum.GstCtx.ecx != 1 \
    2659             && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
    2660 /** @note Not for IOPL or IF testing. */
    2661 #define IEM_MC_IF_RCX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(a_fBit) \
    2662         if (   pVCpu->cpum.GstCtx.rcx != 1 \
    2663             && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
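Put together, these condition MCs read like the instruction they implement; a sketch loosely modeled on a LOOPNE rel8 body in 32-bit address mode (the immediate variable is assumed):

    IEM_MC_IF_ECX_IS_NOT_ONE_AND_EFL_BIT_NOT_SET(X86_EFL_ZF)
        IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);   /* the != 1 test is the pre-decrement form of != 0 */
        IEM_MC_REL_JMP_S8_AND_FINISH(i8Imm);
    IEM_MC_ELSE()
        IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
        IEM_MC_ADVANCE_PC_AND_FINISH();
    IEM_MC_ENDIF();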
     1471
    26641472#define IEM_MC_IF_LOCAL_IS_Z(a_Local)                   if ((a_Local) == 0) {
    26651473#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo)       if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
    2666 
    2667 #define IEM_MC_REF_FPUREG(a_pr80Dst, a_iSt) \
    2668     do { (a_pr80Dst) = &pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80; } while (0)
    2669 #define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
    2670     if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
    2671 #define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
    2672     if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
    2673 #define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
    2674     if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
    2675 #define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
    2676     if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
    2677 #define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
    2678     if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
    2679 #define IEM_MC_IF_FCW_IM() \
    2680     if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
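A guarded register reference typically wraps the worker call and falls back to the stack-underflow MCs above; a sketch:

    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, iStReg)
        /* ... call the worker and store the result as sketched earlier ... */
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0 /*st0*/, uFpuOpcode);
    IEM_MC_ENDIF();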
    26811474
    26821475#define IEM_MC_ELSE()                                   } else {
     
    27151508#define IEM_MC_LIVENESS_MXCSR_MODIFY()                  ((void)0)
    27161509
    2717 /** @todo add more as needed. */
    27181510
    27191511/** @}  */
    27201512
     1513/*
     1514 * Include the target specific header.
     1515 */
     1516#ifdef VBOX_VMM_TARGET_X86
     1517# include "VMMAll/target-x86/IEMMc-x86.h"
     1518#elif defined(VBOX_VMM_TARGET_ARMV8)
     1519//# include "VMMAll/target-armv8/IEMMc-armv8.h"
     1520#else
     1521# error "port me"
     1522#endif
     1523
    27211524#endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
    27221525