VirtualBox

Changeset 108195 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Feb 13, 2025 2:57:25 PM (3 months ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
167513
Message:

VMM/IEM: Splitting out most of the x86 target specific stuff from IEMInternal.h and into VMMAll/target-x86/IEMInternal-x86.h. jiraref:VBP-1431

Location:
trunk/src/VBox/VMM
Files:
20 edited
1 copied

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/Makefile.kmk

    r108142 r108195  
    9696VBoxVMM_INCS     = \
    9797        include \
    98         $(VBoxVMM_0_OUTDIR)/CommonGenIncs
     98        $(VBoxVMM_0_OUTDIR)/CommonGenIncs \
     99        .
    99100VBoxVMM_ASINCS   = .
    100101VBoxVMM_ASFLAGS.amd64  = -Werror
     
    412413 VBoxVMMArm_INCS     = \
    413414        include \
    414         $(VBoxVMM_0_OUTDIR)/CommonGenIncs
     415        $(VBoxVMM_0_OUTDIR)/CommonGenIncs \
     416        .
    415417 VBoxVMMArm_SOURCES  = \
    416418        VBoxVMM.d \
     
    873875 VMMR0_INCS      = \
    874876        include \
    875         $(VBoxVMM_0_OUTDIR)/CommonGenIncs
     877        $(VBoxVMM_0_OUTDIR)/CommonGenIncs \
     878        .
    876879
    877880 VMMR0_SOURCES   = \
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r108186 r108195  
    122122#define LOG_GROUP   LOG_GROUP_IEM
    123123#define VMCPU_INCL_CPUM_GST_CTX
     124#ifdef IN_RING0
     125# define VBOX_VMM_TARGET_X86
     126#endif
    124127#include <VBox/vmm/iem.h>
    125128#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp

    r107294 r108195  
    3030*   Header Files                                                                                                                 *
    3131*********************************************************************************************************************************/
     32#ifdef IN_RING0
     33# define VBOX_VMM_TARGET_X86
     34#endif
    3235#include "IEMInternal.h"
    3336#include <VBox/vmm/vmcc.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp

    r107113 r108195  
    3333#define VMCPU_INCL_CPUM_GST_CTX
    3434#define IEM_WITH_OPAQUE_DECODER_STATE
     35#ifdef IN_RING0
     36# define VBOX_VMM_TARGET_X86
     37#endif
    3538#include <VBox/vmm/iem.h>
    3639#include <VBox/vmm/cpum.h>
     
    9396# define IEM_FLUSH_PREFETCH_HEAVY(a_pVCpu, a_cbInstr) do { } while (0)
    9497#endif
     98
     99
     100/*********************************************************************************************************************************
     101*   Structures and Typedefs                                                                                                      *
     102*********************************************************************************************************************************/
     103/**
     104 * Branch types - iemCImpl_BranchTaskSegment(), iemCImpl_BranchTaskGate(),
     105 * iemCImpl_BranchCallGate() and iemCImpl_BranchSysSel().
     106 */
     107typedef enum IEMBRANCH
     108{
     109    IEMBRANCH_JUMP = 1,
     110    IEMBRANCH_CALL,
     111    IEMBRANCH_TRAP,
     112    IEMBRANCH_SOFTWARE_INT,
     113    IEMBRANCH_HARDWARE_INT
     114} IEMBRANCH;
     115AssertCompileSize(IEMBRANCH, 4);
     116
    95117
    96118
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp

    r107113 r108195  
    3232#define LOG_GROUP   LOG_GROUP_IEM_SVM
    3333#define VMCPU_INCL_CPUM_GST_CTX
     34#ifdef IN_RING0
     35# define VBOX_VMM_TARGET_X86
     36#endif
    3437#include <VBox/vmm/iem.h>
    3538#include <VBox/vmm/pdmapic.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp

    r107113 r108195  
    3232#define LOG_GROUP   LOG_GROUP_IEM_VMX
    3333#define VMCPU_INCL_CPUM_GST_CTX
     34#ifdef IN_RING0
     35# define VBOX_VMM_TARGET_X86
     36#endif
    3437#include <VBox/vmm/iem.h>
    3538#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllDbg.cpp

    r106061 r108195  
    3232#define LOG_GROUP LOG_GROUP_IEM
    3333#define VMCPU_INCL_CPUM_GST_CTX
     34#ifdef IN_RING0
     35# define VBOX_VMM_TARGET_X86
     36#endif
    3437#include <VBox/vmm/iem.h>
    3538#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllIntprTables.h

    r107113 r108195  
    4040#endif
    4141#define VMCPU_INCL_CPUM_GST_CTX
     42#ifdef IN_RING0
     43# define VBOX_VMM_TARGET_X86
     44#endif
    4245#include <VBox/vmm/iem.h>
    4346#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veExecMem.cpp

    r107211 r108195  
    3333#define IEM_WITH_OPAQUE_DECODER_STATE
    3434#define VMM_INCLUDED_SRC_include_IEMMc_h /* block IEMMc.h inclusion. */
     35#ifdef IN_RING0
     36# error "port me!"
     37#endif
    3538#include <VBox/vmm/iem.h>
    3639#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veLiveness.h

    r107218 r108195  
    3232#define LOG_GROUP LOG_GROUP_IEM
    3333#define IEM_WITH_OPAQUE_DECODER_STATE
     34#ifdef IN_RING0
     35# define VBOX_VMM_TARGET_X86
     36#endif
    3437#include <VBox/vmm/iem.h>
    3538#include "IEMInternal.h"
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompBltIn.cpp

    r107202 r108195  
    3434#define VMCPU_INCL_CPUM_GST_CTX
    3535#define VMM_INCLUDED_SRC_include_IEMMc_h /* block IEMMc.h inclusion. */
     36#ifdef IN_RING0
     37# define VBOX_VMM_TARGET_X86
     38#endif
    3639#include <VBox/vmm/iem.h>
    3740#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h

    r107200 r108195  
    3535#define VMM_INCLUDED_SRC_include_IEMMc_h /* block IEMMc.h inclusion. */
    3636#define IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES
     37#ifdef IN_RING0
     38# define VBOX_VMM_TARGET_X86
     39#endif
    3740#include <VBox/vmm/iem.h>
    3841#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncs.cpp

    r107113 r108195  
    3535#define VMCPU_INCL_CPUM_GST_CTX
    3636#define IEM_WITH_OPAQUE_DECODER_STATE
     37#ifdef IN_RING0
     38# define VBOX_VMM_TARGET_X86
     39#endif
    3740#include <VBox/vmm/iem.h>
    3841#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp

    r107113 r108195  
    3535#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
    3636#define VMCPU_INCL_CPUM_GST_CTX
     37#ifdef IN_RING0
     38# define VBOX_VMM_TARGET_X86
     39#endif
    3740#include <VBox/vmm/iem.h>
    3841#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdRecompiler.cpp

    r107113 r108195  
    5050#define IEM_WITH_CODE_TLB_AND_OPCODE_BUF  /* A bit hackish, but its all in IEMInline.h. */
    5151#define VMCPU_INCL_CPUM_GST_CTX
     52#ifdef IN_RING0
     53# define VBOX_VMM_TARGET_X86
     54#endif
    5255#include <VBox/vmm/iem.h>
    5356#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdTables.h

    r107113 r108195  
    4141#define IEM_WITH_CODE_TLB_AND_OPCODE_BUF  /* A bit hackish, but its all in IEMInline.h. */
    4242#define VMCPU_INCL_CPUM_GST_CTX
     43#ifdef IN_RING0
     44# define VBOX_VMM_TARGET_X86
     45#endif
    4346#include <VBox/vmm/iem.h>
    4447#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMInternal-x86.h

    r108188 r108195  
    11/* $Id$ */
    22/** @file
    3  * IEM - Internal header file.
     3 * IEM - Internal header file, x86 target specifics.
    44 */
    55
     
    2626 */
    2727
    28 #ifndef VMM_INCLUDED_SRC_include_IEMInternal_h
    29 #define VMM_INCLUDED_SRC_include_IEMInternal_h
     28#ifndef VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInternal_x86_h
     29#define VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInternal_x86_h
    3030#ifndef RT_WITHOUT_PRAGMA_ONCE
    3131# pragma once
    3232#endif
    3333
    34 #ifndef RT_IN_ASSEMBLER
    35 # include <VBox/vmm/cpum.h>
    36 # include <VBox/vmm/iem.h>
    37 # include <VBox/vmm/pgm.h>
    38 # include <VBox/vmm/stam.h>
    39 # include <VBox/param.h>
    40 
    41 # include <iprt/setjmp-without-sigmask.h>
    42 # include <iprt/list.h>
    43 #endif /* !RT_IN_ASSEMBLER */
    44 
    4534
    4635RT_C_DECLS_BEGIN
    4736
    4837
    49 /** @defgroup grp_iem_int      Internals
    50  * @ingroup grp_iem
     38/** @defgroup grp_iem_int_x86   X86 Target Internals
     39 * @ingroup grp_iem_int
    5140 * @internal
    5241 * @{
    5342 */
    5443
    55 /* Make doxygen happy w/o overcomplicating the #if checks. */
    56 #ifdef DOXYGEN_RUNNING
    57 # define IEM_WITH_THROW_CATCH
    58 # define VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
    59 #endif
    60 
    61 /** For expanding symbol in slickedit and other products tagging and
    62  *  crossreferencing IEM symbols. */
    63 #ifndef IEM_STATIC
    64 # define IEM_STATIC static
    65 #endif
    66 
    67 /** @def IEM_WITH_SETJMP
    68  * Enables alternative status code handling using setjmps.
    69  *
    70  * This adds a bit of expense via the setjmp() call since it saves all the
    71  * non-volatile registers.  However, it eliminates return code checks and allows
    72  * for more optimal return value passing (return regs instead of stack buffer).
    73  */
    74 #if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
    75 # define IEM_WITH_SETJMP
    76 #endif
    77 
    78 /** @def IEM_WITH_THROW_CATCH
    79  * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
    80  * mode code when IEM_WITH_SETJMP is in effect.
    81  *
    82  * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
    83  * setjmp/longjmp resulted in bs2-test-1 running 3.00% faster and all but one test
    84  * result value improving by more than 1%. (Best out of three.)
    85  *
    86  * With Visual C++ 2019 and code TLB on windows, using throw/catch instead of
    87  * setjmp/longjmp resulted in bs2-test-1 running 3.68% faster and all but some of
    88  * the MMIO and CPUID tests ran noticeably faster. Variation is greater than on
    89  * Linux, but it should be quite a bit faster for normal code.
    90  */
    91 #if defined(__cplusplus) && defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER)) /* ASM-NOINC-START */
    92 # define IEM_WITH_THROW_CATCH
    93 #endif /*ASM-NOINC-END*/
    94 
    95 /** @def IEM_WITH_ADAPTIVE_TIMER_POLLING
    96  * Enables the adaptive timer polling code.
    97  */
    98 #if defined(DOXYGEN_RUNNING) || 1
    99 # define IEM_WITH_ADAPTIVE_TIMER_POLLING
    100 #endif
    101 
    102 /** @def IEM_WITH_INTRA_TB_JUMPS
    103  * Enables loop-jumps within a TB (currently only to the first call).
    104  */
    105 #if defined(DOXYGEN_RUNNING) || 1
    106 # define IEM_WITH_INTRA_TB_JUMPS
    107 #endif
    108 
    109 /** @def IEMNATIVE_WITH_DELAYED_PC_UPDATING
    110  * Enables the delayed PC updating optimization (see @bugref{10373}).
    111  */
    112 #if defined(DOXYGEN_RUNNING) || 1
    113 # define IEMNATIVE_WITH_DELAYED_PC_UPDATING
    114 #endif
    115 /** @def IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
    116  * Enabled delayed PC updating debugging code.
    117  * This is an alternative to the ARM64-only IEMNATIVE_REG_FIXED_PC_DBG. */
    118 #if defined(DOXYGEN_RUNNING) || 0
    119 # define IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
    120 #endif
    121 
    122 /** Enables access to even callee saved registers. */
    123 /*# define IEMNATIVE_WITH_SIMD_REG_ACCESS_ALL_REGISTERS*/
    124 
    125 #if defined(DOXYGEN_RUNNING) || 1
    126 /** @def IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    127  * Delay the writeback or dirty registers as long as possible. */
    128 # define IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
    129 #endif
    130 
    131 /** @def IEM_WITH_TLB_STATISTICS
    132  * Enables all TLB statistics. */
    133 #if defined(VBOX_WITH_STATISTICS) || defined(DOXYGEN_RUNNING)
    134 # define IEM_WITH_TLB_STATISTICS
    135 #endif
    136 
    137 /** @def IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
    138  * Enable this to use native emitters for certain SIMD FP operations. */
    139 #if 1 || defined(DOXYGEN_RUNNING)
    140 # define IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
    141 #endif
    142 
    143 /** @def VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING
    144  * Enable this to create a saved state file with the threaded translation
    145  * blocks fed to the native recompiler on VCPU \#0.  The resulting file can
    146  * then be fed into the native recompiler for code profiling purposes.
    147  * This is not a feature that should be normally be enabled! */
    148 #if 0 || defined(DOXYGEN_RUNNING)
    149 # define VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING
    150 #endif
    151 
    152 /** @def VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
    153  * Enables a quicker alternative to throw/longjmp for IEM_DO_LONGJMP when
    154  * executing native translation blocks.
    155  *
    156  * This exploits the fact that we save all non-volatile registers in the TB
    157  * prologue and thus just need to do the same as the TB epilogue to get the
    158  * effect of a longjmp/throw.  Since MSC marks XMM6 thru XMM15 as
    159  * non-volatile (and does something even more crazy for ARM), this probably
    160  * won't work reliably on Windows. */
    161 #ifdef RT_ARCH_ARM64
    162 # ifndef RT_OS_WINDOWS
    163 #  define VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
    164 # endif
    165 #endif
    166 /* ASM-NOINC-START */
    167 #ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
    168 # if !defined(IN_RING3) \
    169   || !defined(VBOX_WITH_IEM_RECOMPILER) \
    170   || !defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
    171 #  undef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
    172 # elif defined(RT_OS_WINDOWS)
    173 #  pragma message("VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is not safe to use on windows")
    174 # endif
    175 #endif
    176 
    177 
    178 /** @def IEM_DO_LONGJMP
    179  *
    180  * Wrapper around longjmp / throw.
    181  *
    182  * @param   a_pVCpu     The CPU handle.
    183  * @param   a_rc        The status code jump back with / throw.
    184  */
    185 #if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
    186 # ifdef IEM_WITH_THROW_CATCH
    187 #  ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
    188 #   define IEM_DO_LONGJMP(a_pVCpu, a_rc) do { \
    189             if ((a_pVCpu)->iem.s.pvTbFramePointerR3) \
    190                 iemNativeTbLongJmp((a_pVCpu)->iem.s.pvTbFramePointerR3, (a_rc)); \
    191             throw int(a_rc); \
    192         } while (0)
    193 #  else
    194 #   define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
    195 #  endif
    196 # else
    197 #  define IEM_DO_LONGJMP(a_pVCpu, a_rc)  longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
    198 # endif
    199 #endif
    200 
    201 /** For use with IEM function that may do a longjmp (when enabled).
    202  *
    203  * Visual C++ has trouble longjmp'ing from/over functions with the noexcept
    204  * attribute.  So, we indicate that function that may be part of a longjmp may
    205  * throw "exceptions" and that the compiler should definitely not generate and
    206  * std::terminate calling unwind code.
    207  *
    208  * Here is one example of this ending in std::terminate:
    209  * @code{.txt}
    210 00 00000041`cadfda10 00007ffc`5d5a1f9f     ucrtbase!abort+0x4e
    211 01 00000041`cadfda40 00007ffc`57af229a     ucrtbase!terminate+0x1f
    212 02 00000041`cadfda70 00007ffb`eec91030     VCRUNTIME140!__std_terminate+0xa [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\ehhelpers.cpp @ 192]
    213 03 00000041`cadfdaa0 00007ffb`eec92c6d     VCRUNTIME140_1!_CallSettingFrame+0x20 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\handlers.asm @ 50]
    214 04 00000041`cadfdad0 00007ffb`eec93ae5     VCRUNTIME140_1!__FrameHandler4::FrameUnwindToState+0x241 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 1085]
    215 05 00000041`cadfdc00 00007ffb`eec92258     VCRUNTIME140_1!__FrameHandler4::FrameUnwindToEmptyState+0x2d [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 218]
    216 06 00000041`cadfdc30 00007ffb`eec940e9     VCRUNTIME140_1!__InternalCxxFrameHandler<__FrameHandler4>+0x194 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 304]
    217 07 00000041`cadfdcd0 00007ffc`5f9f249f     VCRUNTIME140_1!__CxxFrameHandler4+0xa9 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 290]
    218 08 00000041`cadfdd40 00007ffc`5f980939     ntdll!RtlpExecuteHandlerForUnwind+0xf
    219 09 00000041`cadfdd70 00007ffc`5f9a0edd     ntdll!RtlUnwindEx+0x339
    220 0a 00000041`cadfe490 00007ffc`57aff976     ntdll!RtlUnwind+0xcd
    221 0b 00000041`cadfea00 00007ffb`e1b5de01     VCRUNTIME140!__longjmp_internal+0xe6 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\longjmp.asm @ 140]
    222 0c (Inline Function) --------`--------     VBoxVMM!iemOpcodeGetNextU8SlowJmp+0x95 [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 1155]
    223 0d 00000041`cadfea50 00007ffb`e1b60f6b     VBoxVMM!iemOpcodeGetNextU8Jmp+0xc1 [L:\vbox-intern\src\VBox\VMM\include\IEMInline.h @ 402]
    224 0e 00000041`cadfea90 00007ffb`e1cc6201     VBoxVMM!IEMExecForExits+0xdb [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 10185]
    225 0f 00000041`cadfec70 00007ffb`e1d0df8d     VBoxVMM!EMHistoryExec+0x4f1 [L:\vbox-intern\src\VBox\VMM\VMMAll\EMAll.cpp @ 452]
    226 10 00000041`cadfed60 00007ffb`e1d0d4c0     VBoxVMM!nemR3WinHandleExitCpuId+0x79d [L:\vbox-intern\src\VBox\VMM\VMMAll\NEMAllNativeTemplate-win.cpp.h @ 1829]    @encode
    227    @endcode
    228  *
    229  * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
    230  */
    231 #if defined(IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))
    232 # define IEM_NOEXCEPT_MAY_LONGJMP   RT_NOEXCEPT_EX(false)
    233 #else
    234 # define IEM_NOEXCEPT_MAY_LONGJMP   RT_NOEXCEPT
    235 #endif
    236 /* ASM-NOINC-END */
    237 
    238 
    239 //#define IEM_WITH_CODE_TLB // - work in progress
    240 //#define IEM_WITH_DATA_TLB // - work in progress
    241 
    242 
    243 /** @def IEM_USE_UNALIGNED_DATA_ACCESS
    244  * Use unaligned accesses instead of elaborate byte assembly. */
    245 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)  /*ASM-NOINC*/
    246 # define IEM_USE_UNALIGNED_DATA_ACCESS
    247 #endif                                                                          /*ASM-NOINC*/
    248 
    249 //#define IEM_LOG_MEMORY_WRITES
    250 
    251 
    252 /** @def IEM_CFG_TARGET_CPU
    253  * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
    254  *
    255  * By default we allow this to be configured by the user via the
    256  * CPUM/GuestCpuName config string, but this comes at a slight cost during
    257  * decoding.  So, for applications of this code where there is no need to
    258  * be dynamic wrt target CPU, just modify this define.
    259  */
    260 #if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
    261 # define IEM_CFG_TARGET_CPU     IEMTARGETCPU_DYNAMIC
    262 #endif
    263 
    264 
    265 /*
    266  * X86 config.
    267  */
    268 
    269 #define IEM_IMPLEMENTS_TASKSWITCH
    270 
    271 /** @def IEM_WITH_3DNOW
    272  * Includes the 3DNow decoding.  */
    273 #if !defined(IEM_WITH_3DNOW) || defined(DOXYGEN_RUNNING)   /* For doxygen, set in Config.kmk. */
    274 # ifndef IEM_WITHOUT_3DNOW
    275 #  define IEM_WITH_3DNOW
    276 # endif
    277 #endif
    278 
    279 /** @def IEM_WITH_THREE_0F_38
    280  * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
    281 #if !defined(IEM_WITH_THREE_0F_38) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
    282 # ifndef IEM_WITHOUT_THREE_0F_38
    283 #  define IEM_WITH_THREE_0F_38
    284 # endif
    285 #endif
    286 
    287 /** @def IEM_WITH_THREE_0F_3A
    288  * Includes the three byte opcode map for instrs starting with 0x0f 0x3a. */
    289 #if !defined(IEM_WITH_THREE_0F_3A) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
    290 # ifndef IEM_WITHOUT_THREE_0F_3A
    291 #  define IEM_WITH_THREE_0F_3A
    292 # endif
    293 #endif
    294 
    295 /** @def IEM_WITH_VEX
    296  * Includes the VEX decoding. */
    297 #if !defined(IEM_WITH_VEX) || defined(DOXYGEN_RUNNING)       /* For doxygen, set in Config.kmk. */
    298 # ifndef IEM_WITHOUT_VEX
    299 #  define IEM_WITH_VEX
    300 # endif
    301 #endif
    302 
    303 
    304 #ifndef RT_IN_ASSEMBLER /* ASM-NOINC-START - the rest of the file */
    305 
    306 # if !defined(IEM_WITHOUT_INSTRUCTION_STATS) && !defined(DOXYGEN_RUNNING)
    307 /** Instruction statistics.   */
    308 typedef struct IEMINSTRSTATS
    309 {
    310 # define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
    311 # include "IEMInstructionStatisticsTmpl.h"
    312 # undef IEM_DO_INSTR_STAT
    313 } IEMINSTRSTATS;
    314 #else
    315 struct IEMINSTRSTATS;
    316 typedef struct IEMINSTRSTATS IEMINSTRSTATS;
    317 #endif
    318 /** Pointer to IEM instruction statistics. */
    319 typedef IEMINSTRSTATS *PIEMINSTRSTATS;
    320 
    321 
    322 /** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour
    323  * @{ */
    324 #define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE      0     /**< Native x86 EFLAGS result; Intel EFLAGS when on non-x86 hosts. */
    325 #define IEMTARGETCPU_EFL_BEHAVIOR_INTEL       1     /**< Intel EFLAGS result. */
    326 #define IEMTARGETCPU_EFL_BEHAVIOR_AMD         2     /**< AMD EFLAGS result */
    327 #define IEMTARGETCPU_EFL_BEHAVIOR_RESERVED    3     /**< Reserved/dummy entry slot that's the same as 0. */
    328 #define IEMTARGETCPU_EFL_BEHAVIOR_MASK        3     /**< For masking the index before use. */
    329 /** Selects the right variant from a_aArray.
    330  * pVCpu is implicit in the caller context. */
    331 #define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \
    332     (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
    333 /** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can
    334  * be used because the host CPU does not support the operation. */
    335 #define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \
    336     (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
    337 /** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two dimensional
    338  *  array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index
    339  *  into the two.
    340  * @sa IEM_SELECT_NATIVE_OR_FALLBACK */
    341 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
    342 # define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
    343     (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
    344 #else
    345 # define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
    346     (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
    347 #endif
    348 /** @} */
    349 
    350 /**
    351  * Picks @a a_pfnNative or @a a_pfnFallback according to the host CPU feature
    352  * indicator given by @a a_fCpumFeatureMember (CPUMFEATURES member).
    353  *
    354  * On non-x86 hosts, this will shortcut to the fallback w/o checking the
    355  * indicator.
    356  *
    357  * @sa IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX
    358  */
    359 #if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
    360 # define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) \
    361     (g_CpumHostFeatures.s.a_fCpumFeatureMember ? a_pfnNative : a_pfnFallback)
    362 #else
    363 # define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) (a_pfnFallback)
    364 #endif
    365 
    366 /** @name Helpers for passing C++ template arguments to an
    367  *        IEM_MC_NATIVE_EMIT_3/4/5 style macro.
    368  * @{
    369  */
    370 #define IEM_TEMPL_ARG_1(a1)             <a1>
    371 #define IEM_TEMPL_ARG_2(a1, a2)         <a1,a2>
    372 #define IEM_TEMPL_ARG_3(a1, a2, a3)     <a1,a2,a3>
    373 /** @} */
    374 
    375 
    376 /**
    377  * Branch types - iemCImpl_BranchTaskSegment(), iemCImpl_BranchTaskGate(),
    378  * iemCImpl_BranchCallGate() and iemCImpl_BranchSysSel().
    379  * @note x86 specific
    380  */
    381 typedef enum IEMBRANCH
    382 {
    383     IEMBRANCH_JUMP = 1,
    384     IEMBRANCH_CALL,
    385     IEMBRANCH_TRAP,
    386     IEMBRANCH_SOFTWARE_INT,
    387     IEMBRANCH_HARDWARE_INT
    388 } IEMBRANCH;
    389 AssertCompileSize(IEMBRANCH, 4);
    390 
    391 
    392 /**
    393  * INT instruction types - iemCImpl_int().
    394  * @note x86 specific
    395  */
    396 typedef enum IEMINT
    397 {
    398     /** INT n instruction (opcode 0xcd imm). */
    399     IEMINT_INTN  = 0,
    400     /** Single byte INT3 instruction (opcode 0xcc). */
    401     IEMINT_INT3  = IEM_XCPT_FLAGS_BP_INSTR,
    402     /** Single byte INTO instruction (opcode 0xce). */
    403     IEMINT_INTO  = IEM_XCPT_FLAGS_OF_INSTR,
    404     /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
    405     IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
    406 } IEMINT;
    407 AssertCompileSize(IEMINT, 4);
    408 
    409 
    410 /**
    411  * A FPU result.
    412  * @note x86 specific
    413  */
    414 typedef struct IEMFPURESULT
    415 {
    416     /** The output value. */
    417     RTFLOAT80U      r80Result;
    418     /** The output status. */
    419     uint16_t        FSW;
    420 } IEMFPURESULT;
    421 AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
    422 /** Pointer to a FPU result. */
    423 typedef IEMFPURESULT *PIEMFPURESULT;
    424 /** Pointer to a const FPU result. */
    425 typedef IEMFPURESULT const *PCIEMFPURESULT;
    426 
    427 
    428 /**
    429  * A FPU result consisting of two output values and FSW.
    430  * @note x86 specific
    431  */
    432 typedef struct IEMFPURESULTTWO
    433 {
    434     /** The first output value. */
    435     RTFLOAT80U      r80Result1;
    436     /** The output status. */
    437     uint16_t        FSW;
    438     /** The second output value. */
    439     RTFLOAT80U      r80Result2;
    440 } IEMFPURESULTTWO;
    441 AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
    442 AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
    443 /** Pointer to a FPU result consisting of two output values and FSW. */
    444 typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
    445 /** Pointer to a const FPU result consisting of two output values and FSW. */
    446 typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;
    447 
    448 
    449 /**
    450  * IEM TLB entry.
    451  *
    452  * Lookup assembly:
    453  * @code{.asm}
    454         ; Calculate tag.
    455         mov     rax, [VA]
    456         shl     rax, 16
    457         shr     rax, 16 + X86_PAGE_SHIFT
    458         or      rax, [uTlbRevision]
    459 
    460         ; Do indexing.
    461         movzx   ecx, al
    462         lea     rcx, [pTlbEntries + rcx]
    463 
    464         ; Check tag.
    465         cmp     [rcx + IEMTLBENTRY.uTag], rax
    466         jne     .TlbMiss
    467 
    468         ; Check access.
    469         mov     rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
    470         and     rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
    471         cmp     rax, [uTlbPhysRev]
    472         jne     .TlbMiss
    473 
    474         ; Calc address and we're done.
    475         mov     eax, X86_PAGE_OFFSET_MASK
    476         and     eax, [VA]
    477         or      rax, [rcx + IEMTLBENTRY.pMappingR3]
    478     %ifdef VBOX_WITH_STATISTICS
    479         inc     qword [cTlbHits]
    480     %endif
    481         jmp     .Done
    482 
    483     .TlbMiss:
    484         mov     r8d, ACCESS_FLAGS
    485         mov     rdx, [VA]
    486         mov     rcx, [pVCpu]
    487         call    iemTlbTypeMiss
    488     .Done:
    489 
    490    @endcode
    491  *
    492  */
    493 typedef struct IEMTLBENTRY
    494 {
    495     /** The TLB entry tag.
    496      * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits, this
    497      * is ASSUMING a virtual address width of 48 bits.
    498      *
    499      * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
    500      *
    501      * The TLB lookup code uses the current TLB revision, which won't ever be zero,
    502      * enabling an extremely cheap TLB invalidation most of the time.  When the TLB
    503      * revision wraps around though, the tags needs to be zeroed.
    504      *
    505      * @note    Try use SHRD instruction?  After seeing
    506      *          https://gmplib.org/~tege/x86-timing.pdf, maybe not.
    507      *
    508      * @todo    This will need to be reorganized for 57-bit wide virtual address and
    509      *          PCID (currently 12 bits) and ASID (currently 6 bits) support.  We'll
    510      *          have to move the TLB entry versioning entirely to the
    511      *          fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
    512      *          19 bits left (64 - 57 + 12 = 19) and they'll almost entire be
    513      *          consumed by PCID and ASID (12 + 6 = 18).
    514      */
    515     uint64_t                uTag;
    516     /** Access flags and physical TLB revision.
    517      *
    518      * - Bit  0 - page tables   - not executable (X86_PTE_PAE_NX).
    519      * - Bit  1 - page tables   - not writable (complemented X86_PTE_RW).
    520      * - Bit  2 - page tables   - not user (complemented X86_PTE_US).
    521      * - Bit  3 - pgm phys/virt - not directly writable.
    522      * - Bit  4 - pgm phys page - not directly readable.
    523      * - Bit  5 - page tables   - not accessed (complemented X86_PTE_A).
    524      * - Bit  6 - page tables   - not dirty (complemented X86_PTE_D).
    525      * - Bit  7 - tlb entry     - pMappingR3 member not valid.
    526      * - Bits 63 thru 8 are used for the physical TLB revision number.
    527      *
    528      * We're using complemented bit meanings here because it makes it easy to check
    529      * whether special action is required.  For instance a user mode write access
    530      * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
    531      * non-zero result would mean special handling needed because either it wasn't
    532      * writable, or it wasn't user, or the page wasn't dirty.  A user mode read
    533      * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
    534      * need to check any PTE flag.
    535      */
    536     uint64_t                fFlagsAndPhysRev;
    537     /** The guest physical page address. */
    538     uint64_t                GCPhys;
    539     /** Pointer to the ring-3 mapping. */
    540     R3PTRTYPE(uint8_t *)    pbMappingR3;
    541 #if HC_ARCH_BITS == 32
    542     uint32_t                u32Padding1;
    543 #endif
    544 } IEMTLBENTRY;
    545 AssertCompileSize(IEMTLBENTRY, 32);
/** Pointer to an IEM TLB entry. */
typedef IEMTLBENTRY *PIEMTLBENTRY;
/** Pointer to a const IEM TLB entry. */
typedef IEMTLBENTRY const *PCIEMTLBENTRY;

/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
 * @{  */
#define IEMTLBE_F_PT_NO_EXEC        RT_BIT_64(0)  /**< Page tables: Not executable. */
#define IEMTLBE_F_PT_NO_WRITE       RT_BIT_64(1)  /**< Page tables: Not writable. */
#define IEMTLBE_F_PT_NO_USER        RT_BIT_64(2)  /**< Page tables: Not user accessible (supervisor only). */
#define IEMTLBE_F_PG_NO_WRITE       RT_BIT_64(3)  /**< Phys page:   Not writable (access handler, ROM, whatever). */
#define IEMTLBE_F_PG_NO_READ        RT_BIT_64(4)  /**< Phys page:   Not readable (MMIO / access handler, ROM) */
#define IEMTLBE_F_PT_NO_ACCESSED    RT_BIT_64(5)  /**< Page tables: Not accessed (need to be marked accessed). */
#define IEMTLBE_F_PT_NO_DIRTY       RT_BIT_64(6)  /**< Page tables: Not dirty (needs to be made dirty on write). */
#define IEMTLBE_F_PT_LARGE_PAGE     RT_BIT_64(7)  /**< Page tables: Large 2 or 4 MiB page (for flushing). */
#define IEMTLBE_F_NO_MAPPINGR3      RT_BIT_64(8)  /**< TLB entry:   The IEMTLBENTRY::pMappingR3 member is invalid. */
#define IEMTLBE_F_PG_UNASSIGNED     RT_BIT_64(9)  /**< Phys page:   Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
#define IEMTLBE_F_PG_CODE_PAGE      RT_BIT_64(10) /**< Phys page:   Code page. */
#define IEMTLBE_F_PHYS_REV          UINT64_C(0xfffffffffffff800) /**< Physical revision mask. @sa IEMTLB_PHYS_REV_INCR */
/** @} */
AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE     == IEMTLBE_F_PG_NO_WRITE);
AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ      == IEMTLBE_F_PG_NO_READ);
AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED   == IEMTLBE_F_PG_UNASSIGNED);
AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE    == IEMTLBE_F_PG_CODE_PAGE);
AssertCompile(PGM_WALKINFO_BIG_PAGE           == IEMTLBE_F_PT_LARGE_PAGE);
/** The bits set by PGMPhysIemGCPhys2PtrNoLock. */
#define IEMTLBE_GCPHYS2PTR_MASK     (  PGMIEMGCPHYS2PTR_F_NO_WRITE \
                                     | PGMIEMGCPHYS2PTR_F_NO_READ \
                                     | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 \
                                     | PGMIEMGCPHYS2PTR_F_UNASSIGNED \
                                     | PGMIEMGCPHYS2PTR_F_CODE_PAGE \
                                     | IEMTLBE_F_PHYS_REV )
    579 
    580 
/** The TLB size (power of two).
 * We initially chose 256 because that way we can obtain the result directly
 * from an 8-bit register without an additional AND instruction.
 * See also @bugref{10687}. */
#if defined(RT_ARCH_AMD64)
# define IEMTLB_ENTRY_COUNT                      256
# define IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO      8
#else
# define IEMTLB_ENTRY_COUNT                      8192
# define IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO      13
#endif
AssertCompile(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) == IEMTLB_ENTRY_COUNT);

/** TLB slot format spec (assumes uint32_t or unsigned value). */
#if IEMTLB_ENTRY_COUNT <= 0x100 / 2
# define IEMTLB_SLOT_FMT    "%02x"
#elif IEMTLB_ENTRY_COUNT <= 0x1000 / 2
# define IEMTLB_SLOT_FMT    "%03x"
#elif IEMTLB_ENTRY_COUNT <= 0x10000 / 2
# define IEMTLB_SLOT_FMT    "%04x"
#else
# define IEMTLB_SLOT_FMT    "%05x"
#endif

/** Enable the large page bitmap TLB optimization.
 *
 * The idea here is to avoid scanning the full 32 KB (2MB pages, 2*512 TLB
 * entries) or 64 KB (4MB pages, 2*1024 TLB entries) worth of TLB entries during
 * invlpg when large pages are used, and instead just scan 128 or 256 bytes of
 * the bmLargePage bitmap to determine which TLB entries that might contain
 * large pages and actually require checking.
 *
 * There is a good possibility of false positives since we currently don't clear
 * the bitmap when flushing the TLB, but it should help reduce the workload when
 * the large pages aren't fully loaded into the TLB in their entirety...
 */
#define IEMTLB_WITH_LARGE_PAGE_BITMAP
    618 
/**
 * An IEM TLB.
 *
 * We've got two of these, one for data and one for instructions.
 */
typedef struct IEMTLB
{
    /** The non-global TLB revision.
     * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
     * by adding RT_BIT_64(36) to it.  When it wraps around and becomes zero, all
     * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
     * (The revision zero indicates an invalid TLB entry.)
     *
     * The initial value is chosen to cause an early wraparound. */
    uint64_t            uTlbRevision;
    /** The TLB physical address revision - shadow of PGM variable.
     *
     * This is actually only 53 bits wide (see IEMTLBE_F_PHYS_REV) and is
     * incremented by adding RT_BIT_64(11) (IEMTLB_PHYS_REV_INCR).  When it wraps
     * around and becomes zero, a rendezvous is called and each CPU wipes the
     * IEMTLBENTRY::pMappingR3 as well as IEMTLBENTRY::fFlagsAndPhysRev bits
     * 63 thru 8, 4, and 3.
     *
     * The initial value is chosen to cause an early wraparound.
     *
     * @note This is placed between the two TLB revisions because we
     *       load it in pair with one or the other on arm64. */
    uint64_t volatile   uTlbPhysRev;
    /** The global TLB revision.
     * Same as uTlbRevision, but only increased for global flushes. */
    uint64_t            uTlbRevisionGlobal;

    /** Large page tag range.
     *
     * This is used to avoid scanning a large page's worth of TLB entries for each
     * INVLPG instruction, and only to do so iff we've loaded any and when the
     * address is in this range.  This is kept up to date when loading new TLB
     * entries.
     */
    struct LARGEPAGERANGE
    {
        /** The lowest large page address tag, UINT64_MAX if none. */
        uint64_t        uFirstTag;
        /** The highest large page address tag (with offset mask part set), 0 if none. */
        uint64_t        uLastTag;
    }
    /** Large page range for non-global pages. */
                        NonGlobalLargePageRange,
    /** Large page range for global pages. */
                        GlobalLargePageRange;
    /** Number of non-global entries for large pages loaded since last TLB flush. */
    uint32_t            cTlbNonGlobalLargePageCurLoads;
    /** Number of global entries for large pages loaded since last TLB flush. */
    uint32_t            cTlbGlobalLargePageCurLoads;

    /* Statistics: */

    /** TLB hits in IEMAll.cpp code (IEM_WITH_TLB_STATISTICS only; both).
     * @note For the data TLB this is only used in iemMemMap and for direct (i.e.
     *       not via safe read/write path) calls to iemMemMapJmp. */
    uint64_t            cTlbCoreHits;
    /** Safe read/write TLB hits in iemMemMapJmp (IEM_WITH_TLB_STATISTICS
     *  only; data tlb only). */
    uint64_t            cTlbSafeHits;
    /** TLB hits in IEMAllMemRWTmplInline.cpp.h (data + IEM_WITH_TLB_STATISTICS only). */
    uint64_t            cTlbInlineCodeHits;

    /** TLB misses in IEMAll.cpp code (both).
     * @note For the data TLB this is only used in iemMemMap and for direct (i.e.
     *       not via safe read/write path) calls to iemMemMapJmp. So,
     *       for the data TLB this is more like 'other misses', while for the code
     *       TLB it is all misses. */
    uint64_t            cTlbCoreMisses;
    /** Subset of cTlbCoreMisses that results in PTE.G=1 loads (odd entries). */
    uint64_t            cTlbCoreGlobalLoads;
    /** Safe read/write TLB misses in iemMemMapJmp (so data only). */
    uint64_t            cTlbSafeMisses;
    /** Subset of cTlbSafeMisses that results in PTE.G=1 loads (odd entries). */
    uint64_t            cTlbSafeGlobalLoads;
    /** Safe read path taken (data only).  */
    uint64_t            cTlbSafeReadPath;
    /** Safe write path taken (data only).  */
    uint64_t            cTlbSafeWritePath;

    /** @name Details for native code TLB misses.
     * @note These counts are included in the above counters (cTlbSafeReadPath,
     *       cTlbSafeWritePath, cTlbInlineCodeHits).
     * @{ */
    /** TLB misses in native code due to tag mismatch.   */
    STAMCOUNTER         cTlbNativeMissTag;
    /** TLB misses in native code due to flags or physical revision mismatch. */
    STAMCOUNTER         cTlbNativeMissFlagsAndPhysRev;
    /** TLB misses in native code due to misaligned access. */
    STAMCOUNTER         cTlbNativeMissAlignment;
    /** TLB misses in native code due to cross page access. */
    uint32_t            cTlbNativeMissCrossPage;
    /** TLB misses in native code due to non-canonical address. */
    uint32_t            cTlbNativeMissNonCanonical;
    /** @} */

    /** Slow read path (code only).  */
    uint32_t            cTlbSlowCodeReadPath;

    /** Regular TLB flush count.
     * @note The 'cTls' prefix looks like a historical typo for 'cTlb' (cf. the
     *       sibling counters); the name is kept as-is for compatibility. */
    uint32_t            cTlsFlushes;
    /** Global TLB flush count.
     * @note Same 'cTls' naming quirk as cTlsFlushes. */
    uint32_t            cTlsGlobalFlushes;
    /** Revision rollovers. */
    uint32_t            cTlbRevisionRollovers;
    /** Physical revision flushes. */
    uint32_t            cTlbPhysRevFlushes;
    /** Physical revision rollovers. */
    uint32_t            cTlbPhysRevRollovers;

    /** Number of INVLPG (and similar) operations. */
    uint32_t            cTlbInvlPg;
    /** Subset of cTlbInvlPg that involved non-global large pages. */
    uint32_t            cTlbInvlPgLargeNonGlobal;
    /** Subset of cTlbInvlPg that involved global large pages. */
    uint32_t            cTlbInvlPgLargeGlobal;

    /** Padding, presumably keeping aEntries 64-byte aligned - see the
     *  AssertCompileSizeAlignment(IEMTLB, 64) below (TODO confirm intent). */
    uint32_t            au32Padding[13];

    /** The TLB entries.
     * Even entries are for PTE.G=0 and use uTlbRevision.
     * Odd  entries are for PTE.G=1 and use uTlbRevisionGlobal. */
    IEMTLBENTRY         aEntries[IEMTLB_ENTRY_COUNT * 2];
#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    /** Bitmap tracking TLB entries for large pages.
     * This duplicates IEMTLBE_F_PT_LARGE_PAGE for each TLB entry. */
    uint64_t            bmLargePage[IEMTLB_ENTRY_COUNT * 2 / 64];
#endif
} IEMTLB;
AssertCompileSizeAlignment(IEMTLB, 64);
#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
AssertCompile(IEMTLB_ENTRY_COUNT >= 32 /* bmLargePage ASSUMPTION */);
#endif
/** The width (in bits) of the address portion of the TLB tag.   */
#define IEMTLB_TAG_ADDR_WIDTH   36
/** IEMTLB::uTlbRevision increment.  */
#define IEMTLB_REVISION_INCR    RT_BIT_64(IEMTLB_TAG_ADDR_WIDTH)
/** IEMTLB::uTlbRevision mask.  */
#define IEMTLB_REVISION_MASK    (~(RT_BIT_64(IEMTLB_TAG_ADDR_WIDTH) - 1))

/** IEMTLB::uTlbPhysRev increment.
 * @sa IEMTLBE_F_PHYS_REV */
#define IEMTLB_PHYS_REV_INCR    RT_BIT_64(11)
AssertCompile(IEMTLBE_F_PHYS_REV == ~(IEMTLB_PHYS_REV_INCR - 1U));

/**
 * Calculates the TLB tag for a virtual address but without TLB revision.
 * @returns Tag value for indexing and comparing with IEMTLB::uTag.
 * @param   a_GCPtr     The virtual address.  Must be RTGCPTR or same size or
 *                      the clearing of the top 16 bits won't work (if 32-bit
 *                      we'll end up with mostly zeros).
 */
#define IEMTLB_CALC_TAG_NO_REV(a_GCPtr)     ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
/**
 * Converts a TLB tag value into an even TLB index.
 * @returns Index into IEMTLB::aEntries.
 * @param   a_uTag      Value returned by IEMTLB_CALC_TAG.
 */
#if IEMTLB_ENTRY_COUNT == 256
# define IEMTLB_TAG_TO_EVEN_INDEX(a_uTag)   ( (uint8_t)(a_uTag) * 2U )
#else
# define IEMTLB_TAG_TO_EVEN_INDEX(a_uTag)   ( ((a_uTag) & (IEMTLB_ENTRY_COUNT - 1U)) * 2U )
AssertCompile(RT_IS_POWER_OF_TWO(IEMTLB_ENTRY_COUNT));
#endif
/**
 * Converts a TLB tag value into a pointer to the corresponding even TLB entry.
 * @returns Pointer into IEMTLB::aEntries corresponding to the tag.
 * @param   a_pTlb      The TLB.
 * @param   a_uTag      Value returned by IEMTLB_CALC_TAG or
 *                      IEMTLB_CALC_TAG_NO_REV.
 */
#define IEMTLB_TAG_TO_EVEN_ENTRY(a_pTlb, a_uTag)    ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_EVEN_INDEX(a_uTag)] )

/** Converts a GC address to an even TLB index. */
#define IEMTLB_ADDR_TO_EVEN_INDEX(a_GCPtr)  IEMTLB_TAG_TO_EVEN_INDEX(IEMTLB_CALC_TAG_NO_REV(a_GCPtr))
    797 
    798 
/** @def IEM_WITH_TLB_TRACE
 * Enables the TLB tracing.
 * Adjust buffer size in IEMR3Init. */
#if defined(DOXYGEN_RUNNING) || 0
# define IEM_WITH_TLB_TRACE
#endif

#ifdef IEM_WITH_TLB_TRACE

/** TLB trace entry types. */
typedef enum : uint8_t
{
    kIemTlbTraceType_Invalid,
    kIemTlbTraceType_InvlPg,
    kIemTlbTraceType_EvictSlot,
    kIemTlbTraceType_LargeEvictSlot,
    kIemTlbTraceType_LargeScan,
    kIemTlbTraceType_Flush,
    kIemTlbTraceType_FlushGlobal,
    kIemTlbTraceType_Load,
    kIemTlbTraceType_LoadGlobal,
    kIemTlbTraceType_Load_Cr0,  /**< x86 specific */
    kIemTlbTraceType_Load_Cr3,  /**< x86 specific */
    kIemTlbTraceType_Load_Cr4,  /**< x86 specific */
    kIemTlbTraceType_Load_Efer, /**< x86 specific */
    kIemTlbTraceType_Irq,
    kIemTlbTraceType_Xcpt,
    kIemTlbTraceType_IRet,      /**< x86 specific */
    kIemTlbTraceType_Tb_Compile,
    kIemTlbTraceType_Tb_Exec_Threaded,
    kIemTlbTraceType_Tb_Exec_Native,
    kIemTlbTraceType_User0,
    kIemTlbTraceType_User1,
    kIemTlbTraceType_User2,
    kIemTlbTraceType_User3,
} IEMTLBTRACETYPE;

/** TLB trace entry. */
typedef struct IEMTLBTRACEENTRY
{
    /** The flattened RIP for the event. */
    uint64_t            rip;
    /** The event type. */
    IEMTLBTRACETYPE     enmType;
    /** Byte parameter - typically used as 'bool fDataTlb'.  */
    uint8_t             bParam;
    /** 16-bit parameter value. */
    uint16_t            u16Param;
    /** 32-bit parameter value. */
    uint32_t            u32Param;
    /** 64-bit parameter value. */
    uint64_t            u64Param;
    /** 64-bit parameter value. */
    uint64_t            u64Param2;
} IEMTLBTRACEENTRY;
AssertCompileSize(IEMTLBTRACEENTRY, 32);
/** Pointer to a TLB trace entry. */
typedef IEMTLBTRACEENTRY *PIEMTLBTRACEENTRY;
/** Pointer to a const TLB trace entry. */
typedef IEMTLBTRACEENTRY const *PCIEMTLBTRACEENTRY;
#endif /* IEM_WITH_TLB_TRACE */
    860 
/* TLB trace event macros.  Each group expands to an iemTlbTrace() call only in
   ring-3 builds with IEM_WITH_TLB_TRACE defined (the trailing '&& 1' allows
   disabling a group locally); otherwise the macros expand to no-ops. */
#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
# define IEMTLBTRACE_INVLPG(a_pVCpu, a_GCPtr) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_InvlPg, a_GCPtr)
# define IEMTLBTRACE_EVICT_SLOT(a_pVCpu, a_GCPtrTag, a_GCPhys, a_idxSlot, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_EvictSlot, a_GCPtrTag, a_GCPhys, a_fDataTlb, a_idxSlot)
# define IEMTLBTRACE_LARGE_EVICT_SLOT(a_pVCpu, a_GCPtrTag, a_GCPhys, a_idxSlot, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_LargeEvictSlot, a_GCPtrTag, a_GCPhys, a_fDataTlb, a_idxSlot)
# define IEMTLBTRACE_LARGE_SCAN(a_pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_LargeScan, 0, 0, a_fDataTlb, (uint8_t)a_fGlobal | ((uint8_t)a_fNonGlobal << 1))
# define IEMTLBTRACE_FLUSH(a_pVCpu, a_uRev, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Flush, a_uRev, 0, a_fDataTlb)
# define IEMTLBTRACE_FLUSH_GLOBAL(a_pVCpu, a_uRev, a_uGRev, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_FlushGlobal, a_uRev, a_uGRev, a_fDataTlb)
# define IEMTLBTRACE_LOAD(a_pVCpu, a_GCPtr, a_GCPhys, a_fTlb, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load, a_GCPtr, a_GCPhys, a_fDataTlb, a_fTlb)
# define IEMTLBTRACE_LOAD_GLOBAL(a_pVCpu, a_GCPtr, a_GCPhys, a_fTlb, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_LoadGlobal, a_GCPtr, a_GCPhys, a_fDataTlb, a_fTlb)
#else
# define IEMTLBTRACE_INVLPG(a_pVCpu, a_GCPtr)                                               do { } while (0)
# define IEMTLBTRACE_EVICT_SLOT(a_pVCpu, a_GCPtrTag, a_GCPhys, a_idxSlot, a_fDataTlb)       do { } while (0)
# define IEMTLBTRACE_LARGE_EVICT_SLOT(a_pVCpu, a_GCPtrTag, a_GCPhys, a_idxSlot, a_fDataTlb) do { } while (0)
# define IEMTLBTRACE_LARGE_SCAN(a_pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb)               do { } while (0)
# define IEMTLBTRACE_FLUSH(a_pVCpu, a_uRev, a_fDataTlb)                                     do { } while (0)
# define IEMTLBTRACE_FLUSH_GLOBAL(a_pVCpu, a_uRev, a_uGRev, a_fDataTlb)                     do { } while (0)
# define IEMTLBTRACE_LOAD(a_pVCpu, a_GCPtr, a_GCPhys, a_fTlb, a_fDataTlb)                   do { } while (0)
# define IEMTLBTRACE_LOAD_GLOBAL(a_pVCpu, a_GCPtr, a_GCPhys, a_fTlb, a_fDataTlb)            do { } while (0)
#endif

#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
# define IEMTLBTRACE_LOAD_CR0(a_pVCpu, a_uNew, a_uOld)      iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Cr0, a_uNew, a_uOld)
# define IEMTLBTRACE_LOAD_CR3(a_pVCpu, a_uNew, a_uOld)      iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Cr3, a_uNew, a_uOld)
# define IEMTLBTRACE_LOAD_CR4(a_pVCpu, a_uNew, a_uOld)      iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Cr4, a_uNew, a_uOld)
# define IEMTLBTRACE_LOAD_EFER(a_pVCpu, a_uNew, a_uOld)     iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Efer, a_uNew, a_uOld)
#else
# define IEMTLBTRACE_LOAD_CR0(a_pVCpu, a_uNew, a_uOld)      do { } while (0)
# define IEMTLBTRACE_LOAD_CR3(a_pVCpu, a_uNew, a_uOld)      do { } while (0)
# define IEMTLBTRACE_LOAD_CR4(a_pVCpu, a_uNew, a_uOld)      do { } while (0)
# define IEMTLBTRACE_LOAD_EFER(a_pVCpu, a_uNew, a_uOld)     do { } while (0)
#endif

#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
# define IEMTLBTRACE_IRQ(a_pVCpu, a_uVector, a_fFlags, a_fEFlags) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Irq, a_fEFlags, 0, a_uVector, a_fFlags)
# define IEMTLBTRACE_XCPT(a_pVCpu, a_uVector, a_uErr, a_uCr2, a_fFlags) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Xcpt, a_uErr, a_uCr2, a_uVector, a_fFlags)
# define IEMTLBTRACE_IRET(a_pVCpu, a_uRetCs, a_uRetRip, a_fEFlags) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_IRet, a_uRetRip, a_fEFlags, 0, a_uRetCs)
#else
# define IEMTLBTRACE_IRQ(a_pVCpu, a_uVector, a_fFlags, a_fEFlags)       do { } while (0)
# define IEMTLBTRACE_XCPT(a_pVCpu, a_uVector, a_uErr, a_uCr2, a_fFlags) do { } while (0)
# define IEMTLBTRACE_IRET(a_pVCpu, a_uRetCs, a_uRetRip, a_fEFlags)      do { } while (0)
#endif

#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
# define IEMTLBTRACE_TB_COMPILE(a_pVCpu, a_GCPhysPc) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Tb_Compile, a_GCPhysPc)
# define IEMTLBTRACE_TB_EXEC_THRD(a_pVCpu, a_pTb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Tb_Exec_Threaded, (a_pTb)->GCPhysPc, (uintptr_t)a_pTb, 0, (a_pTb)->cUsed)
# define IEMTLBTRACE_TB_EXEC_N8VE(a_pVCpu, a_pTb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Tb_Exec_Native,   (a_pTb)->GCPhysPc, (uintptr_t)a_pTb, 0, (a_pTb)->cUsed)
#else
# define IEMTLBTRACE_TB_COMPILE(a_pVCpu, a_GCPhysPc)                    do { } while (0)
# define IEMTLBTRACE_TB_EXEC_THRD(a_pVCpu, a_pTb)                       do { } while (0)
# define IEMTLBTRACE_TB_EXEC_N8VE(a_pVCpu, a_pTb)                       do { } while (0)
#endif

#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
# define IEMTLBTRACE_USER0(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_User0, a_u64Param1, a_u64Param2, a_bParam, a_u32Param)
# define IEMTLBTRACE_USER1(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_User1, a_u64Param1, a_u64Param2, a_bParam, a_u32Param)
# define IEMTLBTRACE_USER2(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_User2, a_u64Param1, a_u64Param2, a_bParam, a_u32Param)
# define IEMTLBTRACE_USER3(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_User3, a_u64Param1, a_u64Param2, a_bParam, a_u32Param)
#else
# define IEMTLBTRACE_USER0(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) do { } while (0)
# define IEMTLBTRACE_USER1(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) do { } while (0)
# define IEMTLBTRACE_USER2(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) do { } while (0)
# define IEMTLBTRACE_USER3(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) do { } while (0)
#endif
    942 
    943 
/** @name IEM_MC_F_XXX - MC block flags/clues.
 * @note x86 specific
 * @todo Merge with IEM_CIMPL_F_XXX
 * @{ */
#define IEM_MC_F_ONLY_8086          RT_BIT_32(0)
#define IEM_MC_F_MIN_186            RT_BIT_32(1)
#define IEM_MC_F_MIN_286            RT_BIT_32(2)
/* Note: expands to IEM_MC_F_MIN_386 which is defined just below; harmless
   since macros are expanded at the point of use, not of definition. */
#define IEM_MC_F_NOT_286_OR_OLDER   IEM_MC_F_MIN_386
#define IEM_MC_F_MIN_386            RT_BIT_32(3)
#define IEM_MC_F_MIN_486            RT_BIT_32(4)
#define IEM_MC_F_MIN_PENTIUM        RT_BIT_32(5)
#define IEM_MC_F_MIN_PENTIUM_II     IEM_MC_F_MIN_PENTIUM
#define IEM_MC_F_MIN_CORE           IEM_MC_F_MIN_PENTIUM
#define IEM_MC_F_64BIT              RT_BIT_32(6)
#define IEM_MC_F_NOT_64BIT          RT_BIT_32(7)
/** This is set by IEMAllN8vePython.py to indicate a variation with the
 * flags-clearing-and-checking. */
#define IEM_MC_F_WITH_FLAGS         RT_BIT_32(8)
/** This is set by IEMAllN8vePython.py to indicate a variation without the
 * flags-clearing-and-checking, when there is also a variation with that.
 * @note Do not set this manually, it's only for python and for testing in
 *       the native recompiler! */
#define IEM_MC_F_WITHOUT_FLAGS      RT_BIT_32(9)
/** @} */
    968 
/** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
 *
 * These clues are mainly for the recompiler, so that it can emit correct code.
 *
 * They are processed by the python script, which also automatically
 * calculates flags for MC blocks based on the statements, extending the use of
 * these flags to describe MC block behavior to the recompiler core.  The python
 * script passes the flags to the IEM_MC2_END_EMIT_CALLS macro, but mainly for
 * error checking purposes.  The script emits the necessary fEndTb = true and
 * similar statements as this reduces compile time a tiny bit.
 *
 * @{ */
/** Flag set if direct branch, clear if absolute or indirect. */
#define IEM_CIMPL_F_BRANCH_DIRECT        RT_BIT_32(0)
/** Flag set if indirect branch, clear if direct or relative.
 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
 * as well as for return instructions (RET, IRET, RETF). */
#define IEM_CIMPL_F_BRANCH_INDIRECT      RT_BIT_32(1)
/** Flag set if relative branch, clear if absolute or indirect. */
#define IEM_CIMPL_F_BRANCH_RELATIVE      RT_BIT_32(2)
/** Flag set if conditional branch, clear if unconditional. */
#define IEM_CIMPL_F_BRANCH_CONDITIONAL   RT_BIT_32(3)
/** Flag set if it's a far branch (changes CS).
 * @note x86 specific */
#define IEM_CIMPL_F_BRANCH_FAR           RT_BIT_32(4)
/** Convenience: Testing any kind of branch. */
#define IEM_CIMPL_F_BRANCH_ANY          (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)

/** Execution flags may change (IEMCPU::fExec). */
#define IEM_CIMPL_F_MODE                RT_BIT_32(5)
/** May change significant portions of RFLAGS.
 * @note x86 specific */
#define IEM_CIMPL_F_RFLAGS              RT_BIT_32(6)
/** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS.
 * @note x86 specific */
#define IEM_CIMPL_F_STATUS_FLAGS        RT_BIT_32(7)
/** May trigger interrupt shadowing.
 * @note x86 specific */
#define IEM_CIMPL_F_INHIBIT_SHADOW      RT_BIT_32(8)
/** May enable interrupts, so recheck IRQ immediately after executing
 *  the instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_AFTER     RT_BIT_32(9)
/** May disable interrupts, so recheck IRQ immediately before executing the
 *  instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_BEFORE    RT_BIT_32(10)
/** Convenience: Check for IRQ both before and after an instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER (IEM_CIMPL_F_CHECK_IRQ_BEFORE | IEM_CIMPL_F_CHECK_IRQ_AFTER)
/** May trigger a VM exit (treated like IEM_CIMPL_F_MODE atm). */
#define IEM_CIMPL_F_VMEXIT              RT_BIT_32(11)
/** May modify FPU state.
 * @todo Not sure if this is useful yet.  */
#define IEM_CIMPL_F_FPU                 RT_BIT_32(12)
/** REP prefixed instruction which may yield before updating PC.
 * @todo Not sure if this is useful, REP functions now return non-zero
 *       status if they don't update the PC.
 * @note x86 specific */
#define IEM_CIMPL_F_REP                 RT_BIT_32(13)
/** I/O instruction.
 * @todo Not sure if this is useful yet.
 * @note x86 specific */
#define IEM_CIMPL_F_IO                  RT_BIT_32(14)
/** Force end of TB after the instruction. */
#define IEM_CIMPL_F_END_TB              RT_BIT_32(15)
/** Flag set if a branch may also modify the stack (push/pop return address). */
#define IEM_CIMPL_F_BRANCH_STACK        RT_BIT_32(16)
/** Flag set if a branch may also modify the stack (push/pop return address)
 *  and switch it (load/restore SS:RSP).
 * @note x86 specific */
#define IEM_CIMPL_F_BRANCH_STACK_FAR    RT_BIT_32(17)
/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
#define IEM_CIMPL_F_XCPT \
    (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_STACK_FAR \
     | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)

/** The block calls a C-implementation instruction function with two implicit arguments.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_AIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
 * @note The python scripts will add this if missing.  */
#define IEM_CIMPL_F_CALLS_CIMPL                 RT_BIT_32(18)
/** The block calls an ASM-implementation instruction function.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
 * @note The python scripts will add this if missing.  */
#define IEM_CIMPL_F_CALLS_AIMPL                 RT_BIT_32(19)
/** The block calls an ASM-implementation instruction function with an implicit
 * X86FXSTATE pointer argument.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL, IEM_CIMPL_F_CALLS_AIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE.
 * @note The python scripts will add this if missing.
 * @note x86 specific */
#define IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE    RT_BIT_32(20)
/** The block calls an ASM-implementation instruction function with an implicit
 * X86XSAVEAREA pointer argument.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL, IEM_CIMPL_F_CALLS_AIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
 * @note No different from IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE, so same value.
 * @note The python scripts will add this if missing.
 * @note x86 specific */
#define IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE     IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE
/** @} */
    1069 
    1070 
/** @name IEM_F_XXX - Execution mode flags (IEMCPU::fExec, IEMTB::fFlags).
 *
 * These flags are set when entering IEM and adjusted as code is executed, such
 * that they will always contain the current values as instructions are
 * finished.
 *
 * In recompiled execution mode, (most of) these flags are included in the
 * translation block selection key and stored in IEMTB::fFlags alongside the
 * IEMTB_F_XXX flags.  The latter flags use bits 31 thru 24, which are all zero
 * in IEMCPU::fExec.
 *
 * @{ */
/** Mode: The block target mode mask. */
#define IEM_F_MODE_MASK                     UINT32_C(0x0000001f)
/** Mode: The IEMMODE part of the IEMTB_F_MODE_MASK value. */
#define IEM_F_MODE_CPUMODE_MASK             UINT32_C(0x00000003)
/** X86 Mode: Bit used to indicate pre-386 CPU in 16-bit mode (for eliminating
 * conditional in EIP/IP updating), and flat wide open CS, SS, DS, and ES in
 * 32-bit mode (for simplifying most memory accesses). */
#define IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK UINT32_C(0x00000004)
/** X86 Mode: Bit indicating protected mode, real mode (or SMM) when not set. */
#define IEM_F_MODE_X86_PROT_MASK            UINT32_C(0x00000008)
/** X86 Mode: Bit used to indicate virtual 8086 mode (only 16-bit). */
#define IEM_F_MODE_X86_V86_MASK             UINT32_C(0x00000010)

/** X86 Mode: 16-bit on 386 or later. */
#define IEM_F_MODE_X86_16BIT                UINT32_C(0x00000000)
/** X86 Mode: 80286, 80186 and 8086/88 targeting blocks (EIP update opt). */
#define IEM_F_MODE_X86_16BIT_PRE_386        UINT32_C(0x00000004)
/** X86 Mode: 16-bit protected mode on 386 or later. */
#define IEM_F_MODE_X86_16BIT_PROT           UINT32_C(0x00000008)
/** X86 Mode: 16-bit protected mode on a pre-386 CPU (80286), given the
 * IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK bit in the value. */
#define IEM_F_MODE_X86_16BIT_PROT_PRE_386   UINT32_C(0x0000000c)
/** X86 Mode: 16-bit virtual 8086 protected mode (on 386 or later). */
#define IEM_F_MODE_X86_16BIT_PROT_V86       UINT32_C(0x00000018)

/** X86 Mode: 32-bit on 386 or later. */
#define IEM_F_MODE_X86_32BIT                UINT32_C(0x00000001)
/** X86 Mode: 32-bit mode with wide open flat CS, SS, DS and ES. */
#define IEM_F_MODE_X86_32BIT_FLAT           UINT32_C(0x00000005)
/** X86 Mode: 32-bit protected mode. */
#define IEM_F_MODE_X86_32BIT_PROT           UINT32_C(0x00000009)
/** X86 Mode: 32-bit protected mode with wide open flat CS, SS, DS and ES. */
#define IEM_F_MODE_X86_32BIT_PROT_FLAT      UINT32_C(0x0000000d)

/** X86 Mode: 64-bit (includes protected, but not the flat bit). */
#define IEM_F_MODE_X86_64BIT                UINT32_C(0x0000000a)

/** X86 Mode: Checks if @a a_fExec represents a FLAT mode. */
#define IEM_F_MODE_X86_IS_FLAT(a_fExec)     (   ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT \
                                             || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT \
                                             || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT)

/** Bypass access handlers when set. */
#define IEM_F_BYPASS_HANDLERS               UINT32_C(0x00010000)
/** Have pending hardware instruction breakpoints.   */
#define IEM_F_PENDING_BRK_INSTR             UINT32_C(0x00020000)
/** Have pending hardware data breakpoints.   */
#define IEM_F_PENDING_BRK_DATA              UINT32_C(0x00040000)

/** X86: Have pending hardware I/O breakpoints. */
#define IEM_F_PENDING_BRK_X86_IO            UINT32_C(0x00000400)
/** X86: Disregard the lock prefix (implied or not) when set. */
#define IEM_F_X86_DISREGARD_LOCK            UINT32_C(0x00000800)
    1135 
    1136 /** Pending breakpoint mask (what iemCalcExecDbgFlags works out). */
    1137 #define IEM_F_PENDING_BRK_MASK              (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA | IEM_F_PENDING_BRK_X86_IO)
    1138 
    1139 /** Caller configurable options. */
    1140 #define IEM_F_USER_OPTS                     (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK)
    1141 
    1142 /** X86: The current protection level (CPL) shift factor.   */
    1143 #define IEM_F_X86_CPL_SHIFT                 8
    1144 /** X86: The current protection level (CPL) mask. */
    1145 #define IEM_F_X86_CPL_MASK                  UINT32_C(0x00000300)
    1146 /** X86: The current protection level (CPL) shifted mask. */
    1147 #define IEM_F_X86_CPL_SMASK                 UINT32_C(0x00000003)
    1148 
    1149 /** X86: Alignment checks enabled (CR0.AM=1 & EFLAGS.AC=1). */
    1150 #define IEM_F_X86_AC                        UINT32_C(0x00080000)
    1151 
    1152 /** X86 execution context.
    1153  * The IEM_F_X86_CTX_XXX values are individual flags that can be combined (with
    1154  * the exception of IEM_F_X86_CTX_NORMAL).  This allows running VMs from SMM
    1155  * mode. */
    1156 #define IEM_F_X86_CTX_MASK                  UINT32_C(0x0000f000)
    1157 /** X86 context: Plain regular execution context. */
    1158 #define IEM_F_X86_CTX_NORMAL                UINT32_C(0x00000000)
    1159 /** X86 context: VT-x enabled. */
    1160 #define IEM_F_X86_CTX_VMX                   UINT32_C(0x00001000)
    1161 /** X86 context: AMD-V enabled. */
    1162 #define IEM_F_X86_CTX_SVM                   UINT32_C(0x00002000)
    1163 /** X86 context: In AMD-V or VT-x guest mode. */
    1164 #define IEM_F_X86_CTX_IN_GUEST              UINT32_C(0x00004000)
    1165 /** X86 context: System management mode (SMM). */
    1166 #define IEM_F_X86_CTX_SMM                   UINT32_C(0x00008000)
    1167 
    1168 /** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in
    1169  * iemRegFinishClearingRF() for most situations (CPUMCTX_DBG_HIT_DRX_MASK
    1170  * and CPUMCTX_DBG_DBGF_MASK are covered by the IEM_F_PENDING_BRK_XXX bits
    1171  * already). */
    1172 
    1173 /** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in
    1174  *        iemRegFinishClearingRF() for most situations
    1175  *        (CPUMCTX_DBG_HIT_DRX_MASK and CPUMCTX_DBG_DBGF_MASK are covered by
    1176  *        the IEM_F_PENDING_BRK_XXX bits already). */
    1177 
    1178 /** @} */
    1179 
    1180 
    1181 /** @name IEMTB_F_XXX - Translation block flags (IEMTB::fFlags).
    1182  *
    1183  * Extends the IEM_F_XXX flags (subject to IEMTB_F_IEM_F_MASK) to make up the
    1184  * translation block flags.  The combined flag mask (subject to
    1185  * IEMTB_F_KEY_MASK) is used as part of the lookup key for translation blocks.
    1186  *
    1187  * @{ */
    1188 /** Mask of IEM_F_XXX flags included in IEMTB_F_XXX. */
    1189 #define IEMTB_F_IEM_F_MASK              UINT32_C(0x00ffffff)
    1190 
    1191 /** Type: The block type mask. */
    1192 #define IEMTB_F_TYPE_MASK               UINT32_C(0x03000000)
    1193 /** Type: Purely threaded recompiler (via tables). */
    1194 #define IEMTB_F_TYPE_THREADED           UINT32_C(0x01000000)
    1195 /** Type: Native recompilation.  */
    1196 #define IEMTB_F_TYPE_NATIVE             UINT32_C(0x02000000)
    1197 
    1198 /** Set when we're starting the block in an "interrupt shadow".
    1199  * We don't need to distinguish between the two types of this mask, thus the one.
    1200  * @see CPUMCTX_INHIBIT_SHADOW, CPUMIsInInterruptShadow() */
    1201 #define IEMTB_F_INHIBIT_SHADOW          UINT32_C(0x04000000)
    1202 /** Set when we're currently inhibiting NMIs.
    1203  * @see CPUMCTX_INHIBIT_NMI, CPUMAreInterruptsInhibitedByNmi() */
    1204 #define IEMTB_F_INHIBIT_NMI             UINT32_C(0x08000000)
    1205 
    1206 /** Checks that EIP/IP is within CS.LIM before each instruction.  Used when
    1207  * we're close to the limit before starting a TB, as determined by
    1208  * iemGetTbFlagsForCurrentPc().
    1209  * @note x86 specific */
    1210 #define IEMTB_F_CS_LIM_CHECKS           UINT32_C(0x10000000)
    1211 
    1212 /** Mask of the IEMTB_F_XXX flags that are part of the TB lookup key.
    1213  *
    1214  * @note We skip all of IEM_F_X86_CTX_MASK, with the exception of SMM (which we
    1215  *       don't implement), because we don't currently generate any context
    1216  *       specific code - that's all handled in CIMPL functions.
    1217  *
    1218  *       For the threaded recompiler we don't generate any CPL specific code
    1219  *       either, but the native recompiler does for memory access (saves getting
    1220  *       the CPL from fExec and turning it into IEMTLBE_F_PT_NO_USER).
    1221  *       Since most OSes will not share code between rings, this shouldn't
    1222  *       have any real effect on TB/memory/recompiling load.
    1223  */
    1224 #define IEMTB_F_KEY_MASK                ((UINT32_MAX & ~(IEM_F_X86_CTX_MASK | IEMTB_F_TYPE_MASK)) | IEM_F_X86_CTX_SMM)
    1225 /** @} */
    1226 
    1227 AssertCompile( (IEM_F_MODE_X86_16BIT              & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
    1228 AssertCompile(!(IEM_F_MODE_X86_16BIT              & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
    1229 AssertCompile(!(IEM_F_MODE_X86_16BIT              & IEM_F_MODE_X86_PROT_MASK));
    1230 AssertCompile(!(IEM_F_MODE_X86_16BIT              & IEM_F_MODE_X86_V86_MASK));
    1231 AssertCompile( (IEM_F_MODE_X86_16BIT_PRE_386      & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
    1232 AssertCompile(  IEM_F_MODE_X86_16BIT_PRE_386      & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
    1233 AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386      & IEM_F_MODE_X86_PROT_MASK));
    1234 AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386      & IEM_F_MODE_X86_V86_MASK));
    1235 AssertCompile( (IEM_F_MODE_X86_16BIT_PROT         & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
    1236 AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT         & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
    1237 AssertCompile(  IEM_F_MODE_X86_16BIT_PROT         & IEM_F_MODE_X86_PROT_MASK);
    1238 AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT         & IEM_F_MODE_X86_V86_MASK));
    1239 AssertCompile( (IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
    1240 AssertCompile(  IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
    1241 AssertCompile(  IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_PROT_MASK);
    1242 AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
    1243 AssertCompile(  IEM_F_MODE_X86_16BIT_PROT_V86     & IEM_F_MODE_X86_PROT_MASK);
    1244 AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_V86     & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
    1245 AssertCompile(  IEM_F_MODE_X86_16BIT_PROT_V86     & IEM_F_MODE_X86_V86_MASK);
    1246 
    1247 AssertCompile( (IEM_F_MODE_X86_32BIT              & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
    1248 AssertCompile(!(IEM_F_MODE_X86_32BIT              & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
    1249 AssertCompile(!(IEM_F_MODE_X86_32BIT              & IEM_F_MODE_X86_PROT_MASK));
    1250 AssertCompile( (IEM_F_MODE_X86_32BIT_FLAT         & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
    1251 AssertCompile(  IEM_F_MODE_X86_32BIT_FLAT         & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
    1252 AssertCompile(!(IEM_F_MODE_X86_32BIT_FLAT         & IEM_F_MODE_X86_PROT_MASK));
    1253 AssertCompile( (IEM_F_MODE_X86_32BIT_PROT         & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
    1254 AssertCompile(!(IEM_F_MODE_X86_32BIT_PROT         & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
    1255 AssertCompile(  IEM_F_MODE_X86_32BIT_PROT         & IEM_F_MODE_X86_PROT_MASK);
    1256 AssertCompile( (IEM_F_MODE_X86_32BIT_PROT_FLAT    & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
    1257 AssertCompile(  IEM_F_MODE_X86_32BIT_PROT_FLAT    & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
    1258 AssertCompile(  IEM_F_MODE_X86_32BIT_PROT_FLAT    & IEM_F_MODE_X86_PROT_MASK);
    1259 
    1260 AssertCompile( (IEM_F_MODE_X86_64BIT              & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT);
    1261 AssertCompile(  IEM_F_MODE_X86_64BIT              & IEM_F_MODE_X86_PROT_MASK);
    1262 AssertCompile(!(IEM_F_MODE_X86_64BIT              & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
    1263 
    1264 /** Native instruction type for use with the native code generator.
    1265  * This is a byte (uint8_t) for x86 and amd64 and uint32_t for the other(s),
    1266  * i.e. one unit per fixed-size 32-bit instruction word. */
    1266 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    1267 typedef uint8_t IEMNATIVEINSTR;
    1268 #else
    1269 typedef uint32_t IEMNATIVEINSTR;
    1270 #endif
    1271 /** Pointer to a native instruction unit. */
    1272 typedef IEMNATIVEINSTR *PIEMNATIVEINSTR;
    1273 /** Pointer to a const native instruction unit. */
    1274 typedef IEMNATIVEINSTR const *PCIEMNATIVEINSTR;
    1275 
    1276 /**
    1277  * A call for the threaded call table.
    1278  */
    1279 typedef struct IEMTHRDEDCALLENTRY
    1280 {
    1281     /** The function to call (IEMTHREADEDFUNCS). */
    1282     uint16_t    enmFunction;
    1283 
    1284     /** Instruction number in the TB (for statistics). */
    1285     uint8_t     idxInstr;
    1286     /** The opcode length. */
    1287     uint8_t     cbOpcode;
    1288     /** Offset into IEMTB::pabOpcodes. */
    1289     uint16_t    offOpcode;
    1290 
    1291     /** TB lookup table index (7 bits) and large size (1 bit).
    1292      *
    1293      * The default size is 1 entry, but for indirect calls and returns we set the
    1294      * top bit and allocate 4 (IEM_TB_LOOKUP_TAB_LARGE_SIZE) entries.  The large
    1295      * tables use RIP for selecting the entry to use, as it is assumed a hash table
    1296      * lookup isn't that slow compared to sequentially trying out 4 TBs.
    1297      *
    1298      * By default lookup table entry 0 for a TB is reserved as a fallback for
    1299      * calltable entries w/o explicit entries, so this member will be non-zero if
    1300      * there is a lookup entry associated with this call.
    1301      *
    1302      * @sa IEM_TB_LOOKUP_TAB_GET_SIZE, IEM_TB_LOOKUP_TAB_GET_IDX
    1303      */
    1304     uint8_t     uTbLookup;
    1305 
    1306     /** Flags - IEMTHREADEDCALLENTRY_F_XXX. */
    1307     uint8_t     fFlags;
    1308 
    1309     /** Generic parameters. */
    1310     uint64_t    auParams[3];
    1311 } IEMTHRDEDCALLENTRY;
    1312 AssertCompileSize(IEMTHRDEDCALLENTRY, sizeof(uint64_t) * 4);
    1313 /** Pointer to a threaded call entry. */
    1314 typedef struct IEMTHRDEDCALLENTRY *PIEMTHRDEDCALLENTRY;
    1315 /** Pointer to a const threaded call entry. */
    1316 typedef IEMTHRDEDCALLENTRY const *PCIEMTHRDEDCALLENTRY;
    1317 
    1318 /** The number of TB lookup table entries for a large allocation
    1319  *  (IEMTHRDEDCALLENTRY::uTbLookup bit 7 set). */
    1320 #define IEM_TB_LOOKUP_TAB_LARGE_SIZE                    4
    1321 /** Get the lookup table size from IEMTHRDEDCALLENTRY::uTbLookup. */
    1322 #define IEM_TB_LOOKUP_TAB_GET_SIZE(a_uTbLookup)         (!((a_uTbLookup) & 0x80) ? 1 : IEM_TB_LOOKUP_TAB_LARGE_SIZE)
    1323 /** Get the first lookup table index from IEMTHRDEDCALLENTRY::uTbLookup. */
    1324 #define IEM_TB_LOOKUP_TAB_GET_IDX(a_uTbLookup)          ((a_uTbLookup) & 0x7f)
    1325 /** Get the lookup table index from IEMTHRDEDCALLENTRY::uTbLookup and RIP. */
    1326 #define IEM_TB_LOOKUP_TAB_GET_IDX_WITH_RIP(a_uTbLookup, a_Rip) \
    1327     (!((a_uTbLookup) & 0x80) ? (a_uTbLookup) & 0x7f : ((a_uTbLookup) & 0x7f) + ((a_Rip) & (IEM_TB_LOOKUP_TAB_LARGE_SIZE - 1)) )
    1328 
    1329 /** Make an IEMTHRDEDCALLENTRY::uTbLookup value. */
    1330 #define IEM_TB_LOOKUP_TAB_MAKE(a_idxTable, a_fLarge)    ((a_idxTable) | ((a_fLarge) ? 0x80 : 0))
    1331 
    1332 
    1333 /** The call entry is a jump target. */
    1334 #define IEMTHREADEDCALLENTRY_F_JUMP_TARGET              UINT8_C(0x01)
    1335 
    1336 
    1337 /**
    1338  * Native IEM TB 'function' typedef.
    1339  *
    1340  * This will throw/longjmp on occasion.
    1341  *
    1342  * @note    AMD64 doesn't have that many non-volatile registers and does sport
    1343  *          32-bit address displacements, so we don't need pCtx.
    1344  *
    1345  *          On ARM64 pCtx allows us to directly address the whole register
    1346  *          context without requiring a separate indexing register holding the
    1347  *          offset. This saves an instruction loading the offset for each guest
    1348  *          CPU context access, at the cost of a non-volatile register.
    1349  *          Fortunately, ARM64 has quite a lot more registers.
    1350  */
    1351 typedef
    1352 #ifdef RT_ARCH_AMD64
    1353 int FNIEMTBNATIVE(PVMCPUCC pVCpu)
    1354 #else
    1355 int FNIEMTBNATIVE(PVMCPUCC pVCpu, PCPUMCTX pCtx)
    1356 #endif
    1357 #if RT_CPLUSPLUS_PREREQ(201700)
    1358     IEM_NOEXCEPT_MAY_LONGJMP
    1359 #endif
    1360     ;
    1361 /** Pointer to a native IEM TB entry point function.
    1362  * This will throw/longjmp on occasion.  */
    1363 typedef FNIEMTBNATIVE *PFNIEMTBNATIVE;
    1364 
    1365 
    1366 /**
    1367  * Translation block.
    1368  *
    1369  * The current plan is to just keep TBs and associated lookup hash table private
    1370  * to each VCpu as that simplifies TB removal greatly (no races) and generally
    1371  * avoids using expensive atomic primitives for updating lists and stuff.
    1372  */
    1373 #pragma pack(2) /* to prevent the Thrd structure from being padded unnecessarily */
    1374 typedef struct IEMTB
    1375 {
    1376     /** Next block with the same hash table entry. */
    1377     struct IEMTB       *pNext;
    1378     /** Usage counter. */
    1379     uint32_t            cUsed;
    1380     /** The IEMCPU::msRecompilerPollNow last time it was used. */
    1381     uint32_t            msLastUsed;
    1382 
    1383     /** @name What uniquely identifies the block.
    1384      * @{ */
    1385     RTGCPHYS            GCPhysPc;
    1386     /** IEMTB_F_XXX (i.e. IEM_F_XXX ++). */
    1387     uint32_t            fFlags;
    1388     union
    1389     {
    1390         struct
    1391         {
    1392             /** Relevant CS X86DESCATTR_XXX bits. */
    1393             uint16_t    fAttr;
    1394         } x86;
    1395     };
    1396     /** @} */
    1397 
    1398     /** Number of opcode ranges. */
    1399     uint8_t             cRanges;
    1400     /** Statistics: Number of instructions in the block. */
    1401     uint8_t             cInstructions;
    1402 
    1403     /** Type specific info. */
    1404     union
    1405     {
    1406         struct
    1407         {
    1408             /** The call sequence table. */
    1409             PIEMTHRDEDCALLENTRY paCalls;
    1410             /** Number of calls in paCalls. */
    1411             uint16_t            cCalls;
    1412             /** Number of calls allocated. */
    1413             uint16_t            cAllocated;
    1414         } Thrd;
    1415         struct
    1416         {
    1417             /** The native instructions (PFNIEMTBNATIVE). */
    1418             PIEMNATIVEINSTR     paInstructions;
    1419             /** Number of instructions pointed to by paInstructions. */
    1420             uint32_t            cInstructions;
    1421         } Native;
    1422         /** Generic view for zeroing when freeing. */
    1423         struct
    1424         {
    1425             uintptr_t           uPtr;
    1426             uint32_t            uData;
    1427         } Gen;
    1428     };
    1429 
    1430     /** The allocation chunk this TB belongs to. */
    1431     uint8_t             idxAllocChunk;
    1432     /** The number of entries in the lookup table.
    1433      * Because we're out of space, the TB lookup table is located before the
    1434      * opcodes pointed to by pabOpcodes. */
    1435     uint8_t             cTbLookupEntries;
    1436 
    1437     /** Number of bytes of opcodes stored in pabOpcodes.
    1438      * @todo this field isn't really needed, aRanges keeps the actual info. */
    1439     uint16_t            cbOpcodes;
    1440     /** Pointer to the opcode bytes this block was recompiled from.
    1441      * This also points to the TB lookup table, which starts cTbLookupEntries
    1442      * entries before the opcodes (we don't have room atm for another pointer). */
    1443     uint8_t            *pabOpcodes;
    1444 
    1445     union
    1446     {
    1447         /** Native recompilation debug info if enabled.
    1448          * This is only generated by the native recompiler. */
    1449         struct IEMTBDBG    *pDbgInfo;
    1450         /** For threaded TBs and natives when debug info is disabled, this is the flat
    1451          * PC corresponding to GCPhysPc. */
    1452         RTGCPTR             FlatPc;
    1453     };
    1454 
    1455     /* --- 64 byte cache line end --- */
    1456 
    1457     /** Opcode ranges.
    1458      *
    1459      * The opcode checkers and maybe TLB loading functions will use this to figure
    1460      * out what to do.  The parameter will specify an entry and the opcode offset to
    1461      * start at and the minimum number of bytes to verify (instruction length).
    1462      *
    1463      * When VT-x and AMD-V look up the opcode bytes for an exiting instruction,
    1464      * they'll first translate RIP (+ cbInstr - 1) to a physical address using the
    1465      * code TLB (must have a valid entry for that address) and scan the ranges to
    1466      * locate the corresponding opcodes. Probably.
    1467      */
    1468     struct IEMTBOPCODERANGE
    1469     {
    1470         /** Offset within pabOpcodes. */
    1471         uint16_t        offOpcodes;
    1472         /** Number of bytes. */
    1473         uint16_t        cbOpcodes;
    1474         /** The page offset. */
    1475         RT_GCC_EXTENSION
    1476         uint16_t        offPhysPage : 12;
    1477         /** Unused bits. */
    1478         RT_GCC_EXTENSION
    1479         uint16_t        u2Unused    :  2;
    1480         /** Index into GCPhysPc + aGCPhysPages for the physical page address. */
    1481         RT_GCC_EXTENSION
    1482         uint16_t        idxPhysPage :  2;
    1483     } aRanges[8];
    1484 
    1485     /** Physical pages that this TB covers.
    1486      * The GCPhysPc w/o page offset is element zero, so starting here with 1. */
    1487     RTGCPHYS            aGCPhysPages[2];
    1488 } IEMTB;
    1489 #pragma pack()
    1490 AssertCompileMemberAlignment(IEMTB, GCPhysPc, sizeof(RTGCPHYS));
    1491 AssertCompileMemberAlignment(IEMTB, Thrd, sizeof(void *));
    1492 AssertCompileMemberAlignment(IEMTB, pabOpcodes, sizeof(void *));
    1493 AssertCompileMemberAlignment(IEMTB, pDbgInfo, sizeof(void *));
    1494 AssertCompileMemberAlignment(IEMTB, aGCPhysPages, sizeof(RTGCPHYS));
    1495 AssertCompileMemberOffset(IEMTB, aRanges, 64);
    1496 AssertCompileMemberSize(IEMTB, aRanges[0], 6);
    1497 #if 1
    1498 AssertCompileSize(IEMTB, 128);
    1499 # define IEMTB_SIZE_IS_POWER_OF_TWO /**< The IEMTB size is a power of two. */
    1500 #else
    1501 AssertCompileSize(IEMTB, 168);
    1502 # undef  IEMTB_SIZE_IS_POWER_OF_TWO
    1503 #endif
    1504 
    1505 /** Pointer to a translation block. */
    1506 typedef IEMTB *PIEMTB;
    1507 /** Pointer to a const translation block. */
    1508 typedef IEMTB const *PCIEMTB;
    1509 
    1510 /** Gets address of the given TB lookup table entry. */
    1511 #define IEMTB_GET_TB_LOOKUP_TAB_ENTRY(a_pTb, a_idx) \
    1512     ((PIEMTB *)&(a_pTb)->pabOpcodes[-(int)((a_pTb)->cTbLookupEntries - (a_idx)) * sizeof(PIEMTB)])
    1513 
    1514 /**
    1515  * Gets the page-aligned guest-physical address for a TB opcode range.
    1516  */
    1517 DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
    1518 {
    1519     Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
    1520     uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
    1521     Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
    1522     if (idxPage == 0) /* index 0 is the page GCPhysPc is in; 1..2 index aGCPhysPages. */
    1523         return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    1524     Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
    1525     return pTb->aGCPhysPages[idxPage - 1];
    1526 }
    1527 
    1528 
    1529 /**
    1530  * A chunk of memory in the TB allocator.
    1531  */
    1532 typedef struct IEMTBCHUNK
    1533 {
    1534     /** Pointer to the translation blocks in this chunk. */
    1535     PIEMTB          paTbs;
    1536 #ifdef IN_RING0
    1537     /** Ring-0 memory object backing paTbs. */
    1538     RTR0MEMOBJ      hMemObj;
    1539 #endif
    1540 } IEMTBCHUNK;
    1541 
    1542 /**
    1543  * A per-CPU translation block allocator.
    1544  *
    1545  * Because of how the IEMTBCACHE uses the lower 6 bits of the TB address to keep
    1546  * the length of the collision list, and of course also for cache line alignment
    1547  * reasons, the TBs must be allocated with at least 64-byte alignment.
    1548  * Memory is therefore allocated using one of the page aligned allocators.
    1549  *
    1550  *
    1551  * To avoid wasting too much memory, it is allocated piecemeal as needed,
    1552  * in chunks (IEMTBCHUNK) of 2 MiB or more.  The TB has an 8-bit chunk index
    1553  * that enables us to quickly calculate the allocation bitmap position when
    1554  * freeing the translation block.
    1555  */
    1556 typedef struct IEMTBALLOCATOR
    1557 {
    1558     /** Magic value (IEMTBALLOCATOR_MAGIC). */
    1559     uint32_t        uMagic;
    1560 
    1561 #ifdef IEMTB_SIZE_IS_POWER_OF_TWO
    1562     /** Mask corresponding to cTbsPerChunk - 1. */
    1563     uint32_t        fChunkMask;
    1564     /** Shift count corresponding to cTbsPerChunk. */
    1565     uint8_t         cChunkShift;
    1566 #else
    1567     uint32_t        uUnused;
    1568     uint8_t         bUnused;
    1569 #endif
    1570     /** Number of chunks we're allowed to allocate. */
    1571     uint8_t         cMaxChunks;
    1572     /** Number of chunks currently populated. */
    1573     uint16_t        cAllocatedChunks;
    1574     /** Number of translation blocks per chunk. */
    1575     uint32_t        cTbsPerChunk;
    1576     /** Chunk size. */
    1577     uint32_t        cbPerChunk;
    1578 
    1579     /** The maximum number of TBs. */
    1580     uint32_t        cMaxTbs;
    1581     /** Total number of TBs in the populated chunks.
    1582      * (cAllocatedChunks * cTbsPerChunk) */
    1583     uint32_t        cTotalTbs;
    1584     /** The current number of TBs in use.
    1585      * The number of free TBs: cAllocatedTbs - cInUseTbs; */
    1586     uint32_t        cInUseTbs;
    1587     /** Statistics: Number of the cInUseTbs that are native ones. */
    1588     uint32_t        cNativeTbs;
    1589     /** Statistics: Number of the cInUseTbs that are threaded ones. */
    1590     uint32_t        cThreadedTbs;
    1591 
    1592     /** Where to start pruning TBs from when we're out.
    1593      *  See iemTbAllocatorAllocSlow for details. */
    1594     uint32_t        iPruneFrom;
    1595     /** Where to start pruning native TBs from when we're out of executable memory.
    1596      *  See iemTbAllocatorFreeupNativeSpace for details. */
    1597     uint32_t        iPruneNativeFrom;
    1598     uint64_t        u64Padding; /**< Padding / explicit alignment filler. */
    1599 
    1600     /** Statistics: Number of TB allocation calls. */
    1601     STAMCOUNTER     StatAllocs;
    1602     /** Statistics: Number of TB free calls. */
    1603     STAMCOUNTER     StatFrees;
    1604     /** Statistics: Time spent pruning. */
    1605     STAMPROFILE     StatPrune;
    1606     /** Statistics: Time spent pruning native TBs. */
    1607     STAMPROFILE     StatPruneNative;
    1608 
    1609     /** The delayed free list (see iemTbAlloctorScheduleForFree). */
    1610     PIEMTB          pDelayedFreeHead;
    1611     /** Head of the list of free TBs. */
    1612     PIEMTB          pTbsFreeHead;
    1613 
    1614     /** Allocation chunks. */
    1615     IEMTBCHUNK      aChunks[256];
    1616 } IEMTBALLOCATOR;
    1617 /** Pointer to a TB allocator. */
    1618 typedef struct IEMTBALLOCATOR *PIEMTBALLOCATOR;
    1619 
    1620 /** Magic value for the TB allocator (Emmet Harley Cohen). */
    1621 #define IEMTBALLOCATOR_MAGIC        UINT32_C(0x19900525)
    1622 
    1623 
    1624 /**
    1625  * A per-CPU translation block cache (hash table).
    1626  *
    1627  * The hash table is allocated once during IEM initialization and sized double
    1628  * the max TB count, rounded up to the nearest power of two (so we can use an
    1629  * AND mask rather than a remainder (modulo) operation when hashing).
    1630  */
    1631 typedef struct IEMTBCACHE
    1632 {
    1633     /** Magic value (IEMTBCACHE_MAGIC). */
    1634     uint32_t        uMagic;
    1635     /** Size of the hash table.  This is a power of two. */
    1636     uint32_t        cHash;
    1637     /** The mask corresponding to cHash. */
    1638     uint32_t        uHashMask;
    1639     uint32_t        uPadding;
    1640 
    1641     /** @name Statistics
    1642      * @{ */
    1643     /** Number of collisions ever. */
    1644     STAMCOUNTER     cCollisions;
    1645 
    1646     /** Statistics: Number of TB lookup misses. */
    1647     STAMCOUNTER     cLookupMisses;
    1648     /** Statistics: Number of TB lookup hits via hash table (debug only). */
    1649     STAMCOUNTER     cLookupHits;
    1650     /** Statistics: Number of TB lookup hits via TB associated lookup table (debug only). */
    1651     STAMCOUNTER     cLookupHitsViaTbLookupTable;
    1652     STAMCOUNTER     auPadding2[2];
    1653     /** Statistics: Collision list length pruning. */
    1654     STAMPROFILE     StatPrune;
    1655     /** @} */
    1656 
    1657     /** The hash table itself.
    1658      * @note The lower 6 bits of the pointer are used for keeping the collision
    1659      *       list length, so we can take action when it grows too long.
    1660      *       This works because TBs are allocated using a 64 byte (or
    1661      *       higher) alignment from page aligned chunks of memory, so the lower
    1662      *       6 bits of the address will always be zero.
    1663      *       See IEMTBCACHE_PTR_COUNT_MASK, IEMTBCACHE_PTR_MAKE and friends.
    1664      */
    1665     RT_FLEXIBLE_ARRAY_EXTENSION
    1666     PIEMTB          apHash[RT_FLEXIBLE_ARRAY];
    1667 } IEMTBCACHE;
    1668 /** Pointer to a per-CPU translation block cache. */
    1669 typedef IEMTBCACHE *PIEMTBCACHE;
    1670 
    1671 /** Magic value for IEMTBCACHE (Johnny O'Neal). */
    1672 #define IEMTBCACHE_MAGIC            UINT32_C(0x19561010)
    1673 
    1674 /** The collision count mask for IEMTBCACHE::apHash entries. */
    1675 #define IEMTBCACHE_PTR_COUNT_MASK               ((uintptr_t)0x3f)
    1676 /** The max collision count for IEMTBCACHE::apHash entries before pruning. */
    1677 #define IEMTBCACHE_PTR_MAX_COUNT                ((uintptr_t)0x30)
    1678 /** Combine a TB pointer and a collision list length into a value for an
    1679  *  IEMTBCACHE::apHash entry. */
    1680 #define IEMTBCACHE_PTR_MAKE(a_pTb, a_cCount)    (PIEMTB)((uintptr_t)(a_pTb) | (a_cCount))
    1681 /** Get the TB pointer part of an
    1682  *  IEMTBCACHE::apHash entry. */
    1683 #define IEMTBCACHE_PTR_GET_TB(a_pHashEntry)     (PIEMTB)((uintptr_t)(a_pHashEntry) & ~IEMTBCACHE_PTR_COUNT_MASK)
    1684 /** Get the collision list length part of an
    1685  *  IEMTBCACHE::apHash entry. */
    1686 #define IEMTBCACHE_PTR_GET_COUNT(a_pHashEntry)  ((uintptr_t)(a_pHashEntry) & IEMTBCACHE_PTR_COUNT_MASK)
    1687 
    1688 /**
    1689  * Calculates the hash table slot for a TB from physical PC address and TB flags.
    1690  */
    1691 #define IEMTBCACHE_HASH(a_paCache, a_fTbFlags, a_GCPhysPc) \
    1692     IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, (a_fTbFlags) & IEMTB_F_KEY_MASK, a_GCPhysPc)
    1693 
    1694 /**
    1695  * Calculates the hash table slot for a TB from physical PC address and TB
    1696  * flags, ASSUMING the caller has applied IEMTB_F_KEY_MASK to @a a_fTbFlags.
    1697  */
    1698 #define IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, a_fTbFlags, a_GCPhysPc) \
    1699     (((uint32_t)(a_GCPhysPc) ^ (a_fTbFlags)) & (a_paCache)->uHashMask)
    1700 
    1701 
    1702 /** @name IEMBRANCHED_F_XXX - Branched indicator (IEMCPU::fTbBranched).
    1703  *
    1704  * These flags parallel the main IEM_CIMPL_F_BRANCH_XXX flags.
    1705  *
    1706  * @{ */
    1707 /** Value if no branching happened recently. */
    1708 #define IEMBRANCHED_F_NO            UINT8_C(0x00)
    1709 /** Flag set if direct branch, clear if absolute or indirect. */
    1710 #define IEMBRANCHED_F_DIRECT        UINT8_C(0x01)
    1711 /** Flag set if indirect branch, clear if direct or relative. */
    1712 #define IEMBRANCHED_F_INDIRECT      UINT8_C(0x02)
    1713 /** Flag set if relative branch, clear if absolute or indirect. */
    1714 #define IEMBRANCHED_F_RELATIVE      UINT8_C(0x04)
    1715 /** Flag set if conditional branch, clear if unconditional. */
    1716 #define IEMBRANCHED_F_CONDITIONAL   UINT8_C(0x08)
    1717 /** Flag set if it's a far branch.
    1718  * @note x86 specific */
    1719 #define IEMBRANCHED_F_FAR           UINT8_C(0x10)
    1720 /** Flag set if the stack pointer is modified. */
    1721 #define IEMBRANCHED_F_STACK         UINT8_C(0x20)
    1722 /** Flag set if the stack pointer and (maybe) the stack segment are modified.
    1723  * @note x86 specific */
    1724 #define IEMBRANCHED_F_STACK_FAR     UINT8_C(0x40)
    1725 /** Flag set (by IEM_MC_REL_JMP_XXX) if it's a zero-byte relative jump. */
    1726 #define IEMBRANCHED_F_ZERO          UINT8_C(0x80)
    1727 /** @} */
    1728 
    1729 
    1730 /**
    1731  * The per-CPU IEM state.
    1732  */
    1733 typedef struct IEMCPU
    1734 {
    1735     /** Info status code that needs to be propagated to the IEM caller.
    1736      * This cannot be passed internally, as it would complicate all success
    1737      * checks within the interpreter making the code larger and almost impossible
    1738      * to get right.  Instead, we'll store status codes to pass on here.  Each
    1739      * source of these codes will perform appropriate sanity checks. */
    1740     int32_t                 rcPassUp;                                                                       /* 0x00 */
    1741     /** Execution flag, IEM_F_XXX. */
    1742     uint32_t                fExec;                                                                          /* 0x04 */
    1743 
    1744     /** @name Decoder state.
    1745      * @{ */
    1746 #ifdef IEM_WITH_CODE_TLB
    1747     /** The offset of the next instruction byte. */
    1748     uint32_t                offInstrNextByte;                                                               /* 0x08 */
    1749     /** The number of bytes available at pbInstrBuf for the current instruction.
    1750      * This takes the max opcode length into account so that doesn't need to be
    1751      * checked separately. */
    1752     uint32_t                cbInstrBuf;                                                                     /* 0x0c */
    1753     /** Pointer to the page containing RIP, user specified buffer or abOpcode.
    1754      * This can be NULL if the page isn't mappable for some reason, in which
    1755      * case we'll do fallback stuff.
    1756      *
    1757      * If we're executing an instruction from a user specified buffer,
    1758      * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page
    1759      * aligned pointer but pointer to the user data.
    1760      *
    1761      * For instructions crossing pages, this will start on the first page and be
    1762      * advanced to the next page by the time we've decoded the instruction.  This
    1763      * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
    1764      */
    1765     uint8_t const          *pbInstrBuf;                                                                     /* 0x10 */
    1766 # if ARCH_BITS == 32
    1767     uint32_t                uInstrBufHigh; /** The high dword of the host context pbInstrBuf member. */
    1768 # endif
    1769     /** The program counter corresponding to pbInstrBuf.
    1770      * This is set to a non-canonical address when we need to invalidate it. */
    1771     uint64_t                uInstrBufPc;                                                                    /* 0x18 */
    1772     /** The guest physical address corresponding to pbInstrBuf. */
    1773     RTGCPHYS                GCPhysInstrBuf;                                                                 /* 0x20 */
    1774     /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
    1775      * This takes the CS segment limit into account.
    1776      * @note Set to zero when the code TLB is flushed to trigger TLB reload. */
    1777     uint16_t                cbInstrBufTotal;                                                                /* 0x28 */
    1778     /** Offset into pbInstrBuf of the first byte of the current instruction.
    1779      * Can be negative to efficiently handle cross page instructions. */
    1780     int16_t                 offCurInstrStart;                                                               /* 0x2a */
    1781 
    1782 # ifndef IEM_WITH_OPAQUE_DECODER_STATE
    1783     /** The prefix mask (IEM_OP_PRF_XXX). */
    1784     uint32_t                fPrefixes;                                                                      /* 0x2c */
    1785     /** The extra REX ModR/M register field bit (REX.R << 3). */
    1786     uint8_t                 uRexReg;                                                                        /* 0x30 */
    1787     /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
    1788      * (REX.B << 3). */
    1789     uint8_t                 uRexB;                                                                          /* 0x31 */
    1790     /** The extra REX SIB index field bit (REX.X << 3). */
    1791     uint8_t                 uRexIndex;                                                                      /* 0x32 */
    1792 
    1793     /** The effective segment register (X86_SREG_XXX). */
    1794     uint8_t                 iEffSeg;                                                                        /* 0x33 */
    1795 
    1796     /** The offset of the ModR/M byte relative to the start of the instruction. */
    1797     uint8_t                 offModRm;                                                                       /* 0x34 */
    1798 
    1799 #  ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    1800     /** The current offset into abOpcode. */
    1801     uint8_t                 offOpcode;                                                                      /* 0x35 */
    1802 #  else
    1803     uint8_t                 bUnused;                                                                        /* 0x35 */
    1804 #  endif
    1805 # else  /* IEM_WITH_OPAQUE_DECODER_STATE */
    1806     uint8_t                 abOpaqueDecoderPart1[0x36 - 0x2c];
    1807 # endif /* IEM_WITH_OPAQUE_DECODER_STATE */
    1808 
    1809 #else  /* !IEM_WITH_CODE_TLB */
    1810 #  ifndef IEM_WITH_OPAQUE_DECODER_STATE
    1811     /** The size of what has currently been fetched into abOpcode. */
    1812     uint8_t                 cbOpcode;                                                                       /*       0x08 */
    1813     /** The current offset into abOpcode. */
    1814     uint8_t                 offOpcode;                                                                      /*       0x09 */
    1815     /** The offset of the ModR/M byte relative to the start of the instruction. */
    1816     uint8_t                 offModRm;                                                                       /*       0x0a */
    1817 
    1818     /** The effective segment register (X86_SREG_XXX). */
    1819     uint8_t                 iEffSeg;                                                                        /*       0x0b */
    1820 
    1821     /** The prefix mask (IEM_OP_PRF_XXX). */
    1822     uint32_t                fPrefixes;                                                                      /*       0x0c */
    1823     /** The extra REX ModR/M register field bit (REX.R << 3). */
    1824     uint8_t                 uRexReg;                                                                        /*       0x10 */
    1825     /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
    1826      * (REX.B << 3). */
    1827     uint8_t                 uRexB;                                                                          /*       0x11 */
    1828     /** The extra REX SIB index field bit (REX.X << 3). */
    1829     uint8_t                 uRexIndex;                                                                      /*       0x12 */
    1830 
    1831 # else  /* IEM_WITH_OPAQUE_DECODER_STATE */
    1832     uint8_t                 abOpaqueDecoderPart1[0x13 - 0x08];
    1833 # endif /* IEM_WITH_OPAQUE_DECODER_STATE */
    1834 #endif /* !IEM_WITH_CODE_TLB */
    1835 
    1836 #ifndef IEM_WITH_OPAQUE_DECODER_STATE
    1837     /** The effective operand mode. */
    1838     IEMMODE                 enmEffOpSize;                                                                   /* 0x36, 0x13 */
    1839     /** The default addressing mode. */
    1840     IEMMODE                 enmDefAddrMode;                                                                 /* 0x37, 0x14 */
    1841     /** The effective addressing mode. */
    1842     IEMMODE                 enmEffAddrMode;                                                                 /* 0x38, 0x15 */
    1843     /** The default operand mode. */
    1844     IEMMODE                 enmDefOpSize;                                                                   /* 0x39, 0x16 */
    1845 
    1846     /** Prefix index (VEX.pp) for two byte and three byte tables. */
    1847     uint8_t                 idxPrefix;                                                                      /* 0x3a, 0x17 */
    1848     /** 3rd VEX/EVEX/XOP register.
    1849      * Please use IEM_GET_EFFECTIVE_VVVV to access.  */
    1850     uint8_t                 uVex3rdReg;                                                                     /* 0x3b, 0x18 */
    1851     /** The VEX/EVEX/XOP length field. */
    1852     uint8_t                 uVexLength;                                                                     /* 0x3c, 0x19 */
    1853     /** Additional EVEX stuff. */
    1854     uint8_t                 fEvexStuff;                                                                     /* 0x3d, 0x1a */
    1855 
    1856 # ifndef IEM_WITH_CODE_TLB
    1857     /** Explicit alignment padding. */
    1858     uint8_t                 abAlignment2a[1];                                                               /*       0x1b */
    1859 # endif
    1860     /** The FPU opcode (FOP). */
    1861     uint16_t                uFpuOpcode;                                                                     /* 0x3e, 0x1c */
    1862 # ifndef IEM_WITH_CODE_TLB
    1863     /** Explicit alignment padding. */
    1864     uint8_t                 abAlignment2b[2];                                                               /*       0x1e */
    1865 # endif
    1866 
    1867     /** The opcode bytes. */
    1868     uint8_t                 abOpcode[15];                                                                   /* 0x40, 0x20 */
    1869     /** Explicit alignment padding. */
    1870 # ifdef IEM_WITH_CODE_TLB
    1871     //uint8_t                 abAlignment2c[0x4f - 0x4f];                                                     /* 0x4f */
    1872 # else
    1873     uint8_t                 abAlignment2c[0x4f - 0x2f];                                                     /*       0x2f */
    1874 # endif
    1875 
    1876 #else  /* IEM_WITH_OPAQUE_DECODER_STATE */
    1877 # ifdef IEM_WITH_CODE_TLB
    1878     uint8_t                 abOpaqueDecoderPart2[0x4f - 0x36];
    1879 # else
    1880     uint8_t                 abOpaqueDecoderPart2[0x4f - 0x13];
    1881 # endif
    1882 #endif /* IEM_WITH_OPAQUE_DECODER_STATE */
    1883     /** @} */
    1884 
    1885 
    1886     /** The number of active guest memory mappings. */
    1887     uint8_t                 cActiveMappings;                                                                /* 0x4f, 0x4f */
    1888 
    1889     /** Records for tracking guest memory mappings. */
    1890     struct
    1891     {
    1892         /** The address of the mapped bytes. */
    1893         R3R0PTRTYPE(void *) pv;
    1894         /** The access flags (IEM_ACCESS_XXX).
    1895          * IEM_ACCESS_INVALID if the entry is unused. */
    1896         uint32_t            fAccess;
    1897 #if HC_ARCH_BITS == 64
    1898         uint32_t            u32Alignment4; /**< Alignment padding. */
    1899 #endif
    1900     } aMemMappings[3];                                                                                      /* 0x50 LB 0x30 */
    1901 
    1902     /** Locking records for the mapped memory. */
    1903     union
    1904     {
    1905         PGMPAGEMAPLOCK      Lock;
    1906         uint64_t            au64Padding[2];
    1907     } aMemMappingLocks[3];                                                                                  /* 0x80 LB 0x30 */
    1908 
    1909     /** Bounce buffer info.
    1910      * This runs in parallel to aMemMappings. */
    1911     struct
    1912     {
    1913         /** The physical address of the first byte. */
    1914         RTGCPHYS            GCPhysFirst;
    1915         /** The physical address of the second page. */
    1916         RTGCPHYS            GCPhysSecond;
    1917         /** The number of bytes in the first page. */
    1918         uint16_t            cbFirst;
    1919         /** The number of bytes in the second page. */
    1920         uint16_t            cbSecond;
    1921         /** Whether it's unassigned memory. */
    1922         bool                fUnassigned;
    1923         /** Explicit alignment padding. */
    1924         bool                afAlignment5[3];
    1925     } aMemBbMappings[3];                                                                                    /* 0xb0 LB 0x48 */
    1926 
    1927     /** The flags of the current exception / interrupt. */
    1928     uint32_t                fCurXcpt;                                                                       /* 0xf8 */
    1929     /** The current exception / interrupt. */
    1930     uint8_t                 uCurXcpt;                                                                       /* 0xfc */
    1931     /** Exception / interrupt recursion depth. */
    1932     int8_t                  cXcptRecursions;                                                                /* 0xfb */
    1933 
    1934     /** The next unused mapping index.
    1935      * @todo try find room for this up with cActiveMappings. */
    1936     uint8_t                 iNextMapping;                                                                   /* 0xfd */
    1937     uint8_t                 abAlignment7[1];
    1938 
    1939     /** Bounce buffer storage.
    1940      * This runs in parallel to aMemMappings and aMemBbMappings. */
    1941     struct
    1942     {
    1943         uint8_t             ab[512];
    1944     } aBounceBuffers[3];                                                                                    /* 0x100 LB 0x600 */
    1945 
    1946 
    1947     /** Pointer set jump buffer - ring-3 context. */
    1948     R3PTRTYPE(jmp_buf *)    pJmpBufR3;
    1949     /** Pointer set jump buffer - ring-0 context. */
    1950     R0PTRTYPE(jmp_buf *)    pJmpBufR0;
    1951 
    1952     /** @todo Should move this near @a fCurXcpt later. */
    1953     /** The CR2 for the current exception / interrupt. */
    1954     uint64_t                uCurXcptCr2;
    1955     /** The error code for the current exception / interrupt. */
    1956     uint32_t                uCurXcptErr;
    1957 
    1958     /** @name Statistics
    1959      * @{  */
    1960     /** The number of instructions we've executed. */
    1961     uint32_t                cInstructions;
    1962     /** The number of potential exits. */
    1963     uint32_t                cPotentialExits;
    1964     /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
    1965     uint32_t                cRetInstrNotImplemented;
    1966     /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
    1967     uint32_t                cRetAspectNotImplemented;
    1968     /** Counts informational statuses returned (other than VINF_SUCCESS). */
    1969     uint32_t                cRetInfStatuses;
    1970     /** Counts other error statuses returned. */
    1971     uint32_t                cRetErrStatuses;
    1972     /** Number of times rcPassUp has been used. */
    1973     uint32_t                cRetPassUpStatus;
    1974     /** Number of times RZ left with instruction commit pending for ring-3. */
    1975     uint32_t                cPendingCommit;
    1976     /** Number of misaligned (host sense) atomic instruction accesses. */
    1977     uint32_t                cMisalignedAtomics;
    1978     /** Number of long jumps. */
    1979     uint32_t                cLongJumps;
    1980     /** @} */
    1981 
    1982     /** @name Target CPU information.
    1983      * @{ */
    1984 #if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
    1985     /** The target CPU. */
    1986     uint8_t                 uTargetCpu;
    1987 #else
    1988     uint8_t                 bTargetCpuPadding;
    1989 #endif
    1990     /** For selecting assembly works matching the target CPU EFLAGS behaviour, see
    1991      * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry for when no
    1992      * native host support and the 2nd for when there is.
    1993      *
    1994      * The two values are typically indexed by a g_CpumHostFeatures bit.
    1995      *
    1996      * This is for instance used for the BSF & BSR instructions where AMD and
    1997      * Intel CPUs produce different EFLAGS. */
    1998     uint8_t                 aidxTargetCpuEflFlavour[2];
    1999 
    2000     /** The CPU vendor. */
    2001     CPUMCPUVENDOR           enmCpuVendor;
    2002     /** @} */
    2003 
    2004     /** Counts RDMSR \#GP(0) LogRel(). */
    2005     uint8_t                 cLogRelRdMsr;
    2006     /** Counts WRMSR \#GP(0) LogRel(). */
    2007     uint8_t                 cLogRelWrMsr;
    2008     /** Alignment padding. */
    2009     uint8_t                 abAlignment9[50];
    2010 
    2011 
    2012     /** @name Recompiled Exection
    2013      * @{ */
    2014     /** Pointer to the current translation block.
    2015      * This can either be one being executed or one being compiled. */
    2016     R3PTRTYPE(PIEMTB)       pCurTbR3;
    2017 #ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
    2018     /** Frame pointer for the last native TB to execute. */
    2019     R3PTRTYPE(void *)       pvTbFramePointerR3;
    2020 #else
    2021     R3PTRTYPE(void *)       pvUnusedR3;
    2022 #endif
    2023 #ifdef IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
    2024     /** The saved host floating point control register (MXCSR on x86, FPCR on arm64)
    2025      * needing restore when the TB finished, IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED indicates the TB
    2026      * didn't modify it so we don't need to restore it. */
    2027 # ifdef RT_ARCH_AMD64
    2028     uint32_t                uRegFpCtrl;
    2029     /** Temporary copy of MXCSR for stmxcsr/ldmxcsr (so we don't have to fiddle with stack pointers). */
    2030     uint32_t                uRegMxcsrTmp;
    2031 # elif defined(RT_ARCH_ARM64)
    2032     uint64_t                uRegFpCtrl;
    2033 # else
    2034 #  error "Port me"
    2035 # endif
    2036 #else
    2037     uint64_t                u64Unused;
    2038 #endif
    2039     /** Pointer to the ring-3 TB cache for this EMT. */
    2040     R3PTRTYPE(PIEMTBCACHE)  pTbCacheR3;
    2041     /** Pointer to the ring-3 TB lookup entry.
    2042      * This either points to pTbLookupEntryDummyR3 or an actually lookuptable
    2043      * entry, thus it can always safely be used w/o NULL checking. */
    2044     R3PTRTYPE(PIEMTB *)     ppTbLookupEntryR3;
    2045 #if 0 /* unused */
    2046     /** The PC (RIP) at the start of pCurTbR3/pCurTbR0.
    2047      * The TBs are based on physical addresses, so this is needed to correleated
    2048      * RIP to opcode bytes stored in the TB (AMD-V / VT-x). */
    2049     uint64_t                uCurTbStartPc;
    2050 #endif
    2051 
    2052     /** Number of threaded TBs executed. */
    2053     uint64_t                cTbExecThreaded;
    2054     /** Number of native TBs executed. */
    2055     uint64_t                cTbExecNative;
    2056 
    2057     /** The number of IRQ/FF checks till the next timer poll call. */
    2058     uint32_t                cTbsTillNextTimerPoll;
    2059     /** The virtual sync time at the last timer poll call in milliseconds. */
    2060     uint32_t                msRecompilerPollNow;
    2061     /** The virtual sync time at the last timer poll call in nanoseconds. */
    2062     uint64_t                nsRecompilerPollNow;
    2063     /** The previous cTbsTillNextTimerPoll value. */
    2064     uint32_t                cTbsTillNextTimerPollPrev;
    2065 
    2066     /** The current instruction number in a native TB.
    2067      * This is set by code that may trigger an unexpected TB exit (throw/longjmp)
    2068      * and will be picked up by the TB execution loop. Only used when
    2069      * IEMNATIVE_WITH_INSTRUCTION_COUNTING is defined. */
    2070     uint8_t                 idxTbCurInstr;
    2071     /** @} */
    2072 
    2073     /** @name Recompilation
    2074      * @{ */
    2075     /** Whether we need to check the opcode bytes for the current instruction.
    2076      * This is set by a previous instruction if it modified memory or similar.  */
    2077     bool                    fTbCheckOpcodes;
    2078     /** Indicates whether and how we just branched - IEMBRANCHED_F_XXX. */
    2079     uint8_t                 fTbBranched;
    2080     /** Set when GCPhysInstrBuf is updated because of a page crossing. */
    2081     bool                    fTbCrossedPage;
    2082     /** Whether to end the current TB. */
    2083     bool                    fEndTb;
    2084     /** Indicates that the current instruction is an STI.  This is set by the
    2085      * iemCImpl_sti code and subsequently cleared by the recompiler. */
    2086     bool                    fTbCurInstrIsSti;
    2087     /** Spaced reserved for recompiler data / alignment. */
    2088     bool                    afRecompilerStuff1[1];
    2089     /** Number of instructions before we need emit an IRQ check call again.
    2090      * This helps making sure we don't execute too long w/o checking for
    2091      * interrupts and immediately following instructions that may enable
    2092      * interrupts (e.g. POPF, IRET, STI).  With STI an additional hack is
    2093      * required to make sure we check following the next instruction as well, see
    2094      * fTbCurInstrIsSti. */
    2095     uint8_t                 cInstrTillIrqCheck;
    2096     /** The index of the last CheckIrq call during threaded recompilation. */
    2097     uint16_t                idxLastCheckIrqCallNo;
    2098     /** The size of the IEMTB::pabOpcodes allocation in pThrdCompileTbR3. */
    2099     uint16_t                cbOpcodesAllocated;
    2100     /** The IEMTB::cUsed value when to attempt native recompilation of a TB. */
    2101     uint32_t                uTbNativeRecompileAtUsedCount;
    2102     /** The IEM_CIMPL_F_XXX mask for the current instruction. */
    2103     uint32_t                fTbCurInstr;
    2104     /** The IEM_CIMPL_F_XXX mask for the previous instruction. */
    2105     uint32_t                fTbPrevInstr;
    2106     /** Strict: Tracking skipped EFLAGS calculations.  Any bits set here are
    2107      *  currently not up to date in EFLAGS. */
    2108     uint32_t                fSkippingEFlags;
    2109 #if 0  /* unused */
    2110     /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set.   */
    2111     RTGCPHYS                GCPhysInstrBufPrev;
    2112 #endif
    2113 
    2114     /** Fixed TB used for threaded recompilation.
    2115      * This is allocated once with maxed-out sizes and re-used afterwards. */
    2116     R3PTRTYPE(PIEMTB)       pThrdCompileTbR3;
    2117     /** Pointer to the ring-3 TB allocator for this EMT. */
    2118     R3PTRTYPE(PIEMTBALLOCATOR) pTbAllocatorR3;
    2119     /** Pointer to the ring-3 executable memory allocator for this EMT. */
    2120     R3PTRTYPE(struct IEMEXECMEMALLOCATOR *) pExecMemAllocatorR3;
    2121     /** Pointer to the native recompiler state for ring-3. */
    2122     R3PTRTYPE(struct IEMRECOMPILERSTATE *)  pNativeRecompilerStateR3;
    2123     /** Dummy entry for ppTbLookupEntryR3. */
    2124     R3PTRTYPE(PIEMTB)       pTbLookupEntryDummyR3;
    2125 #ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
    2126     /** The debug code advances this register as if it was CPUMCTX::rip and we
    2127      * didn't do delayed PC updating.  When CPUMCTX::rip is finally updated,
    2128      * the result is compared with this value. */
    2129     uint64_t                uPcUpdatingDebug;
    2130 #elif defined(VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING)
    2131     /** The SSM handle used for saving threaded TBs for recompiler profiling. */
    2132     R3PTRTYPE(PSSMHANDLE)   pSsmThreadedTbsForProfiling;
    2133 #else
    2134     uint64_t                u64Placeholder;
    2135 #endif
    2136     /**
    2137      *  Whether we should use the host instruction invalidation APIs of the
    2138      *  host OS or our own version of it (macOS).  */
    2139     uint8_t                 fHostICacheInvalidation;
    2140 #define IEMNATIVE_ICACHE_F_USE_HOST_API     UINT8_C(0x01) /**< Use the host API (macOS) instead of our code. */
    2141 #define IEMNATIVE_ICACHE_F_END_WITH_ISH     UINT8_C(0x02) /**< Whether to end with a ISH barrier (arm). */
    2142     bool                    afRecompilerStuff2[7];
    2143     /** @} */
    2144 
    2145     /** Dummy TLB entry used for accesses to pages with databreakpoints. */
    2146     IEMTLBENTRY             DataBreakpointTlbe;
    2147 
    2148     /** Threaded TB statistics: Times TB execution was broken off before reaching the end. */
    2149     STAMCOUNTER             StatTbThreadedExecBreaks;
    2150     /** Statistics: Times BltIn_CheckIrq breaks out of the TB. */
    2151     STAMCOUNTER             StatCheckIrqBreaks;
    2152     /** Statistics: Times BltIn_CheckTimers breaks direct linking TBs. */
    2153     STAMCOUNTER             StatCheckTimersBreaks;
    2154     /** Statistics: Times BltIn_CheckMode breaks out of the TB. */
    2155     STAMCOUNTER             StatCheckModeBreaks;
    2156     /** Threaded TB statistics: Times execution break on call with lookup entries. */
    2157     STAMCOUNTER             StatTbThreadedExecBreaksWithLookup;
    2158     /** Threaded TB statistics: Times execution break on call without lookup entries. */
    2159     STAMCOUNTER             StatTbThreadedExecBreaksWithoutLookup;
    2160     /** Statistics: Times a post jump target check missed and had to find new TB. */
    2161     STAMCOUNTER             StatCheckBranchMisses;
    2162     /** Statistics: Times a jump or page crossing required a TB with CS.LIM checking. */
    2163     STAMCOUNTER             StatCheckNeedCsLimChecking;
    2164     /** Statistics: Times a loop was detected within a TB. */
    2165     STAMCOUNTER             StatTbLoopInTbDetected;
    2166     /** Statistics: Times a loop back to the start of the TB was detected. */
    2167     STAMCOUNTER             StatTbLoopFullTbDetected;
    2168     /** Statistics: Times a loop back to the start of the TB was detected, var 2. */
    2169     STAMCOUNTER             StatTbLoopFullTbDetected2;
    2170     /** Exec memory allocator statistics: Number of times allocaintg executable memory failed. */
    2171     STAMCOUNTER             StatNativeExecMemInstrBufAllocFailed;
    2172     /** Native TB statistics: Number of fully recompiled TBs. */
    2173     STAMCOUNTER             StatNativeFullyRecompiledTbs;
    2174     /** TB statistics: Number of instructions per TB. */
    2175     STAMPROFILE             StatTbInstr;
    2176     /** TB statistics: Number of TB lookup table entries per TB. */
    2177     STAMPROFILE             StatTbLookupEntries;
    2178     /** Threaded TB statistics: Number of calls per TB. */
    2179     STAMPROFILE             StatTbThreadedCalls;
    2180     /** Native TB statistics: Native code size per TB. */
    2181     STAMPROFILE             StatTbNativeCode;
    2182     /** Native TB statistics: Profiling native recompilation. */
    2183     STAMPROFILE             StatNativeRecompilation;
    2184     /** Native TB statistics: Number of calls per TB that were recompiled properly. */
    2185     STAMPROFILE             StatNativeCallsRecompiled;
    2186     /** Native TB statistics: Number of threaded calls per TB that weren't recompiled. */
    2187     STAMPROFILE             StatNativeCallsThreaded;
    2188     /** Native recompiled execution: TLB hits for data fetches. */
    2189     STAMCOUNTER             StatNativeTlbHitsForFetch;
    2190     /** Native recompiled execution: TLB hits for data stores. */
    2191     STAMCOUNTER             StatNativeTlbHitsForStore;
    2192     /** Native recompiled execution: TLB hits for stack accesses. */
    2193     STAMCOUNTER             StatNativeTlbHitsForStack;
    2194     /** Native recompiled execution: TLB hits for mapped accesses. */
    2195     STAMCOUNTER             StatNativeTlbHitsForMapped;
    2196     /** Native recompiled execution: Code TLB misses for new page. */
    2197     STAMCOUNTER             StatNativeCodeTlbMissesNewPage;
    2198     /** Native recompiled execution: Code TLB hits for new page. */
    2199     STAMCOUNTER             StatNativeCodeTlbHitsForNewPage;
    2200     /** Native recompiled execution: Code TLB misses for new page with offset. */
    2201     STAMCOUNTER             StatNativeCodeTlbMissesNewPageWithOffset;
    2202     /** Native recompiled execution: Code TLB hits for new page with offset. */
    2203     STAMCOUNTER             StatNativeCodeTlbHitsForNewPageWithOffset;
    2204 
    2205     /** Native recompiler: Number of calls to iemNativeRegAllocFindFree. */
    2206     STAMCOUNTER             StatNativeRegFindFree;
    2207     /** Native recompiler: Number of times iemNativeRegAllocFindFree needed
    2208      *  to free a variable. */
    2209     STAMCOUNTER             StatNativeRegFindFreeVar;
    2210     /** Native recompiler: Number of times iemNativeRegAllocFindFree did
    2211      *  not need to free any variables. */
    2212     STAMCOUNTER             StatNativeRegFindFreeNoVar;
    2213     /** Native recompiler: Liveness info freed shadowed guest registers in
    2214      * iemNativeRegAllocFindFree. */
    2215     STAMCOUNTER             StatNativeRegFindFreeLivenessUnshadowed;
    2216     /** Native recompiler: Liveness info helped with the allocation in
    2217      *  iemNativeRegAllocFindFree. */
    2218     STAMCOUNTER             StatNativeRegFindFreeLivenessHelped;
    2219 
    2220     /** Native recompiler: Number of times status flags calc has been skipped. */
    2221     STAMCOUNTER             StatNativeEflSkippedArithmetic;
    2222     /** Native recompiler: Number of times status flags calc has been postponed. */
    2223     STAMCOUNTER             StatNativeEflPostponedArithmetic;
    2224     /** Native recompiler: Total number instructions in this category. */
    2225     STAMCOUNTER             StatNativeEflTotalArithmetic;
    2226 
    2227     /** Native recompiler: Number of times status flags calc has been skipped. */
    2228     STAMCOUNTER             StatNativeEflSkippedLogical;
    2229     /** Native recompiler: Number of times status flags calc has been postponed. */
    2230     STAMCOUNTER             StatNativeEflPostponedLogical;
    2231     /** Native recompiler: Total number instructions in this category. */
    2232     STAMCOUNTER             StatNativeEflTotalLogical;
    2233 
    2234     /** Native recompiler: Number of times status flags calc has been skipped. */
    2235     STAMCOUNTER             StatNativeEflSkippedShift;
    2236     /** Native recompiler: Number of times status flags calc has been postponed. */
    2237     STAMCOUNTER             StatNativeEflPostponedShift;
    2238     /** Native recompiler: Total number instructions in this category. */
    2239     STAMCOUNTER             StatNativeEflTotalShift;
    2240 
    2241     /** Native recompiler: Number of emits per postponement. */
    2242     STAMPROFILE             StatNativeEflPostponedEmits;
    2243 
    2244     /** Native recompiler: Number of opportunities to skip EFLAGS.CF updating. */
    2245     STAMCOUNTER             StatNativeLivenessEflCfSkippable;
    2246     /** Native recompiler: Number of opportunities to skip EFLAGS.PF updating. */
    2247     STAMCOUNTER             StatNativeLivenessEflPfSkippable;
    2248     /** Native recompiler: Number of opportunities to skip EFLAGS.AF updating. */
    2249     STAMCOUNTER             StatNativeLivenessEflAfSkippable;
    2250     /** Native recompiler: Number of opportunities to skip EFLAGS.ZF updating. */
    2251     STAMCOUNTER             StatNativeLivenessEflZfSkippable;
    2252     /** Native recompiler: Number of opportunities to skip EFLAGS.SF updating. */
    2253     STAMCOUNTER             StatNativeLivenessEflSfSkippable;
    2254     /** Native recompiler: Number of opportunities to skip EFLAGS.OF updating. */
    2255     STAMCOUNTER             StatNativeLivenessEflOfSkippable;
    2256     /** Native recompiler: Number of required EFLAGS.CF updates. */
    2257     STAMCOUNTER             StatNativeLivenessEflCfRequired;
    2258     /** Native recompiler: Number of required EFLAGS.PF updates. */
    2259     STAMCOUNTER             StatNativeLivenessEflPfRequired;
    2260     /** Native recompiler: Number of required EFLAGS.AF updates. */
    2261     STAMCOUNTER             StatNativeLivenessEflAfRequired;
    2262     /** Native recompiler: Number of required EFLAGS.ZF updates. */
    2263     STAMCOUNTER             StatNativeLivenessEflZfRequired;
    2264     /** Native recompiler: Number of required EFLAGS.SF updates. */
    2265     STAMCOUNTER             StatNativeLivenessEflSfRequired;
    2266     /** Native recompiler: Number of required EFLAGS.OF updates. */
    2267     STAMCOUNTER             StatNativeLivenessEflOfRequired;
    2268     /** Native recompiler: Number of potentially delayable EFLAGS.CF updates. */
    2269     STAMCOUNTER             StatNativeLivenessEflCfDelayable;
    2270     /** Native recompiler: Number of potentially delayable EFLAGS.PF updates. */
    2271     STAMCOUNTER             StatNativeLivenessEflPfDelayable;
    2272     /** Native recompiler: Number of potentially delayable EFLAGS.AF updates. */
    2273     STAMCOUNTER             StatNativeLivenessEflAfDelayable;
    2274     /** Native recompiler: Number of potentially delayable EFLAGS.ZF updates. */
    2275     STAMCOUNTER             StatNativeLivenessEflZfDelayable;
    2276     /** Native recompiler: Number of potentially delayable EFLAGS.SF updates. */
    2277     STAMCOUNTER             StatNativeLivenessEflSfDelayable;
    2278     /** Native recompiler: Number of potentially delayable EFLAGS.OF updates. */
    2279     STAMCOUNTER             StatNativeLivenessEflOfDelayable;
    2280 
    2281     /** Native recompiler: Number of potential PC updates in total. */
    2282     STAMCOUNTER             StatNativePcUpdateTotal;
    2283     /** Native recompiler: Number of PC updates which could be delayed. */
    2284     STAMCOUNTER             StatNativePcUpdateDelayed;
    2285 
    2286     /** Native recompiler: Number of times we had complicated dirty shadow
    2287      *  register situations with the other branch in IEM_MC_ENDIF. */
    2288     STAMCOUNTER             StatNativeEndIfOtherBranchDirty;
    2289 
    2290     /** Native recompiler: Number of calls to iemNativeSimdRegAllocFindFree. */
    2291     STAMCOUNTER             StatNativeSimdRegFindFree;
    2292     /** Native recompiler: Number of times iemNativeSimdRegAllocFindFree needed
    2293      *  to free a variable. */
    2294     STAMCOUNTER             StatNativeSimdRegFindFreeVar;
    2295     /** Native recompiler: Number of times iemNativeSimdRegAllocFindFree did
    2296      *  not need to free any variables. */
    2297     STAMCOUNTER             StatNativeSimdRegFindFreeNoVar;
    2298     /** Native recompiler: Liveness info freed shadowed guest registers in
    2299      * iemNativeSimdRegAllocFindFree. */
    2300     STAMCOUNTER             StatNativeSimdRegFindFreeLivenessUnshadowed;
    2301     /** Native recompiler: Liveness info helped with the allocation in
    2302      *  iemNativeSimdRegAllocFindFree. */
    2303     STAMCOUNTER             StatNativeSimdRegFindFreeLivenessHelped;
    2304 
    2305     /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks. */
    2306     STAMCOUNTER             StatNativeMaybeDeviceNotAvailXcptCheckPotential;
    2307     /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks. */
    2308     STAMCOUNTER             StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential;
    2309     /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks. */
    2310     STAMCOUNTER             StatNativeMaybeSseXcptCheckPotential;
    2311     /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks. */
    2312     STAMCOUNTER             StatNativeMaybeAvxXcptCheckPotential;
    2313 
    2314     /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted. */
    2315     STAMCOUNTER             StatNativeMaybeDeviceNotAvailXcptCheckOmitted;
    2316     /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted. */
    2317     STAMCOUNTER             StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted;
    2318     /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted. */
    2319     STAMCOUNTER             StatNativeMaybeSseXcptCheckOmitted;
    2320     /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted. */
    2321     STAMCOUNTER             StatNativeMaybeAvxXcptCheckOmitted;
    2322 
    2323     /** Native recompiler: The TB finished executing completely without jumping to an exit label.
    2324      * Not available in release builds. */
    2325     STAMCOUNTER             StatNativeTbFinished;
    2326     /** Native recompiler: The TB finished executing jumping to the ReturnBreak label. */
    2327     STAMCOUNTER             StatNativeTbExitReturnBreak;
    2328     /** Native recompiler: The TB finished executing jumping to the ReturnBreakFF label. */
    2329     STAMCOUNTER             StatNativeTbExitReturnBreakFF;
    2330     /** Native recompiler: The TB finished executing jumping to the ReturnWithFlags label. */
    2331     STAMCOUNTER             StatNativeTbExitReturnWithFlags;
    2332     /** Native recompiler: The TB finished executing with other non-zero status. */
    2333     STAMCOUNTER             StatNativeTbExitReturnOtherStatus;
    2334     /** Native recompiler: The TB finished executing via throw / long jump. */
    2335     STAMCOUNTER             StatNativeTbExitLongJump;
    2336     /** Native recompiler: The TB finished executing jumping to the ReturnBreak
    2337      *  label, but directly jumped to the next TB, scenario \#1 w/o IRQ checks. */
    2338     STAMCOUNTER             StatNativeTbExitDirectLinking1NoIrq;
    2339     /** Native recompiler: The TB finished executing jumping to the ReturnBreak
    2340      *  label, but directly jumped to the next TB, scenario \#1 with IRQ checks. */
    2341     STAMCOUNTER             StatNativeTbExitDirectLinking1Irq;
    2342     /** Native recompiler: The TB finished executing jumping to the ReturnBreak
    2343      *  label, but directly jumped to the next TB, scenario \#2 w/o IRQ checks. */
    2344     STAMCOUNTER             StatNativeTbExitDirectLinking2NoIrq;
    2345     /** Native recompiler: The TB finished executing jumping to the ReturnBreak
    2346      *  label, but directly jumped to the next TB, scenario \#2 with IRQ checks. */
    2347     STAMCOUNTER             StatNativeTbExitDirectLinking2Irq;
    2348 
    2349     /** Native recompiler: The TB finished executing jumping to the RaiseDe label. */
    2350     STAMCOUNTER             StatNativeTbExitRaiseDe;
    2351     /** Native recompiler: The TB finished executing jumping to the RaiseUd label. */
    2352     STAMCOUNTER             StatNativeTbExitRaiseUd;
    2353     /** Native recompiler: The TB finished executing jumping to the RaiseSseRelated label. */
    2354     STAMCOUNTER             StatNativeTbExitRaiseSseRelated;
    2355     /** Native recompiler: The TB finished executing jumping to the RaiseAvxRelated label. */
    2356     STAMCOUNTER             StatNativeTbExitRaiseAvxRelated;
    2357     /** Native recompiler: The TB finished executing jumping to the RaiseSseAvxFpRelated label. */
    2358     STAMCOUNTER             StatNativeTbExitRaiseSseAvxFpRelated;
    2359     /** Native recompiler: The TB finished executing jumping to the RaiseNm label. */
    2360     STAMCOUNTER             StatNativeTbExitRaiseNm;
    2361     /** Native recompiler: The TB finished executing jumping to the RaiseGp0 label. */
    2362     STAMCOUNTER             StatNativeTbExitRaiseGp0;
    2363     /** Native recompiler: The TB finished executing jumping to the RaiseMf label. */
    2364     STAMCOUNTER             StatNativeTbExitRaiseMf;
    2365     /** Native recompiler: The TB finished executing jumping to the RaiseXf label. */
    2366     STAMCOUNTER             StatNativeTbExitRaiseXf;
    2367     /** Native recompiler: The TB finished executing jumping to the ObsoleteTb label. */
    2368     STAMCOUNTER             StatNativeTbExitObsoleteTb;
    2369 
    2370     /** Native recompiler: Number of full TB loops (jumps from end to start). */
    2371     STAMCOUNTER             StatNativeTbExitLoopFullTb;
    2372 
    2373     /** Native recompiler: Failure situations with direct linking scenario \#1.
    2374      * Counter with StatNativeTbExitReturnBreak. Not in release builds.
    2375      * @{  */
    2376     STAMCOUNTER             StatNativeTbExitDirectLinking1NoTb;
    2377     STAMCOUNTER             StatNativeTbExitDirectLinking1MismatchGCPhysPc;
    2378     STAMCOUNTER             StatNativeTbExitDirectLinking1MismatchFlags;
    2379     STAMCOUNTER             StatNativeTbExitDirectLinking1PendingIrq;
    2380     /** @} */
    2381 
    2382     /** Native recompiler: Failure situations with direct linking scenario \#2.
    2383      * Counter with StatNativeTbExitReturnBreak. Not in release builds.
    2384      * @{  */
    2385     STAMCOUNTER             StatNativeTbExitDirectLinking2NoTb;
    2386     STAMCOUNTER             StatNativeTbExitDirectLinking2MismatchGCPhysPc;
    2387     STAMCOUNTER             StatNativeTbExitDirectLinking2MismatchFlags;
    2388     STAMCOUNTER             StatNativeTbExitDirectLinking2PendingIrq;
    2389     /** @} */
    2390 
    2391     /** iemMemMap and iemMemMapJmp statistics.
    2392      *  @{ */
    2393     STAMCOUNTER             StatMemMapJmp;
    2394     STAMCOUNTER             StatMemMapNoJmp;
    2395     STAMCOUNTER             StatMemBounceBufferCrossPage;
    2396     STAMCOUNTER             StatMemBounceBufferMapPhys;
    2397     /** @} */
    2398 
    2399     /** Timer polling statistics (debug only).
    2400      * @{  */
    2401     STAMPROFILE             StatTimerPoll;
    2402     STAMPROFILE             StatTimerPollPoll;
    2403     STAMPROFILE             StatTimerPollRun;
    2404     STAMCOUNTER             StatTimerPollUnchanged;
    2405     STAMCOUNTER             StatTimerPollTiny;
    2406     STAMCOUNTER             StatTimerPollDefaultCalc;
    2407     STAMCOUNTER             StatTimerPollMax;
    2408     STAMPROFILE             StatTimerPollFactorDivision;
    2409     STAMPROFILE             StatTimerPollFactorMultiplication;
    2410     /** @} */
    2411 
    2412 
    2413     STAMCOUNTER             aStatAdHoc[8];
    2414 
    2415 #ifdef IEM_WITH_TLB_TRACE
    2416     /*uint64_t                au64Padding[0];*/
    2417 #else
    2418     uint64_t                au64Padding[2];
    2419 #endif
    2420 
    2421 #ifdef IEM_WITH_TLB_TRACE
    2422     /** The end (next) trace entry. */
    2423     uint32_t                idxTlbTraceEntry;
    2424     /** Number of trace entries allocated expressed as a power of two. */
    2425     uint32_t                cTlbTraceEntriesShift;
    2426     /** The trace entries. */
    2427     PIEMTLBTRACEENTRY       paTlbTraceEntries;
    2428 #endif
    2429 
    2430     /** Data TLB.
    2431      * @remarks Must be 64-byte aligned. */
    2432     IEMTLB                  DataTlb;
    2433     /** Instruction TLB.
    2434      * @remarks Must be 64-byte aligned. */
    2435     IEMTLB                  CodeTlb;
    2436 
    2437     /** Exception statistics. */
    2438     STAMCOUNTER             aStatXcpts[32];
    2439     /** Interrupt statistics. */
    2440     uint32_t                aStatInts[256];
    2441 
    2442 #if defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING) && !defined(IEM_WITHOUT_INSTRUCTION_STATS)
    2443     /** Instruction statistics for ring-0/raw-mode. */
    2444     IEMINSTRSTATS           StatsRZ;
    2445     /** Instruction statistics for ring-3. */
    2446     IEMINSTRSTATS           StatsR3;
    2447 # ifdef VBOX_WITH_IEM_RECOMPILER
    2448     /** Statistics per threaded function call.
    2449      * Updated by both the threaded and native recompilers. */
    2450     uint32_t                acThreadedFuncStats[0x6000 /*24576*/];
    2451 # endif
    2452 #endif
    2453 } IEMCPU;
    2454 AssertCompileMemberOffset(IEMCPU, cActiveMappings, 0x4f);
    2455 AssertCompileMemberAlignment(IEMCPU, aMemMappings, 16);
    2456 AssertCompileMemberAlignment(IEMCPU, aMemMappingLocks, 16);
    2457 AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
    2458 AssertCompileMemberAlignment(IEMCPU, pCurTbR3, 64);
    2459 AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
    2460 AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);
    2461 
    2462 /** Pointer to the per-CPU IEM state. */
    2463 typedef IEMCPU *PIEMCPU;
    2464 /** Pointer to the const per-CPU IEM state. */
    2465 typedef IEMCPU const *PCIEMCPU;
    2466 
    2467 /** @def IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED
    2468  * Value indicating the TB didn't modify the floating point control register.
    2469  * @note Neither FPCR nor MXCSR accept this as a valid value (MXCSR is not fully populated,
    2470  *       FPCR has the upper 32-bit reserved), so this is safe. */
    2471 #if defined(IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS) || defined(DOXYGEN_RUNNING)
    2472 # ifdef RT_ARCH_AMD64
    2473 #  define IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED UINT32_MAX
    2474 # elif defined(RT_ARCH_ARM64)
    2475 #  define IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED UINT64_MAX
    2476 # else
    2477 #  error "Port me"
    2478 # endif
    2479 #endif
    2480 
    2481 /** @def IEM_GET_CTX
    2482  * Gets the guest CPU context for the calling EMT.
    2483  * @returns PCPUMCTX
    2484  * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
    2485  */
    2486 #define IEM_GET_CTX(a_pVCpu)                    (&(a_pVCpu)->cpum.GstCtx)
    2487 
    2488 /** @def IEM_CTX_ASSERT
    2489  * Asserts that the @a a_fExtrnMbz is present in the CPU context.
    2490  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    2491  * @param   a_fExtrnMbz     The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
    2492  */
    2493 #define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) \
    2494     AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
    2495               ("fExtrn=%#RX64 & fExtrnMbz=%#RX64 -> %#RX64\n", \
    2496               (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz), (a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz) ))
    2497 
    2498 /** @def IEM_CTX_IMPORT_RET
    2499  * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
    2500  *
    2501  * Will call CPUM to import the bits as needed.
    2502  *
    2503  * Returns on import failure.
    2504  *
    2505  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    2506  * @param   a_fExtrnImport  The mask of CPUMCTX_EXTRN_XXX flags to import.
    2507  */
    2508 #define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
    2509     do { \
    2510         if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
    2511         { /* likely */ } \
    2512         else \
    2513         { \
    2514             int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
    2515             AssertRCReturn(rcCtxImport, rcCtxImport); \
    2516         } \
    2517     } while (0)
    2518 
    2519 /** @def IEM_CTX_IMPORT_NORET
    2520  * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
    2521  *
    2522  * Will call CPUM to import the bits as needed.
    2523  *
    2524  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    2525  * @param   a_fExtrnImport  The mask of CPUMCTX_EXTRN_XXX flags to import.
    2526  */
    2527 #define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
    2528     do { \
    2529         if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
    2530         { /* likely */ } \
    2531         else \
    2532         { \
    2533             int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
    2534             AssertLogRelRC(rcCtxImport); \
    2535         } \
    2536     } while (0)
    2537 
    2538 /** @def IEM_CTX_IMPORT_JMP
    2539  * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
    2540  *
    2541  * Will call CPUM to import the bits as needed.
    2542  *
    2543  * Jumps on import failure.
    2544  *
    2545  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    2546  * @param   a_fExtrnImport  The mask of CPUMCTX_EXTRN_XXX flags to import.
    2547  */
    2548 #define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
    2549     do { \
    2550         if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
    2551         { /* likely */ } \
    2552         else \
    2553         { \
    2554             int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
    2555             AssertRCStmt(rcCtxImport, IEM_DO_LONGJMP(pVCpu, rcCtxImport)); \
    2556         } \
    2557     } while (0)
    2558 
    2559 
    2560 
    2561 /** @def IEM_GET_TARGET_CPU
    2562  * Gets the current IEMTARGETCPU value.
    2563  * @returns IEMTARGETCPU value.
    2564  * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
    2565  */
    2566 #if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
    2567 # define IEM_GET_TARGET_CPU(a_pVCpu)    (IEM_CFG_TARGET_CPU)
    2568 #else
    2569 # define IEM_GET_TARGET_CPU(a_pVCpu)    ((a_pVCpu)->iem.s.uTargetCpu)
    2570 #endif
    2571 
    2572 /** @def IEM_GET_INSTR_LEN
    2573  * Gets the instruction length.
    2574  * @note x86 specific */
    2575 #ifdef IEM_WITH_CODE_TLB
    2576 # define IEM_GET_INSTR_LEN(a_pVCpu)     ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(int32_t)(a_pVCpu)->iem.s.offCurInstrStart)
    2577 #else
    2578 # define IEM_GET_INSTR_LEN(a_pVCpu)     ((a_pVCpu)->iem.s.offOpcode)
    2579 #endif
    2580 
    2581 /** @def IEM_TRY_SETJMP
    2582  * Wrapper around setjmp / try, hiding all the ugly differences.
    2583  *
    2584  * @note Use with extreme care as this is a fragile macro.
    2585  * @param   a_pVCpu     The cross context virtual CPU structure of the calling EMT.
    2586  * @param   a_rcTarget  The variable that should receive the status code in case
    2587  *                      of a longjmp/throw.
    2588  */
    2589 /** @def IEM_TRY_SETJMP_AGAIN
    2590  * For when setjmp / try is used again in the same variable scope as a previous
    2591  * IEM_TRY_SETJMP invocation.
    2592  */
    2593 /** @def IEM_CATCH_LONGJMP_BEGIN
    2594  * Start wrapper for catch / setjmp-else.
    2595  *
    2596  * This will set up a scope.
    2597  *
    2598  * @note Use with extreme care as this is a fragile macro.
    2599  * @param   a_pVCpu     The cross context virtual CPU structure of the calling EMT.
    2600  * @param   a_rcTarget  The variable that should receive the status code in case
    2601  *                      of a longjmp/throw.
    2602  */
    2603 /** @def IEM_CATCH_LONGJMP_END
    2604  * End wrapper for catch / setjmp-else.
    2605  *
    2606  * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
    2607  * state.
    2608  *
    2609  * @note Use with extreme care as this is a fragile macro.
    2610  * @param   a_pVCpu     The cross context virtual CPU structure of the calling EMT.
    2611  */
    2612 #if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
    2613 # ifdef IEM_WITH_THROW_CATCH
    2614 #  define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
    2615         a_rcTarget = VINF_SUCCESS; \
    2616         try
    2617 #  define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
    2618         IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
    2619 #  define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
    2620         catch (int rcThrown) \
    2621         { \
    2622             a_rcTarget = rcThrown
    2623 #  define IEM_CATCH_LONGJMP_END(a_pVCpu) \
    2624         } \
    2625         ((void)0)
    2626 # else  /* !IEM_WITH_THROW_CATCH */
    2627 #  define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
    2628         jmp_buf  JmpBuf; \
    2629         jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
    2630         (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
    2631         if ((rcStrict = setjmp(JmpBuf)) == 0)
    2632 #  define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
    2633         pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
    2634         (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
    2635         if ((rcStrict = setjmp(JmpBuf)) == 0)
    2636 #  define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
    2637         else \
    2638         { \
    2639             ((void)0)
    2640 #  define IEM_CATCH_LONGJMP_END(a_pVCpu) \
    2641         } \
    2642         (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
    2643 # endif /* !IEM_WITH_THROW_CATCH */
    2644 #endif  /* IEM_WITH_SETJMP */
    2645 
    2646 
    2647 /**
    2648  * Shared per-VM IEM data.
    2649  */
    2650 typedef struct IEM
    2651 {
    2652     /** The VMX APIC-access page handler type. */
    2653     PGMPHYSHANDLERTYPE      hVmxApicAccessPage;
    2654 #ifndef VBOX_WITHOUT_CPUID_HOST_CALL
    2655     /** Set if the CPUID host call functionality is enabled.   */
    2656     bool                    fCpuIdHostCall;
    2657 #endif
    2658 } IEM;
    2659 
    2660 
    2661 
    2662 /** @name IEM_ACCESS_XXX - Access details.
    2663  * @{ */
    2664 #define IEM_ACCESS_INVALID              UINT32_C(0x000000ff)
    2665 #define IEM_ACCESS_TYPE_READ            UINT32_C(0x00000001)
    2666 #define IEM_ACCESS_TYPE_WRITE           UINT32_C(0x00000002)
    2667 #define IEM_ACCESS_TYPE_EXEC            UINT32_C(0x00000004)
    2668 #define IEM_ACCESS_TYPE_MASK            UINT32_C(0x00000007)
    2669 #define IEM_ACCESS_WHAT_CODE            UINT32_C(0x00000010)
    2670 #define IEM_ACCESS_WHAT_DATA            UINT32_C(0x00000020)
    2671 #define IEM_ACCESS_WHAT_STACK           UINT32_C(0x00000030)
    2672 #define IEM_ACCESS_WHAT_SYS             UINT32_C(0x00000040)
    2673 #define IEM_ACCESS_WHAT_MASK            UINT32_C(0x00000070)
    2674 /** The writes are partial, so initialize the bounce buffer with the
    2675  * original RAM content. */
    2676 #define IEM_ACCESS_PARTIAL_WRITE        UINT32_C(0x00000100)
    2677 /** Used in aMemMappings to indicate that the entry is bounce buffered. */
    2678 #define IEM_ACCESS_BOUNCE_BUFFERED      UINT32_C(0x00000200)
    2679 /** Bounce buffer with ring-3 write pending, first page. */
    2680 #define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
    2681 /** Bounce buffer with ring-3 write pending, second page. */
    2682 #define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
    2683 /** Not locked, accessed via the TLB. */
    2684 #define IEM_ACCESS_NOT_LOCKED           UINT32_C(0x00001000)
    2685 /** Atomic access.
    2686  * This enables special alignment checks and the VINF_EM_EMULATE_SPLIT_LOCK
    2687  * fallback for misaligned stuff. See @bugref{10547}. */
    2688 #define IEM_ACCESS_ATOMIC               UINT32_C(0x00002000)
    2689 /** Valid bit mask. */
    2690 #define IEM_ACCESS_VALID_MASK           UINT32_C(0x00003fff)
    2691 /** Shift count for the TLB flags (upper word). */
    2692 #define IEM_ACCESS_SHIFT_TLB_FLAGS      16
    2693 
    2694 /** Atomic read+write data alias. */
    2695 #define IEM_ACCESS_DATA_ATOMIC          (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA | IEM_ACCESS_ATOMIC)
    2696 /** Read+write data alias. */
    2697 #define IEM_ACCESS_DATA_RW              (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
    2698 /** Write data alias. */
    2699 #define IEM_ACCESS_DATA_W               (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
    2700 /** Read data alias. */
    2701 #define IEM_ACCESS_DATA_R               (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_WHAT_DATA)
    2702 /** Instruction fetch alias. */
    2703 #define IEM_ACCESS_INSTRUCTION          (IEM_ACCESS_TYPE_EXEC  | IEM_ACCESS_WHAT_CODE)
    2704 /** Stack write alias. */
    2705 #define IEM_ACCESS_STACK_W              (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
    2706 /** Stack read alias. */
    2707 #define IEM_ACCESS_STACK_R              (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_WHAT_STACK)
    2708 /** Stack read+write alias. */
    2709 #define IEM_ACCESS_STACK_RW             (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
    2710 /** Read system table alias. */
    2711 #define IEM_ACCESS_SYS_R                (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_WHAT_SYS)
    2712 /** Read+write system table alias. */
    2713 #define IEM_ACCESS_SYS_RW               (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
    2714 /** @} */
    271544
    271645/** @name Prefix constants (IEMCPU::fPrefixes)
     
    2995324
    2996325
    2997 /** @def IEM_DECL_MSC_GUARD_IGNORE
    2998  * Disables control flow guard checks inside a method and any function pointers
    2999  * referenced by it. */
    3000 #if defined(_MSC_VER) && !defined(IN_RING0)
    3001 # define IEM_DECL_MSC_GUARD_IGNORE  __declspec(guard(ignore))
    3002 #else
    3003 # define IEM_DECL_MSC_GUARD_IGNORE
    3004 #endif
    3005 
    3006 /** @def IEM_DECL_MSC_GUARD_NONE
    3007  * Disables control flow guard checks inside a method but continues to track
    3008  * function pointer references by it. */
    3009 #if defined(_MSC_VER) && !defined(IN_RING0)
    3010 # define IEM_DECL_MSC_GUARD_NONE    __declspec(guard(nocf))
    3011 #else
    3012 # define IEM_DECL_MSC_GUARD_NONE
    3013 #endif
    3014 
    3015 
    3016 /** @def IEM_DECL_IMPL_TYPE
    3017  * For typedef'ing an instruction implementation function.
    3018  *
    3019  * @param   a_RetType           The return type.
    3020  * @param   a_Name              The name of the type.
    3021  * @param   a_ArgList           The argument list enclosed in parentheses.
    3022  */
    3023 
    3024 /** @def IEM_DECL_IMPL_DEF
    3025  * For defining an instruction implementation function.
    3026  *
    3027  * @param   a_RetType           The return type.
    3028  * @param   a_Name              The name of the type.
    3029  * @param   a_ArgList           The argument list enclosed in parentheses.
    3030  */
    3031 
    3032 #if defined(__GNUC__) && defined(RT_ARCH_X86)
    3033 # define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
    3034     __attribute__((__fastcall__)) a_RetType (a_Name) a_ArgList
    3035 # define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
    3036     __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
    3037 # define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
    3038     __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
    3039 
    3040 #elif defined(_MSC_VER) && defined(RT_ARCH_X86)
    3041 # define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
    3042     a_RetType (__fastcall a_Name) a_ArgList
    3043 # define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
    3044     IEM_DECL_MSC_GUARD_IGNORE a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
    3045 # define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
    3046     IEM_DECL_MSC_GUARD_IGNORE a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
    3047 
    3048 #elif __cplusplus >= 201700 /* P0012R1 support */
    3049 # define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
    3050     a_RetType (VBOXCALL a_Name) a_ArgList RT_NOEXCEPT
    3051 # define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
    3052     IEM_DECL_MSC_GUARD_IGNORE DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
    3053 # define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
    3054     IEM_DECL_MSC_GUARD_IGNORE DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
    3055 
    3056 #else
    3057 # define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
    3058     a_RetType (VBOXCALL a_Name) a_ArgList
    3059 # define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
    3060     IEM_DECL_MSC_GUARD_IGNORE DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
    3061 # define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
    3062     IEM_DECL_MSC_GUARD_IGNORE DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
    3063 
    3064 #endif
    3065 
    3066326/** Defined in IEMAllAImplC.cpp but also used by IEMAllAImplA.asm. */
    3067327RT_C_DECLS_BEGIN
     
    3493753/** @} */
    3494754
     755
     756/**
     757 * A FPU result.
     758 * @note x86 specific
     759 */
     760typedef struct IEMFPURESULT
     761{
     762    /** The output value. */
     763    RTFLOAT80U      r80Result;
     764    /** The output status. */
     765    uint16_t        FSW;
     766} IEMFPURESULT;
     767AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
     768/** Pointer to a FPU result. */
     769typedef IEMFPURESULT *PIEMFPURESULT;
     770/** Pointer to a const FPU result. */
     771typedef IEMFPURESULT const *PCIEMFPURESULT;
     772
    3495773/** @name FPU operations taking a 32-bit float argument
    3496774 * @{ */
     
    3594872FNIEMAIMPLFPUR80LDCONST     iemAImpl_fldln2;
    3595873FNIEMAIMPLFPUR80LDCONST     iemAImpl_fldz;
     874
     875/**
     876 * A FPU result consisting of two output values and FSW.
     877 * @note x86 specific
     878 */
     879typedef struct IEMFPURESULTTWO
     880{
     881    /** The first output value. */
     882    RTFLOAT80U      r80Result1;
     883    /** The output status. */
     884    uint16_t        FSW;
     885    /** The second output value. */
     886    RTFLOAT80U      r80Result2;
     887} IEMFPURESULTTWO;
     888AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
     889AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
     890/** Pointer to a FPU result consisting of two output values and FSW. */
     891typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
     892/** Pointer to a const FPU result consisting of two output values and FSW. */
     893typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;
    3596894
    3597895typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYTWO,(PCX86FXSTATE pFpuState, PIEMFPURESULTTWO pFpuResTwo,
     
    50802378/** @} */
    50812379
    5082 /** @name C instruction implementations for anything slightly complicated.
    5083  * @{ */
    5084 
    5085 /**
    5086  * For typedef'ing or declaring a C instruction implementation function taking
    5087  * no extra arguments.
    5088  *
    5089  * @param   a_Name              The name of the type.
    5090  */
    5091 # define IEM_CIMPL_DECL_TYPE_0(a_Name) \
    5092     IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
    5093 /**
    5094  * For defining a C instruction implementation function taking no extra
    5095  * arguments.
    5096  *
    5097  * @param   a_Name              The name of the function
    5098  */
    5099 # define IEM_CIMPL_DEF_0(a_Name) \
    5100     IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
    5101 /**
    5102  * Prototype version of IEM_CIMPL_DEF_0.
    5103  */
    5104 # define IEM_CIMPL_PROTO_0(a_Name) \
    5105     IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
    5106 /**
    5107  * For calling a C instruction implementation function taking no extra
    5108  * arguments.
    5109  *
    5110  * This special call macro adds default arguments to the call and allows us to
    5111  * change these later.
    5112  *
    5113  * @param   a_fn                The name of the function.
    5114  */
    5115 # define IEM_CIMPL_CALL_0(a_fn)            a_fn(pVCpu, cbInstr)
    5116 
    5117 /** Type for a C instruction implementation function taking no extra
    5118  *  arguments. */
    5119 typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
    5120 /** Function pointer type for a C instruction implementation function taking
    5121  *  no extra arguments. */
    5122 typedef FNIEMCIMPL0 *PFNIEMCIMPL0;
    5123 
    5124 /**
    5125  * For typedef'ing or declaring a C instruction implementation function taking
    5126  * one extra argument.
    5127  *
    5128  * @param   a_Name              The name of the type.
    5129  * @param   a_Type0             The argument type.
    5130  * @param   a_Arg0              The argument name.
    5131  */
    5132 # define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
    5133     IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
    5134 /**
    5135  * For defining a C instruction implementation function taking one extra
    5136  * argument.
    5137  *
    5138  * @param   a_Name              The name of the function
    5139  * @param   a_Type0             The argument type.
    5140  * @param   a_Arg0              The argument name.
    5141  */
    5142 # define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
    5143     IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
    5144 /**
    5145  * Prototype version of IEM_CIMPL_DEF_1.
    5146  */
    5147 # define IEM_CIMPL_PROTO_1(a_Name, a_Type0, a_Arg0) \
    5148     IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
    5149 /**
    5150  * For calling a C instruction implementation function taking one extra
    5151  * argument.
    5152  *
    5153  * This special call macro adds default arguments to the call and allows us to
    5154  * change these later.
    5155  *
    5156  * @param   a_fn                The name of the function.
    5157  * @param   a0                  The name of the 1st argument.
    5158  */
    5159 # define IEM_CIMPL_CALL_1(a_fn, a0)        a_fn(pVCpu, cbInstr, (a0))
    5160 
    5161 /**
    5162  * For typedef'ing or declaring a C instruction implementation function taking
    5163  * two extra arguments.
    5164  *
    5165  * @param   a_Name              The name of the type.
    5166  * @param   a_Type0             The type of the 1st argument
    5167  * @param   a_Arg0              The name of the 1st argument.
    5168  * @param   a_Type1             The type of the 2nd argument.
    5169  * @param   a_Arg1              The name of the 2nd argument.
    5170  */
    5171 # define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
    5172     IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
    5173 /**
    5174  * For defining a C instruction implementation function taking two extra
    5175  * arguments.
    5176  *
    5177  * @param   a_Name              The name of the function.
    5178  * @param   a_Type0             The type of the 1st argument
    5179  * @param   a_Arg0              The name of the 1st argument.
    5180  * @param   a_Type1             The type of the 2nd argument.
    5181  * @param   a_Arg1              The name of the 2nd argument.
    5182  */
    5183 # define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
    5184     IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
    5185 /**
    5186  * Prototype version of IEM_CIMPL_DEF_2.
    5187  */
    5188 # define IEM_CIMPL_PROTO_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
    5189     IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
    5190 /**
    5191  * For calling a C instruction implementation function taking two extra
    5192  * arguments.
    5193  *
    5194  * This special call macro adds default arguments to the call and allows us to
    5195  * change these later.
    5196  *
    5197  * @param   a_fn                The name of the function.
    5198  * @param   a0                  The name of the 1st argument.
    5199  * @param   a1                  The name of the 2nd argument.
    5200  */
    5201 # define IEM_CIMPL_CALL_2(a_fn, a0, a1)    a_fn(pVCpu, cbInstr, (a0), (a1))
    5202 
    5203 /**
    5204  * For typedef'ing or declaring a C instruction implementation function taking
    5205  * three extra arguments.
    5206  *
    5207  * @param   a_Name              The name of the type.
    5208  * @param   a_Type0             The type of the 1st argument
    5209  * @param   a_Arg0              The name of the 1st argument.
    5210  * @param   a_Type1             The type of the 2nd argument.
    5211  * @param   a_Arg1              The name of the 2nd argument.
    5212  * @param   a_Type2             The type of the 3rd argument.
    5213  * @param   a_Arg2              The name of the 3rd argument.
    5214  */
    5215 # define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
    5216     IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
    5217 /**
    5218  * For defining a C instruction implementation function taking three extra
    5219  * arguments.
    5220  *
    5221  * @param   a_Name              The name of the function.
    5222  * @param   a_Type0             The type of the 1st argument
    5223  * @param   a_Arg0              The name of the 1st argument.
    5224  * @param   a_Type1             The type of the 2nd argument.
    5225  * @param   a_Arg1              The name of the 2nd argument.
    5226  * @param   a_Type2             The type of the 3rd argument.
    5227  * @param   a_Arg2              The name of the 3rd argument.
    5228  */
    5229 # define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
    5230     IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
    5231 /**
    5232  * Prototype version of IEM_CIMPL_DEF_3.
    5233  */
    5234 # define IEM_CIMPL_PROTO_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
    5235     IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
    5236 /**
    5237  * For calling a C instruction implementation function taking three extra
    5238  * arguments.
    5239  *
    5240  * This special call macro adds default arguments to the call and allows us to
    5241  * change these later.
    5242  *
    5243  * @param   a_fn                The name of the function.
    5244  * @param   a0                  The name of the 1st argument.
    5245  * @param   a1                  The name of the 2nd argument.
    5246  * @param   a2                  The name of the 3rd argument.
    5247  */
    5248 # define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
    5249 
    5250 
    5251 /**
    5252  * For typedef'ing or declaring a C instruction implementation function taking
    5253  * four extra arguments.
    5254  *
    5255  * @param   a_Name              The name of the type.
    5256  * @param   a_Type0             The type of the 1st argument
    5257  * @param   a_Arg0              The name of the 1st argument.
    5258  * @param   a_Type1             The type of the 2nd argument.
    5259  * @param   a_Arg1              The name of the 2nd argument.
    5260  * @param   a_Type2             The type of the 3rd argument.
    5261  * @param   a_Arg2              The name of the 3rd argument.
    5262  * @param   a_Type3             The type of the 4th argument.
    5263  * @param   a_Arg3              The name of the 4th argument.
    5264  */
    5265 # define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
    5266     IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
    5267 /**
    5268  * For defining a C instruction implementation function taking four extra
    5269  * arguments.
    5270  *
    5271  * @param   a_Name              The name of the function.
    5272  * @param   a_Type0             The type of the 1st argument
    5273  * @param   a_Arg0              The name of the 1st argument.
    5274  * @param   a_Type1             The type of the 2nd argument.
    5275  * @param   a_Arg1              The name of the 2nd argument.
    5276  * @param   a_Type2             The type of the 3rd argument.
    5277  * @param   a_Arg2              The name of the 3rd argument.
    5278  * @param   a_Type3             The type of the 4th argument.
    5279  * @param   a_Arg3              The name of the 4th argument.
    5280  */
    5281 # define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
    5282     IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
    5283                                              a_Type2 a_Arg2, a_Type3 a_Arg3))
    5284 /**
    5285  * Prototype version of IEM_CIMPL_DEF_4.
    5286  */
    5287 # define IEM_CIMPL_PROTO_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
    5288     IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
    5289                                                a_Type2 a_Arg2, a_Type3 a_Arg3))
    5290 /**
    5291  * For calling a C instruction implementation function taking four extra
    5292  * arguments.
    5293  *
    5294  * This special call macro adds default arguments to the call and allows us to
    5295  * change these later.
    5296  *
    5297  * @param   a_fn                The name of the function.
    5298  * @param   a0                  The name of the 1st argument.
    5299  * @param   a1                  The name of the 2nd argument.
    5300  * @param   a2                  The name of the 3rd argument.
    5301  * @param   a3                  The name of the 4th argument.
    5302  */
    5303 # define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
    5304 
    5305 
    5306 /**
    5307  * For typedef'ing or declaring a C instruction implementation function taking
    5308  * five extra arguments.
    5309  *
    5310  * @param   a_Name              The name of the type.
    5311  * @param   a_Type0             The type of the 1st argument
    5312  * @param   a_Arg0              The name of the 1st argument.
    5313  * @param   a_Type1             The type of the 2nd argument.
    5314  * @param   a_Arg1              The name of the 2nd argument.
    5315  * @param   a_Type2             The type of the 3rd argument.
    5316  * @param   a_Arg2              The name of the 3rd argument.
    5317  * @param   a_Type3             The type of the 4th argument.
    5318  * @param   a_Arg3              The name of the 4th argument.
    5319  * @param   a_Type4             The type of the 5th argument.
    5320  * @param   a_Arg4              The name of the 5th argument.
    5321  */
    5322 # define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
    5323     IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, \
    5324                                                a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
    5325                                                a_Type3 a_Arg3, a_Type4 a_Arg4))
    5326 /**
    5327  * For defining a C instruction implementation function taking five extra
    5328  * arguments.
    5329  *
    5330  * @param   a_Name              The name of the function.
    5331  * @param   a_Type0             The type of the 1st argument
    5332  * @param   a_Arg0              The name of the 1st argument.
    5333  * @param   a_Type1             The type of the 2nd argument.
    5334  * @param   a_Arg1              The name of the 2nd argument.
    5335  * @param   a_Type2             The type of the 3rd argument.
    5336  * @param   a_Arg2              The name of the 3rd argument.
    5337  * @param   a_Type3             The type of the 4th argument.
    5338  * @param   a_Arg3              The name of the 4th argument.
    5339  * @param   a_Type4             The type of the 5th argument.
    5340  * @param   a_Arg4              The name of the 5th argument.
    5341  */
    5342 # define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
    5343     IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
    5344                                              a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
    5345 /**
    5346  * Prototype version of IEM_CIMPL_DEF_5.
    5347  */
    5348 # define IEM_CIMPL_PROTO_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
    5349     IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
    5350                                                a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
    5351 /**
    5352  * For calling a C instruction implementation function taking five extra
    5353  * arguments.
    5354  *
    5355  * This special call macro adds default arguments to the call and allows us to
    5356  * change these later.
    5357  *
    5358  * @param   a_fn                The name of the function.
    5359  * @param   a0                  The name of the 1st argument.
    5360  * @param   a1                  The name of the 2nd argument.
    5361  * @param   a2                  The name of the 3rd argument.
    5362  * @param   a3                  The name of the 4th argument.
    5363  * @param   a4                  The name of the 5th argument.
    5364  */
    5365 # define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))
    5366 
    5367 /** @}  */
    5368 
    5369 
    5370 /** @name Opcode Decoder Function Types.
    5371  * @{ */
    5372 
    5373 /** @typedef PFNIEMOP
    5374  * Pointer to an opcode decoder function.
    5375  */
    5376 
    5377 /** @def FNIEMOP_DEF
    5378  * Define an opcode decoder function.
    5379  *
    5380  * We're using macros for this so that adding and removing parameters as well as
    5381  * tweaking compiler specific attributes becomes easier.  See FNIEMOP_CALL
    5382  *
    5383  * @param   a_Name      The function name.
    5384  */
    5385 
    5386 /** @typedef PFNIEMOPRM
    5387  * Pointer to an opcode decoder function with RM byte.
    5388  */
    5389 
    5390 /** @def FNIEMOPRM_DEF
    5391  * Define an opcode decoder function with RM byte.
    5392  *
    5393  * We're using macros for this so that adding and removing parameters as well as
    5394  * tweaking compiler specific attributes becomes easier.  See FNIEMOP_CALL_1
    5395  *
    5396  * @param   a_Name      The function name.
    5397  */
    5398 
    5399 #if defined(__GNUC__) && defined(RT_ARCH_X86)
    5400 typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
    5401 typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
    5402 # define FNIEMOP_DEF(a_Name) \
    5403     IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
    5404 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    5405     IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
    5406 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    5407     IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
    5408 
    5409 #elif defined(_MSC_VER) && defined(RT_ARCH_X86)
    5410 typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
    5411 typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
    5412 # define FNIEMOP_DEF(a_Name) \
    5413     IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    5414 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    5415     IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
    5416 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    5417     IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
    5418 
    5419 #elif defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
    5420 typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
    5421 typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
    5422 # define FNIEMOP_DEF(a_Name) \
    5423     IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
    5424 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    5425     IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
    5426 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    5427     IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
    5428 
    5429 #else
    5430 typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
    5431 typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
    5432 # define FNIEMOP_DEF(a_Name) \
    5433     IEM_STATIC IEM_DECL_MSC_GUARD_IGNORE VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    5434 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    5435     IEM_STATIC IEM_DECL_MSC_GUARD_IGNORE VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
    5436 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    5437     IEM_STATIC IEM_DECL_MSC_GUARD_IGNORE VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
    5438 
    5439 #endif
    5440 #define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
    5441 
    5442 /**
    5443  * Call an opcode decoder function.
    5444  *
    5445  * We're using macros for this so that adding and removing parameters can be
    5446  * done as we please.  See FNIEMOP_DEF.
    5447  */
    5448 #define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
    5449 
    5450 /**
    5451  * Call a common opcode decoder function taking one extra argument.
    5452  *
    5453  * We're using macros for this so that adding and removing parameters can be
    5454  * done as we please.  See FNIEMOP_DEF_1.
    5455  */
    5456 #define FNIEMOP_CALL_1(a_pfn, a0)           (a_pfn)(pVCpu, a0)
    5457 
    5458 /**
    5459  * Call a common opcode decoder function taking two extra arguments.
    5460  *
    5461  * We're using macros for this so that adding and removing parameters can be
    5462  * done as we please.  See FNIEMOP_DEF_2.
    5463  */
    5464 #define FNIEMOP_CALL_2(a_pfn, a0, a1)       (a_pfn)(pVCpu, a0, a1)
    5465 /** @} */
    5466 
    54672380
    54682381/** @name Misc Helpers
    54692382 * @{  */
    5470 
    5471 /** Used to shut up GCC warnings about variables that 'may be used uninitialized'
    5472  * due to GCC lacking knowledge about the value range of a switch. */
    5473 #if RT_CPLUSPLUS_PREREQ(202000)
    5474 # define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: [[unlikely]] AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
    5475 #else
    5476 # define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
    5477 #endif
    5478 
    5479 /** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
    5480 #if RT_CPLUSPLUS_PREREQ(202000)
    5481 # define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: [[unlikely]] AssertFailedReturn(a_RetValue)
    5482 #else
    5483 # define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
    5484 #endif
    5485 
    5486 /**
    5487  * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
    5488  * occasion.
    5489  */
    5490 #ifdef LOG_ENABLED
    5491 # define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
    5492     do { \
    5493         /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
    5494         return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
    5495     } while (0)
    5496 #else
    5497 # define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
    5498     return VERR_IEM_ASPECT_NOT_IMPLEMENTED
    5499 #endif
    5500 
    5501 /**
    5502  * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
    5503  * occasion using the supplied logger statement.
    5504  *
    5505  * @param   a_LoggerArgs    What to log on failure.
    5506  */
    5507 #ifdef LOG_ENABLED
    5508 # define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
    5509     do { \
    5510         LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
    5511         /*LogFunc(a_LoggerArgs);*/ \
    5512         return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
    5513     } while (0)
    5514 #else
    5515 # define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
    5516     return VERR_IEM_ASPECT_NOT_IMPLEMENTED
    5517 #endif
    55182383
    55192384/**
     
    59572822
    59582823/** @} */
    5959 
    5960 uint32_t                iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu);
    5961 VBOXSTRICTRC            iemExecInjectPendingTrap(PVMCPUCC pVCpu);
    59622824
    59632825
     
    61513013VBOXSTRICTRC    iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
    61523014                          uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
    6153 VBOXSTRICTRC    iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    61543015#ifndef IN_RING3
    61553016VBOXSTRICTRC    iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    61563017#endif
    6157 void            iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    6158 void            iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
    61593018VBOXSTRICTRC    iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
    61603019VBOXSTRICTRC    iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
     
    63113170PRTUINT128U     iemMemMapDataU128WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    63123171PCRTUINT128U    iemMemMapDataU128RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6313 
    6314 void            iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    6315 void            iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    6316 void            iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    6317 void            iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    6318 void            iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    6319 void            iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    63203172#endif
    63213173
     
    63783230 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/'
    63793231 * @{ */
     3232
     3233/**
     3234 * INT instruction types - iemCImpl_int().
     3235 * @note x86 specific
     3236 */
     3237typedef enum IEMINT
     3238{
     3239    /** INT n instruction (opcode 0xcd imm). */
     3240    IEMINT_INTN  = 0,
     3241    /** Single byte INT3 instruction (opcode 0xcc). */
     3242    IEMINT_INT3  = IEM_XCPT_FLAGS_BP_INSTR,
     3243    /** Single byte INTO instruction (opcode 0xce). */
     3244    IEMINT_INTO  = IEM_XCPT_FLAGS_OF_INSTR,
     3245    /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
     3246    IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
     3247} IEMINT;
     3248AssertCompileSize(IEMINT, 4);
     3249
    63803250IEM_CIMPL_PROTO_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
    63813251IEM_CIMPL_PROTO_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
     
    65043374IEM_CIMPL_PROTO_2(iemCImpl_vpgather_worker_xx, uint32_t, u32PackedArgs, uint32_t, u32Disp);
    65053375
     3376/** Packed 32-bit argument for iemCImpl_vpgather_worker_xx. */
     3377typedef union IEMGATHERARGS
     3378{
     3379    /** Integer view. */
     3380    uint32_t u;
     3381    /** Bitfield view. */
     3382    struct
     3383    {
     3384        uint32_t iYRegDst       : 4; /**<  0 - XMM or YMM register number (destination) */
     3385        uint32_t iYRegIdc       : 4; /**<  4 - XMM or YMM register number (indices)     */
     3386        uint32_t iYRegMsk       : 4; /**<  8 - XMM or YMM register number (mask)        */
     3387        uint32_t iGRegBase      : 4; /**< 12 - general register number    (base ptr)    */
     3388        uint32_t iScale         : 2; /**< 16 - scale factor               (1/2/4/8)     */
     3389        uint32_t enmEffOpSize   : 2; /**< 18 - operand size               (16/32/64/--) */
     3390        uint32_t enmEffAddrMode : 2; /**< 20 - addressing  mode           (16/32/64/--) */
     3391        uint32_t iEffSeg        : 3; /**< 22 - effective segment (ES/CS/SS/DS/FS/GS)    */
     3392        uint32_t fVex256        : 1; /**< 25 - overall instruction width (128/256 bits) */
     3393        uint32_t fIdxQword      : 1; /**< 26 - individual index width     (4/8 bytes)   */
     3394        uint32_t fValQword      : 1; /**< 27 - individual value width     (4/8 bytes)   */
     3395    } s;
     3396} IEMGATHERARGS;
     3397AssertCompileSize(IEMGATHERARGS, sizeof(uint32_t));
     3398
    65063399/** @} */
    65073400
     
    67333626extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap3[1024];
    67343627
    6735 DECLHIDDEN(int)     iemPollTimers(PVMCC pVM, PVMCPUCC pVCpu) RT_NOEXCEPT;
    6736 
    6737 DECLCALLBACK(int)   iemTbInit(PVMCC pVM, uint32_t cInitialTbs, uint32_t cMaxTbs,
    6738                               uint64_t cbInitialExec, uint64_t cbMaxExec, uint32_t cbChunkExec);
    6739 void                iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb, bool fSafeToFree);
    6740 DECLHIDDEN(void)    iemTbAllocatorFree(PVMCPUCC pVCpu, PIEMTB pTb);
    6741 void                iemTbAllocatorProcessDelayedFrees(PVMCPUCC pVCpu, PIEMTBALLOCATOR pTbAllocator);
    6742 void                iemTbAllocatorFreeupNativeSpace(PVMCPUCC pVCpu, uint32_t cNeededInstrs);
    6743 DECLHIDDEN(PIEMTBALLOCATOR) iemTbAllocatorFreeBulkStart(PVMCPUCC pVCpu);
    6744 DECLHIDDEN(void)    iemTbAllocatorFreeBulk(PVMCPUCC pVCpu, PIEMTBALLOCATOR pTbAllocator, PIEMTB pTb);
    6745 DECLHIDDEN(const char *) iemTbFlagsToString(uint32_t fFlags, char *pszBuf, size_t cbBuf) RT_NOEXCEPT;
    6746 DECLHIDDEN(void)    iemThreadedDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT;
    6747 #if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER) && defined(VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING)
    6748 DECLHIDDEN(void)    iemThreadedSaveTbForProfilingCleanup(PVMCPU pVCpu);
    6749 #endif
    6750 
    6751 
    6752 /** @todo FNIEMTHREADEDFUNC and friends may need more work... */
    6753 #if defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
    6754 typedef VBOXSTRICTRC /*__attribute__((__nothrow__))*/ FNIEMTHREADEDFUNC(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
    6755 typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
    6756 # define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
    6757     VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
    6758 # define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
    6759     VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
    6760 
    6761 #else
    6762 typedef VBOXSTRICTRC (FNIEMTHREADEDFUNC)(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
    6763 typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
    6764 # define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
    6765     IEM_DECL_MSC_GUARD_IGNORE VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
    6766 # define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
    6767     IEM_DECL_MSC_GUARD_IGNORE VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
    6768 #endif
    6769 
    67703628
    67713629IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_Nop);
     
    68153673#endif
    68163674
    6817 /* Native recompiler public bits: */
    6818 
    6819 DECLHIDDEN(PIEMTB)  iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT;
    6820 DECLHIDDEN(void)    iemNativeDisassembleTb(PVMCPU pVCpu, PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT;
    6821 int                 iemExecMemAllocatorInit(PVMCPU pVCpu, uint64_t cbMax, uint64_t cbInitial, uint32_t cbChunk) RT_NOEXCEPT;
    6822 DECLHIDDEN(PIEMNATIVEINSTR) iemExecMemAllocatorAlloc(PVMCPU pVCpu, uint32_t cbReq, PIEMTB pTb, PIEMNATIVEINSTR *ppaExec,
    6823                                                      struct IEMNATIVEPERCHUNKCTX const **ppChunkCtx) RT_NOEXCEPT;
    6824 DECLHIDDEN(PIEMNATIVEINSTR) iemExecMemAllocatorAllocFromChunk(PVMCPU pVCpu, uint32_t idxChunk, uint32_t cbReq,
    6825                                                               PIEMNATIVEINSTR *ppaExec);
    6826 DECLHIDDEN(void)    iemExecMemAllocatorReadyForUse(PVMCPUCC pVCpu, void *pv, size_t cb) RT_NOEXCEPT;
    6827 void                iemExecMemAllocatorFree(PVMCPU pVCpu, void *pv, size_t cb) RT_NOEXCEPT;
    6828 DECLASM(DECL_NO_RETURN(void)) iemNativeTbLongJmp(void *pvFramePointer, int rc) RT_NOEXCEPT;
    6829 DECLHIDDEN(struct IEMNATIVEPERCHUNKCTX const *) iemExecMemGetTbChunkCtx(PVMCPU pVCpu, PCIEMTB pTb);
    6830 DECLHIDDEN(int) iemNativeRecompileAttachExecMemChunkCtx(PVMCPU pVCpu, uint32_t idxChunk, struct IEMNATIVEPERCHUNKCTX const **ppCtx);
    6831 
    6832 /** Packed 32-bit argument for iemCImpl_vpgather_worker_xx. */
    6833 typedef union IEMGATHERARGS
    6834 {
    6835     /** Integer view. */
    6836     uint32_t u;
    6837     /** Bitfield view. */
    6838     struct
    6839     {
    6840         uint32_t iYRegDst       : 4; /**<  0 - XMM or YMM register number (destination) */
    6841         uint32_t iYRegIdc       : 4; /**<  4 - XMM or YMM register number (indices)     */
    6842         uint32_t iYRegMsk       : 4; /**<  8 - XMM or YMM register number (mask)        */
    6843         uint32_t iGRegBase      : 4; /**< 12 - general register number    (base ptr)    */
    6844         uint32_t iScale         : 2; /**< 16 - scale factor               (1/2/4/8)     */
    6845         uint32_t enmEffOpSize   : 2; /**< 18 - operand size               (16/32/64/--) */
    6846         uint32_t enmEffAddrMode : 2; /**< 20 - addressing  mode           (16/32/64/--) */
    6847         uint32_t iEffSeg        : 3; /**< 22 - effective segment (ES/CS/SS/DS/FS/GS)    */
    6848         uint32_t fVex256        : 1; /**< 25 - overall instruction width (128/256 bits) */
    6849         uint32_t fIdxQword      : 1; /**< 26 - individual index width     (4/8 bytes)   */
    6850         uint32_t fValQword      : 1; /**< 27 - individual value width     (4/8 bytes)   */
    6851     } s;
    6852 } IEMGATHERARGS;
    6853 AssertCompileSize(IEMGATHERARGS, sizeof(uint32_t));
    6854 
    6855 #endif /* !RT_IN_ASSEMBLER - ASM-NOINC-END */
    6856 
    68573675
    68583676/** @} */
     
    68603678RT_C_DECLS_END
    68613679
    6862 /* ASM-INC: %include "IEMInternalStruct.mac" */
    6863 
    6864 #endif /* !VMM_INCLUDED_SRC_include_IEMInternal_h */
    6865 
     3680
     3681#endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInternal_x86_h */
     3682
  • trunk/src/VBox/VMM/VMMR0/IEMR0.cpp

    r106061 r108195  
    3232#define LOG_GROUP   LOG_GROUP_IEM
    3333#define VMCPU_INCL_CPUM_GST_CTX
     34#ifdef IN_RING0
     35# define VBOX_VMM_TARGET_X86
     36#endif
    3437#include <VBox/vmm/iem.h>
    3538#include <VBox/vmm/cpum.h>
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r108188 r108195  
    372372#define IEM_TEMPL_ARG_3(a1, a2, a3)     <a1,a2,a3>
    373373/** @} */
    374 
    375 
    376 /**
    377  * Branch types - iemCImpl_BranchTaskSegment(), iemCImpl_BranchTaskGate(),
    378  * iemCImpl_BranchCallGate() and iemCImpl_BranchSysSel().
    379  * @note x86 specific
    380  */
    381 typedef enum IEMBRANCH
    382 {
    383     IEMBRANCH_JUMP = 1,
    384     IEMBRANCH_CALL,
    385     IEMBRANCH_TRAP,
    386     IEMBRANCH_SOFTWARE_INT,
    387     IEMBRANCH_HARDWARE_INT
    388 } IEMBRANCH;
    389 AssertCompileSize(IEMBRANCH, 4);
    390 
    391 
    392 /**
    393  * INT instruction types - iemCImpl_int().
    394  * @note x86 specific
    395  */
    396 typedef enum IEMINT
    397 {
    398     /** INT n instruction (opcode 0xcd imm). */
    399     IEMINT_INTN  = 0,
    400     /** Single byte INT3 instruction (opcode 0xcc). */
    401     IEMINT_INT3  = IEM_XCPT_FLAGS_BP_INSTR,
    402     /** Single byte INTO instruction (opcode 0xce). */
    403     IEMINT_INTO  = IEM_XCPT_FLAGS_OF_INSTR,
    404     /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
    405     IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
    406 } IEMINT;
    407 AssertCompileSize(IEMINT, 4);
    408 
    409 
    410 /**
    411  * A FPU result.
    412  * @note x86 specific
    413  */
    414 typedef struct IEMFPURESULT
    415 {
    416     /** The output value. */
    417     RTFLOAT80U      r80Result;
    418     /** The output status. */
    419     uint16_t        FSW;
    420 } IEMFPURESULT;
    421 AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
    422 /** Pointer to a FPU result. */
    423 typedef IEMFPURESULT *PIEMFPURESULT;
    424 /** Pointer to a const FPU result. */
    425 typedef IEMFPURESULT const *PCIEMFPURESULT;
    426 
    427 
    428 /**
    429  * A FPU result consisting of two output values and FSW.
    430  * @note x86 specific
    431  */
    432 typedef struct IEMFPURESULTTWO
    433 {
    434     /** The first output value. */
    435     RTFLOAT80U      r80Result1;
    436     /** The output status. */
    437     uint16_t        FSW;
    438     /** The second output value. */
    439     RTFLOAT80U      r80Result2;
    440 } IEMFPURESULTTWO;
    441 AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
    442 AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
    443 /** Pointer to a FPU result consisting of two output values and FSW. */
    444 typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
    445 /** Pointer to a const FPU result consisting of two output values and FSW. */
    446 typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;
    447374
    448375
     
    27142641/** @} */
    27152642
    2716 /** @name Prefix constants (IEMCPU::fPrefixes)
    2717  * @note x86 specific
    2718  * @{ */
    2719 #define IEM_OP_PRF_SEG_CS               RT_BIT_32(0)  /**< CS segment prefix (0x2e). */
    2720 #define IEM_OP_PRF_SEG_SS               RT_BIT_32(1)  /**< SS segment prefix (0x36). */
    2721 #define IEM_OP_PRF_SEG_DS               RT_BIT_32(2)  /**< DS segment prefix (0x3e). */
    2722 #define IEM_OP_PRF_SEG_ES               RT_BIT_32(3)  /**< ES segment prefix (0x26). */
    2723 #define IEM_OP_PRF_SEG_FS               RT_BIT_32(4)  /**< FS segment prefix (0x64). */
    2724 #define IEM_OP_PRF_SEG_GS               RT_BIT_32(5)  /**< GS segment prefix (0x65). */
    2725 #define IEM_OP_PRF_SEG_MASK             UINT32_C(0x3f)
    2726 
    2727 #define IEM_OP_PRF_SIZE_OP              RT_BIT_32(8)  /**< Operand size prefix (0x66). */
    2728 #define IEM_OP_PRF_SIZE_REX_W           RT_BIT_32(9)  /**< REX.W prefix (0x48-0x4f). */
    2729 #define IEM_OP_PRF_SIZE_ADDR            RT_BIT_32(10) /**< Address size prefix (0x67). */
    2730 
    2731 #define IEM_OP_PRF_LOCK                 RT_BIT_32(16) /**< Lock prefix (0xf0). */
    2732 #define IEM_OP_PRF_REPNZ                RT_BIT_32(17) /**< Repeat-not-zero prefix (0xf2). */
    2733 #define IEM_OP_PRF_REPZ                 RT_BIT_32(18) /**< Repeat-if-zero prefix (0xf3). */
    2734 
    2735 #define IEM_OP_PRF_REX                  RT_BIT_32(24) /**< Any REX prefix (0x40-0x4f). */
    2736 #define IEM_OP_PRF_REX_B                RT_BIT_32(25) /**< REX.B prefix (0x41,0x43,0x45,0x47,0x49,0x4b,0x4d,0x4f). */
    2737 #define IEM_OP_PRF_REX_X                RT_BIT_32(26) /**< REX.X prefix (0x42,0x43,0x46,0x47,0x4a,0x4b,0x4e,0x4f). */
    2738 #define IEM_OP_PRF_REX_R                RT_BIT_32(27) /**< REX.R prefix (0x44,0x45,0x46,0x47,0x4c,0x4d,0x4e,0x4f). */
    2739 /** Mask with all the REX prefix flags.
    2740  * This is generally for use when needing to undo the REX prefixes when they
    2741  * are followed legacy prefixes and therefore does not immediately preceed
    2742  * the first opcode byte.
    2743  * For testing whether any REX prefix is present, use  IEM_OP_PRF_REX instead. */
    2744 #define IEM_OP_PRF_REX_MASK  (IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W )
    2745 
    2746 #define IEM_OP_PRF_VEX                  RT_BIT_32(28) /**< Indicates VEX prefix. */
    2747 #define IEM_OP_PRF_EVEX                 RT_BIT_32(29) /**< Indicates EVEX prefix. */
    2748 #define IEM_OP_PRF_XOP                  RT_BIT_32(30) /**< Indicates XOP prefix. */
    2749 /** @} */
    2750 
    2751 /** @name IEMOPFORM_XXX - Opcode forms
    2752  * @note These are ORed together with IEMOPHINT_XXX.
    2753  * @note x86 specific
    2754  * @{ */
    2755 /** ModR/M: reg, r/m */
    2756 #define IEMOPFORM_RM            0
    2757 /** ModR/M: reg, r/m (register) */
    2758 #define IEMOPFORM_RM_REG        (IEMOPFORM_RM | IEMOPFORM_MOD3)
    2759 /** ModR/M: reg, r/m (memory)   */
    2760 #define IEMOPFORM_RM_MEM        (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
    2761 /** ModR/M: reg, r/m, imm */
    2762 #define IEMOPFORM_RMI           1
    2763 /** ModR/M: reg, r/m (register), imm */
    2764 #define IEMOPFORM_RMI_REG       (IEMOPFORM_RMI | IEMOPFORM_MOD3)
    2765 /** ModR/M: reg, r/m (memory), imm   */
    2766 #define IEMOPFORM_RMI_MEM       (IEMOPFORM_RMI | IEMOPFORM_NOT_MOD3)
    2767 /** ModR/M: reg, r/m, xmm0 */
    2768 #define IEMOPFORM_RM0           2
    2769 /** ModR/M: reg, r/m (register), xmm0 */
    2770 #define IEMOPFORM_RM0_REG       (IEMOPFORM_RM0 | IEMOPFORM_MOD3)
    2771 /** ModR/M: reg, r/m (memory), xmm0   */
    2772 #define IEMOPFORM_RM0_MEM       (IEMOPFORM_RM0 | IEMOPFORM_NOT_MOD3)
    2773 /** ModR/M: r/m, reg */
    2774 #define IEMOPFORM_MR            3
    2775 /** ModR/M: r/m (register), reg */
    2776 #define IEMOPFORM_MR_REG        (IEMOPFORM_MR | IEMOPFORM_MOD3)
    2777 /** ModR/M: r/m (memory), reg */
    2778 #define IEMOPFORM_MR_MEM        (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
    2779 /** ModR/M: r/m, reg, imm */
    2780 #define IEMOPFORM_MRI           4
    2781 /** ModR/M: r/m (register), reg, imm */
    2782 #define IEMOPFORM_MRI_REG       (IEMOPFORM_MRI | IEMOPFORM_MOD3)
    2783 /** ModR/M: r/m (memory), reg, imm */
    2784 #define IEMOPFORM_MRI_MEM       (IEMOPFORM_MRI | IEMOPFORM_NOT_MOD3)
    2785 /** ModR/M: r/m only */
    2786 #define IEMOPFORM_M             5
    2787 /** ModR/M: r/m only (register). */
    2788 #define IEMOPFORM_M_REG         (IEMOPFORM_M | IEMOPFORM_MOD3)
    2789 /** ModR/M: r/m only (memory). */
    2790 #define IEMOPFORM_M_MEM         (IEMOPFORM_M | IEMOPFORM_NOT_MOD3)
    2791 /** ModR/M: r/m, imm */
    2792 #define IEMOPFORM_MI            6
    2793 /** ModR/M: r/m (register), imm */
    2794 #define IEMOPFORM_MI_REG        (IEMOPFORM_MI | IEMOPFORM_MOD3)
    2795 /** ModR/M: r/m (memory), imm */
    2796 #define IEMOPFORM_MI_MEM        (IEMOPFORM_MI | IEMOPFORM_NOT_MOD3)
    2797 /** ModR/M: r/m, 1  (shift and rotate instructions) */
    2798 #define IEMOPFORM_M1            7
    2799 /** ModR/M: r/m (register), 1. */
    2800 #define IEMOPFORM_M1_REG        (IEMOPFORM_M1 | IEMOPFORM_MOD3)
    2801 /** ModR/M: r/m (memory), 1. */
    2802 #define IEMOPFORM_M1_MEM        (IEMOPFORM_M1 | IEMOPFORM_NOT_MOD3)
    2803 /** ModR/M: r/m, CL  (shift and rotate instructions)
    2804  * @todo This should just have been a generic fixed register. But the python
    2805  *       code needs more convincing. */
    2806 #define IEMOPFORM_M_CL          8
    2807 /** ModR/M: r/m (register), CL. */
    2808 #define IEMOPFORM_M_CL_REG      (IEMOPFORM_M_CL | IEMOPFORM_MOD3)
    2809 /** ModR/M: r/m (memory), CL. */
    2810 #define IEMOPFORM_M_CL_MEM      (IEMOPFORM_M_CL | IEMOPFORM_NOT_MOD3)
    2811 /** ModR/M: reg only */
    2812 #define IEMOPFORM_R             9
    2813 
    2814 /** VEX+ModR/M: reg, r/m */
    2815 #define IEMOPFORM_VEX_RM        16
    2816 /** VEX+ModR/M: reg, r/m (register) */
    2817 #define IEMOPFORM_VEX_RM_REG    (IEMOPFORM_VEX_RM | IEMOPFORM_MOD3)
    2818 /** VEX+ModR/M: reg, r/m (memory)   */
    2819 #define IEMOPFORM_VEX_RM_MEM    (IEMOPFORM_VEX_RM | IEMOPFORM_NOT_MOD3)
    2820 /** VEX+ModR/M: r/m, reg */
    2821 #define IEMOPFORM_VEX_MR        17
    2822 /** VEX+ModR/M: r/m (register), reg */
    2823 #define IEMOPFORM_VEX_MR_REG    (IEMOPFORM_VEX_MR | IEMOPFORM_MOD3)
    2824 /** VEX+ModR/M: r/m (memory), reg */
    2825 #define IEMOPFORM_VEX_MR_MEM    (IEMOPFORM_VEX_MR | IEMOPFORM_NOT_MOD3)
    2826 /** VEX+ModR/M: r/m, reg, imm8 */
    2827 #define IEMOPFORM_VEX_MRI       18
    2828 /** VEX+ModR/M: r/m (register), reg, imm8 */
    2829 #define IEMOPFORM_VEX_MRI_REG   (IEMOPFORM_VEX_MRI | IEMOPFORM_MOD3)
    2830 /** VEX+ModR/M: r/m (memory), reg, imm8 */
    2831 #define IEMOPFORM_VEX_MRI_MEM   (IEMOPFORM_VEX_MRI | IEMOPFORM_NOT_MOD3)
    2832 /** VEX+ModR/M: r/m only */
    2833 #define IEMOPFORM_VEX_M         19
    2834 /** VEX+ModR/M: r/m only (register). */
    2835 #define IEMOPFORM_VEX_M_REG     (IEMOPFORM_VEX_M | IEMOPFORM_MOD3)
    2836 /** VEX+ModR/M: r/m only (memory). */
    2837 #define IEMOPFORM_VEX_M_MEM     (IEMOPFORM_VEX_M | IEMOPFORM_NOT_MOD3)
    2838 /** VEX+ModR/M: reg only */
    2839 #define IEMOPFORM_VEX_R         20
    2840 /** VEX+ModR/M: reg, vvvv, r/m */
    2841 #define IEMOPFORM_VEX_RVM       21
    2842 /** VEX+ModR/M: reg, vvvv, r/m (register). */
    2843 #define IEMOPFORM_VEX_RVM_REG   (IEMOPFORM_VEX_RVM | IEMOPFORM_MOD3)
    2844 /** VEX+ModR/M: reg, vvvv, r/m (memory). */
    2845 #define IEMOPFORM_VEX_RVM_MEM   (IEMOPFORM_VEX_RVM | IEMOPFORM_NOT_MOD3)
    2846 /** VEX+ModR/M: reg, vvvv, r/m, imm */
    2847 #define IEMOPFORM_VEX_RVMI      22
    2848 /** VEX+ModR/M: reg, vvvv, r/m (register), imm. */
    2849 #define IEMOPFORM_VEX_RVMI_REG  (IEMOPFORM_VEX_RVMI | IEMOPFORM_MOD3)
    2850 /** VEX+ModR/M: reg, vvvv, r/m (memory), imm. */
    2851 #define IEMOPFORM_VEX_RVMI_MEM  (IEMOPFORM_VEX_RVMI | IEMOPFORM_NOT_MOD3)
    2852 /** VEX+ModR/M: reg, vvvv, r/m, imm(reg) */
    2853 #define IEMOPFORM_VEX_RVMR      23
    2854 /** VEX+ModR/M: reg, vvvv, r/m (register), imm(reg). */
    2855 #define IEMOPFORM_VEX_RVMR_REG  (IEMOPFORM_VEX_RVMI | IEMOPFORM_MOD3)
    2856 /** VEX+ModR/M: reg, vvvv, r/m (memory), imm(reg). */
    2857 #define IEMOPFORM_VEX_RVMR_MEM  (IEMOPFORM_VEX_RVMI | IEMOPFORM_NOT_MOD3)
    2858 /** VEX+ModR/M: reg, r/m, vvvv */
    2859 #define IEMOPFORM_VEX_RMV       24
    2860 /** VEX+ModR/M: reg, r/m, vvvv (register). */
    2861 #define IEMOPFORM_VEX_RMV_REG   (IEMOPFORM_VEX_RMV | IEMOPFORM_MOD3)
    2862 /** VEX+ModR/M: reg, r/m, vvvv (memory). */
    2863 #define IEMOPFORM_VEX_RMV_MEM   (IEMOPFORM_VEX_RMV | IEMOPFORM_NOT_MOD3)
    2864 /** VEX+ModR/M: reg, r/m, imm8 */
    2865 #define IEMOPFORM_VEX_RMI       25
    2866 /** VEX+ModR/M: reg, r/m, imm8 (register). */
    2867 #define IEMOPFORM_VEX_RMI_REG   (IEMOPFORM_VEX_RMI | IEMOPFORM_MOD3)
    2868 /** VEX+ModR/M: reg, r/m, imm8 (memory). */
    2869 #define IEMOPFORM_VEX_RMI_MEM   (IEMOPFORM_VEX_RMI | IEMOPFORM_NOT_MOD3)
    2870 /** VEX+ModR/M: r/m, vvvv, reg */
    2871 #define IEMOPFORM_VEX_MVR       26
    2872 /** VEX+ModR/M: r/m, vvvv, reg (register) */
    2873 #define IEMOPFORM_VEX_MVR_REG   (IEMOPFORM_VEX_MVR | IEMOPFORM_MOD3)
    2874 /** VEX+ModR/M: r/m, vvvv, reg (memory) */
    2875 #define IEMOPFORM_VEX_MVR_MEM   (IEMOPFORM_VEX_MVR | IEMOPFORM_NOT_MOD3)
    2876 /** VEX+ModR/M+/n: vvvv, r/m */
    2877 #define IEMOPFORM_VEX_VM        27
    2878 /** VEX+ModR/M+/n: vvvv, r/m (register) */
    2879 #define IEMOPFORM_VEX_VM_REG    (IEMOPFORM_VEX_VM | IEMOPFORM_MOD3)
    2880 /** VEX+ModR/M+/n: vvvv, r/m (memory) */
    2881 #define IEMOPFORM_VEX_VM_MEM    (IEMOPFORM_VEX_VM | IEMOPFORM_NOT_MOD3)
    2882 /** VEX+ModR/M+/n: vvvv, r/m, imm8 */
    2883 #define IEMOPFORM_VEX_VMI       28
    2884 /** VEX+ModR/M+/n: vvvv, r/m, imm8 (register) */
    2885 #define IEMOPFORM_VEX_VMI_REG   (IEMOPFORM_VEX_VMI | IEMOPFORM_MOD3)
    2886 /** VEX+ModR/M+/n: vvvv, r/m, imm8 (memory) */
    2887 #define IEMOPFORM_VEX_VMI_MEM   (IEMOPFORM_VEX_VMI | IEMOPFORM_NOT_MOD3)
    2888 
    2889 /** Fixed register instruction, no R/M. */
    2890 #define IEMOPFORM_FIXED         32
    2891 
    2892 /** The r/m is a register. */
    2893 #define IEMOPFORM_MOD3          RT_BIT_32(8)
    2894 /** The r/m is a memory access. */
    2895 #define IEMOPFORM_NOT_MOD3      RT_BIT_32(9)
    2896 /** @} */
    2897 
    2898 /** @name IEMOPHINT_XXX - Additional Opcode Hints
    2899  * @note These are ORed together with IEMOPFORM_XXX.
    2900  * @note x86 specific
    2901  * @{ */
    2902 /** Ignores the operand size prefix (66h). */
    2903 #define IEMOPHINT_IGNORES_OZ_PFX    RT_BIT_32(10)
    2904 /** Ignores REX.W (aka WIG). */
    2905 #define IEMOPHINT_IGNORES_REXW      RT_BIT_32(11)
    2906 /** Both the operand size prefixes (66h + REX.W) are ignored. */
    2907 #define IEMOPHINT_IGNORES_OP_SIZES  (IEMOPHINT_IGNORES_OZ_PFX | IEMOPHINT_IGNORES_REXW)
    2908 /** Allowed with the lock prefix. */
    2909 #define IEMOPHINT_LOCK_ALLOWED      RT_BIT_32(11)
    2910 /** The VEX.L value is ignored (aka LIG). */
    2911 #define IEMOPHINT_VEX_L_IGNORED     RT_BIT_32(12)
    2912 /** The VEX.L value must be zero (i.e. 128-bit width only). */
    2913 #define IEMOPHINT_VEX_L_ZERO        RT_BIT_32(13)
    2914 /** The VEX.L value must be one (i.e. 256-bit width only). */
    2915 #define IEMOPHINT_VEX_L_ONE         RT_BIT_32(14)
    2916 /** The VEX.V value must be zero. */
    2917 #define IEMOPHINT_VEX_V_ZERO        RT_BIT_32(15)
    2918 /** The REX.W/VEX.W value must be zero. */
    2919 #define IEMOPHINT_REX_W_ZERO        RT_BIT_32(16)
    2920 #define IEMOPHINT_VEX_W_ZERO        IEMOPHINT_REX_W_ZERO
    2921 /** The REX.W/VEX.W value must be one. */
    2922 #define IEMOPHINT_REX_W_ONE         RT_BIT_32(17)
    2923 #define IEMOPHINT_VEX_W_ONE         IEMOPHINT_REX_W_ONE
    2924 
    2925 /** Hint to IEMAllInstructionPython.py that this macro should be skipped.  */
    2926 #define IEMOPHINT_SKIP_PYTHON       RT_BIT_32(31)
    2927 /** @} */
    2928 
    2929 /**
    2930  * Possible hardware task switch sources - iemTaskSwitch(), iemVmxVmexitTaskSwitch().
    2931  * @note x86 specific
    2932  */
    2933 typedef enum IEMTASKSWITCH
    2934 {
    2935     /** Task switch caused by an interrupt/exception. */
    2936     IEMTASKSWITCH_INT_XCPT = 1,
    2937     /** Task switch caused by a far CALL. */
    2938     IEMTASKSWITCH_CALL,
    2939     /** Task switch caused by a far JMP. */
    2940     IEMTASKSWITCH_JUMP,
    2941     /** Task switch caused by an IRET. */
    2942     IEMTASKSWITCH_IRET
    2943 } IEMTASKSWITCH;
    2944 AssertCompileSize(IEMTASKSWITCH, 4);
    2945 
    2946 /**
    2947  * Possible CrX load (write) sources - iemCImpl_load_CrX().
    2948  * @note x86 specific
    2949  */
    2950 typedef enum IEMACCESSCRX
    2951 {
    2952     /** CrX access caused by 'mov crX' instruction. */
    2953     IEMACCESSCRX_MOV_CRX,
    2954     /** CrX (CR0) write caused by 'lmsw' instruction. */
    2955     IEMACCESSCRX_LMSW,
    2956     /** CrX (CR0) write caused by 'clts' instruction. */
    2957     IEMACCESSCRX_CLTS,
    2958     /** CrX (CR0) read caused by 'smsw' instruction. */
    2959     IEMACCESSCRX_SMSW
    2960 } IEMACCESSCRX;
    2961 
    2962 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2963 /** @name IEM_SLAT_FAIL_XXX - Second-level address translation failure information.
    2964  *
    2965  * These flags provide further context to SLAT page-walk failures that could not be
    2966  * determined by PGM (e.g, PGM is not privy to memory access permissions).
    2967  *
    2968  * @{
    2969  */
    2970 /** Translating a nested-guest linear address failed accessing a nested-guest
    2971  *  physical address. */
    2972 # define IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR          RT_BIT_32(0)
    2973 /** Translating a nested-guest linear address failed accessing a
    2974  *  paging-structure entry or updating accessed/dirty bits. */
    2975 # define IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE         RT_BIT_32(1)
    2976 /** @} */
    2977 
    2978 DECLCALLBACK(FNPGMPHYSHANDLER)      iemVmxApicAccessPageHandler;
    2979 # ifndef IN_RING3
    2980 DECLCALLBACK(FNPGMRZPHYSPFHANDLER)  iemVmxApicAccessPagePfHandler;
    2981 # endif
    2982 #endif
    2983 
    2984 /**
    2985  * Indicates to the verifier that the given flag set is undefined.
    2986  *
    2987  * Can be invoked again to add more flags.
    2988  *
    2989  * This is a NOOP if the verifier isn't compiled in.
    2990  *
    2991  * @note We're temporarily keeping this until code is converted to new
    2992  *       disassembler style opcode handling.
    2993  */
    2994 #define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0)
    2995 
    29962643
    29972644/** @def IEM_DECL_MSC_GUARD_IGNORE
     
    30642711#endif
    30652712
    3066 /** Defined in IEMAllAImplC.cpp but also used by IEMAllAImplA.asm. */
    3067 RT_C_DECLS_BEGIN
    3068 extern uint8_t const g_afParity[256];
    3069 RT_C_DECLS_END
    3070 
    3071 
    3072 /** @name Arithmetic assignment operations on bytes (binary).
    3073  * @{ */
    3074 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU8, (uint32_t fEFlagsIn, uint8_t  *pu8Dst,  uint8_t  u8Src));
    3075 typedef FNIEMAIMPLBINU8  *PFNIEMAIMPLBINU8;
    3076 FNIEMAIMPLBINU8 iemAImpl_add_u8, iemAImpl_add_u8_locked;
    3077 FNIEMAIMPLBINU8 iemAImpl_adc_u8, iemAImpl_adc_u8_locked;
    3078 FNIEMAIMPLBINU8 iemAImpl_sub_u8, iemAImpl_sub_u8_locked;
    3079 FNIEMAIMPLBINU8 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked;
    3080 FNIEMAIMPLBINU8  iemAImpl_or_u8,  iemAImpl_or_u8_locked;
    3081 FNIEMAIMPLBINU8 iemAImpl_xor_u8, iemAImpl_xor_u8_locked;
    3082 FNIEMAIMPLBINU8 iemAImpl_and_u8, iemAImpl_and_u8_locked;
    3083 /** @} */
    3084 
    3085 /** @name Arithmetic assignment operations on words (binary).
    3086  * @{ */
    3087 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU16, (uint32_t fEFlagsIn, uint16_t *pu16Dst, uint16_t u16Src));
    3088 typedef FNIEMAIMPLBINU16  *PFNIEMAIMPLBINU16;
    3089 FNIEMAIMPLBINU16 iemAImpl_add_u16, iemAImpl_add_u16_locked;
    3090 FNIEMAIMPLBINU16 iemAImpl_adc_u16, iemAImpl_adc_u16_locked;
    3091 FNIEMAIMPLBINU16 iemAImpl_sub_u16, iemAImpl_sub_u16_locked;
    3092 FNIEMAIMPLBINU16 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked;
    3093 FNIEMAIMPLBINU16  iemAImpl_or_u16,  iemAImpl_or_u16_locked;
    3094 FNIEMAIMPLBINU16 iemAImpl_xor_u16, iemAImpl_xor_u16_locked;
    3095 FNIEMAIMPLBINU16 iemAImpl_and_u16, iemAImpl_and_u16_locked;
    3096 /** @}  */
    3097 
    3098 
    3099 /** @name Arithmetic assignment operations on double words (binary).
    3100  * @{ */
    3101 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU32, (uint32_t fEFlagsIn, uint32_t *pu32Dst, uint32_t u32Src));
    3102 typedef FNIEMAIMPLBINU32 *PFNIEMAIMPLBINU32;
    3103 FNIEMAIMPLBINU32 iemAImpl_add_u32, iemAImpl_add_u32_locked;
    3104 FNIEMAIMPLBINU32 iemAImpl_adc_u32, iemAImpl_adc_u32_locked;
    3105 FNIEMAIMPLBINU32 iemAImpl_sub_u32, iemAImpl_sub_u32_locked;
    3106 FNIEMAIMPLBINU32 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked;
    3107 FNIEMAIMPLBINU32  iemAImpl_or_u32,  iemAImpl_or_u32_locked;
    3108 FNIEMAIMPLBINU32 iemAImpl_xor_u32, iemAImpl_xor_u32_locked;
    3109 FNIEMAIMPLBINU32 iemAImpl_and_u32, iemAImpl_and_u32_locked;
    3110 FNIEMAIMPLBINU32 iemAImpl_blsi_u32, iemAImpl_blsi_u32_fallback;
    3111 FNIEMAIMPLBINU32 iemAImpl_blsr_u32, iemAImpl_blsr_u32_fallback;
    3112 FNIEMAIMPLBINU32 iemAImpl_blsmsk_u32, iemAImpl_blsmsk_u32_fallback;
    3113 /** @}  */
    3114 
    3115 /** @name Arithmetic assignment operations on quad words (binary).
    3116  * @{ */
    3117 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU64, (uint32_t fEFlagsIn, uint64_t *pu64Dst, uint64_t u64Src));
    3118 typedef FNIEMAIMPLBINU64 *PFNIEMAIMPLBINU64;
    3119 FNIEMAIMPLBINU64 iemAImpl_add_u64, iemAImpl_add_u64_locked;
    3120 FNIEMAIMPLBINU64 iemAImpl_adc_u64, iemAImpl_adc_u64_locked;
    3121 FNIEMAIMPLBINU64 iemAImpl_sub_u64, iemAImpl_sub_u64_locked;
    3122 FNIEMAIMPLBINU64 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked;
    3123 FNIEMAIMPLBINU64  iemAImpl_or_u64,  iemAImpl_or_u64_locked;
    3124 FNIEMAIMPLBINU64 iemAImpl_xor_u64, iemAImpl_xor_u64_locked;
    3125 FNIEMAIMPLBINU64 iemAImpl_and_u64, iemAImpl_and_u64_locked;
    3126 FNIEMAIMPLBINU64 iemAImpl_blsi_u64, iemAImpl_blsi_u64_fallback;
    3127 FNIEMAIMPLBINU64 iemAImpl_blsr_u64, iemAImpl_blsr_u64_fallback;
    3128 FNIEMAIMPLBINU64 iemAImpl_blsmsk_u64, iemAImpl_blsmsk_u64_fallback;
    3129 /** @}  */
    3130 
    3131 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU8, (uint32_t fEFlagsIn, uint8_t const *pu8Dst, uint8_t u8Src));
    3132 typedef FNIEMAIMPLBINROU8 *PFNIEMAIMPLBINROU8;
    3133 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU16,(uint32_t fEFlagsIn, uint16_t const *pu16Dst, uint16_t u16Src));
    3134 typedef FNIEMAIMPLBINROU16 *PFNIEMAIMPLBINROU16;
    3135 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU32,(uint32_t fEFlagsIn, uint32_t const *pu32Dst, uint32_t u32Src));
    3136 typedef FNIEMAIMPLBINROU32 *PFNIEMAIMPLBINROU32;
    3137 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU64,(uint32_t fEFlagsIn, uint64_t const *pu64Dst, uint64_t u64Src));
    3138 typedef FNIEMAIMPLBINROU64 *PFNIEMAIMPLBINROU64;
    3139 
    3140 /** @name Compare operations (thrown in with the binary ops).
    3141  * @{ */
    3142 FNIEMAIMPLBINROU8  iemAImpl_cmp_u8;
    3143 FNIEMAIMPLBINROU16 iemAImpl_cmp_u16;
    3144 FNIEMAIMPLBINROU32 iemAImpl_cmp_u32;
    3145 FNIEMAIMPLBINROU64 iemAImpl_cmp_u64;
    3146 /** @}  */
    3147 
    3148 /** @name Test operations (thrown in with the binary ops).
    3149  * @{ */
    3150 FNIEMAIMPLBINROU8  iemAImpl_test_u8;
    3151 FNIEMAIMPLBINROU16 iemAImpl_test_u16;
    3152 FNIEMAIMPLBINROU32 iemAImpl_test_u32;
    3153 FNIEMAIMPLBINROU64 iemAImpl_test_u64;
    3154 /** @}  */
    3155 
    3156 /** @name Bit operations operations (thrown in with the binary ops).
    3157  * @{ */
    3158 FNIEMAIMPLBINROU16 iemAImpl_bt_u16;
    3159 FNIEMAIMPLBINROU32 iemAImpl_bt_u32;
    3160 FNIEMAIMPLBINROU64 iemAImpl_bt_u64;
    3161 FNIEMAIMPLBINU16 iemAImpl_btc_u16, iemAImpl_btc_u16_locked;
    3162 FNIEMAIMPLBINU32 iemAImpl_btc_u32, iemAImpl_btc_u32_locked;
    3163 FNIEMAIMPLBINU64 iemAImpl_btc_u64, iemAImpl_btc_u64_locked;
    3164 FNIEMAIMPLBINU16 iemAImpl_btr_u16, iemAImpl_btr_u16_locked;
    3165 FNIEMAIMPLBINU32 iemAImpl_btr_u32, iemAImpl_btr_u32_locked;
    3166 FNIEMAIMPLBINU64 iemAImpl_btr_u64, iemAImpl_btr_u64_locked;
    3167 FNIEMAIMPLBINU16 iemAImpl_bts_u16, iemAImpl_bts_u16_locked;
    3168 FNIEMAIMPLBINU32 iemAImpl_bts_u32, iemAImpl_bts_u32_locked;
    3169 FNIEMAIMPLBINU64 iemAImpl_bts_u64, iemAImpl_bts_u64_locked;
    3170 /** @}  */
    3171 
    3172 /** @name Arithmetic three operand operations on double words (binary).
    3173  * @{ */
    3174 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2, uint32_t *pEFlags));
    3175 typedef FNIEMAIMPLBINVEXU32 *PFNIEMAIMPLBINVEXU32;
    3176 FNIEMAIMPLBINVEXU32 iemAImpl_andn_u32, iemAImpl_andn_u32_fallback;
    3177 FNIEMAIMPLBINVEXU32 iemAImpl_bextr_u32, iemAImpl_bextr_u32_fallback;
    3178 FNIEMAIMPLBINVEXU32 iemAImpl_bzhi_u32, iemAImpl_bzhi_u32_fallback;
    3179 /** @}  */
    3180 
    3181 /** @name Arithmetic three operand operations on quad words (binary).
    3182  * @{ */
    3183 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2, uint32_t *pEFlags));
    3184 typedef FNIEMAIMPLBINVEXU64 *PFNIEMAIMPLBINVEXU64;
    3185 FNIEMAIMPLBINVEXU64 iemAImpl_andn_u64, iemAImpl_andn_u64_fallback;
    3186 FNIEMAIMPLBINVEXU64 iemAImpl_bextr_u64, iemAImpl_bextr_u64_fallback;
    3187 FNIEMAIMPLBINVEXU64 iemAImpl_bzhi_u64, iemAImpl_bzhi_u64_fallback;
    3188 /** @}  */
    3189 
    3190 /** @name Arithmetic three operand operations on double words w/o EFLAGS (binary).
    3191  * @{ */
    3192 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32NOEFL, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2));
    3193 typedef FNIEMAIMPLBINVEXU32NOEFL *PFNIEMAIMPLBINVEXU32NOEFL;
    3194 FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pdep_u32, iemAImpl_pdep_u32_fallback;
    3195 FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pext_u32, iemAImpl_pext_u32_fallback;
    3196 FNIEMAIMPLBINVEXU32NOEFL iemAImpl_sarx_u32, iemAImpl_sarx_u32_fallback;
    3197 FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shlx_u32, iemAImpl_shlx_u32_fallback;
    3198 FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shrx_u32, iemAImpl_shrx_u32_fallback;
    3199 FNIEMAIMPLBINVEXU32NOEFL iemAImpl_rorx_u32;
    3200 /** @}  */
    3201 
    3202 /** @name Arithmetic three operand operations on quad words w/o EFLAGS (binary).
    3203  * @{ */
    3204 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64NOEFL, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2));
    3205 typedef FNIEMAIMPLBINVEXU64NOEFL *PFNIEMAIMPLBINVEXU64NOEFL;
    3206 FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pdep_u64, iemAImpl_pdep_u64_fallback;
    3207 FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pext_u64, iemAImpl_pext_u64_fallback;
    3208 FNIEMAIMPLBINVEXU64NOEFL iemAImpl_sarx_u64, iemAImpl_sarx_u64_fallback;
    3209 FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shlx_u64, iemAImpl_shlx_u64_fallback;
    3210 FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shrx_u64, iemAImpl_shrx_u64_fallback;
    3211 FNIEMAIMPLBINVEXU64NOEFL iemAImpl_rorx_u64;
    3212 /** @}  */
    3213 
    3214 /** @name MULX 32-bit and 64-bit.
    3215  * @{ */
    3216 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU32, (uint32_t *puDst1, uint32_t *puDst2, uint32_t uSrc1, uint32_t uSrc2));
    3217 typedef FNIEMAIMPLMULXVEXU32 *PFNIEMAIMPLMULXVEXU32;
    3218 FNIEMAIMPLMULXVEXU32 iemAImpl_mulx_u32, iemAImpl_mulx_u32_fallback;
    3219 
    3220 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU64, (uint64_t *puDst1, uint64_t *puDst2, uint64_t uSrc1, uint64_t uSrc2));
    3221 typedef FNIEMAIMPLMULXVEXU64 *PFNIEMAIMPLMULXVEXU64;
    3222 FNIEMAIMPLMULXVEXU64 iemAImpl_mulx_u64, iemAImpl_mulx_u64_fallback;
    3223 /** @}  */
    3224 
    3225 
/* NOTE(review): throughout these groups the '_locked' suffix presumably selects
   the atomic (LOCK-prefixed) implementation and '_unlocked' the plain one --
   inferred from naming; confirm against the assembly/C implementations. */
    3226 /** @name Exchange memory with register operations.
    3227  * @{ */
    3228 IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_locked, (uint8_t  *pu8Mem,  uint8_t  *pu8Reg));
    3229 IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_locked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
    3230 IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_locked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
    3231 IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_locked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
    3232 IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_unlocked, (uint8_t  *pu8Mem,  uint8_t  *pu8Reg));
    3233 IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_unlocked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
    3234 IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_unlocked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
    3235 IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_unlocked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
    3236 /** @}  */
    3237 
    3238 /** @name Exchange and add operations.
    3239  * @{ */
    3240 IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8, (uint8_t  *pu8Dst,  uint8_t  *pu8Reg,  uint32_t *pEFlags));
    3241 IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
    3242 IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
    3243 IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
    3244 IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8_locked, (uint8_t  *pu8Dst,  uint8_t  *pu8Reg,  uint32_t *pEFlags));
    3245 IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16_locked,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
    3246 IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32_locked,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
    3247 IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64_locked,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
    3248 /** @}  */
    3249 
/* Note: on 32-bit hosts (ARCH_BITS == 32) the 64-bit source operand is passed
   by pointer instead of by value, as visible in the two variants below. */
    3250 /** @name Compare and exchange.
    3251  * @{ */
    3252 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8,        (uint8_t  *pu8Dst,  uint8_t  *puAl,  uint8_t  uSrcReg, uint32_t *pEFlags));
    3253 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8_locked, (uint8_t  *pu8Dst,  uint8_t  *puAl,  uint8_t  uSrcReg, uint32_t *pEFlags));
    3254 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16,       (uint16_t *pu16Dst, uint16_t *puAx,  uint16_t uSrcReg, uint32_t *pEFlags));
    3255 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16_locked,(uint16_t *pu16Dst, uint16_t *puAx,  uint16_t uSrcReg, uint32_t *pEFlags));
    3256 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32,       (uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
    3257 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32_locked,(uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
    3258 #if ARCH_BITS == 32
    3259 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64,       (uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
    3260 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
    3261 #else
    3262 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64,       (uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
    3263 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
    3264 #endif
    3265 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
    3266                                             uint32_t *pEFlags));
    3267 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b_locked,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
    3268                                                    uint32_t *pEFlags));
    3269 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
    3270                                              uint32_t *pEFlags));
    3271 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_locked,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
    3272                                                     uint32_t *pEFlags));
    3273 #ifndef RT_ARCH_ARM64
/* Fallback for hosts without a native 128-bit compare-exchange (not needed on ARM64). */
    3274 IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_fallback,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx,
    3275                                                       PRTUINT128U pu128RbxRcx, uint32_t *pEFlags));
    3276 #endif
    3277 /** @} */
    3278 
    3279 /** @name Memory ordering
    3280  * @{ */
    3281 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEMFENCE,(void));
    3282 typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
    3283 IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
    3284 IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
    3285 IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
    3286 #ifndef RT_ARCH_ARM64
/* Alternative fence, only declared on non-ARM64 hosts. */
    3287 IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
    3288 #endif
    3289 /** @} */
    3290 
/* NOTE(review): the '_amd'/'_intel' suffixed variants presumably model the
   vendor-specific EFLAGS behaviour of these instructions, and '_fallback'
   variants a portable implementation when no native instruction is available --
   inferred from naming; confirm against the implementations. */
    3291 /** @name Double precision shifts
    3292  * @{ */
    3293 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
    3294 typedef FNIEMAIMPLSHIFTDBLU16  *PFNIEMAIMPLSHIFTDBLU16;
    3295 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags));
    3296 typedef FNIEMAIMPLSHIFTDBLU32  *PFNIEMAIMPLSHIFTDBLU32;
    3297 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t cShift, uint32_t *pEFlags));
    3298 typedef FNIEMAIMPLSHIFTDBLU64  *PFNIEMAIMPLSHIFTDBLU64;
    3299 FNIEMAIMPLSHIFTDBLU16 iemAImpl_shld_u16, iemAImpl_shld_u16_amd, iemAImpl_shld_u16_intel;
    3300 FNIEMAIMPLSHIFTDBLU32 iemAImpl_shld_u32, iemAImpl_shld_u32_amd, iemAImpl_shld_u32_intel;
    3301 FNIEMAIMPLSHIFTDBLU64 iemAImpl_shld_u64, iemAImpl_shld_u64_amd, iemAImpl_shld_u64_intel;
    3302 FNIEMAIMPLSHIFTDBLU16 iemAImpl_shrd_u16, iemAImpl_shrd_u16_amd, iemAImpl_shrd_u16_intel;
    3303 FNIEMAIMPLSHIFTDBLU32 iemAImpl_shrd_u32, iemAImpl_shrd_u32_amd, iemAImpl_shrd_u32_intel;
    3304 FNIEMAIMPLSHIFTDBLU64 iemAImpl_shrd_u64, iemAImpl_shrd_u64_amd, iemAImpl_shrd_u64_intel;
    3305 /** @}  */
    3306 
    3307 
    3308 /** @name Bit search operations (thrown in with the binary ops).
    3309  * @{ */
    3310 FNIEMAIMPLBINU16 iemAImpl_bsf_u16, iemAImpl_bsf_u16_amd, iemAImpl_bsf_u16_intel;
    3311 FNIEMAIMPLBINU32 iemAImpl_bsf_u32, iemAImpl_bsf_u32_amd, iemAImpl_bsf_u32_intel;
    3312 FNIEMAIMPLBINU64 iemAImpl_bsf_u64, iemAImpl_bsf_u64_amd, iemAImpl_bsf_u64_intel;
    3313 FNIEMAIMPLBINU16 iemAImpl_bsr_u16, iemAImpl_bsr_u16_amd, iemAImpl_bsr_u16_intel;
    3314 FNIEMAIMPLBINU32 iemAImpl_bsr_u32, iemAImpl_bsr_u32_amd, iemAImpl_bsr_u32_intel;
    3315 FNIEMAIMPLBINU64 iemAImpl_bsr_u64, iemAImpl_bsr_u64_amd, iemAImpl_bsr_u64_intel;
    3316 FNIEMAIMPLBINU16 iemAImpl_lzcnt_u16, iemAImpl_lzcnt_u16_amd, iemAImpl_lzcnt_u16_intel;
    3317 FNIEMAIMPLBINU32 iemAImpl_lzcnt_u32, iemAImpl_lzcnt_u32_amd, iemAImpl_lzcnt_u32_intel;
    3318 FNIEMAIMPLBINU64 iemAImpl_lzcnt_u64, iemAImpl_lzcnt_u64_amd, iemAImpl_lzcnt_u64_intel;
    3319 FNIEMAIMPLBINU16 iemAImpl_tzcnt_u16, iemAImpl_tzcnt_u16_amd, iemAImpl_tzcnt_u16_intel;
    3320 FNIEMAIMPLBINU32 iemAImpl_tzcnt_u32, iemAImpl_tzcnt_u32_amd, iemAImpl_tzcnt_u32_intel;
    3321 FNIEMAIMPLBINU64 iemAImpl_tzcnt_u64, iemAImpl_tzcnt_u64_amd, iemAImpl_tzcnt_u64_intel;
    3322 FNIEMAIMPLBINU16 iemAImpl_popcnt_u16, iemAImpl_popcnt_u16_fallback;
    3323 FNIEMAIMPLBINU32 iemAImpl_popcnt_u32, iemAImpl_popcnt_u32_fallback;
    3324 FNIEMAIMPLBINU64 iemAImpl_popcnt_u64, iemAImpl_popcnt_u64_fallback;
    3325 /** @}  */
    3326 
    3327 /** @name Signed multiplication operations (thrown in with the binary ops).
    3328  * @{ */
    3329 FNIEMAIMPLBINU16 iemAImpl_imul_two_u16, iemAImpl_imul_two_u16_amd, iemAImpl_imul_two_u16_intel;
    3330 FNIEMAIMPLBINU32 iemAImpl_imul_two_u32, iemAImpl_imul_two_u32_amd, iemAImpl_imul_two_u32_intel;
    3331 FNIEMAIMPLBINU64 iemAImpl_imul_two_u64, iemAImpl_imul_two_u64_amd, iemAImpl_imul_two_u64_intel;
    3332 /** @}  */
    3333 
/* Unary read-modify-write operations (INC/DEC/NOT/NEG) on all four operand
   sizes.  Each operation comes in a plain and a '_locked' flavour; the latter
   presumably being the LOCK-prefixed (atomic) form -- confirm. */
    3334 /** @name Arithmetic assignment operations on bytes (unary).
    3335  * @{ */
    3336 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU8,  (uint8_t  *pu8Dst,  uint32_t *pEFlags));
    3337 typedef FNIEMAIMPLUNARYU8  *PFNIEMAIMPLUNARYU8;
    3338 FNIEMAIMPLUNARYU8 iemAImpl_inc_u8, iemAImpl_inc_u8_locked;
    3339 FNIEMAIMPLUNARYU8 iemAImpl_dec_u8, iemAImpl_dec_u8_locked;
    3340 FNIEMAIMPLUNARYU8 iemAImpl_not_u8, iemAImpl_not_u8_locked;
    3341 FNIEMAIMPLUNARYU8 iemAImpl_neg_u8, iemAImpl_neg_u8_locked;
    3342 /** @} */
    3343 
    3344 /** @name Arithmetic assignment operations on words (unary).
    3345  * @{ */
    3346 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU16,  (uint16_t  *pu16Dst,  uint32_t *pEFlags));
    3347 typedef FNIEMAIMPLUNARYU16  *PFNIEMAIMPLUNARYU16;
    3348 FNIEMAIMPLUNARYU16 iemAImpl_inc_u16, iemAImpl_inc_u16_locked;
    3349 FNIEMAIMPLUNARYU16 iemAImpl_dec_u16, iemAImpl_dec_u16_locked;
    3350 FNIEMAIMPLUNARYU16 iemAImpl_not_u16, iemAImpl_not_u16_locked;
    3351 FNIEMAIMPLUNARYU16 iemAImpl_neg_u16, iemAImpl_neg_u16_locked;
    3352 /** @} */
    3353 
    3354 /** @name Arithmetic assignment operations on double words (unary).
    3355  * @{ */
    3356 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU32,  (uint32_t  *pu32Dst,  uint32_t *pEFlags));
    3357 typedef FNIEMAIMPLUNARYU32  *PFNIEMAIMPLUNARYU32;
    3358 FNIEMAIMPLUNARYU32 iemAImpl_inc_u32, iemAImpl_inc_u32_locked;
    3359 FNIEMAIMPLUNARYU32 iemAImpl_dec_u32, iemAImpl_dec_u32_locked;
    3360 FNIEMAIMPLUNARYU32 iemAImpl_not_u32, iemAImpl_not_u32_locked;
    3361 FNIEMAIMPLUNARYU32 iemAImpl_neg_u32, iemAImpl_neg_u32_locked;
    3362 /** @} */
    3363 
    3364 /** @name Arithmetic assignment operations on quad words (unary).
    3365  * @{ */
    3366 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU64,  (uint64_t  *pu64Dst,  uint32_t *pEFlags));
    3367 typedef FNIEMAIMPLUNARYU64  *PFNIEMAIMPLUNARYU64;
    3368 FNIEMAIMPLUNARYU64 iemAImpl_inc_u64, iemAImpl_inc_u64_locked;
    3369 FNIEMAIMPLUNARYU64 iemAImpl_dec_u64, iemAImpl_dec_u64_locked;
    3370 FNIEMAIMPLUNARYU64 iemAImpl_not_u64, iemAImpl_not_u64_locked;
    3371 FNIEMAIMPLUNARYU64 iemAImpl_neg_u64, iemAImpl_neg_u64_locked;
    3372 /** @} */
    3373 
    3374 
/* Group-2 rotate/shift operations.  Unlike the groups above, these take the
   incoming EFLAGS by value (fEFlagsIn) and return the updated EFLAGS as the
   uint32_t function result. */
    3375 /** @name Shift operations on bytes (Group 2).
    3376  * @{ */
    3377 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU8,(uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t cShift));
    3378 typedef FNIEMAIMPLSHIFTU8  *PFNIEMAIMPLSHIFTU8;
    3379 FNIEMAIMPLSHIFTU8 iemAImpl_rol_u8, iemAImpl_rol_u8_amd, iemAImpl_rol_u8_intel;
    3380 FNIEMAIMPLSHIFTU8 iemAImpl_ror_u8, iemAImpl_ror_u8_amd, iemAImpl_ror_u8_intel;
    3381 FNIEMAIMPLSHIFTU8 iemAImpl_rcl_u8, iemAImpl_rcl_u8_amd, iemAImpl_rcl_u8_intel;
    3382 FNIEMAIMPLSHIFTU8 iemAImpl_rcr_u8, iemAImpl_rcr_u8_amd, iemAImpl_rcr_u8_intel;
    3383 FNIEMAIMPLSHIFTU8 iemAImpl_shl_u8, iemAImpl_shl_u8_amd, iemAImpl_shl_u8_intel;
    3384 FNIEMAIMPLSHIFTU8 iemAImpl_shr_u8, iemAImpl_shr_u8_amd, iemAImpl_shr_u8_intel;
    3385 FNIEMAIMPLSHIFTU8 iemAImpl_sar_u8, iemAImpl_sar_u8_amd, iemAImpl_sar_u8_intel;
    3386 /** @} */
    3387 
    3388 /** @name Shift operations on words (Group 2).
    3389  * @{ */
    3390 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU16,(uint32_t fEFlagsIn, uint16_t *pu16Dst, uint8_t cShift));
    3391 typedef FNIEMAIMPLSHIFTU16  *PFNIEMAIMPLSHIFTU16;
    3392 FNIEMAIMPLSHIFTU16 iemAImpl_rol_u16, iemAImpl_rol_u16_amd, iemAImpl_rol_u16_intel;
    3393 FNIEMAIMPLSHIFTU16 iemAImpl_ror_u16, iemAImpl_ror_u16_amd, iemAImpl_ror_u16_intel;
    3394 FNIEMAIMPLSHIFTU16 iemAImpl_rcl_u16, iemAImpl_rcl_u16_amd, iemAImpl_rcl_u16_intel;
    3395 FNIEMAIMPLSHIFTU16 iemAImpl_rcr_u16, iemAImpl_rcr_u16_amd, iemAImpl_rcr_u16_intel;
    3396 FNIEMAIMPLSHIFTU16 iemAImpl_shl_u16, iemAImpl_shl_u16_amd, iemAImpl_shl_u16_intel;
    3397 FNIEMAIMPLSHIFTU16 iemAImpl_shr_u16, iemAImpl_shr_u16_amd, iemAImpl_shr_u16_intel;
    3398 FNIEMAIMPLSHIFTU16 iemAImpl_sar_u16, iemAImpl_sar_u16_amd, iemAImpl_sar_u16_intel;
    3399 /** @} */
    3400 
    3401 /** @name Shift operations on double words (Group 2).
    3402  * @{ */
    3403 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU32,(uint32_t fEFlagsIn, uint32_t *pu32Dst, uint8_t cShift));
    3404 typedef FNIEMAIMPLSHIFTU32  *PFNIEMAIMPLSHIFTU32;
    3405 FNIEMAIMPLSHIFTU32 iemAImpl_rol_u32, iemAImpl_rol_u32_amd, iemAImpl_rol_u32_intel;
    3406 FNIEMAIMPLSHIFTU32 iemAImpl_ror_u32, iemAImpl_ror_u32_amd, iemAImpl_ror_u32_intel;
    3407 FNIEMAIMPLSHIFTU32 iemAImpl_rcl_u32, iemAImpl_rcl_u32_amd, iemAImpl_rcl_u32_intel;
    3408 FNIEMAIMPLSHIFTU32 iemAImpl_rcr_u32, iemAImpl_rcr_u32_amd, iemAImpl_rcr_u32_intel;
    3409 FNIEMAIMPLSHIFTU32 iemAImpl_shl_u32, iemAImpl_shl_u32_amd, iemAImpl_shl_u32_intel;
    3410 FNIEMAIMPLSHIFTU32 iemAImpl_shr_u32, iemAImpl_shr_u32_amd, iemAImpl_shr_u32_intel;
    3411 FNIEMAIMPLSHIFTU32 iemAImpl_sar_u32, iemAImpl_sar_u32_amd, iemAImpl_sar_u32_intel;
    3412 /** @} */
    3413 
    3414 /** @name Shift operations on quad words (Group 2).
    3415  * @{ */
    3416 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU64,(uint32_t fEFlagsIn, uint64_t *pu64Dst, uint8_t cShift));
    3417 typedef FNIEMAIMPLSHIFTU64  *PFNIEMAIMPLSHIFTU64;
    3418 FNIEMAIMPLSHIFTU64 iemAImpl_rol_u64, iemAImpl_rol_u64_amd, iemAImpl_rol_u64_intel;
    3419 FNIEMAIMPLSHIFTU64 iemAImpl_ror_u64, iemAImpl_ror_u64_amd, iemAImpl_ror_u64_intel;
    3420 FNIEMAIMPLSHIFTU64 iemAImpl_rcl_u64, iemAImpl_rcl_u64_amd, iemAImpl_rcl_u64_intel;
    3421 FNIEMAIMPLSHIFTU64 iemAImpl_rcr_u64, iemAImpl_rcr_u64_amd, iemAImpl_rcr_u64_intel;
    3422 FNIEMAIMPLSHIFTU64 iemAImpl_shl_u64, iemAImpl_shl_u64_amd, iemAImpl_shl_u64_intel;
    3423 FNIEMAIMPLSHIFTU64 iemAImpl_shr_u64, iemAImpl_shr_u64_amd, iemAImpl_shr_u64_intel;
    3424 FNIEMAIMPLSHIFTU64 iemAImpl_sar_u64, iemAImpl_sar_u64_amd, iemAImpl_sar_u64_intel;
    3425 /** @} */
    3426 
/* MUL/IMUL/DIV/IDIV workers.  The implicit accumulator registers (AX, DX:AX,
   EDX:EAX, RDX:RAX) are passed by pointer; EFLAGS comes in by value and the
   updated value is presumably returned as the uint32_t result (matching the
   Group-2 shift convention above) -- confirm against the implementations. */
    3427 /** @name Multiplication and division operations.
    3428  * @{ */
    3429 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t fEFlags));
    3430 typedef FNIEMAIMPLMULDIVU8  *PFNIEMAIMPLMULDIVU8;
    3431 FNIEMAIMPLMULDIVU8 iemAImpl_mul_u8,  iemAImpl_mul_u8_amd,  iemAImpl_mul_u8_intel;
    3432 FNIEMAIMPLMULDIVU8 iemAImpl_imul_u8, iemAImpl_imul_u8_amd, iemAImpl_imul_u8_intel;
    3433 FNIEMAIMPLMULDIVU8 iemAImpl_div_u8,  iemAImpl_div_u8_amd,  iemAImpl_div_u8_intel;
    3434 FNIEMAIMPLMULDIVU8 iemAImpl_idiv_u8, iemAImpl_idiv_u8_amd, iemAImpl_idiv_u8_intel;
    3435 
    3436 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t fEFlags));
    3437 typedef FNIEMAIMPLMULDIVU16  *PFNIEMAIMPLMULDIVU16;
    3438 FNIEMAIMPLMULDIVU16 iemAImpl_mul_u16,  iemAImpl_mul_u16_amd,  iemAImpl_mul_u16_intel;
    3439 FNIEMAIMPLMULDIVU16 iemAImpl_imul_u16, iemAImpl_imul_u16_amd, iemAImpl_imul_u16_intel;
    3440 FNIEMAIMPLMULDIVU16 iemAImpl_div_u16,  iemAImpl_div_u16_amd,  iemAImpl_div_u16_intel;
    3441 FNIEMAIMPLMULDIVU16 iemAImpl_idiv_u16, iemAImpl_idiv_u16_amd, iemAImpl_idiv_u16_intel;
    3442 
    3443 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t fEFlags));
    3444 typedef FNIEMAIMPLMULDIVU32  *PFNIEMAIMPLMULDIVU32;
    3445 FNIEMAIMPLMULDIVU32 iemAImpl_mul_u32,  iemAImpl_mul_u32_amd,  iemAImpl_mul_u32_intel;
    3446 FNIEMAIMPLMULDIVU32 iemAImpl_imul_u32, iemAImpl_imul_u32_amd, iemAImpl_imul_u32_intel;
    3447 FNIEMAIMPLMULDIVU32 iemAImpl_div_u32,  iemAImpl_div_u32_amd,  iemAImpl_div_u32_intel;
    3448 FNIEMAIMPLMULDIVU32 iemAImpl_idiv_u32, iemAImpl_idiv_u32_amd, iemAImpl_idiv_u32_intel;
    3449 
    3450 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t fEFlags));
    3451 typedef FNIEMAIMPLMULDIVU64  *PFNIEMAIMPLMULDIVU64;
    3452 FNIEMAIMPLMULDIVU64 iemAImpl_mul_u64,  iemAImpl_mul_u64_amd,  iemAImpl_mul_u64_intel;
    3453 FNIEMAIMPLMULDIVU64 iemAImpl_imul_u64, iemAImpl_imul_u64_amd, iemAImpl_imul_u64_intel;
    3454 FNIEMAIMPLMULDIVU64 iemAImpl_div_u64,  iemAImpl_div_u64_amd,  iemAImpl_div_u64_intel;
    3455 FNIEMAIMPLMULDIVU64 iemAImpl_idiv_u64, iemAImpl_idiv_u64_amd, iemAImpl_idiv_u64_intel;
    3456 /** @} */
    3457 
/* NOTE(review): these three use IEM_DECL_IMPL_TYPE whereas every other plain
   declaration in this file uses IEM_DECL_IMPL_DEF -- possibly intentional,
   confirm the macros expand equivalently for function declarations. */
    3458 /** @name Byte Swap.
    3459  * @{  */
    3460 IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u16,(uint32_t *pu32Dst)); /* Yes, 32-bit register access. */
    3461 IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u32,(uint32_t *pu32Dst));
    3462 IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u64,(uint64_t *pu64Dst));
    3463 /** @}  */
    3464 
    3465 /** @name Misc.
    3466  * @{ */
    3467 FNIEMAIMPLBINU16 iemAImpl_arpl;
    3468 /** @} */
    3469 
/* RDRAND/RDSEED workers; the '_fallback' variants are presumably used when the
   host CPU lacks the instruction -- confirm against the dispatch code. */
    3470 /** @name RDRAND and RDSEED
    3471  * @{ */
    3472 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU16,(uint16_t *puDst, uint32_t *pEFlags));
    3473 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU32,(uint32_t *puDst, uint32_t *pEFlags));
    3474 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU64,(uint64_t *puDst, uint32_t *pEFlags));
    3475 typedef FNIEMAIMPLRDRANDSEEDU16  *PFNIEMAIMPLRDRANDSEEDU16;
    3476 typedef FNIEMAIMPLRDRANDSEEDU32  *PFNIEMAIMPLRDRANDSEEDU32;
    3477 typedef FNIEMAIMPLRDRANDSEEDU64  *PFNIEMAIMPLRDRANDSEEDU64;
    3478 
    3479 FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdrand_u16, iemAImpl_rdrand_u16_fallback;
    3480 FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdrand_u32, iemAImpl_rdrand_u32_fallback;
    3481 FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdrand_u64, iemAImpl_rdrand_u64_fallback;
    3482 FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdseed_u16, iemAImpl_rdseed_u16_fallback;
    3483 FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdseed_u32, iemAImpl_rdseed_u32_fallback;
    3484 FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdseed_u64, iemAImpl_rdseed_u64_fallback;
    3485 /** @} */
    3486 
    3487 /** @name ADOX and ADCX
    3488  * @{ */
    3489 FNIEMAIMPLBINU32 iemAImpl_adcx_u32, iemAImpl_adcx_u32_fallback;
    3490 FNIEMAIMPLBINU64 iemAImpl_adcx_u64, iemAImpl_adcx_u64_fallback;
    3491 FNIEMAIMPLBINU32 iemAImpl_adox_u32, iemAImpl_adox_u32_fallback;
    3492 FNIEMAIMPLBINU64 iemAImpl_adox_u64, iemAImpl_adox_u64_fallback;
    3493 /** @} */
    3494 
/* x87 workers operating on an 80-bit stack register and a 32-bit float memory
   operand.  The 'FSW' typedef returns only the FPU status word; the plain one
   produces a full IEMFPURESULT. */
    3495 /** @name FPU operations taking a 32-bit float argument
    3496  * @{ */
    3497 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
    3498                                                       PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
    3499 typedef FNIEMAIMPLFPUR32FSW *PFNIEMAIMPLFPUR32FSW;
    3500 
    3501 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
    3502                                                    PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
    3503 typedef FNIEMAIMPLFPUR32    *PFNIEMAIMPLFPUR32;
    3504 
    3505 FNIEMAIMPLFPUR32FSW iemAImpl_fcom_r80_by_r32;
    3506 FNIEMAIMPLFPUR32    iemAImpl_fadd_r80_by_r32;
    3507 FNIEMAIMPLFPUR32    iemAImpl_fmul_r80_by_r32;
    3508 FNIEMAIMPLFPUR32    iemAImpl_fsub_r80_by_r32;
    3509 FNIEMAIMPLFPUR32    iemAImpl_fsubr_r80_by_r32;
    3510 FNIEMAIMPLFPUR32    iemAImpl_fdiv_r80_by_r32;
    3511 FNIEMAIMPLFPUR32    iemAImpl_fdivr_r80_by_r32;
    3512 
    3513 IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT32U pr32Val));
    3514 IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
    3515                                                  PRTFLOAT32U pr32Val, PCRTFLOAT80U pr80Val));
    3516 /** @} */
    3517 
    3518 /** @name FPU operations taking a 64-bit float argument
    3519  * @{ */
    3520 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
    3521                                                       PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
    3522 typedef FNIEMAIMPLFPUR64FSW *PFNIEMAIMPLFPUR64FSW;
    3523 
    3524 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
    3525                                                    PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
    3526 typedef FNIEMAIMPLFPUR64   *PFNIEMAIMPLFPUR64;
    3527 
    3528 FNIEMAIMPLFPUR64FSW iemAImpl_fcom_r80_by_r64;
    3529 FNIEMAIMPLFPUR64    iemAImpl_fadd_r80_by_r64;
    3530 FNIEMAIMPLFPUR64    iemAImpl_fmul_r80_by_r64;
    3531 FNIEMAIMPLFPUR64    iemAImpl_fsub_r80_by_r64;
    3532 FNIEMAIMPLFPUR64    iemAImpl_fsubr_r80_by_r64;
    3533 FNIEMAIMPLFPUR64    iemAImpl_fdiv_r80_by_r64;
    3534 FNIEMAIMPLFPUR64    iemAImpl_fdivr_r80_by_r64;
    3535 
    3536 IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT64U pr64Val));
    3537 IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
    3538                                                  PRTFLOAT64U pr32Val, PCRTFLOAT80U pr80Val));
    3539 /** @} */
    3540 
/* x87 workers operating purely on 80-bit operands: binary arithmetic,
   compares (status word and EFLAGS flavours), unary transforms, constant
   loads, one-operand-in/two-results-out transcendentals, and the r80 and
   packed-BCD load/store helpers. */
    3541 /** @name FPU operations taking a 80-bit float argument
    3542  * @{ */
    3543 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
    3544                                                    PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
    3545 typedef FNIEMAIMPLFPUR80    *PFNIEMAIMPLFPUR80;
    3546 FNIEMAIMPLFPUR80            iemAImpl_fadd_r80_by_r80;
    3547 FNIEMAIMPLFPUR80            iemAImpl_fmul_r80_by_r80;
    3548 FNIEMAIMPLFPUR80            iemAImpl_fsub_r80_by_r80;
    3549 FNIEMAIMPLFPUR80            iemAImpl_fsubr_r80_by_r80;
    3550 FNIEMAIMPLFPUR80            iemAImpl_fdiv_r80_by_r80;
    3551 FNIEMAIMPLFPUR80            iemAImpl_fdivr_r80_by_r80;
    3552 FNIEMAIMPLFPUR80            iemAImpl_fprem_r80_by_r80;
    3553 FNIEMAIMPLFPUR80            iemAImpl_fprem1_r80_by_r80;
    3554 FNIEMAIMPLFPUR80            iemAImpl_fscale_r80_by_r80;
    3555 
    3556 FNIEMAIMPLFPUR80            iemAImpl_fpatan_r80_by_r80,  iemAImpl_fpatan_r80_by_r80_amd,  iemAImpl_fpatan_r80_by_r80_intel;
    3557 FNIEMAIMPLFPUR80            iemAImpl_fyl2x_r80_by_r80,   iemAImpl_fyl2x_r80_by_r80_amd,   iemAImpl_fyl2x_r80_by_r80_intel;
    3558 FNIEMAIMPLFPUR80            iemAImpl_fyl2xp1_r80_by_r80, iemAImpl_fyl2xp1_r80_by_r80_amd, iemAImpl_fyl2xp1_r80_by_r80_intel;
    3559 
/* Compare flavours: *FSW returns only the FPU status word, *EFL returns the
   EFLAGS value (for FCOMI/FUCOMI) as the uint32_t function result. */
    3560 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
    3561                                                       PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
    3562 typedef FNIEMAIMPLFPUR80FSW *PFNIEMAIMPLFPUR80FSW;
    3563 FNIEMAIMPLFPUR80FSW         iemAImpl_fcom_r80_by_r80;
    3564 FNIEMAIMPLFPUR80FSW         iemAImpl_fucom_r80_by_r80;
    3565 
    3566 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPUR80EFL,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
    3567                                                           PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
    3568 typedef FNIEMAIMPLFPUR80EFL *PFNIEMAIMPLFPUR80EFL;
    3569 FNIEMAIMPLFPUR80EFL         iemAImpl_fcomi_r80_by_r80;
    3570 FNIEMAIMPLFPUR80EFL         iemAImpl_fucomi_r80_by_r80;
    3571 
    3572 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARY,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
    3573 typedef FNIEMAIMPLFPUR80UNARY *PFNIEMAIMPLFPUR80UNARY;
    3574 FNIEMAIMPLFPUR80UNARY       iemAImpl_fabs_r80;
    3575 FNIEMAIMPLFPUR80UNARY       iemAImpl_fchs_r80;
    3576 FNIEMAIMPLFPUR80UNARY       iemAImpl_f2xm1_r80, iemAImpl_f2xm1_r80_amd, iemAImpl_f2xm1_r80_intel;
    3577 FNIEMAIMPLFPUR80UNARY       iemAImpl_fsqrt_r80;
    3578 FNIEMAIMPLFPUR80UNARY       iemAImpl_frndint_r80;
    3579 FNIEMAIMPLFPUR80UNARY       iemAImpl_fsin_r80, iemAImpl_fsin_r80_amd, iemAImpl_fsin_r80_intel;
    3580 FNIEMAIMPLFPUR80UNARY       iemAImpl_fcos_r80, iemAImpl_fcos_r80_amd, iemAImpl_fcos_r80_intel;
    3581 
    3582 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYFSW,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw, PCRTFLOAT80U pr80Val));
    3583 typedef FNIEMAIMPLFPUR80UNARYFSW *PFNIEMAIMPLFPUR80UNARYFSW;
    3584 FNIEMAIMPLFPUR80UNARYFSW    iemAImpl_ftst_r80;
    3585 FNIEMAIMPLFPUR80UNARYFSW    iemAImpl_fxam_r80;
    3586 
/* FLD-constant workers (FLD1, FLDL2T, FLDL2E, FLDPI, FLDLG2, FLDLN2, FLDZ). */
    3587 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80LDCONST,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes));
    3588 typedef FNIEMAIMPLFPUR80LDCONST *PFNIEMAIMPLFPUR80LDCONST;
    3589 FNIEMAIMPLFPUR80LDCONST     iemAImpl_fld1;
    3590 FNIEMAIMPLFPUR80LDCONST     iemAImpl_fldl2t;
    3591 FNIEMAIMPLFPUR80LDCONST     iemAImpl_fldl2e;
    3592 FNIEMAIMPLFPUR80LDCONST     iemAImpl_fldpi;
    3593 FNIEMAIMPLFPUR80LDCONST     iemAImpl_fldlg2;
    3594 FNIEMAIMPLFPUR80LDCONST     iemAImpl_fldln2;
    3595 FNIEMAIMPLFPUR80LDCONST     iemAImpl_fldz;
    3596 
/* One input, two stack results (FPTAN, FXTRACT, FSINCOS). */
    3597 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYTWO,(PCX86FXSTATE pFpuState, PIEMFPURESULTTWO pFpuResTwo,
    3598                                                            PCRTFLOAT80U pr80Val));
    3599 typedef FNIEMAIMPLFPUR80UNARYTWO *PFNIEMAIMPLFPUR80UNARYTWO;
    3600 FNIEMAIMPLFPUR80UNARYTWO    iemAImpl_fptan_r80_r80, iemAImpl_fptan_r80_r80_amd, iemAImpl_fptan_r80_r80_intel;
    3601 FNIEMAIMPLFPUR80UNARYTWO    iemAImpl_fxtract_r80_r80;
    3602 FNIEMAIMPLFPUR80UNARYTWO    iemAImpl_fsincos_r80_r80, iemAImpl_fsincos_r80_r80_amd, iemAImpl_fsincos_r80_r80_intel;
    3603 
    3604 IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
    3605 IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
    3606                                                  PRTFLOAT80U pr80Dst, PCRTFLOAT80U pr80Src));
    3607 
/* Packed BCD (d80) load/store, i.e. FBLD/FBSTP. */
    3608 IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_d80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTPBCD80U pd80Val));
    3609 IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_d80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
    3610                                                  PRTPBCD80U pd80Dst, PCRTFLOAT80U pr80Src));
    3611 
    3612 /** @} */
    3613 
/* x87 workers taking 16/32/64-bit signed-integer memory operands
   (FIADD/FICOM/FILD/FIST/FISTT families).  Note the i64 group only has the
   load/store forms; arithmetic on 64-bit integers has no x87 encoding. */
    3614 /** @name FPU operations taking a 16-bit signed integer argument
    3615  * @{  */
    3616 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
    3617                                                    PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
    3618 typedef FNIEMAIMPLFPUI16 *PFNIEMAIMPLFPUI16;
    3619 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI16,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
    3620                                                           int16_t *pi16Dst, PCRTFLOAT80U pr80Src));
    3621 typedef FNIEMAIMPLFPUSTR80TOI16 *PFNIEMAIMPLFPUSTR80TOI16;
    3622 
    3623 FNIEMAIMPLFPUI16    iemAImpl_fiadd_r80_by_i16;
    3624 FNIEMAIMPLFPUI16    iemAImpl_fimul_r80_by_i16;
    3625 FNIEMAIMPLFPUI16    iemAImpl_fisub_r80_by_i16;
    3626 FNIEMAIMPLFPUI16    iemAImpl_fisubr_r80_by_i16;
    3627 FNIEMAIMPLFPUI16    iemAImpl_fidiv_r80_by_i16;
    3628 FNIEMAIMPLFPUI16    iemAImpl_fidivr_r80_by_i16;
    3629 
    3630 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
    3631                                                       PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
    3632 typedef FNIEMAIMPLFPUI16FSW *PFNIEMAIMPLFPUI16FSW;
    3633 FNIEMAIMPLFPUI16FSW     iemAImpl_ficom_r80_by_i16;
    3634 
    3635 IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int16_t const *pi16Val));
    3636 FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fist_r80_to_i16;
    3637 FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fistt_r80_to_i16, iemAImpl_fistt_r80_to_i16_amd, iemAImpl_fistt_r80_to_i16_intel;
    3638 /** @}  */
    3639 
    3640 /** @name FPU operations taking a 32-bit signed integer argument
    3641  * @{  */
    3642 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
    3643                                                    PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
    3644 typedef FNIEMAIMPLFPUI32 *PFNIEMAIMPLFPUI32;
    3645 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI32,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
    3646                                                           int32_t *pi32Dst, PCRTFLOAT80U pr80Src));
    3647 typedef FNIEMAIMPLFPUSTR80TOI32 *PFNIEMAIMPLFPUSTR80TOI32;
    3648 
    3649 FNIEMAIMPLFPUI32    iemAImpl_fiadd_r80_by_i32;
    3650 FNIEMAIMPLFPUI32    iemAImpl_fimul_r80_by_i32;
    3651 FNIEMAIMPLFPUI32    iemAImpl_fisub_r80_by_i32;
    3652 FNIEMAIMPLFPUI32    iemAImpl_fisubr_r80_by_i32;
    3653 FNIEMAIMPLFPUI32    iemAImpl_fidiv_r80_by_i32;
    3654 FNIEMAIMPLFPUI32    iemAImpl_fidivr_r80_by_i32;
    3655 
    3656 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
    3657                                                       PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
    3658 typedef FNIEMAIMPLFPUI32FSW *PFNIEMAIMPLFPUI32FSW;
    3659 FNIEMAIMPLFPUI32FSW     iemAImpl_ficom_r80_by_i32;
    3660 
    3661 IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int32_t const *pi32Val));
    3662 FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fist_r80_to_i32;
    3663 FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fistt_r80_to_i32;
    3664 /** @}  */
    3665 
    3666 /** @name FPU operations taking a 64-bit signed integer argument
    3667  * @{  */
    3668 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI64,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
    3669                                                           int64_t *pi64Dst, PCRTFLOAT80U pr80Src));
    3670 typedef FNIEMAIMPLFPUSTR80TOI64 *PFNIEMAIMPLFPUSTR80TOI64;
    3671 
    3672 IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int64_t const *pi64Val));
    3673 FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fist_r80_to_i64;
    3674 FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fistt_r80_to_i64;
    3675 /** @} */
    3676 
    3677 
    3678 /** Temporary type representing a 256-bit vector register. */
    3679 typedef struct { uint64_t au64[4]; } IEMVMM256;
    3680 /** Temporary type pointing to a 256-bit vector register. */
    3681 typedef IEMVMM256 *PIEMVMM256;
    3682 /** Temporary type pointing to a const 256-bit vector register. */
/* Added the missing 'const': the 'PC' prefix and the comment above both say
   pointer-to-const, but the typedef was identical to PIEMVMM256. */
    3683 typedef IEMVMM256 const *PCIEMVMM256;
    3684 
    3685 
/* SIMD (MMX/SSE/AVX) worker typedefs and declarations.  The floating-point
   forms (FNIEMAIMPLMEDIAF*) take MXCSR in by value and return it as the
   uint32_t result; the integer/optimized forms (FNIEMAIMPLMEDIAOPTF*) carry
   no FPU state at all.  U64 = MMX, U128 = XMM, U256 = YMM operand widths. */
    3686 /** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
    3687  * @{ */
    3688 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *puDst, uint64_t const *puSrc));
    3689 typedef FNIEMAIMPLMEDIAF2U64   *PFNIEMAIMPLMEDIAF2U64;
    3690 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMEDIAF2U128,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86XMMREG puSrc));
    3691 typedef FNIEMAIMPLMEDIAF2U128  *PFNIEMAIMPLMEDIAF2U128;
    3692 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMEDIAF2U256,(uint32_t uMxCsrIn, PX86YMMREG puDst, PCX86YMMREG puSrc));
    3693 typedef FNIEMAIMPLMEDIAF2U256  *PFNIEMAIMPLMEDIAF2U256;
    3694 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMEDIAF3U128,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
    3695 typedef FNIEMAIMPLMEDIAF3U128  *PFNIEMAIMPLMEDIAF3U128;
    3696 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMEDIAF3U256,(uint32_t uMxCsrIn, PX86YMMREG puDst, PCX86YMMREG puSrc1, PCX86YMMREG puSrc2));
    3697 typedef FNIEMAIMPLMEDIAF3U256  *PFNIEMAIMPLMEDIAF3U256;
    3698 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U64,(uint64_t *puDst, uint64_t const *puSrc));
    3699 typedef FNIEMAIMPLMEDIAOPTF2U64   *PFNIEMAIMPLMEDIAOPTF2U64;
    3700 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128,(PRTUINT128U puDst, PCRTUINT128U puSrc));
    3701 typedef FNIEMAIMPLMEDIAOPTF2U128  *PFNIEMAIMPLMEDIAOPTF2U128;
    3702 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
    3703 typedef FNIEMAIMPLMEDIAOPTF3U128  *PFNIEMAIMPLMEDIAOPTF3U128;
    3704 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
    3705 typedef FNIEMAIMPLMEDIAOPTF3U256  *PFNIEMAIMPLMEDIAOPTF3U256;
    3706 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256,(PRTUINT256U puDst, PCRTUINT256U puSrc));
    3707 typedef FNIEMAIMPLMEDIAOPTF2U256  *PFNIEMAIMPLMEDIAOPTF2U256;
/* MMX (64-bit) two-operand workers; '_fallback' variants presumably cover
   instruction-set extensions the host may lack (SSSE3 etc.) -- confirm. */
    3708 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pshufb_u64, iemAImpl_pshufb_u64_fallback;
    3709 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pand_u64, iemAImpl_pandn_u64, iemAImpl_por_u64, iemAImpl_pxor_u64;
    3710 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pcmpeqb_u64,  iemAImpl_pcmpeqw_u64,  iemAImpl_pcmpeqd_u64;
    3711 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pcmpgtb_u64,  iemAImpl_pcmpgtw_u64,  iemAImpl_pcmpgtd_u64;
    3712 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_paddb_u64, iemAImpl_paddsb_u64, iemAImpl_paddusb_u64;
    3713 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_paddw_u64, iemAImpl_paddsw_u64, iemAImpl_paddusw_u64;
    3714 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_paddd_u64;
    3715 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_paddq_u64;
    3716 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_psubb_u64, iemAImpl_psubsb_u64, iemAImpl_psubusb_u64;
    3717 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_psubw_u64, iemAImpl_psubsw_u64, iemAImpl_psubusw_u64;
    3718 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_psubd_u64;
    3719 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_psubq_u64;
    3720 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pmaddwd_u64, iemAImpl_pmaddwd_u64_fallback;
    3721 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pmullw_u64, iemAImpl_pmulhw_u64;
    3722 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pminub_u64, iemAImpl_pmaxub_u64;
    3723 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pminsw_u64, iemAImpl_pmaxsw_u64;
    3724 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pabsb_u64, iemAImpl_pabsb_u64_fallback;
    3725 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pabsw_u64, iemAImpl_pabsw_u64_fallback;
    3726 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pabsd_u64, iemAImpl_pabsd_u64_fallback;
    3727 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_psignb_u64, iemAImpl_psignb_u64_fallback;
    3728 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_psignw_u64, iemAImpl_psignw_u64_fallback;
    3729 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_psignd_u64, iemAImpl_psignd_u64_fallback;
    3730 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_phaddw_u64, iemAImpl_phaddw_u64_fallback;
    3731 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_phaddd_u64, iemAImpl_phaddd_u64_fallback;
    3732 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_phsubw_u64, iemAImpl_phsubw_u64_fallback;
    3733 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_phsubd_u64, iemAImpl_phsubd_u64_fallback;
    3734 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_phaddsw_u64, iemAImpl_phaddsw_u64_fallback;
    3735 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_phsubsw_u64, iemAImpl_phsubsw_u64_fallback;
    3736 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pmaddubsw_u64, iemAImpl_pmaddubsw_u64_fallback;
    3737 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pmulhrsw_u64, iemAImpl_pmulhrsw_u64_fallback;
    3738 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pmuludq_u64;
    3739 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_psllw_u64, iemAImpl_psrlw_u64, iemAImpl_psraw_u64;
    3740 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pslld_u64, iemAImpl_psrld_u64, iemAImpl_psrad_u64;
    3741 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_psllq_u64, iemAImpl_psrlq_u64;
    3742 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_packsswb_u64, iemAImpl_packuswb_u64;
    3743 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_packssdw_u64;
    3744 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pmulhuw_u64;
    3745 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_pavgb_u64, iemAImpl_pavgw_u64;
    3746 FNIEMAIMPLMEDIAOPTF2U64  iemAImpl_psadbw_u64;
    3747 
    3748 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pshufb_u128, iemAImpl_pshufb_u128_fallback;
    3749 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pand_u128, iemAImpl_pandn_u128, iemAImpl_por_u128, iemAImpl_pxor_u128;
    3750 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpeqb_u128, iemAImpl_pcmpeqw_u128, iemAImpl_pcmpeqd_u128;
    3751 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpeqq_u128, iemAImpl_pcmpeqq_u128_fallback;
    3752 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpgtb_u128, iemAImpl_pcmpgtw_u128, iemAImpl_pcmpgtd_u128;
    3753 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpgtq_u128, iemAImpl_pcmpgtq_u128_fallback;
    3754 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddb_u128, iemAImpl_paddsb_u128, iemAImpl_paddusb_u128;
    3755 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddw_u128, iemAImpl_paddsw_u128, iemAImpl_paddusw_u128;
    3756 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddd_u128;
    3757 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddq_u128;
    3758 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubb_u128, iemAImpl_psubsb_u128, iemAImpl_psubusb_u128;
    3759 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubw_u128, iemAImpl_psubsw_u128, iemAImpl_psubusw_u128;
    3760 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubd_u128;
    3761 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubq_u128;
    3762 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmullw_u128, iemAImpl_pmullw_u128_fallback;
    3763 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhw_u128;
    3764 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulld_u128, iemAImpl_pmulld_u128_fallback;
    3765 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaddwd_u128, iemAImpl_pmaddwd_u128_fallback;
    3766 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminub_u128;
    3767 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminud_u128, iemAImpl_pminud_u128_fallback;
    3768 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminuw_u128, iemAImpl_pminuw_u128_fallback;
    3769 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminsb_u128, iemAImpl_pminsb_u128_fallback;
    3770 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminsd_u128, iemAImpl_pminsd_u128_fallback;
    3771 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminsw_u128, iemAImpl_pminsw_u128_fallback;
    3772 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxub_u128;
    3773 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxud_u128, iemAImpl_pmaxud_u128_fallback;
    3774 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxuw_u128, iemAImpl_pmaxuw_u128_fallback;
    3775 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxsb_u128, iemAImpl_pmaxsb_u128_fallback;
    3776 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxsw_u128;
    3777 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxsd_u128, iemAImpl_pmaxsd_u128_fallback;
    3778 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pabsb_u128, iemAImpl_pabsb_u128_fallback;
    3779 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pabsw_u128, iemAImpl_pabsw_u128_fallback;
    3780 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pabsd_u128, iemAImpl_pabsd_u128_fallback;
    3781 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psignb_u128, iemAImpl_psignb_u128_fallback;
    3782 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psignw_u128, iemAImpl_psignw_u128_fallback;
    3783 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psignd_u128, iemAImpl_psignd_u128_fallback;
    3784 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phaddw_u128, iemAImpl_phaddw_u128_fallback;
    3785 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phaddd_u128, iemAImpl_phaddd_u128_fallback;
    3786 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phsubw_u128, iemAImpl_phsubw_u128_fallback;
    3787 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phsubd_u128, iemAImpl_phsubd_u128_fallback;
    3788 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phaddsw_u128, iemAImpl_phaddsw_u128_fallback;
    3789 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phsubsw_u128, iemAImpl_phsubsw_u128_fallback;
    3790 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaddubsw_u128, iemAImpl_pmaddubsw_u128_fallback;
    3791 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhrsw_u128, iemAImpl_pmulhrsw_u128_fallback;
    3792 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuludq_u128;
    3793 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaddwd_u128, iemAImpl_pmaddwd_u128_fallback;
    3794 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packsswb_u128, iemAImpl_packuswb_u128;
    3795 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packssdw_u128, iemAImpl_packusdw_u128;
    3796 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllw_u128, iemAImpl_psrlw_u128, iemAImpl_psraw_u128;
    3797 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pslld_u128, iemAImpl_psrld_u128, iemAImpl_psrad_u128;
    3798 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllq_u128, iemAImpl_psrlq_u128;
    3799 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhuw_u128;
    3800 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pavgb_u128, iemAImpl_pavgw_u128;
    3801 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psadbw_u128;
    3802 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuldq_u128, iemAImpl_pmuldq_u128_fallback;
    3803 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpcklps_u128, iemAImpl_unpcklpd_u128;
    3804 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpckhps_u128, iemAImpl_unpckhpd_u128;
    3805 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phminposuw_u128, iemAImpl_phminposuw_u128_fallback;
    3806 
    3807 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpshufb_u128,    iemAImpl_vpshufb_u128_fallback;
    3808 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpand_u128,      iemAImpl_vpand_u128_fallback;
    3809 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpandn_u128,     iemAImpl_vpandn_u128_fallback;
    3810 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpor_u128,       iemAImpl_vpor_u128_fallback;
    3811 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpxor_u128,      iemAImpl_vpxor_u128_fallback;
    3812 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqb_u128,   iemAImpl_vpcmpeqb_u128_fallback;
    3813 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqw_u128,   iemAImpl_vpcmpeqw_u128_fallback;
    3814 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqd_u128,   iemAImpl_vpcmpeqd_u128_fallback;
    3815 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqq_u128,   iemAImpl_vpcmpeqq_u128_fallback;
    3816 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtb_u128,   iemAImpl_vpcmpgtb_u128_fallback;
    3817 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtw_u128,   iemAImpl_vpcmpgtw_u128_fallback;
    3818 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtd_u128,   iemAImpl_vpcmpgtd_u128_fallback;
    3819 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtq_u128,   iemAImpl_vpcmpgtq_u128_fallback;
    3820 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddb_u128,     iemAImpl_vpaddb_u128_fallback;
    3821 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddw_u128,     iemAImpl_vpaddw_u128_fallback;
    3822 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddd_u128,     iemAImpl_vpaddd_u128_fallback;
    3823 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddq_u128,     iemAImpl_vpaddq_u128_fallback;
    3824 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubb_u128,     iemAImpl_vpsubb_u128_fallback;
    3825 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubw_u128,     iemAImpl_vpsubw_u128_fallback;
    3826 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubd_u128,     iemAImpl_vpsubd_u128_fallback;
    3827 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubq_u128,     iemAImpl_vpsubq_u128_fallback;
    3828 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminub_u128,    iemAImpl_vpminub_u128_fallback;
    3829 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminuw_u128,    iemAImpl_vpminuw_u128_fallback;
    3830 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminud_u128,    iemAImpl_vpminud_u128_fallback;
    3831 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminsb_u128,    iemAImpl_vpminsb_u128_fallback;
    3832 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminsw_u128,    iemAImpl_vpminsw_u128_fallback;
    3833 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminsd_u128,    iemAImpl_vpminsd_u128_fallback;
    3834 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxub_u128,    iemAImpl_vpmaxub_u128_fallback;
    3835 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxuw_u128,    iemAImpl_vpmaxuw_u128_fallback;
    3836 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxud_u128,    iemAImpl_vpmaxud_u128_fallback;
    3837 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxsb_u128,    iemAImpl_vpmaxsb_u128_fallback;
    3838 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxsw_u128,    iemAImpl_vpmaxsw_u128_fallback;
    3839 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxsd_u128,    iemAImpl_vpmaxsd_u128_fallback;
    3840 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpacksswb_u128,  iemAImpl_vpacksswb_u128_fallback;
    3841 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackssdw_u128,  iemAImpl_vpackssdw_u128_fallback;
    3842 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackuswb_u128,  iemAImpl_vpackuswb_u128_fallback;
    3843 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackusdw_u128,  iemAImpl_vpackusdw_u128_fallback;
    3844 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmullw_u128,    iemAImpl_vpmullw_u128_fallback;
    3845 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulld_u128,    iemAImpl_vpmulld_u128_fallback;
    3846 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhw_u128,    iemAImpl_vpmulhw_u128_fallback;
    3847 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhuw_u128,   iemAImpl_vpmulhuw_u128_fallback;
    3848 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgb_u128,     iemAImpl_vpavgb_u128_fallback;
    3849 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgw_u128,     iemAImpl_vpavgw_u128_fallback;
    3850 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignb_u128,    iemAImpl_vpsignb_u128_fallback;
    3851 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignw_u128,    iemAImpl_vpsignw_u128_fallback;
    3852 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignd_u128,    iemAImpl_vpsignd_u128_fallback;
    3853 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddw_u128,    iemAImpl_vphaddw_u128_fallback;
    3854 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddd_u128,    iemAImpl_vphaddd_u128_fallback;
    3855 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubw_u128,    iemAImpl_vphsubw_u128_fallback;
    3856 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubd_u128,    iemAImpl_vphsubd_u128_fallback;
    3857 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddsw_u128,   iemAImpl_vphaddsw_u128_fallback;
    3858 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubsw_u128,   iemAImpl_vphsubsw_u128_fallback;
    3859 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddubsw_u128, iemAImpl_vpmaddubsw_u128_fallback;
    3860 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhrsw_u128,  iemAImpl_vpmulhrsw_u128_fallback;
    3861 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsadbw_u128,    iemAImpl_vpsadbw_u128_fallback;
    3862 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuldq_u128,    iemAImpl_vpmuldq_u128_fallback;
    3863 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuludq_u128,   iemAImpl_vpmuludq_u128_fallback;
    3864 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsb_u128,    iemAImpl_vpsubsb_u128_fallback;
    3865 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsw_u128,    iemAImpl_vpsubsw_u128_fallback;
    3866 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusb_u128,   iemAImpl_vpsubusb_u128_fallback;
    3867 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusw_u128,   iemAImpl_vpsubusw_u128_fallback;
    3868 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusb_u128,   iemAImpl_vpaddusb_u128_fallback;
    3869 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusw_u128,   iemAImpl_vpaddusw_u128_fallback;
    3870 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsb_u128,    iemAImpl_vpaddsb_u128_fallback;
    3871 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsw_u128,    iemAImpl_vpaddsw_u128_fallback;
    3872 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllw_u128,     iemAImpl_vpsllw_u128_fallback;
    3873 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpslld_u128,     iemAImpl_vpslld_u128_fallback;
    3874 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllq_u128,     iemAImpl_vpsllq_u128_fallback;
    3875 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsraw_u128,     iemAImpl_vpsraw_u128_fallback;
    3876 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrad_u128,     iemAImpl_vpsrad_u128_fallback;
    3877 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlw_u128,     iemAImpl_vpsrlw_u128_fallback;
    3878 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrld_u128,     iemAImpl_vpsrld_u128_fallback;
    3879 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlq_u128,     iemAImpl_vpsrlq_u128_fallback;
    3880 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddwd_u128, iemAImpl_vpmaddwd_u128_fallback;
    3881 
    3882 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsb_u128,     iemAImpl_vpabsb_u128_fallback;
    3883 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsw_u128,     iemAImpl_vpabsd_u128_fallback;
    3884 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsd_u128,     iemAImpl_vpabsw_u128_fallback;
    3885 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vphminposuw_u128, iemAImpl_vphminposuw_u128_fallback;
    3886 
    3887 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpshufb_u256,    iemAImpl_vpshufb_u256_fallback;
    3888 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpand_u256,      iemAImpl_vpand_u256_fallback;
    3889 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpandn_u256,     iemAImpl_vpandn_u256_fallback;
    3890 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpor_u256,       iemAImpl_vpor_u256_fallback;
    3891 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpxor_u256,      iemAImpl_vpxor_u256_fallback;
    3892 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqb_u256,   iemAImpl_vpcmpeqb_u256_fallback;
    3893 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqw_u256,   iemAImpl_vpcmpeqw_u256_fallback;
    3894 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqd_u256,   iemAImpl_vpcmpeqd_u256_fallback;
    3895 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqq_u256,   iemAImpl_vpcmpeqq_u256_fallback;
    3896 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtb_u256,   iemAImpl_vpcmpgtb_u256_fallback;
    3897 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtw_u256,   iemAImpl_vpcmpgtw_u256_fallback;
    3898 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtd_u256,   iemAImpl_vpcmpgtd_u256_fallback;
    3899 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtq_u256,   iemAImpl_vpcmpgtq_u256_fallback;
    3900 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddb_u256,     iemAImpl_vpaddb_u256_fallback;
    3901 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddw_u256,     iemAImpl_vpaddw_u256_fallback;
    3902 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddd_u256,     iemAImpl_vpaddd_u256_fallback;
    3903 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddq_u256,     iemAImpl_vpaddq_u256_fallback;
    3904 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubb_u256,     iemAImpl_vpsubb_u256_fallback;
    3905 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubw_u256,     iemAImpl_vpsubw_u256_fallback;
    3906 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubd_u256,     iemAImpl_vpsubd_u256_fallback;
    3907 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubq_u256,     iemAImpl_vpsubq_u256_fallback;
    3908 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminub_u256,    iemAImpl_vpminub_u256_fallback;
    3909 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminuw_u256,    iemAImpl_vpminuw_u256_fallback;
    3910 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminud_u256,    iemAImpl_vpminud_u256_fallback;
    3911 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminsb_u256,    iemAImpl_vpminsb_u256_fallback;
    3912 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminsw_u256,    iemAImpl_vpminsw_u256_fallback;
    3913 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminsd_u256,    iemAImpl_vpminsd_u256_fallback;
    3914 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxub_u256,    iemAImpl_vpmaxub_u256_fallback;
    3915 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxuw_u256,    iemAImpl_vpmaxuw_u256_fallback;
    3916 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxud_u256,    iemAImpl_vpmaxud_u256_fallback;
    3917 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxsb_u256,    iemAImpl_vpmaxsb_u256_fallback;
    3918 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxsw_u256,    iemAImpl_vpmaxsw_u256_fallback;
    3919 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxsd_u256,    iemAImpl_vpmaxsd_u256_fallback;
    3920 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpacksswb_u256,  iemAImpl_vpacksswb_u256_fallback;
    3921 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackssdw_u256,  iemAImpl_vpackssdw_u256_fallback;
    3922 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackuswb_u256,  iemAImpl_vpackuswb_u256_fallback;
    3923 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackusdw_u256,  iemAImpl_vpackusdw_u256_fallback;
    3924 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmullw_u256,    iemAImpl_vpmullw_u256_fallback;
    3925 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulld_u256,    iemAImpl_vpmulld_u256_fallback;
    3926 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhw_u256,    iemAImpl_vpmulhw_u256_fallback;
    3927 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhuw_u256,   iemAImpl_vpmulhuw_u256_fallback;
    3928 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgb_u256,     iemAImpl_vpavgb_u256_fallback;
    3929 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgw_u256,     iemAImpl_vpavgw_u256_fallback;
    3930 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignb_u256,    iemAImpl_vpsignb_u256_fallback;
    3931 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignw_u256,    iemAImpl_vpsignw_u256_fallback;
    3932 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignd_u256,    iemAImpl_vpsignd_u256_fallback;
    3933 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddw_u256,    iemAImpl_vphaddw_u256_fallback;
    3934 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddd_u256,    iemAImpl_vphaddd_u256_fallback;
    3935 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubw_u256,    iemAImpl_vphsubw_u256_fallback;
    3936 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubd_u256,    iemAImpl_vphsubd_u256_fallback;
    3937 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddsw_u256,   iemAImpl_vphaddsw_u256_fallback;
    3938 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubsw_u256,   iemAImpl_vphsubsw_u256_fallback;
    3939 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddubsw_u256, iemAImpl_vpmaddubsw_u256_fallback;
    3940 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhrsw_u256,  iemAImpl_vpmulhrsw_u256_fallback;
    3941 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsadbw_u256,    iemAImpl_vpsadbw_u256_fallback;
    3942 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuldq_u256,    iemAImpl_vpmuldq_u256_fallback;
    3943 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuludq_u256,   iemAImpl_vpmuludq_u256_fallback;
    3944 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsb_u256,    iemAImpl_vpsubsb_u256_fallback;
    3945 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsw_u256,    iemAImpl_vpsubsw_u256_fallback;
    3946 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusb_u256,   iemAImpl_vpsubusb_u256_fallback;
    3947 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusw_u256,   iemAImpl_vpsubusw_u256_fallback;
    3948 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusb_u256,   iemAImpl_vpaddusb_u256_fallback;
    3949 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusw_u256,   iemAImpl_vpaddusw_u256_fallback;
    3950 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsb_u256,    iemAImpl_vpaddsb_u256_fallback;
    3951 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsw_u256,    iemAImpl_vpaddsw_u256_fallback;
    3952 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllw_u256,     iemAImpl_vpsllw_u256_fallback;
    3953 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpslld_u256,     iemAImpl_vpslld_u256_fallback;
    3954 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllq_u256,     iemAImpl_vpsllq_u256_fallback;
    3955 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsraw_u256,     iemAImpl_vpsraw_u256_fallback;
    3956 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrad_u256,     iemAImpl_vpsrad_u256_fallback;
    3957 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlw_u256,     iemAImpl_vpsrlw_u256_fallback;
    3958 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrld_u256,     iemAImpl_vpsrld_u256_fallback;
    3959 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlq_u256,     iemAImpl_vpsrlq_u256_fallback;
    3960 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddwd_u256, iemAImpl_vpmaddwd_u256_fallback;
    3961 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpermps_u256,    iemAImpl_vpermps_u256_fallback;
    3962 FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpermd_u256,     iemAImpl_vpermd_u256_fallback;
    3963 
    3964 FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsb_u256,     iemAImpl_vpabsb_u256_fallback;
    3965 FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsw_u256,     iemAImpl_vpabsw_u256_fallback;
    3966 FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsd_u256,     iemAImpl_vpabsd_u256_fallback;
    3967 /** @} */
    3968 
    3969 /** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf1 -> full1.
    3970  * @{ */
    3971 FNIEMAIMPLMEDIAOPTF2U64   iemAImpl_punpcklbw_u64,  iemAImpl_punpcklwd_u64,  iemAImpl_punpckldq_u64;
    3972 FNIEMAIMPLMEDIAOPTF2U128  iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
    3973 FNIEMAIMPLMEDIAOPTF3U128  iemAImpl_vpunpcklbw_u128,  iemAImpl_vpunpcklbw_u128_fallback,
    3974                           iemAImpl_vpunpcklwd_u128,  iemAImpl_vpunpcklwd_u128_fallback,
    3975                           iemAImpl_vpunpckldq_u128,  iemAImpl_vpunpckldq_u128_fallback,
    3976                           iemAImpl_vpunpcklqdq_u128, iemAImpl_vpunpcklqdq_u128_fallback,
    3977                           iemAImpl_vunpcklps_u128, iemAImpl_vunpcklps_u128_fallback,
    3978                           iemAImpl_vunpcklpd_u128, iemAImpl_vunpcklpd_u128_fallback,
    3979                           iemAImpl_vunpckhps_u128, iemAImpl_vunpckhps_u128_fallback,
    3980                           iemAImpl_vunpckhpd_u128, iemAImpl_vunpckhpd_u128_fallback;
    3981 
    3982 FNIEMAIMPLMEDIAOPTF3U256  iemAImpl_vpunpcklbw_u256,  iemAImpl_vpunpcklbw_u256_fallback,
    3983                           iemAImpl_vpunpcklwd_u256,  iemAImpl_vpunpcklwd_u256_fallback,
    3984                           iemAImpl_vpunpckldq_u256,  iemAImpl_vpunpckldq_u256_fallback,
    3985                           iemAImpl_vpunpcklqdq_u256, iemAImpl_vpunpcklqdq_u256_fallback,
    3986                           iemAImpl_vunpcklps_u256, iemAImpl_vunpcklps_u256_fallback,
    3987                           iemAImpl_vunpcklpd_u256, iemAImpl_vunpcklpd_u256_fallback,
    3988                           iemAImpl_vunpckhps_u256, iemAImpl_vunpckhps_u256_fallback,
    3989                           iemAImpl_vunpckhpd_u256, iemAImpl_vunpckhpd_u256_fallback;
    3990 /** @} */
    3991 
    3992 /** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
    3993  * @{ */
    3994 FNIEMAIMPLMEDIAOPTF2U64   iemAImpl_punpckhbw_u64,  iemAImpl_punpckhwd_u64,  iemAImpl_punpckhdq_u64;
    3995 FNIEMAIMPLMEDIAOPTF2U128  iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
    3996 FNIEMAIMPLMEDIAOPTF3U128  iemAImpl_vpunpckhbw_u128,  iemAImpl_vpunpckhbw_u128_fallback,
    3997                           iemAImpl_vpunpckhwd_u128,  iemAImpl_vpunpckhwd_u128_fallback,
    3998                           iemAImpl_vpunpckhdq_u128,  iemAImpl_vpunpckhdq_u128_fallback,
    3999                           iemAImpl_vpunpckhqdq_u128, iemAImpl_vpunpckhqdq_u128_fallback;
    4000 FNIEMAIMPLMEDIAOPTF3U256  iemAImpl_vpunpckhbw_u256,  iemAImpl_vpunpckhbw_u256_fallback,
    4001                           iemAImpl_vpunpckhwd_u256,  iemAImpl_vpunpckhwd_u256_fallback,
    4002                           iemAImpl_vpunpckhdq_u256,  iemAImpl_vpunpckhdq_u256_fallback,
    4003                           iemAImpl_vpunpckhqdq_u256, iemAImpl_vpunpckhqdq_u256_fallback;
    4004 /** @} */
    4005 
    4006 /** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil)
    4007  * @{ */
    4008 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
    4009 typedef FNIEMAIMPLMEDIAPSHUFU128 *PFNIEMAIMPLMEDIAPSHUFU128;
    4010 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
    4011 typedef FNIEMAIMPLMEDIAPSHUFU256 *PFNIEMAIMPLMEDIAPSHUFU256;
    4012 IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw_u64,(uint64_t *puDst, uint64_t const *puSrc, uint8_t bEvil));
    4013 FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_pshufhw_u128, iemAImpl_pshuflw_u128, iemAImpl_pshufd_u128;
    4014 #ifndef IEM_WITHOUT_ASSEMBLY
    4015 FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256, iemAImpl_vpshuflw_u256, iemAImpl_vpshufd_u256;
    4016 #endif
    4017 FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256_fallback, iemAImpl_vpshuflw_u256_fallback, iemAImpl_vpshufd_u256_fallback;
    4018 /** @} */
    4019 
    4020 /** @name Media (SSE/MMX/AVX) operation: Shift Immediate Stuff (evil)
    4021  * @{ */
    4022 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU64,(uint64_t *puDst, uint8_t bShift));
    4023 typedef FNIEMAIMPLMEDIAPSHIFTU64 *PFNIEMAIMPLMEDIAPSHIFTU64;
    4024 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU128,(PRTUINT128U puDst, uint8_t bShift));
    4025 typedef FNIEMAIMPLMEDIAPSHIFTU128 *PFNIEMAIMPLMEDIAPSHIFTU128;
    4026 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU256,(PRTUINT256U puDst, uint8_t bShift));
    4027 typedef FNIEMAIMPLMEDIAPSHIFTU256 *PFNIEMAIMPLMEDIAPSHIFTU256;
    4028 FNIEMAIMPLMEDIAPSHIFTU64  iemAImpl_psllw_imm_u64,  iemAImpl_pslld_imm_u64,  iemAImpl_psllq_imm_u64;
    4029 FNIEMAIMPLMEDIAPSHIFTU64  iemAImpl_psrlw_imm_u64,  iemAImpl_psrld_imm_u64,  iemAImpl_psrlq_imm_u64;
    4030 FNIEMAIMPLMEDIAPSHIFTU64  iemAImpl_psraw_imm_u64,  iemAImpl_psrad_imm_u64;
    4031 FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psllw_imm_u128, iemAImpl_pslld_imm_u128, iemAImpl_psllq_imm_u128;
    4032 FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psrlw_imm_u128, iemAImpl_psrld_imm_u128, iemAImpl_psrlq_imm_u128;
    4033 FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psraw_imm_u128, iemAImpl_psrad_imm_u128;
    4034 FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_pslldq_imm_u128, iemAImpl_psrldq_imm_u128;
    4035 /** @} */
    4036 
    4037 /** @name Media (SSE/MMX/AVX) operation: Move Byte Mask
    4038  * @{ */
    4039 IEM_DECL_IMPL_DEF(void, iemAImpl_maskmovq_u64,(uint64_t *puMem, uint64_t const *puSrc, uint64_t const *puMsk));
    4040 IEM_DECL_IMPL_DEF(void, iemAImpl_maskmovdqu_u128,(PRTUINT128U puMem, PCRTUINT128U puSrc, PCRTUINT128U puMsk));
    4041 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u64,(uint64_t *pu64Dst, uint64_t const *puSrc));
    4042 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128,(uint64_t *pu64Dst, PCRTUINT128U puSrc));
    4043 #ifndef IEM_WITHOUT_ASSEMBLY
    4044 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
    4045 #endif
    4046 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256_fallback,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
    4047 /** @} */
    4048 
    4049 /** @name Media (SSE/MMX/AVX) operations: Variable Blend Packed Bytes/R32/R64.
    4050  * @{ */
    4051 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puMask));
    4052 typedef FNIEMAIMPLBLENDU128  *PFNIEMAIMPLBLENDU128;
    4053 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, PCRTUINT128U puMask));
    4054 typedef FNIEMAIMPLAVXBLENDU128  *PFNIEMAIMPLAVXBLENDU128;
    4055 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, PCRTUINT256U puMask));
    4056 typedef FNIEMAIMPLAVXBLENDU256  *PFNIEMAIMPLAVXBLENDU256;
    4057 
    4058 FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128;
    4059 FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128_fallback;
    4060 FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128;
    4061 FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128_fallback;
    4062 FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256;
    4063 FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256_fallback;
    4064 
    4065 FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128;
    4066 FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128_fallback;
    4067 FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128;
    4068 FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128_fallback;
    4069 FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256;
    4070 FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256_fallback;
    4071 
    4072 FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128;
    4073 FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128_fallback;
    4074 FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128;
    4075 FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128_fallback;
    4076 FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256;
    4077 FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256_fallback;
    4078 /** @} */
    4079 
    4080 
    4081 /** @name Media (SSE/MMX/AVX) operation: Sort this later
    4082  * @{ */
    4083 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4084 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4085 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
    4086 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4087 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4088 
    4089 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
    4090 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
    4091 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
    4092 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4093 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4094 
    4095 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
    4096 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
    4097 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
    4098 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4099 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4100 
    4101 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4102 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4103 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
    4104 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4105 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4106 
    4107 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
    4108 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
    4109 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
    4110 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4111 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4112 
    4113 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4114 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4115 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
    4116 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4117 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4118 
    4119 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4120 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4121 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
    4122 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4123 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4124 
    4125 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
    4126 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
    4127 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
    4128 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4129 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4130 
    4131 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
    4132 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
    4133 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
    4134 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4135 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4136 
    4137 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4138 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4139 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
    4140 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4141 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4142 
    4143 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
    4144 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
    4145 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
    4146 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4147 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4148 
    4149 IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4150 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
    4151 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
    4152 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4153 IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
    4154 
    4155 IEM_DECL_IMPL_DEF(void, iemAImpl_shufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
    4156 IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
    4157 IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
    4158 IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
    4159 IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
    4160 
    4161 IEM_DECL_IMPL_DEF(void, iemAImpl_shufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
    4162 IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
    4163 IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
    4164 IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
    4165 IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
    4166 
    4167 IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
    4168 IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64_fallback,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
    4169 
    4170 IEM_DECL_IMPL_DEF(void, iemAImpl_movmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
    4171 IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
    4172 IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
    4173 IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
    4174 IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
    4175 
    4176 IEM_DECL_IMPL_DEF(void, iemAImpl_movmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
    4177 IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
    4178 IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
    4179 IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
    4180 IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
    4181 
    4182 
    4183 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
    4184 typedef FNIEMAIMPLMEDIAOPTF2U128IMM8 *PFNIEMAIMPLMEDIAOPTF2U128IMM8;
    4185 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
    4186 typedef FNIEMAIMPLMEDIAOPTF2U256IMM8 *PFNIEMAIMPLMEDIAOPTF2U256IMM8;
    4187 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
    4188 typedef FNIEMAIMPLMEDIAOPTF3U128IMM8 *PFNIEMAIMPLMEDIAOPTF3U128IMM8;
    4189 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
    4190 typedef FNIEMAIMPLMEDIAOPTF3U256IMM8 *PFNIEMAIMPLMEDIAOPTF3U256IMM8;
    4191 
    4192 FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_palignr_u128, iemAImpl_palignr_u128_fallback;
    4193 FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pblendw_u128, iemAImpl_pblendw_u128_fallback;
    4194 FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendps_u128, iemAImpl_blendps_u128_fallback;
    4195 FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendpd_u128, iemAImpl_blendpd_u128_fallback;
    4196 
    4197 FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpalignr_u128, iemAImpl_vpalignr_u128_fallback;
    4198 FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendw_u128, iemAImpl_vpblendw_u128_fallback;
    4199 FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendd_u128, iemAImpl_vpblendd_u128_fallback;
    4200 FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendps_u128, iemAImpl_vblendps_u128_fallback;
    4201 FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendpd_u128, iemAImpl_vblendpd_u128_fallback;
    4202 
    4203 FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpalignr_u256, iemAImpl_vpalignr_u256_fallback;
    4204 FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendw_u256, iemAImpl_vpblendw_u256_fallback;
    4205 FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendd_u256, iemAImpl_vpblendd_u256_fallback;
    4206 FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendps_u256, iemAImpl_vblendps_u256_fallback;
    4207 FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendpd_u256, iemAImpl_vblendpd_u256_fallback;
    4208 FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2i128_u256, iemAImpl_vperm2i128_u256_fallback;
    4209 FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2f128_u256, iemAImpl_vperm2f128_u256_fallback;
    4210 
    4211 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesimc_u128,     iemAImpl_aesimc_u128_fallback;
    4212 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenc_u128,     iemAImpl_aesenc_u128_fallback;
    4213 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenclast_u128, iemAImpl_aesenclast_u128_fallback;
    4214 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdec_u128,     iemAImpl_aesdec_u128_fallback;
    4215 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdeclast_u128, iemAImpl_aesdeclast_u128_fallback;
    4216 
    4217 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesimc_u128,     iemAImpl_vaesimc_u128_fallback;
    4218 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vaesenc_u128,     iemAImpl_vaesenc_u128_fallback;
    4219 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vaesenclast_u128, iemAImpl_vaesenclast_u128_fallback;
    4220 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vaesdec_u128,     iemAImpl_vaesdec_u128_fallback;
    4221 FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vaesdeclast_u128, iemAImpl_vaesdeclast_u128_fallback;
    4222 
    4223 FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_aeskeygenassist_u128, iemAImpl_aeskeygenassist_u128_fallback;
    4224 
    4225 FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_vaeskeygenassist_u128, iemAImpl_vaeskeygenassist_u128_fallback;
    4226 
    4227 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1nexte_u128,       iemAImpl_sha1nexte_u128_fallback;
    4228 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg1_u128,        iemAImpl_sha1msg1_u128_fallback;
    4229 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg2_u128,        iemAImpl_sha1msg2_u128_fallback;
    4230 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg1_u128,      iemAImpl_sha256msg1_u128_fallback;
    4231 FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg2_u128,      iemAImpl_sha256msg2_u128_fallback;
    4232 FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_sha1rnds4_u128,   iemAImpl_sha1rnds4_u128_fallback;
    4233 IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
    4234 IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
    4235 
    4236 FNIEMAIMPLMEDIAOPTF2U256IMM8 iemAImpl_vpermq_u256,      iemAImpl_vpermq_u256_fallback;
    4237 FNIEMAIMPLMEDIAOPTF2U256IMM8 iemAImpl_vpermpd_u256,     iemAImpl_vpermpd_u256_fallback;
    4238 
/**
 * Source operand pack for the pcmpistri/pcmpistrm (implicit-length string
 * compare) worker functions (see PCIEMPCMPISTRXSRC use in
 * FNIEMAIMPLPCMPISTRMU128IMM8 below).
 */
    4239 typedef struct IEMPCMPISTRXSRC
    4240 {
    4241     RTUINT128U              uSrc1;   /**< First 128-bit string operand. */
    4242     RTUINT128U              uSrc2;   /**< Second 128-bit string operand. */
    4243 } IEMPCMPISTRXSRC;
/** Pointer to an implicit-length string compare source operand pack. */
    4244 typedef IEMPCMPISTRXSRC *PIEMPCMPISTRXSRC;
/** Pointer to a const implicit-length string compare source operand pack. */
    4245 typedef const IEMPCMPISTRXSRC *PCIEMPCMPISTRXSRC;
    4246 
/**
 * Source operand pack for the pcmpestri/pcmpestrm (explicit-length string
 * compare) worker functions.  Unlike IEMPCMPISTRXSRC this also carries the
 * RAX/RDX register values, which hold the explicit operand lengths for these
 * instructions.
 */
    4247 typedef struct IEMPCMPESTRXSRC
    4248 {
    4249     RTUINT128U              uSrc1;   /**< First 128-bit string operand. */
    4250     RTUINT128U              uSrc2;   /**< Second 128-bit string operand. */
    4251     uint64_t                u64Rax;  /**< Guest RAX value (length of uSrc1). */
    4252     uint64_t                u64Rdx;  /**< Guest RDX value (length of uSrc2). */
    4253 } IEMPCMPESTRXSRC;
/** Pointer to an explicit-length string compare source operand pack. */
    4254 typedef IEMPCMPESTRXSRC *PIEMPCMPESTRXSRC;
/** Pointer to a const explicit-length string compare source operand pack. */
    4255 typedef const IEMPCMPESTRXSRC *PCIEMPCMPESTRXSRC;
    4256 
/**
 * Worker type for pcmpistri: implicit-length compare, index result.
 * Returns a 32-bit result (the index destined for ECX, judging by the
 * pu32Ecx output of the explicit-length variant below -- confirm against the
 * worker implementations); bEvil is the imm8 control byte (IMM8 in the type
 * name).  Note that this form takes the two sources directly rather than via
 * a IEMPCMPISTRXSRC pack.
 */
    4257 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLPCMPISTRIU128IMM8,(uint32_t *pEFlags, PCRTUINT128U pSrc1, PCRTUINT128U pSrc2, uint8_t bEvil));
/** Pointer to a pcmpistri worker. */
    4258 typedef FNIEMAIMPLPCMPISTRIU128IMM8 *PFNIEMAIMPLPCMPISTRIU128IMM8;
/**
 * Worker type for pcmpestri: explicit-length compare, index result written
 * to *pu32Ecx; lengths travel in the IEMPCMPESTRXSRC pack.
 */
    4259 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
/** Pointer to a pcmpestri worker. */
    4260 typedef FNIEMAIMPLPCMPESTRIU128IMM8 *PFNIEMAIMPLPCMPESTRIU128IMM8;
    4261 
/**
 * Worker type for pcmpistrm: implicit-length compare, 128-bit mask result
 * written to *puDst.
 */
    4262 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
/** Pointer to a pcmpistrm worker. */
    4263 typedef FNIEMAIMPLPCMPISTRMU128IMM8 *PFNIEMAIMPLPCMPISTRMU128IMM8;
/**
 * Worker type for pcmpestrm: explicit-length compare, 128-bit mask result
 * written to *puDst.
 */
    4264 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
/** Pointer to a pcmpestrm worker. */
    4265 typedef FNIEMAIMPLPCMPESTRMU128IMM8 *PFNIEMAIMPLPCMPESTRMU128IMM8;
    4266 
/* SSE4.2 and AVX string compare workers; each has a native and a C fallback variant. */
    4267 FNIEMAIMPLPCMPISTRIU128IMM8 iemAImpl_pcmpistri_u128,  iemAImpl_pcmpistri_u128_fallback;
    4268 FNIEMAIMPLPCMPESTRIU128IMM8 iemAImpl_pcmpestri_u128,  iemAImpl_pcmpestri_u128_fallback;
    4269 FNIEMAIMPLPCMPISTRMU128IMM8 iemAImpl_pcmpistrm_u128,  iemAImpl_pcmpistrm_u128_fallback;
    4270 FNIEMAIMPLPCMPESTRMU128IMM8 iemAImpl_pcmpestrm_u128,  iemAImpl_pcmpestrm_u128_fallback;
    4271 FNIEMAIMPLPCMPISTRIU128IMM8 iemAImpl_vpcmpistri_u128, iemAImpl_vpcmpistri_u128_fallback;
    4272 FNIEMAIMPLPCMPESTRIU128IMM8 iemAImpl_vpcmpestri_u128, iemAImpl_vpcmpestri_u128_fallback;
    4273 FNIEMAIMPLPCMPISTRMU128IMM8 iemAImpl_vpcmpistrm_u128, iemAImpl_vpcmpistrm_u128_fallback;
    4274 FNIEMAIMPLPCMPESTRMU128IMM8 iemAImpl_vpcmpestrm_u128, iemAImpl_vpcmpestrm_u128_fallback;
    4275 
    4276 
    4277 FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pclmulqdq_u128, iemAImpl_pclmulqdq_u128_fallback;
    4278 FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpclmulqdq_u128, iemAImpl_vpclmulqdq_u128_fallback;
    4279 
    4280 FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_mpsadbw_u128, iemAImpl_mpsadbw_u128_fallback;
    4281 FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vmpsadbw_u128, iemAImpl_vmpsadbw_u128_fallback;
    4282 FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vmpsadbw_u256, iemAImpl_vmpsadbw_u256_fallback;
    4283 
    4284 FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsllw_imm_u128, iemAImpl_vpsllw_imm_u128_fallback;
    4285 FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsllw_imm_u256, iemAImpl_vpsllw_imm_u256_fallback;
    4286 FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpslld_imm_u128, iemAImpl_vpslld_imm_u128_fallback;
    4287 FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpslld_imm_u256, iemAImpl_vpslld_imm_u256_fallback;
    4288 FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsllq_imm_u128, iemAImpl_vpsllq_imm_u128_fallback;
    4289 FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsllq_imm_u256, iemAImpl_vpsllq_imm_u256_fallback;
    4290 IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
    4291 IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
    4292 IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));
    4293 IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));
    4294 
    4295 FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsraw_imm_u128, iemAImpl_vpsraw_imm_u128_fallback;
    4296 FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsraw_imm_u256, iemAImpl_vpsraw_imm_u256_fallback;
    4297 FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrad_imm_u128, iemAImpl_vpsrad_imm_u128_fallback;
    4298 FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrad_imm_u256, iemAImpl_vpsrad_imm_u256_fallback;
    4299 
    4300 FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrlw_imm_u128, iemAImpl_vpsrlw_imm_u128_fallback;
    4301 FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrlw_imm_u256, iemAImpl_vpsrlw_imm_u256_fallback;
    4302 FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrld_imm_u128, iemAImpl_vpsrld_imm_u128_fallback;
    4303 FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrld_imm_u256, iemAImpl_vpsrld_imm_u256_fallback;
    4304 FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrlq_imm_u128, iemAImpl_vpsrlq_imm_u128_fallback;
    4305 FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrlq_imm_u256, iemAImpl_vpsrlq_imm_u256_fallback;
    4306 IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
    4307 IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
    4308 IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));
    4309 IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));
    4310 
    4311 FNIEMAIMPLMEDIAOPTF3U128     iemAImpl_vpermilps_u128,     iemAImpl_vpermilps_u128_fallback;
    4312 FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_vpermilps_imm_u128, iemAImpl_vpermilps_imm_u128_fallback;
    4313 FNIEMAIMPLMEDIAOPTF3U256     iemAImpl_vpermilps_u256,     iemAImpl_vpermilps_u256_fallback;
    4314 FNIEMAIMPLMEDIAOPTF2U256IMM8 iemAImpl_vpermilps_imm_u256, iemAImpl_vpermilps_imm_u256_fallback;
    4315 
    4316 FNIEMAIMPLMEDIAOPTF3U128     iemAImpl_vpermilpd_u128,     iemAImpl_vpermilpd_u128_fallback;
    4317 FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_vpermilpd_imm_u128, iemAImpl_vpermilpd_imm_u128_fallback;
    4318 FNIEMAIMPLMEDIAOPTF3U256     iemAImpl_vpermilpd_u256,     iemAImpl_vpermilpd_u256_fallback;
    4319 FNIEMAIMPLMEDIAOPTF2U256IMM8 iemAImpl_vpermilpd_imm_u256, iemAImpl_vpermilpd_imm_u256_fallback;
    4320 
    4321 FNIEMAIMPLMEDIAOPTF3U128     iemAImpl_vpsllvd_u128, iemAImpl_vpsllvd_u128_fallback;
    4322 FNIEMAIMPLMEDIAOPTF3U256     iemAImpl_vpsllvd_u256, iemAImpl_vpsllvd_u256_fallback;
    4323 FNIEMAIMPLMEDIAOPTF3U128     iemAImpl_vpsllvq_u128, iemAImpl_vpsllvq_u128_fallback;
    4324 FNIEMAIMPLMEDIAOPTF3U256     iemAImpl_vpsllvq_u256, iemAImpl_vpsllvq_u256_fallback;
    4325 FNIEMAIMPLMEDIAOPTF3U128     iemAImpl_vpsravd_u128, iemAImpl_vpsravd_u128_fallback;
    4326 FNIEMAIMPLMEDIAOPTF3U256     iemAImpl_vpsravd_u256, iemAImpl_vpsravd_u256_fallback;
    4327 FNIEMAIMPLMEDIAOPTF3U128     iemAImpl_vpsrlvd_u128, iemAImpl_vpsrlvd_u128_fallback;
    4328 FNIEMAIMPLMEDIAOPTF3U256     iemAImpl_vpsrlvd_u256, iemAImpl_vpsrlvd_u256_fallback;
    4329 FNIEMAIMPLMEDIAOPTF3U128     iemAImpl_vpsrlvq_u128, iemAImpl_vpsrlvq_u128_fallback;
    4330 FNIEMAIMPLMEDIAOPTF3U256     iemAImpl_vpsrlvq_u256, iemAImpl_vpsrlvq_u256_fallback;
    4331 /** @} */
    4332 
    4333 /** @name Media Odds and Ends
    4334  * @{ */
    4335 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U8,(uint32_t *puDst, uint8_t uSrc));
    4336 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U16,(uint32_t *puDst, uint16_t uSrc));
    4337 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U32,(uint32_t *puDst, uint32_t uSrc));
    4338 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U64,(uint32_t *puDst, uint64_t uSrc));
    4339 FNIEMAIMPLCR32U8  iemAImpl_crc32_u8,  iemAImpl_crc32_u8_fallback;
    4340 FNIEMAIMPLCR32U16 iemAImpl_crc32_u16, iemAImpl_crc32_u16_fallback;
    4341 FNIEMAIMPLCR32U32 iemAImpl_crc32_u32, iemAImpl_crc32_u32_fallback;
    4342 FNIEMAIMPLCR32U64 iemAImpl_crc32_u64, iemAImpl_crc32_u64_fallback;
    4343 
    4344 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL128,(PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint32_t *pEFlags));
    4345 typedef FNIEMAIMPLF2EFL128 *PFNIEMAIMPLF2EFL128;
    4346 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL256,(PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint32_t *pEFlags));
    4347 typedef FNIEMAIMPLF2EFL256 *PFNIEMAIMPLF2EFL256;
    4348 FNIEMAIMPLF2EFL128 iemAImpl_ptest_u128;
    4349 FNIEMAIMPLF2EFL256 iemAImpl_vptest_u256, iemAImpl_vptest_u256_fallback;
    4350 FNIEMAIMPLF2EFL128 iemAImpl_vtestps_u128, iemAImpl_vtestps_u128_fallback;
    4351 FNIEMAIMPLF2EFL256 iemAImpl_vtestps_u256, iemAImpl_vtestps_u256_fallback;
    4352 FNIEMAIMPLF2EFL128 iemAImpl_vtestpd_u128, iemAImpl_vtestpd_u128_fallback;
    4353 FNIEMAIMPLF2EFL256 iemAImpl_vtestpd_u256, iemAImpl_vtestpd_u256_fallback;
    4354 
    4355 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I32U64,(uint32_t uMxCsrIn, int32_t *pi32Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point. */
    4356 typedef FNIEMAIMPLSSEF2I32U64 *PFNIEMAIMPLSSEF2I32U64;
    4357 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I64U64,(uint32_t uMxCsrIn, int64_t *pi64Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point. */
    4358 typedef FNIEMAIMPLSSEF2I64U64 *PFNIEMAIMPLSSEF2I64U64;
    4359 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I32U32,(uint32_t uMxCsrIn, int32_t *pi32Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point. */
    4360 typedef FNIEMAIMPLSSEF2I32U32 *PFNIEMAIMPLSSEF2I32U32;
    4361 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I64U32,(uint32_t uMxCsrIn, int64_t *pi64Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point. */
    4362 typedef FNIEMAIMPLSSEF2I64U32 *PFNIEMAIMPLSSEF2I64U32;
    4363 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I32R32,(uint32_t uMxCsrIn, int32_t *pi32Dst, PCRTFLOAT32U pr32Src));
    4364 typedef FNIEMAIMPLSSEF2I32R32 *PFNIEMAIMPLSSEF2I32R32;
    4365 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I64R32,(uint32_t uMxCsrIn, int64_t *pi64Dst, PCRTFLOAT32U pr32Src));
    4366 typedef FNIEMAIMPLSSEF2I64R32 *PFNIEMAIMPLSSEF2I64R32;
    4367 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I32R64,(uint32_t uMxCsrIn, int32_t *pi32Dst, PCRTFLOAT64U pr64Src));
    4368 typedef FNIEMAIMPLSSEF2I32R64 *PFNIEMAIMPLSSEF2I32R64;
    4369 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I64R64,(uint32_t uMxCsrIn, int64_t *pi64Dst, PCRTFLOAT64U pr64Src));
    4370 typedef FNIEMAIMPLSSEF2I64R64 *PFNIEMAIMPLSSEF2I64R64;
    4371 
    4372 FNIEMAIMPLSSEF2I32U64 iemAImpl_cvttsd2si_i32_r64;
    4373 FNIEMAIMPLSSEF2I32U64 iemAImpl_cvtsd2si_i32_r64;
    4374 
    4375 FNIEMAIMPLSSEF2I64U64 iemAImpl_cvttsd2si_i64_r64;
    4376 FNIEMAIMPLSSEF2I64U64 iemAImpl_cvtsd2si_i64_r64;
    4377 
    4378 FNIEMAIMPLSSEF2I32U32 iemAImpl_cvttss2si_i32_r32;
    4379 FNIEMAIMPLSSEF2I32U32 iemAImpl_cvtss2si_i32_r32;
    4380 
    4381 FNIEMAIMPLSSEF2I64U32 iemAImpl_cvttss2si_i64_r32;
    4382 FNIEMAIMPLSSEF2I64U32 iemAImpl_cvtss2si_i64_r32;
    4383 
    4384 FNIEMAIMPLSSEF2I32R32 iemAImpl_vcvttss2si_i32_r32, iemAImpl_vcvttss2si_i32_r32_fallback;
    4385 FNIEMAIMPLSSEF2I64R32 iemAImpl_vcvttss2si_i64_r32, iemAImpl_vcvttss2si_i64_r32_fallback;
    4386 FNIEMAIMPLSSEF2I32R32 iemAImpl_vcvtss2si_i32_r32,  iemAImpl_vcvtss2si_i32_r32_fallback;
    4387 FNIEMAIMPLSSEF2I64R32 iemAImpl_vcvtss2si_i64_r32,  iemAImpl_vcvtss2si_i64_r32_fallback;
    4388 
    4389 FNIEMAIMPLSSEF2I32R64 iemAImpl_vcvttss2si_i32_r64, iemAImpl_vcvttss2si_i32_r64_fallback;
    4390 FNIEMAIMPLSSEF2I64R64 iemAImpl_vcvttss2si_i64_r64, iemAImpl_vcvttss2si_i64_r64_fallback;
    4391 FNIEMAIMPLSSEF2I32R64 iemAImpl_vcvtss2si_i32_r64,  iemAImpl_vcvtss2si_i32_r64_fallback;
    4392 FNIEMAIMPLSSEF2I64R64 iemAImpl_vcvtss2si_i64_r64,  iemAImpl_vcvtss2si_i64_r64_fallback;
    4393 
    4394 FNIEMAIMPLSSEF2I32R32 iemAImpl_vcvttsd2si_i32_r32, iemAImpl_vcvttsd2si_i32_r32_fallback;
    4395 FNIEMAIMPLSSEF2I64R32 iemAImpl_vcvttsd2si_i64_r32, iemAImpl_vcvttsd2si_i64_r32_fallback;
    4396 FNIEMAIMPLSSEF2I32R32 iemAImpl_vcvtsd2si_i32_r32,  iemAImpl_vcvtsd2si_i32_r32_fallback;
    4397 FNIEMAIMPLSSEF2I64R32 iemAImpl_vcvtsd2si_i64_r32,  iemAImpl_vcvtsd2si_i64_r32_fallback;
    4398 
    4399 FNIEMAIMPLSSEF2I32R64 iemAImpl_vcvttsd2si_i32_r64, iemAImpl_vcvttsd2si_i32_r64_fallback;
    4400 FNIEMAIMPLSSEF2I64R64 iemAImpl_vcvttsd2si_i64_r64, iemAImpl_vcvttsd2si_i64_r64_fallback;
    4401 FNIEMAIMPLSSEF2I32R64 iemAImpl_vcvtsd2si_i32_r64,  iemAImpl_vcvtsd2si_i32_r64_fallback;
    4402 FNIEMAIMPLSSEF2I64R64 iemAImpl_vcvtsd2si_i64_r64,  iemAImpl_vcvtsd2si_i64_r64_fallback;
    4403 
    4404 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R32I32,(uint32_t uMxCsrIn, PRTFLOAT32U pr32Dst, const int32_t *pi32Src));
    4405 typedef FNIEMAIMPLSSEF2R32I32 *PFNIEMAIMPLSSEF2R32I32;
    4406 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R32I64,(uint32_t uMxCsrIn, PRTFLOAT32U pr32Dst, const int64_t *pi64Src));
    4407 typedef FNIEMAIMPLSSEF2R32I64 *PFNIEMAIMPLSSEF2R32I64;
    4408 
    4409 FNIEMAIMPLSSEF2R32I32 iemAImpl_cvtsi2ss_r32_i32;
    4410 FNIEMAIMPLSSEF2R32I64 iemAImpl_cvtsi2ss_r32_i64;
    4411 
    4412 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLAVXF3XMMI32,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86XMMREG puSrc, const int32_t *pi32Src));
    4413 typedef FNIEMAIMPLAVXF3XMMI32 *PFNIEMAIMPLAVXF3XMMI32;
    4414 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLAVXF3XMMI64,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86XMMREG puSrc, const int64_t *pi64Src));
    4415 typedef FNIEMAIMPLAVXF3XMMI64 *PFNIEMAIMPLAVXF3XMMI64;
    4416 
    4417 FNIEMAIMPLAVXF3XMMI32 iemAImpl_vcvtsi2ss_u128_i32, iemAImpl_vcvtsi2ss_u128_i32_fallback;
    4418 FNIEMAIMPLAVXF3XMMI64 iemAImpl_vcvtsi2ss_u128_i64, iemAImpl_vcvtsi2ss_u128_i64_fallback;
    4419 
    4420 
    4421 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R64I32,(uint32_t uMxCsrIn, PRTFLOAT64U pr64Dst, const int32_t *pi32Src));
    4422 typedef FNIEMAIMPLSSEF2R64I32 *PFNIEMAIMPLSSEF2R64I32;
    4423 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R64I64,(uint32_t uMxCsrIn, PRTFLOAT64U pr64Dst, const int64_t *pi64Src));
    4424 typedef FNIEMAIMPLSSEF2R64I64 *PFNIEMAIMPLSSEF2R64I64;
    4425 
    4426 FNIEMAIMPLSSEF2R64I32 iemAImpl_cvtsi2sd_r64_i32;
    4427 FNIEMAIMPLSSEF2R64I64 iemAImpl_cvtsi2sd_r64_i64;
    4428 
    4429 FNIEMAIMPLAVXF3XMMI32 iemAImpl_vcvtsi2sd_u128_i32, iemAImpl_vcvtsi2sd_u128_i32_fallback;
    4430 FNIEMAIMPLAVXF3XMMI64 iemAImpl_vcvtsi2sd_u128_i64, iemAImpl_vcvtsi2sd_u128_i64_fallback;
    4431 
    4432 IEM_DECL_IMPL_DEF(uint32_t, iemAImpl_vcvtps2pd_u128_u64,(uint32_t uMxCsrIn, PX86XMMREG puDst, const uint64_t *pu64Src)); /* Actually two single precision floating point values. */
    4433 IEM_DECL_IMPL_DEF(uint32_t, iemAImpl_vcvtps2pd_u128_u64_fallback,(uint32_t uMxCsrIn, PX86XMMREG puDst,  const uint64_t *pu64Src)); /* Actually two single precision floating point values. */
    4434 IEM_DECL_IMPL_DEF(uint32_t, iemAImpl_vcvtps2pd_u256_u128,(uint32_t uMxCsrIn, PX86YMMREG puDst, PCX86XMMREG puSrc));
    4435 IEM_DECL_IMPL_DEF(uint32_t, iemAImpl_vcvtps2pd_u256_u128_fallback,(uint32_t uMxCsrIn, PX86YMMREG puDst, PCX86XMMREG puSrc));
    4436 
    4437 
    4438 IEM_DECL_IMPL_DEF(uint32_t, iemAImpl_vcvtdq2pd_u128_u64,(uint32_t uMxCsrIn, PX86XMMREG puDst, const uint64_t *pu64Src)); /* Actually two single precision floating point values. */
    4439 IEM_DECL_IMPL_DEF(uint32_t, iemAImpl_vcvtdq2pd_u128_u64_fallback,(uint32_t uMxCsrIn, PX86XMMREG puDst,  const uint64_t *pu64Src)); /* Actually two single precision floating point values. */
    4440 IEM_DECL_IMPL_DEF(uint32_t, iemAImpl_vcvtdq2pd_u256_u128,(uint32_t uMxCsrIn, PX86YMMREG puDst, PCX86XMMREG puSrc));
    4441 IEM_DECL_IMPL_DEF(uint32_t, iemAImpl_vcvtdq2pd_u256_u128_fallback,(uint32_t uMxCsrIn, PX86YMMREG puDst, PCX86XMMREG puSrc));
    4442 
    4443 
    4444 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLF2EFLMXCSRR32R32,(uint32_t uMxCsrIn, uint32_t *pfEFlags, RTFLOAT32U uSrc1, RTFLOAT32U uSrc2));
    4445 typedef FNIEMAIMPLF2EFLMXCSRR32R32 *PFNIEMAIMPLF2EFLMXCSRR32R32;
    4446 
    4447 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLF2EFLMXCSRR64R64,(uint32_t uMxCsrIn, uint32_t *pfEFlags, RTFLOAT64U uSrc1, RTFLOAT64U uSrc2));
    4448 typedef FNIEMAIMPLF2EFLMXCSRR64R64 *PFNIEMAIMPLF2EFLMXCSRR64R64;
    4449 
    4450 FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_ucomiss_u128;
    4451 FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_vucomiss_u128, iemAImpl_vucomiss_u128_fallback;
    4452 
    4453 FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_ucomisd_u128;
    4454 FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_vucomisd_u128, iemAImpl_vucomisd_u128_fallback;
    4455 
    4456 FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_comiss_u128;
    4457 FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_vcomiss_u128, iemAImpl_vcomiss_u128_fallback;
    4458 
    4459 FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_comisd_u128;
    4460 FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_vcomisd_u128, iemAImpl_vcomisd_u128_fallback;
    4461 
    4462 
/**
 * Two-source XMM operand pack for media/floating-point workers that take an
 * imm8 (see FNIEMAIMPLMEDIAF3XMMIMM8 below, e.g. cmpps/roundss/dpps).
 */
    4463 typedef struct IEMMEDIAF2XMMSRC
    4464 {
    4465     X86XMMREG               uSrc1;   /**< First 128-bit source operand. */
    4466     X86XMMREG               uSrc2;   /**< Second 128-bit source operand. */
    4467 } IEMMEDIAF2XMMSRC;
/** Pointer to a two-source XMM operand pack. */
    4468 typedef IEMMEDIAF2XMMSRC *PIEMMEDIAF2XMMSRC;
/** Pointer to a const two-source XMM operand pack. */
    4469 typedef const IEMMEDIAF2XMMSRC *PCIEMMEDIAF2XMMSRC;
    4470 
    4471 
/**
 * Worker type taking MXCSR input, an XMM destination, a two-source XMM pack
 * and an imm8 (bEvil); returns the updated MXCSR value as uint32_t.
 */
    4472 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMEDIAF3XMMIMM8,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCIEMMEDIAF2XMMSRC puSrc, uint8_t bEvil));
/** Pointer to a three-operand XMM media worker with imm8. */
    4473 typedef FNIEMAIMPLMEDIAF3XMMIMM8 *PFNIEMAIMPLMEDIAF3XMMIMM8;
    4474 
    4475 
/**
 * Two-source YMM operand pack, the 256-bit counterpart of IEMMEDIAF2XMMSRC
 * (used by FNIEMAIMPLMEDIAF3YMMIMM8 below, e.g. vcmpps_u256/vdpps_u256).
 */
    4476 typedef struct IEMMEDIAF2YMMSRC
    4477 {
    4478     X86YMMREG               uSrc1;   /**< First 256-bit source operand. */
    4479     X86YMMREG               uSrc2;   /**< Second 256-bit source operand. */
    4480 } IEMMEDIAF2YMMSRC;
/** Pointer to a two-source YMM operand pack. */
    4481 typedef IEMMEDIAF2YMMSRC *PIEMMEDIAF2YMMSRC;
/** Pointer to a const two-source YMM operand pack. */
    4482 typedef const IEMMEDIAF2YMMSRC *PCIEMMEDIAF2YMMSRC;
    4483 
    4484 
/**
 * Worker type taking MXCSR input, a YMM destination, a two-source YMM pack
 * and an imm8 (bEvil); returns the updated MXCSR value as uint32_t.
 */
    4485 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMEDIAF3YMMIMM8,(uint32_t uMxCsrIn, PX86YMMREG puDst, PCIEMMEDIAF2YMMSRC puSrc, uint8_t bEvil));
/** Pointer to a three-operand YMM media worker with imm8. */
    4486 typedef FNIEMAIMPLMEDIAF3YMMIMM8 *PFNIEMAIMPLMEDIAF3YMMIMM8;
    4487 
    4488 
    4489 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_cmpps_u128;
    4490 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_cmppd_u128;
    4491 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_cmpss_u128;
    4492 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_cmpsd_u128;
    4493 
    4494 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_vcmpps_u128, iemAImpl_vcmpps_u128_fallback;
    4495 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_vcmppd_u128, iemAImpl_vcmppd_u128_fallback;
    4496 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_vcmpss_u128, iemAImpl_vcmpss_u128_fallback;
    4497 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_vcmpsd_u128, iemAImpl_vcmpsd_u128_fallback;
    4498 
    4499 FNIEMAIMPLMEDIAF3YMMIMM8 iemAImpl_vcmpps_u256, iemAImpl_vcmpps_u256_fallback;
    4500 FNIEMAIMPLMEDIAF3YMMIMM8 iemAImpl_vcmppd_u256, iemAImpl_vcmppd_u256_fallback;
    4501 
    4502 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_roundss_u128;
    4503 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_roundsd_u128;
    4504 
    4505 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_dpps_u128,     iemAImpl_dpps_u128_fallback;
    4506 FNIEMAIMPLMEDIAF3XMMIMM8 iemAImpl_dppd_u128,     iemAImpl_dppd_u128_fallback;
    4507 
    4508 
/** Media operation taking a 128-bit destination/source pair, an 8-bit
 *  immediate and an MXCSR input value; returns the updated MXCSR. */
    4509 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMEDIAF2U128IMM8,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86XMMREG puSrc, uint8_t bEvil));
/** Pointer to a 128-bit media operation taking an 8-bit immediate and MXCSR input. */
    4510 typedef FNIEMAIMPLMEDIAF2U128IMM8 *PFNIEMAIMPLMEDIAF2U128IMM8;
    4511 
    4512 
/** Media operation taking a 256-bit destination/source pair, an 8-bit
 *  immediate and an MXCSR input value; returns the updated MXCSR. */
    4513 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMEDIAF2U256IMM8,(uint32_t uMxCsrIn, PX86YMMREG puDst, PCX86YMMREG puSrc, uint8_t bEvil));
/** Pointer to a 256-bit media operation taking an 8-bit immediate and MXCSR input. */
    4514 typedef FNIEMAIMPLMEDIAF2U256IMM8 *PFNIEMAIMPLMEDIAF2U256IMM8;
    4515 
    4516 
    4517 FNIEMAIMPLMEDIAF2U128IMM8 iemAImpl_roundps_u128,  iemAImpl_roundps_u128_fallback;
    4518 FNIEMAIMPLMEDIAF2U128IMM8 iemAImpl_roundpd_u128,  iemAImpl_roundpd_u128_fallback;
    4519 
    4520 FNIEMAIMPLMEDIAF2U128IMM8 iemAImpl_vroundps_u128, iemAImpl_vroundps_u128_fallback;
    4521 FNIEMAIMPLMEDIAF2U128IMM8 iemAImpl_vroundpd_u128, iemAImpl_vroundpd_u128_fallback;
    4522 
    4523 FNIEMAIMPLMEDIAF2U256IMM8 iemAImpl_vroundps_u256, iemAImpl_vroundps_u256_fallback;
    4524 FNIEMAIMPLMEDIAF2U256IMM8 iemAImpl_vroundpd_u256, iemAImpl_vroundpd_u256_fallback;
    4525 
    4526 FNIEMAIMPLMEDIAF3XMMIMM8  iemAImpl_vroundss_u128, iemAImpl_vroundss_u128_fallback;
    4527 FNIEMAIMPLMEDIAF3XMMIMM8  iemAImpl_vroundsd_u128, iemAImpl_vroundsd_u128_fallback;
    4528 
    4529 FNIEMAIMPLMEDIAF3XMMIMM8  iemAImpl_vdpps_u128,     iemAImpl_vdpps_u128_fallback;
    4530 FNIEMAIMPLMEDIAF3XMMIMM8  iemAImpl_vdppd_u128,     iemAImpl_vdppd_u128_fallback;
    4531 
    4532 FNIEMAIMPLMEDIAF3YMMIMM8  iemAImpl_vdpps_u256,     iemAImpl_vdpps_u256_fallback;
    4533 
    4534 
/** Conversion with MXCSR in/out: 64-bit destination, 128-bit XMM source
 *  (used by the cvt(t)pd2pi implementations below). */
    4535 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMXCSRU64U128,(uint32_t fMxCsrIn, uint64_t *pu64Dst, PCX86XMMREG pSrc));
/** Pointer to a 64-bit-from-128-bit conversion with MXCSR in/out. */
    4536 typedef FNIEMAIMPLMXCSRU64U128 *PFNIEMAIMPLMXCSRU64U128;
    4537 
    4538 FNIEMAIMPLMXCSRU64U128 iemAImpl_cvtpd2pi_u128;
    4539 FNIEMAIMPLMXCSRU64U128 iemAImpl_cvttpd2pi_u128;
    4540 
/** Conversion with MXCSR in/out: 128-bit XMM destination, 64-bit source
 *  (used by the cvtpi2ps/cvtpi2pd implementations below). */
    4541 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMXCSRU128U64,(uint32_t fMxCsrIn, PX86XMMREG pDst, uint64_t u64Src));
/** Pointer to a 128-bit-from-64-bit conversion with MXCSR in/out. */
    4542 typedef FNIEMAIMPLMXCSRU128U64 *PFNIEMAIMPLMXCSRU128U64;
    4543 
    4544 FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2ps_u128;
    4545 FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2pd_u128;
    4546 
/** Conversion with MXCSR in/out: 64-bit destination and 64-bit source
 *  (used by the cvt(t)ps2pi implementations below). */
    4547 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMXCSRU64U64,(uint32_t fMxCsrIn, uint64_t *pu64Dst, uint64_t u64Src));
/** Pointer to a 64-bit-from-64-bit conversion with MXCSR in/out. */
    4548 typedef FNIEMAIMPLMXCSRU64U64 *PFNIEMAIMPLMXCSRU64U64;
    4549 
    4550 FNIEMAIMPLMXCSRU64U64 iemAImpl_cvtps2pi_u128;
    4551 FNIEMAIMPLMXCSRU64U64 iemAImpl_cvttps2pi_u128;
    4552 
    4553 /** @} */
    4554 
    4555 
    4556 /** @name Function tables.
    4557  * @{
    4558  */
    4559 
    4560 /**
    4561  * Function table for a binary operator providing implementation based on
    4562  * operand size.
    4563  */
/* NOTE(review): the pfnLockedXX entries presumably back the LOCK-prefixed
 * memory forms of the operator — confirm against the users of this table. */
    4564 typedef struct IEMOPBINSIZES
    4565 {
    4566     PFNIEMAIMPLBINU8  pfnNormalU8,    pfnLockedU8;
    4567     PFNIEMAIMPLBINU16 pfnNormalU16,   pfnLockedU16;
    4568     PFNIEMAIMPLBINU32 pfnNormalU32,   pfnLockedU32;
    4569     PFNIEMAIMPLBINU64 pfnNormalU64,   pfnLockedU64;
    4570 } IEMOPBINSIZES;
    4571 /** Pointer to a binary operator function table. */
    4572 typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
    4573 
    4574 
    4575 /**
    4576  * Function table for a unary operator providing implementation based on
    4577  * operand size.
    4578  */
    4579 typedef struct IEMOPUNARYSIZES
    4580 {
    4581     PFNIEMAIMPLUNARYU8  pfnNormalU8,    pfnLockedU8;
    4582     PFNIEMAIMPLUNARYU16 pfnNormalU16,   pfnLockedU16;
    4583     PFNIEMAIMPLUNARYU32 pfnNormalU32,   pfnLockedU32;
    4584     PFNIEMAIMPLUNARYU64 pfnNormalU64,   pfnLockedU64;
    4585 } IEMOPUNARYSIZES;
    4586 /** Pointer to a unary operator function table. */
    4587 typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
    4588 
    4589 
    4590 /**
    4591  * Function table for a shift operator providing implementation based on
    4592  * operand size.
    4593  */
/* Shifts have no locked forms, so only the normal variants are present. */
    4594 typedef struct IEMOPSHIFTSIZES
    4595 {
    4596     PFNIEMAIMPLSHIFTU8  pfnNormalU8;
    4597     PFNIEMAIMPLSHIFTU16 pfnNormalU16;
    4598     PFNIEMAIMPLSHIFTU32 pfnNormalU32;
    4599     PFNIEMAIMPLSHIFTU64 pfnNormalU64;
    4600 } IEMOPSHIFTSIZES;
    4601 /** Pointer to a shift operator function table. */
    4602 typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
    4603 
    4604 
    4605 /**
    4606  * Function table for a multiplication or division operation.
    4607  */
    4608 typedef struct IEMOPMULDIVSIZES
    4609 {
    4610     PFNIEMAIMPLMULDIVU8  pfnU8;
    4611     PFNIEMAIMPLMULDIVU16 pfnU16;
    4612     PFNIEMAIMPLMULDIVU32 pfnU32;
    4613     PFNIEMAIMPLMULDIVU64 pfnU64;
    4614 } IEMOPMULDIVSIZES;
    4615 /** Pointer to a multiplication or division operation function table. */
    4616 typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
    4617 
    4618 
    4619 /**
    4620  * Function table for a double precision shift operator providing implementation
    4621  * based on operand size.
    4622  */
/* Double precision shifts (shld/shrd) exist only for 16/32/64-bit operands. */
    4623 typedef struct IEMOPSHIFTDBLSIZES
    4624 {
    4625     PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16;
    4626     PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32;
    4627     PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64;
    4628 } IEMOPSHIFTDBLSIZES;
    4629 /** Pointer to a double precision shift function table. */
    4630 typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
    4631 
    4632 
    4633 /**
    4634  * Function table for media instruction taking two full sized media source
    4635  * registers and one full sized destination register (AVX).
    4636  */
    4637 typedef struct IEMOPMEDIAF3
    4638 {
    4639     PFNIEMAIMPLMEDIAF3U128 pfnU128;
    4640     PFNIEMAIMPLMEDIAF3U256 pfnU256;
    4641 } IEMOPMEDIAF3;
    4642 /** Pointer to a media operation function table for 3 full sized ops (AVX). */
    4643 typedef IEMOPMEDIAF3 const *PCIEMOPMEDIAF3;
    4644 
    4645 /** @def IEMOPMEDIAF3_INIT_VARS_EX
    4646  * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
    4647  * given functions as initializers.  For use in AVX functions where a pair of
    4648  * functions is used only once and the function table need not be public. */
    4649 #ifndef TST_IEM_CHECK_MC
    4650 # if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
    4651 #  define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4652     static IEMOPMEDIAF3 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    4653     static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4654 # else
    4655 #  define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4656     static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4657 # endif
    4658 #else
/* In the MC-check build the tables are not used: expand to a no-op statement. */
    4659 # define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
    4660 #endif
    4661 /** @def IEMOPMEDIAF3_INIT_VARS
    4662  * Generate AVX function tables for the @a a_InstrNm instruction.
    4663  * @sa IEMOPMEDIAF3_INIT_VARS_EX */
    4664 #define IEMOPMEDIAF3_INIT_VARS(a_InstrNm) \
    4665     IEMOPMEDIAF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),           RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
    4666                               RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback),  RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
    4667 
    4668 
    4669 /**
    4670  * Function table for media instruction taking one full sized media source
    4671  * registers and one full sized destination register (AVX).
    4672  */
    4673 typedef struct IEMOPMEDIAF2
    4674 {
    4675     PFNIEMAIMPLMEDIAF2U128 pfnU128;
    4676     PFNIEMAIMPLMEDIAF2U256 pfnU256;
    4677 } IEMOPMEDIAF2;
    4678 /** Pointer to a media operation function table for 2 full sized ops (AVX). */
    4679 typedef IEMOPMEDIAF2 const *PCIEMOPMEDIAF2;
    4680 
    4681 /** @def IEMOPMEDIAF2_INIT_VARS_EX
    4682  * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
    4683  * given functions as initializers.  For use in AVX functions where a pair of
    4684  * functions is used only once and the function table need not be public. */
    4685 #ifndef TST_IEM_CHECK_MC
    4686 # if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
    4687 #  define IEMOPMEDIAF2_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4688     static IEMOPMEDIAF2 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    4689     static IEMOPMEDIAF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4690 # else
    4691 #  define IEMOPMEDIAF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4692     static IEMOPMEDIAF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4693 # endif
    4694 #else
    4695 # define IEMOPMEDIAF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
    4696 #endif
    4697 /** @def IEMOPMEDIAF2_INIT_VARS
    4698  * Generate AVX function tables for the @a a_InstrNm instruction.
    4699  * @sa IEMOPMEDIAF2_INIT_VARS_EX */
    4700 #define IEMOPMEDIAF2_INIT_VARS(a_InstrNm) \
    4701     IEMOPMEDIAF2_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),           RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
    4702                               RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback),  RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
    4703 
    4704 
    4705 /**
    4706  * Function table for media instruction taking two full sized media source
    4707  * registers and one full sized destination register, but no additional state
    4708  * (AVX).
    4709  */
    4710 typedef struct IEMOPMEDIAOPTF3
    4711 {
    4712     PFNIEMAIMPLMEDIAOPTF3U128 pfnU128;
    4713     PFNIEMAIMPLMEDIAOPTF3U256 pfnU256;
    4714 } IEMOPMEDIAOPTF3;
    4715 /** Pointer to a media operation function table for 3 full sized ops (AVX). */
    4716 typedef IEMOPMEDIAOPTF3 const *PCIEMOPMEDIAOPTF3;
    4717 
    4718 /** @def IEMOPMEDIAOPTF3_INIT_VARS_EX
    4719  * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
    4720  * given functions as initializers.  For use in AVX functions where a pair of
    4721  * functions is used only once and the function table need not be public. */
    4722 #ifndef TST_IEM_CHECK_MC
    4723 # if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
    4724 #  define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4725     static IEMOPMEDIAOPTF3 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    4726     static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4727 # else
    4728 #  define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4729     static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4730 # endif
    4731 #else
    4732 # define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
    4733 #endif
    4734 /** @def IEMOPMEDIAOPTF3_INIT_VARS
    4735  * Generate AVX function tables for the @a a_InstrNm instruction.
    4736  * @sa IEMOPMEDIAOPTF3_INIT_VARS_EX */
    4737 #define IEMOPMEDIAOPTF3_INIT_VARS(a_InstrNm) \
    4738     IEMOPMEDIAOPTF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),           RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
    4739                                  RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback),  RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
    4740 
    4741 /**
    4742  * Function table for media instruction taking one full sized media source
    4743  * registers and one full sized destination register, but no additional state
    4744  * (AVX).
    4745  */
    4746 typedef struct IEMOPMEDIAOPTF2
    4747 {
    4748     PFNIEMAIMPLMEDIAOPTF2U128 pfnU128;
    4749     PFNIEMAIMPLMEDIAOPTF2U256 pfnU256;
    4750 } IEMOPMEDIAOPTF2;
    4751 /** Pointer to a media operation function table for 2 full sized ops (AVX). */
    4752 typedef IEMOPMEDIAOPTF2 const *PCIEMOPMEDIAOPTF2;
    4753 
    4754 /** @def IEMOPMEDIAOPTF2_INIT_VARS_EX
    4755  * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
    4756  * given functions as initializers.  For use in AVX functions where a pair of
    4757  * functions is used only once and the function table need not be public. */
    4758 #ifndef TST_IEM_CHECK_MC
    4759 # if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
    4760 #  define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4761     static IEMOPMEDIAOPTF2 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    4762     static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4763 # else
    4764 #  define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4765     static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4766 # endif
    4767 #else
    4768 # define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
    4769 #endif
    4770 /** @def IEMOPMEDIAOPTF2_INIT_VARS
    4771  * Generate AVX function tables for the @a a_InstrNm instruction.
    4772  * @sa IEMOPMEDIAOPTF2_INIT_VARS_EX */
    4773 #define IEMOPMEDIAOPTF2_INIT_VARS(a_InstrNm) \
    4774     IEMOPMEDIAOPTF2_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),           RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
    4775                                  RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback),  RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
    4776 
    4777 
    4778 /**
    4779  * Function table for media instruction taking one full sized media source
    4780  * register and one full sized destination register and an 8-bit immediate (AVX).
    4781  */
    4782 typedef struct IEMOPMEDIAF2IMM8
    4783 {
    4784     PFNIEMAIMPLMEDIAF2U128IMM8 pfnU128;
    4785     PFNIEMAIMPLMEDIAF2U256IMM8 pfnU256;
    4786 } IEMOPMEDIAF2IMM8;
    4787 /** Pointer to a media operation function table for 2 full sized ops (AVX). */
    4788 typedef IEMOPMEDIAF2IMM8 const *PCIEMOPMEDIAF2IMM8;
    4789 
    4790 /** @def IEMOPMEDIAF2IMM8_INIT_VARS_EX
    4791  * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
    4792  * given functions as initializers.  For use in AVX functions where a pair of
    4793  * functions is used only once and the function table need not be public. */
    4794 #ifndef TST_IEM_CHECK_MC
    4795 # if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
    4796 #  define IEMOPMEDIAF2IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4797     static IEMOPMEDIAF2IMM8 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    4798     static IEMOPMEDIAF2IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4799 # else
    4800 #  define IEMOPMEDIAF2IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4801     static IEMOPMEDIAF2IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4802 # endif
    4803 #else
    4804 # define IEMOPMEDIAF2IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
    4805 #endif
    4806 /** @def IEMOPMEDIAF2IMM8_INIT_VARS
    4807  * Generate AVX function tables for the @a a_InstrNm instruction.
    4808  * @sa IEMOPMEDIAF2IMM8_INIT_VARS_EX */
    4809 #define IEMOPMEDIAF2IMM8_INIT_VARS(a_InstrNm) \
    4810     IEMOPMEDIAF2IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),           RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
    4811                                   RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback),  RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
    4812 
    4813 
    4814 /**
    4815  * Function table for media instruction taking one full sized media source
    4816  * register and one full sized destination register and an 8-bit immediate, but no additional state
    4817  * (AVX).
    4818  */
    4819 typedef struct IEMOPMEDIAOPTF2IMM8
    4820 {
    4821     PFNIEMAIMPLMEDIAOPTF2U128IMM8 pfnU128;
    4822     PFNIEMAIMPLMEDIAOPTF2U256IMM8 pfnU256;
    4823 } IEMOPMEDIAOPTF2IMM8;
    4824 /** Pointer to a media operation function table for 2 full sized ops (AVX). */
    4825 typedef IEMOPMEDIAOPTF2IMM8 const *PCIEMOPMEDIAOPTF2IMM8;
    4826 
    4827 /** @def IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX
    4828  * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
    4829  * given functions as initializers.  For use in AVX functions where a pair of
    4830  * functions is used only once and the function table need not be public. */
    4831 #ifndef TST_IEM_CHECK_MC
    4832 # if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
    4833 #  define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4834     static IEMOPMEDIAOPTF2IMM8 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    4835     static IEMOPMEDIAOPTF2IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4836 # else
    4837 #  define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4838     static IEMOPMEDIAOPTF2IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4839 # endif
    4840 #else
    4841 # define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
    4842 #endif
    4843 /** @def IEMOPMEDIAOPTF2IMM8_INIT_VARS
    4844  * Generate AVX function tables for the @a a_InstrNm instruction.
    4845  * Note: unlike the other *_INIT_VARS macros this one expands to the
    4846  * @c iemAImpl_<instr>_imm_* naming pattern (extra @c _imm infix).
    4847  * @sa IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX */
    4848 #define IEMOPMEDIAOPTF2IMM8_INIT_VARS(a_InstrNm) \
    4849     IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u128),           RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u256),\
    4850                                      RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u128_fallback),  RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u256_fallback))
    4849 
    4850 /**
    4851  * Function table for media instruction taking two full sized media source
    4852  * registers and one full sized destination register and an 8-bit immediate, but no additional state
    4853  * (AVX).
    4854  */
    4855 typedef struct IEMOPMEDIAOPTF3IMM8
    4856 {
    4857     PFNIEMAIMPLMEDIAOPTF3U128IMM8 pfnU128;
    4858     PFNIEMAIMPLMEDIAOPTF3U256IMM8 pfnU256;
    4859 } IEMOPMEDIAOPTF3IMM8;
    4860 /** Pointer to a media operation function table for 3 full sized ops (AVX). */
    4861 typedef IEMOPMEDIAOPTF3IMM8 const *PCIEMOPMEDIAOPTF3IMM8;
    4862 
    4863 /** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX
    4864  * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
    4865  * given functions as initializers.  For use in AVX functions where a pair of
    4866  * functions is used only once and the function table need not be public. */
    4867 #ifndef TST_IEM_CHECK_MC
    4868 # if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
    4869 #  define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4870     static IEMOPMEDIAOPTF3IMM8 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    4871     static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4872 # else
    4873 #  define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4874     static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4875 # endif
    4876 #else
    4877 # define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
    4878 #endif
    4879 /** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS
    4880  * Generate AVX function tables for the @a a_InstrNm instruction.
    4881  * @sa IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX */
    4882 #define IEMOPMEDIAOPTF3IMM8_INIT_VARS(a_InstrNm) \
    4883     IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),           RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
    4884                                      RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback),  RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
    4885 /** @} */
    4886 
    4887 
    4888 /**
    4889  * Function table for blend type instruction taking three full sized media source
    4890  * registers and one full sized destination register, but no additional state
    4891  * (AVX).
    4892  */
    4893 typedef struct IEMOPBLENDOP
    4894 {
    4895     PFNIEMAIMPLAVXBLENDU128 pfnU128;
    4896     PFNIEMAIMPLAVXBLENDU256 pfnU256;
    4897 } IEMOPBLENDOP;
    4898 /** Pointer to a media operation function table for 4 full sized ops (AVX). */
    4899 typedef IEMOPBLENDOP const *PCIEMOPBLENDOP;
    4900 
    4901 /** @def IEMOPBLENDOP_INIT_VARS_EX
    4902  * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
    4903  * given functions as initializers.  For use in AVX functions where a pair of
    4904  * functions is used only once and the function table need not be public. */
    4905 #ifndef TST_IEM_CHECK_MC
    4906 # if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
    4907 #  define IEMOPBLENDOP_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4908     static IEMOPBLENDOP const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    4909     static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4910 # else
    4911 #  define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    4912     static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
    4913 # endif
    4914 #else
    4915 # define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
    4916 #endif
    4917 /** @def IEMOPBLENDOP_INIT_VARS
    4918  * Generate AVX function tables for the @a a_InstrNm instruction.
    4919  * @sa IEMOPBLENDOP_INIT_VARS_EX */
    4920 #define IEMOPBLENDOP_INIT_VARS(a_InstrNm) \
    4921     IEMOPBLENDOP_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),           RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
    4922                               RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback),  RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
    4923 
    4924 
    4925 /** @name SSE/AVX single/double precision floating point operations.
    4926  * @{ */
/** SSE binary op: 128-bit result from two 128-bit sources, MXCSR in/out. */
    4927 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPSSEF2U128,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
    4928 typedef FNIEMAIMPLFPSSEF2U128  *PFNIEMAIMPLFPSSEF2U128;
/** SSE scalar op: 128-bit dst, 128-bit src1 and 32-bit float src2.
 * (Parameter renamed Result -> pResult for consistency with the siblings.) */
    4929 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPSSEF2U128R32,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
    4930 typedef FNIEMAIMPLFPSSEF2U128R32  *PFNIEMAIMPLFPSSEF2U128R32;
/** SSE scalar op: 128-bit dst, 128-bit src1 and 64-bit double src2. */
    4931 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPSSEF2U128R64,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
    4932 typedef FNIEMAIMPLFPSSEF2U128R64  *PFNIEMAIMPLFPSSEF2U128R64;
    4933 
/** AVX three-operand forms: same shapes as the SSE typedefs above. */
    4934 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U128,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
    4935 typedef FNIEMAIMPLFPAVXF3U128  *PFNIEMAIMPLFPAVXF3U128;
    4936 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U128R32,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
    4937 typedef FNIEMAIMPLFPAVXF3U128R32  *PFNIEMAIMPLFPAVXF3U128R32;
    4938 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U128R64,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
    4939 typedef FNIEMAIMPLFPAVXF3U128R64  *PFNIEMAIMPLFPAVXF3U128R64;
    4940 
/** AVX three-operand 256-bit form. */
    4941 typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U256,(uint32_t uMxCsrIn, PX86YMMREG pResult, PCX86YMMREG puSrc1, PCX86YMMREG puSrc2));
    4942 typedef FNIEMAIMPLFPAVXF3U256  *PFNIEMAIMPLFPAVXF3U256;
    4943 
    4944 FNIEMAIMPLFPSSEF2U128 iemAImpl_addps_u128;
    4945 FNIEMAIMPLFPSSEF2U128 iemAImpl_addpd_u128;
    4946 FNIEMAIMPLFPSSEF2U128 iemAImpl_mulps_u128;
    4947 FNIEMAIMPLFPSSEF2U128 iemAImpl_mulpd_u128;
    4948 FNIEMAIMPLFPSSEF2U128 iemAImpl_subps_u128;
    4949 FNIEMAIMPLFPSSEF2U128 iemAImpl_subpd_u128;
    4950 FNIEMAIMPLFPSSEF2U128 iemAImpl_minps_u128;
    4951 FNIEMAIMPLFPSSEF2U128 iemAImpl_minpd_u128;
    4952 FNIEMAIMPLFPSSEF2U128 iemAImpl_divps_u128;
    4953 FNIEMAIMPLFPSSEF2U128 iemAImpl_divpd_u128;
    4954 FNIEMAIMPLFPSSEF2U128 iemAImpl_maxps_u128;
    4955 FNIEMAIMPLFPSSEF2U128 iemAImpl_maxpd_u128;
    4956 FNIEMAIMPLFPSSEF2U128 iemAImpl_haddps_u128;
    4957 FNIEMAIMPLFPSSEF2U128 iemAImpl_haddpd_u128;
    4958 FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubps_u128;
    4959 FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubpd_u128;
    4960 FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtps_u128;
    4961 FNIEMAIMPLFPSSEF2U128 iemAImpl_rsqrtps_u128;
    4962 FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtpd_u128;
    4963 FNIEMAIMPLFPSSEF2U128 iemAImpl_rcpps_u128;
    4964 FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubps_u128;
    4965 FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubpd_u128;
    4966 
/* cvtps2pd gets an explicit prototype: its source is a 64-bit memory/register
 * half rather than a full 128-bit XMM value, so no common typedef fits. */
    4967 FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2ps_u128;
    4968 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_cvtps2pd_u128,(uint32_t uMxCsrIn, PX86XMMREG pResult, uint64_t const *pu64Src));
    4969 
    4970 FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2ps_u128;
    4971 FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2dq_u128;
    4972 FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttps2dq_u128;
    4973 FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttpd2dq_u128;
    4974 FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2pd_u128;
    4975 FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2dq_u128;
    4976 
    4977 FNIEMAIMPLFPSSEF2U128R32 iemAImpl_addss_u128_r32;
    4978 FNIEMAIMPLFPSSEF2U128R64 iemAImpl_addsd_u128_r64;
    4979 FNIEMAIMPLFPSSEF2U128R32 iemAImpl_mulss_u128_r32;
    4980 FNIEMAIMPLFPSSEF2U128R64 iemAImpl_mulsd_u128_r64;
    4981 FNIEMAIMPLFPSSEF2U128R32 iemAImpl_subss_u128_r32;
    4982 FNIEMAIMPLFPSSEF2U128R64 iemAImpl_subsd_u128_r64;
    4983 FNIEMAIMPLFPSSEF2U128R32 iemAImpl_minss_u128_r32;
    4984 FNIEMAIMPLFPSSEF2U128R64 iemAImpl_minsd_u128_r64;
    4985 FNIEMAIMPLFPSSEF2U128R32 iemAImpl_divss_u128_r32;
    4986 FNIEMAIMPLFPSSEF2U128R64 iemAImpl_divsd_u128_r64;
    4987 FNIEMAIMPLFPSSEF2U128R32 iemAImpl_maxss_u128_r32;
    4988 FNIEMAIMPLFPSSEF2U128R64 iemAImpl_maxsd_u128_r64;
    4989 FNIEMAIMPLFPSSEF2U128R32 iemAImpl_cvtss2sd_u128_r32;
    4990 FNIEMAIMPLFPSSEF2U128R64 iemAImpl_cvtsd2ss_u128_r64;
    4991 FNIEMAIMPLFPSSEF2U128R32 iemAImpl_sqrtss_u128_r32;
    4992 FNIEMAIMPLFPSSEF2U128R64 iemAImpl_sqrtsd_u128_r64;
    4993 FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rsqrtss_u128_r32;
    4994 FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rcpss_u128_r32;
    4995 
    4996 FNIEMAIMPLMEDIAF3U128 iemAImpl_vaddps_u128, iemAImpl_vaddps_u128_fallback;
    4997 FNIEMAIMPLMEDIAF3U128 iemAImpl_vaddpd_u128, iemAImpl_vaddpd_u128_fallback;
    4998 FNIEMAIMPLMEDIAF3U128 iemAImpl_vmulps_u128, iemAImpl_vmulps_u128_fallback;
    4999 FNIEMAIMPLMEDIAF3U128 iemAImpl_vmulpd_u128, iemAImpl_vmulpd_u128_fallback;
    5000 FNIEMAIMPLMEDIAF3U128 iemAImpl_vsubps_u128, iemAImpl_vsubps_u128_fallback;
    5001 FNIEMAIMPLMEDIAF3U128 iemAImpl_vsubpd_u128, iemAImpl_vsubpd_u128_fallback;
    5002 FNIEMAIMPLMEDIAF3U128 iemAImpl_vminps_u128, iemAImpl_vminps_u128_fallback;
    5003 FNIEMAIMPLMEDIAF3U128 iemAImpl_vminpd_u128, iemAImpl_vminpd_u128_fallback;
    5004 FNIEMAIMPLMEDIAF3U128 iemAImpl_vdivps_u128, iemAImpl_vdivps_u128_fallback;
    5005 FNIEMAIMPLMEDIAF3U128 iemAImpl_vdivpd_u128, iemAImpl_vdivpd_u128_fallback;
    5006 FNIEMAIMPLMEDIAF3U128 iemAImpl_vmaxps_u128, iemAImpl_vmaxps_u128_fallback;
    5007 FNIEMAIMPLMEDIAF3U128 iemAImpl_vmaxpd_u128, iemAImpl_vmaxpd_u128_fallback;
    5008 FNIEMAIMPLMEDIAF3U128 iemAImpl_vhaddps_u128, iemAImpl_vhaddps_u128_fallback;
    5009 FNIEMAIMPLMEDIAF3U128 iemAImpl_vhaddpd_u128, iemAImpl_vhaddpd_u128_fallback;
    5010 FNIEMAIMPLMEDIAF3U128 iemAImpl_vhsubps_u128, iemAImpl_vhsubps_u128_fallback;
    5011 FNIEMAIMPLMEDIAF3U128 iemAImpl_vhsubpd_u128, iemAImpl_vhsubpd_u128_fallback;
    5012 FNIEMAIMPLMEDIAF2U128 iemAImpl_vsqrtps_u128, iemAImpl_vsqrtps_u128_fallback;
    5013 FNIEMAIMPLMEDIAF2U128 iemAImpl_vsqrtpd_u128, iemAImpl_vsqrtpd_u128_fallback;
    5014 FNIEMAIMPLMEDIAF2U128 iemAImpl_vrsqrtps_u128,  iemAImpl_vrsqrtps_u128_fallback;
    5015 FNIEMAIMPLMEDIAF2U128 iemAImpl_vrcpps_u128,    iemAImpl_vrcpps_u128_fallback;
    5016 FNIEMAIMPLMEDIAF3U128 iemAImpl_vaddsubps_u128, iemAImpl_vaddsubps_u128_fallback;
    5017 FNIEMAIMPLMEDIAF3U128 iemAImpl_vaddsubpd_u128, iemAImpl_vaddsubpd_u128_fallback;
    5018 FNIEMAIMPLMEDIAF2U128 iemAImpl_vcvtdq2ps_u128, iemAImpl_vcvtdq2ps_u128_fallback;
    5019 FNIEMAIMPLMEDIAF2U128 iemAImpl_vcvtps2dq_u128, iemAImpl_vcvtps2dq_u128_fallback;
    5020 FNIEMAIMPLMEDIAF2U128 iemAImpl_vcvttps2dq_u128, iemAImpl_vcvttps2dq_u128_fallback;
/* Narrowing pd->ps/dq conversions get explicit prototypes; the operand widths
 * (128-bit dst, 128-bit src here / 256-bit src below) do not match a typedef. */
    5021 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvtpd2ps_u128_u128,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86XMMREG puSrc));
    5022 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvtpd2ps_u128_u128_fallback,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86XMMREG puSrc));
    5023 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvttpd2dq_u128_u128,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86XMMREG puSrc));
    5024 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvttpd2dq_u128_u128_fallback,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86XMMREG puSrc));
    5025 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvtpd2dq_u128_u128,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86XMMREG puSrc));
    5026 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvtpd2dq_u128_u128_fallback,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86XMMREG puSrc));
    5027 
    5028 
    5029 FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vaddss_u128_r32, iemAImpl_vaddss_u128_r32_fallback;
    5030 FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vaddsd_u128_r64, iemAImpl_vaddsd_u128_r64_fallback;
    5031 FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmulss_u128_r32, iemAImpl_vmulss_u128_r32_fallback;
    5032 FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmulsd_u128_r64, iemAImpl_vmulsd_u128_r64_fallback;
    5033 FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsubss_u128_r32, iemAImpl_vsubss_u128_r32_fallback;
    5034 FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsubsd_u128_r64, iemAImpl_vsubsd_u128_r64_fallback;
    5035 FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vminss_u128_r32, iemAImpl_vminss_u128_r32_fallback;
    5036 FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vminsd_u128_r64, iemAImpl_vminsd_u128_r64_fallback;
    5037 FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vdivss_u128_r32, iemAImpl_vdivss_u128_r32_fallback;
    5038 FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vdivsd_u128_r64, iemAImpl_vdivsd_u128_r64_fallback;
    5039 FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmaxss_u128_r32, iemAImpl_vmaxss_u128_r32_fallback;
    5040 FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmaxsd_u128_r64, iemAImpl_vmaxsd_u128_r64_fallback;
    5041 FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsqrtss_u128_r32, iemAImpl_vsqrtss_u128_r32_fallback;
    5042 FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsqrtsd_u128_r64, iemAImpl_vsqrtsd_u128_r64_fallback;
    5043 FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vrsqrtss_u128_r32, iemAImpl_vrsqrtss_u128_r32_fallback;
    5044 FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vrcpss_u128_r32,   iemAImpl_vrcpss_u128_r32_fallback;
    5045 FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vcvtss2sd_u128_r32, iemAImpl_vcvtss2sd_u128_r32_fallback;
    5046 FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vcvtsd2ss_u128_r64, iemAImpl_vcvtsd2ss_u128_r64_fallback;
    5047 
    5048 
    5049 FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddps_u256, iemAImpl_vaddps_u256_fallback;
    5050 FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddpd_u256, iemAImpl_vaddpd_u256_fallback;
    5051 FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulps_u256, iemAImpl_vmulps_u256_fallback;
    5052 FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulpd_u256, iemAImpl_vmulpd_u256_fallback;
    5053 FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubps_u256, iemAImpl_vsubps_u256_fallback;
    5054 FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubpd_u256, iemAImpl_vsubpd_u256_fallback;
    5055 FNIEMAIMPLFPAVXF3U256 iemAImpl_vminps_u256, iemAImpl_vminps_u256_fallback;
    5056 FNIEMAIMPLFPAVXF3U256 iemAImpl_vminpd_u256, iemAImpl_vminpd_u256_fallback;
    5057 FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivps_u256, iemAImpl_vdivps_u256_fallback;
    5058 FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivpd_u256, iemAImpl_vdivpd_u256_fallback;
    5059 FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxps_u256, iemAImpl_vmaxps_u256_fallback;
    5060 FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxpd_u256, iemAImpl_vmaxpd_u256_fallback;
    5061 FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddps_u256, iemAImpl_vhaddps_u256_fallback;
    5062 FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddpd_u256, iemAImpl_vhaddpd_u256_fallback;
    5063 FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubps_u256, iemAImpl_vhsubps_u256_fallback;
    5064 FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubpd_u256, iemAImpl_vhsubpd_u256_fallback;
    5065 FNIEMAIMPLMEDIAF3U256 iemAImpl_vaddsubps_u256, iemAImpl_vaddsubps_u256_fallback;
    5066 FNIEMAIMPLMEDIAF3U256 iemAImpl_vaddsubpd_u256, iemAImpl_vaddsubpd_u256_fallback;
    5067 FNIEMAIMPLMEDIAF2U256 iemAImpl_vsqrtps_u256, iemAImpl_vsqrtps_u256_fallback;
    5068 FNIEMAIMPLMEDIAF2U256 iemAImpl_vsqrtpd_u256, iemAImpl_vsqrtpd_u256_fallback;
    5069 FNIEMAIMPLMEDIAF2U256 iemAImpl_vrsqrtps_u256,  iemAImpl_vrsqrtps_u256_fallback;
    5070 FNIEMAIMPLMEDIAF2U256 iemAImpl_vrcpps_u256,    iemAImpl_vrcpps_u256_fallback;
    5071 FNIEMAIMPLMEDIAF2U256 iemAImpl_vcvtdq2ps_u256,  iemAImpl_vcvtdq2ps_u256_fallback;
    5072 FNIEMAIMPLMEDIAF2U256 iemAImpl_vcvtps2dq_u256,  iemAImpl_vcvtps2dq_u256_fallback;
    5073 FNIEMAIMPLMEDIAF2U256 iemAImpl_vcvttps2dq_u256, iemAImpl_vcvttps2dq_u256_fallback;
    5074 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvtpd2ps_u128_u256,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86YMMREG puSrc));
    5075 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvtpd2ps_u128_u256_fallback,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86YMMREG puSrc));
    5076 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvttpd2dq_u128_u256,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86YMMREG puSrc));
    5077 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvttpd2dq_u128_u256_fallback,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86YMMREG puSrc));
    5078 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvtpd2dq_u128_u256,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86YMMREG puSrc));
    5079 IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_vcvtpd2dq_u128_u256_fallback,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCX86YMMREG puSrc));
    5080 /** @} */
    5081 2713
    5082 2714 /** @name C instruction implementations for anything slightly complicated.
     
    5517 3149 #endif
    5518 3150 
    5519 /**
    5520  * Gets the CPU mode (from fExec) as a IEMMODE value.
    5521  *
    5522  * @returns IEMMODE
    5523  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5524  */
    5525 #define IEM_GET_CPU_MODE(a_pVCpu)           ((a_pVCpu)->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK)
    5526 
    5527 /**
    5528  * Check if we're currently executing in real or virtual 8086 mode.
    5529  *
    5530  * @returns @c true if it is, @c false if not.
    5531  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5532  */
    5533 #define IEM_IS_REAL_OR_V86_MODE(a_pVCpu)    ((  ((a_pVCpu)->iem.s.fExec  ^ IEM_F_MODE_X86_PROT_MASK) \
    5534                                               & (IEM_F_MODE_X86_V86_MASK | IEM_F_MODE_X86_PROT_MASK)) != 0)
    5535 
    5536 /**
    5537  * Check if we're currently executing in virtual 8086 mode.
    5538  *
    5539  * @returns @c true if it is, @c false if not.
    5540  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5541  */
    5542 #define IEM_IS_V86_MODE(a_pVCpu)            (((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_V86_MASK) != 0)
    5543 
    5544 /**
    5545  * Check if we're currently executing in long mode.
    5546  *
    5547  * @returns @c true if it is, @c false if not.
    5548  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5549  */
    5550 #define IEM_IS_LONG_MODE(a_pVCpu)           (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
    5551 
    5552 /**
    5553  * Check if we're currently executing in a 16-bit code segment.
    5554  *
    5555  * @returns @c true if it is, @c false if not.
    5556  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5557  */
    5558 #define IEM_IS_16BIT_CODE(a_pVCpu)          (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_16BIT)
    5559 
    5560 /**
    5561  * Check if we're currently executing in a 32-bit code segment.
    5562  *
    5563  * @returns @c true if it is, @c false if not.
    5564  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5565  */
    5566 #define IEM_IS_32BIT_CODE(a_pVCpu)          (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_32BIT)
    5567 
    5568 /**
    5569  * Check if we're currently executing in a 64-bit code segment.
    5570  *
    5571  * @returns @c true if it is, @c false if not.
    5572  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5573  */
    5574 #define IEM_IS_64BIT_CODE(a_pVCpu)          (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_64BIT)
    5575 
    5576 /**
    5577  * Check if we're currently executing in real mode.
    5578  *
    5579  * @returns @c true if it is, @c false if not.
    5580  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5581  */
    5582 #define IEM_IS_REAL_MODE(a_pVCpu)           (!((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_PROT_MASK))
    5583 
    5584 /**
    5585  * Gets the current protection level (CPL).
    5586  *
    5587  * @returns 0..3
    5588  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5589  */
    5590 #define IEM_GET_CPL(a_pVCpu)                (((a_pVCpu)->iem.s.fExec >> IEM_F_X86_CPL_SHIFT) & IEM_F_X86_CPL_SMASK)
    5591 
    5592 /**
    5593  * Sets the current protection level (CPL).
    5594  *
    5595  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5596  */
    5597 #define IEM_SET_CPL(a_pVCpu, a_uCpl) \
    5598     do { (a_pVCpu)->iem.s.fExec = ((a_pVCpu)->iem.s.fExec & ~IEM_F_X86_CPL_MASK) | ((a_uCpl) << IEM_F_X86_CPL_SHIFT); } while (0)
    5599 
    5600 /**
    5601  * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
    5602  * @returns PCCPUMFEATURES
    5603  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5604  */
    5605 #define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
    5606 
    5607 /**
    5608  * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
    5609  * @returns PCCPUMFEATURES
    5610  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5611  */
    5612 #define IEM_GET_HOST_CPU_FEATURES(a_pVCpu)  (&g_CpumHostFeatures.s)
    5613 
    5614 /**
    5615  * Evaluates to true if we're presenting an Intel CPU to the guest.
    5616  */
    5617 #define IEM_IS_GUEST_CPU_INTEL(a_pVCpu)     ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
    5618 
    5619 /**
    5620  * Evaluates to true if we're presenting an AMD CPU to the guest.
    5621  */
    5622 #define IEM_IS_GUEST_CPU_AMD(a_pVCpu)       ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
    5623 
    5624 /**
    5625  * Check if the address is canonical.
    5626  */
    5627 #define IEM_IS_CANONICAL(a_u64Addr)         X86_IS_CANONICAL(a_u64Addr)
    5628 
    5629 /** Checks if the ModR/M byte is in register mode or not.  */
    5630 #define IEM_IS_MODRM_REG_MODE(a_bRm)        ( ((a_bRm) & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT) )
    5631 /** Checks if the ModR/M byte is in memory mode or not.  */
    5632 #define IEM_IS_MODRM_MEM_MODE(a_bRm)        ( ((a_bRm) & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT) )
    5633 
    5634 /**
    5635  * Gets the register (reg) part of a ModR/M encoding, with REX.R added in.
    5636  *
    5637  * For use during decoding.
    5638  */
    5639 #define IEM_GET_MODRM_REG(a_pVCpu, a_bRm)   ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | (a_pVCpu)->iem.s.uRexReg )
    5640 /**
    5641  * Gets the r/m part of a ModR/M encoding as a register index, with REX.B added in.
    5642  *
    5643  * For use during decoding.
    5644  */
    5645 #define IEM_GET_MODRM_RM(a_pVCpu, a_bRm)    ( ((a_bRm) & X86_MODRM_RM_MASK) | (a_pVCpu)->iem.s.uRexB )
    5646 
    5647 /**
    5648  * Gets the register (reg) part of a ModR/M encoding, without REX.R.
    5649  *
    5650  * For use during decoding.
    5651  */
    5652 #define IEM_GET_MODRM_REG_8(a_bRm)          ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) )
    5653 /**
    5654  * Gets the r/m part of a ModR/M encoding as a register index, without REX.B.
    5655  *
    5656  * For use during decoding.
    5657  */
    5658 #define IEM_GET_MODRM_RM_8(a_bRm)           ( ((a_bRm) & X86_MODRM_RM_MASK) )
    5659 
    5660 /**
    5661  * Gets the register (reg) part of a ModR/M encoding as an extended 8-bit
    5662  * register index, with REX.R added in.
    5663  *
    5664  * For use during decoding.
    5665  *
    5666  * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
    5667  */
    5668 #define IEM_GET_MODRM_REG_EX8(a_pVCpu, a_bRm) \
    5669     (   (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
    5670      || !((a_bRm) & (4 << X86_MODRM_REG_SHIFT)) /* IEM_GET_MODRM_REG(pVCpu, a_bRm) < 4 */ \
    5671      ? IEM_GET_MODRM_REG(pVCpu, a_bRm) : (((a_bRm) >> X86_MODRM_REG_SHIFT) & 3) | 16)
    5672 /**
    5673  * Gets the r/m part of a ModR/M encoding as an extended 8-bit register index,
    5674  * with REX.B added in.
    5675  *
    5676  * For use during decoding.
    5677  *
    5678  * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
    5679  */
    5680 #define IEM_GET_MODRM_RM_EX8(a_pVCpu, a_bRm) \
    5681     (   (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
    5682      || !((a_bRm) & 4) /* IEM_GET_MODRM_RM(pVCpu, a_bRm) < 4 */ \
    5683      ? IEM_GET_MODRM_RM(pVCpu, a_bRm) : ((a_bRm) & 3) | 16)
    5684 
    5685 /**
    5686  * Combines the prefix REX and ModR/M byte for passing to
    5687  * iemOpHlpCalcRmEffAddrThreadedAddr64().
    5688  *
    5689  * @returns The ModRM byte but with bit 3 set to REX.B and bit 4 to REX.X.
    5690  *          The two bits are part of the REG sub-field, which isn't needed in
    5691  *          iemOpHlpCalcRmEffAddrThreadedAddr64().
    5692  *
    5693  * For use during decoding/recompiling.
    5694  */
    5695 #define IEM_GET_MODRM_EX(a_pVCpu, a_bRm) \
    5696     (  ((a_bRm) & ~X86_MODRM_REG_MASK) \
    5697      | (uint8_t)( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X)) >> (25 - 3) ) )
    5698 AssertCompile(IEM_OP_PRF_REX_B == RT_BIT_32(25));
    5699 AssertCompile(IEM_OP_PRF_REX_X == RT_BIT_32(26));
    5700 
    5701 /**
    5702  * Gets the effective VEX.VVVV value.
    5703  *
    5704  * The 4th bit is ignored if not 64-bit code.
    5705  * @returns effective V-register value.
    5706  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    5707  */
    5708 #define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
    5709     (IEM_IS_64BIT_CODE(a_pVCpu) ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
    5710 
    5711 
    5712 /**
    5713  * Gets the register (reg) part of a the special 4th register byte used by
    5714  * vblendvps and vblendvpd.
    5715  *
    5716  * For use during decoding.
    5717  */
    5718 #define IEM_GET_IMM8_REG(a_pVCpu, a_bRegImm8) \
    5719     (IEM_IS_64BIT_CODE(a_pVCpu) ? (a_bRegImm8) >> 4 : ((a_bRegImm8) >> 4) & 7)
    5720 
    5721 
    5722 /**
    5723  * Checks if we're executing inside an AMD-V or VT-x guest.
    5724  */
    5725 #if defined(VBOX_WITH_NESTED_HWVIRT_VMX) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
    5726 # define IEM_IS_IN_GUEST(a_pVCpu)       RT_BOOL((a_pVCpu)->iem.s.fExec & IEM_F_X86_CTX_IN_GUEST)
    5727 #else
    5728 # define IEM_IS_IN_GUEST(a_pVCpu)       false
    5729 #endif
    5730 
    5731 
    5732 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    5733 
    5734 /**
    5735  * Check if the guest has entered VMX root operation.
    5736  */
    5737 # define IEM_VMX_IS_ROOT_MODE(a_pVCpu)      (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
    5738 
    5739 /**
    5740  * Check if the guest has entered VMX non-root operation.
    5741  */
    5742 # define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu)  (   ((a_pVCpu)->iem.s.fExec & (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST)) \
    5743                                              ==                           (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST) )
    5744 
    5745 /**
    5746  * Check if the nested-guest has the given Pin-based VM-execution control set.
    5747  */
    5748 # define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl)  (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
    5749 
    5750 /**
    5751  * Check if the nested-guest has the given Processor-based VM-execution control set.
    5752  */
    5753 # define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
    5754 
    5755 /**
    5756  * Check if the nested-guest has the given Secondary Processor-based VM-execution
    5757  * control set.
    5758  */
    5759 # define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
    5760 
    5761 /** Gets the guest-physical address of the shadows VMCS for the given VCPU. */
    5762 # define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu)           ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
    5763 
    5764 /** Whether a shadow VMCS is present for the given VCPU. */
    5765 # define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu)           RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
    5766 
    5767 /** Gets the VMXON region pointer. */
    5768 # define IEM_VMX_GET_VMXON_PTR(a_pVCpu)             ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
    5769 
    5770 /** Gets the guest-physical address of the current VMCS for the given VCPU. */
    5771 # define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu)          ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
    5772 
    5773 /** Whether a current VMCS is present for the given VCPU. */
    5774 # define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu)          RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
    5775 
    5776 /** Assigns the guest-physical address of the current VMCS for the given VCPU. */
    5777 # define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
    5778     do \
    5779     { \
    5780         Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
    5781         (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
    5782     } while (0)
    5783 
    5784 /** Clears any current VMCS for the given VCPU. */
    5785 # define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
    5786     do \
    5787     { \
    5788         (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
    5789     } while (0)
    5790 
    5791 /**
    5792  * Invokes the VMX VM-exit handler for an instruction intercept.
    5793  */
    5794 # define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
    5795     do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
    5796 
    5797 /**
    5798  * Invokes the VMX VM-exit handler for an instruction intercept where the
    5799  * instruction provides additional VM-exit information.
    5800  */
    5801 # define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
    5802     do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
    5803 
    5804 /**
    5805  * Invokes the VMX VM-exit handler for a task switch.
    5806  */
    5807 # define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
    5808     do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
    5809 
    5810 /**
    5811  * Invokes the VMX VM-exit handler for MWAIT.
    5812  */
    5813 # define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
    5814     do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
    5815 
    5816 /**
    5817  * Invokes the VMX VM-exit handler for EPT faults.
    5818  */
    5819 # define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
    5820     do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)
    5821 
    5822 /**
    5823  * Invokes the VMX VM-exit handler.
    5824  */
    5825 # define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
    5826     do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
    5827 
    5828 #else
    5829 # define IEM_VMX_IS_ROOT_MODE(a_pVCpu)                                          (false)
    5830 # define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu)                                      (false)
    5831 # define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr)                             (false)
    5832 # define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr)                            (false)
    5833 # define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr)                           (false)
    5834 # define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr)            do { return VERR_VMX_IPE_1; } while (0)
    5835 # define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr)  do { return VERR_VMX_IPE_1; } while (0)
    5836 # define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr)    do { return VERR_VMX_IPE_1; } while (0)
    5837 # define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr)          do { return VERR_VMX_IPE_1; } while (0)
    5838 # define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr)       do { return VERR_VMX_IPE_1; } while (0)
    5839 # define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual)   do { return VERR_VMX_IPE_1; } while (0)
    5840 
    5841 #endif
    5842 
    5843 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    5844 /**
    5845  * Checks if we're executing a guest using AMD-V.
    5846  */
    5847 # define IEM_SVM_IS_IN_GUEST(a_pVCpu) (   (a_pVCpu->iem.s.fExec & (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST)) \
    5848                                        ==                         (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST))
    5849 /**
    5850  * Check if an SVM control/instruction intercept is set.
    5851  */
    5852 # define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
    5853     (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
    5854 
    5855 /**
    5856  * Check if an SVM read CRx intercept is set.
    5857  */
    5858 # define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
    5859     (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
    5860 
    5861 /**
    5862  * Check if an SVM write CRx intercept is set.
    5863  */
    5864 # define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
    5865     (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
    5866 
    5867 /**
    5868  * Check if an SVM read DRx intercept is set.
    5869  */
    5870 # define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
    5871     (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
    5872 
    5873 /**
    5874  * Check if an SVM write DRx intercept is set.
    5875  */
    5876 # define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
    5877     (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
    5878 
    5879 /**
    5880  * Check if an SVM exception intercept is set.
    5881  */
    5882 # define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
    5883     (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
    5884 
    5885 /**
    5886  * Invokes the SVM \#VMEXIT handler for the nested-guest.
    5887  */
    5888 # define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
    5889     do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
    5890 
    5891 /**
    5892  * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
    5893  * corresponding decode assist information.
    5894  */
    5895 # define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
    5896     do \
    5897     { \
    5898         uint64_t uExitInfo1; \
    5899         if (   IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
    5900             && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
    5901             uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
    5902         else \
    5903             uExitInfo1 = 0; \
    5904         IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
    5905     } while (0)
    5906 
    5907 /** Check and handles SVM nested-guest instruction intercept and updates
    5908  *  NRIP if needed.
    5909  */
    5910 # define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
    5911     do \
    5912     { \
    5913         if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
    5914         { \
    5915             IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
    5916             IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
    5917         } \
    5918     } while (0)
    5919 
    5920 /** Checks and handles SVM nested-guest CR0 read intercept. */
    5921 # define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
    5922     do \
    5923     { \
    5924         if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
    5925         { /* probably likely */ } \
    5926         else \
    5927         { \
    5928             IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
    5929             IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
    5930         } \
    5931     } while (0)
    5932 
    5933 /**
    5934  * Updates the NextRIP (NRI) field in the nested-guest VMCB.
    5935  */
    5936 # define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr) \
    5937     do { \
    5938         if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
    5939             CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_cbInstr)); \
    5940     } while (0)
    5941 
    5942 #else
    5943 # define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)                                (false)
    5944 # define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                                   (false)
    5945 # define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                                  (false)
    5946 # define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                                   (false)
    5947 # define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                                  (false)
    5948 # define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)                                  (false)
    5949 # define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)               do { return VERR_SVM_IPE_1; } while (0)
    5950 # define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg)              do { return VERR_SVM_IPE_1; } while (0)
    5951 # define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, \
    5952                                        a_uExitInfo1, a_uExitInfo2, a_cbInstr)               do { } while (0)
    5953 # define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr)   do { } while (0)
    5954 # define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr)                                            do { } while (0)
    5955 
    5956 #endif
    5957 3151
    5958 3152 /** @} */
     
    5961 3155 VBOXSTRICTRC            iemExecInjectPendingTrap(PVMCPUCC pVCpu);
    5962 3156 
    5963 
    5964 /**
    5965  * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
    5966  */
    5967 typedef union IEMSELDESC
    5968 {
    5969     /** The legacy view. */
    5970     X86DESC     Legacy;
    5971     /** The long mode view. */
    5972     X86DESC64   Long;
    5973 } IEMSELDESC;
    5974 /** Pointer to a selector descriptor table entry. */
    5975 typedef IEMSELDESC *PIEMSELDESC;
    5976 
    5977 /** @name  Raising Exceptions.
    5978  * @{ */
    5979 VBOXSTRICTRC            iemTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, uint32_t uNextEip, uint32_t fFlags,
    5980                                       uint16_t uErr, uint64_t uCr2, RTSEL SelTSS, PIEMSELDESC pNewDescTSS) RT_NOEXCEPT;
    5981 
    5982 VBOXSTRICTRC            iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
    5983                                           uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
    5984 #ifdef IEM_WITH_SETJMP
    5985 DECL_NO_RETURN(void)    iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
    5986                                              uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
    5987 #endif
    5988 VBOXSTRICTRC            iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
    5989 #ifdef IEM_WITH_SETJMP
    5990 DECL_NO_RETURN(void)    iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    5991 #endif
    5992 VBOXSTRICTRC            iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
    5993 VBOXSTRICTRC            iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT;
    5994 VBOXSTRICTRC            iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
    5995 #ifdef IEM_WITH_SETJMP
    5996 DECL_NO_RETURN(void)    iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    5997 #endif
    5998 VBOXSTRICTRC            iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT;
    5999 #ifdef IEM_WITH_SETJMP
    6000 DECL_NO_RETURN(void)    iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    6001 #endif
    6002 VBOXSTRICTRC            iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
    6003 VBOXSTRICTRC            iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6004 VBOXSTRICTRC            iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6005 VBOXSTRICTRC            iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
    6006 /*VBOXSTRICTRC            iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;*/
    6007 VBOXSTRICTRC            iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
    6008 VBOXSTRICTRC            iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
    6009 VBOXSTRICTRC            iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
    6010 VBOXSTRICTRC            iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
    6011 VBOXSTRICTRC            iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
    6012 VBOXSTRICTRC            iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6013 #ifdef IEM_WITH_SETJMP
    6014 DECL_NO_RETURN(void)    iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    6015 #endif
    6016 VBOXSTRICTRC            iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
    6017 VBOXSTRICTRC            iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6018 VBOXSTRICTRC            iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
    6019 #ifdef IEM_WITH_SETJMP
    6020 DECL_NO_RETURN(void)    iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
    6021 #endif
    6022 VBOXSTRICTRC            iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
    6023 #ifdef IEM_WITH_SETJMP
    6024 DECL_NO_RETURN(void)    iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP;
    6025 #endif
    6026 VBOXSTRICTRC            iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
    6027 #ifdef IEM_WITH_SETJMP
    6028 DECL_NO_RETURN(void)    iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
    6029 #endif
    6030 VBOXSTRICTRC            iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
    6031 #ifdef IEM_WITH_SETJMP
    6032 DECL_NO_RETURN(void)    iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
    6033 #endif
    6034 VBOXSTRICTRC            iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6035 #ifdef IEM_WITH_SETJMP
    6036 DECL_NO_RETURN(void)    iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    6037 #endif
    6038 VBOXSTRICTRC            iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6039 #ifdef IEM_WITH_SETJMP
    6040 DECL_NO_RETURN(void)    iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    6041 #endif
    6042 VBOXSTRICTRC            iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6043 #ifdef IEM_WITH_SETJMP
    6044 DECL_NO_RETURN(void)    iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    6045 #endif
    6046 
    6047 void                    iemLogSyscallRealModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);
    6048 void                    iemLogSyscallProtModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);
    6049 
    6050 IEM_CIMPL_DEF_0(iemCImplRaiseDivideError);
    6051 IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix);
    6052 IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode);
    6053 
    6054 /**
    6055  * Macro for calling iemCImplRaiseDivideError().
    6056  *
    6057  * This is for things that will _always_ decode to an \#DE, taking the
    6058  * recompiler into consideration and everything.
    6059  *
    6060  * @return  Strict VBox status code.
    6061  */
    6062 #define IEMOP_RAISE_DIVIDE_ERROR_RET()          IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseDivideError)
    6063 
    6064 /**
    6065  * Macro for calling iemCImplRaiseInvalidLockPrefix().
    6066  *
    6067  * This is for things that will _always_ decode to an \#UD, taking the
    6068  * recompiler into consideration and everything.
    6069  *
    6070  * @return  Strict VBox status code.
    6071  */
    6072 #define IEMOP_RAISE_INVALID_LOCK_PREFIX_RET()   IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidLockPrefix)
    6073 
    6074 /**
    6075  * Macro for calling iemCImplRaiseInvalidOpcode() for decode/static \#UDs.
    6076  *
    6077  * This is for things that will _always_ decode to an \#UD, taking the
    6078  * recompiler into consideration and everything.
    6079  *
    6080  * @return  Strict VBox status code.
    6081  */
    6082 #define IEMOP_RAISE_INVALID_OPCODE_RET()        IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidOpcode)
    6083 
    6084 /**
    6085  * Macro for calling iemCImplRaiseInvalidOpcode() for runtime-style \#UDs.
    6086  *
    6087  * Using this macro means you've got _buggy_ _code_ and are doing things that
    6088  * belongs exclusively in IEMAllCImpl.cpp during decoding.
    6089  *
    6090  * @return  Strict VBox status code.
    6091  * @see     IEMOP_RAISE_INVALID_OPCODE_RET
    6092  */
    6093 #define IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidOpcode)
    6094 
    6095 3157 /** @} */
    6096 3158 
    6097 /** @name Register Access.
    6098  * @{ */
    6099 VBOXSTRICTRC    iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
    6100                                                            IEMMODE enmEffOpSize) RT_NOEXCEPT;
    6101 VBOXSTRICTRC    iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT;
    6102 VBOXSTRICTRC    iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
    6103                                                             IEMMODE enmEffOpSize) RT_NOEXCEPT;
    6104 /** @} */
    6105 
    6106 /** @name FPU access and helpers.
    6107  * @{ */
    6108 void            iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6109 void            iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6110 void            iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6111 void            iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6112 void            iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6113 void            iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
    6114                                            uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6115 void            iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
    6116                                                   uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6117 void            iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6118 void            iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6119 void            iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6120 void            iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6121 void            iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6122 void            iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6123 void            iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6124 void            iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6125 void            iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6126 void            iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6127 void            iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6128 void            iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6129 void            iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6130 void            iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6131 void            iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
    6132 /** @} */
    6133 
    6134 /** @name SSE+AVX SIMD access and helpers.
    6135  * @{ */
    6136 void            iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT;
    6137 /** @} */
    61383159
    61393160/** @name   Memory access.
    61403161 * @{ */
    6141 
    6142 /** Report a \#GP instead of \#AC and do not restrict to ring-3 */
    6143 #define IEM_MEMMAP_F_ALIGN_GP       RT_BIT_32(16)
    6144 /** SSE access that should report a \#GP instead of \#AC, unless MXCSR.MM=1
    6145  *  when it works like normal \#AC. Always used with IEM_MEMMAP_F_ALIGN_GP. */
    6146 #define IEM_MEMMAP_F_ALIGN_SSE      RT_BIT_32(17)
    6147 /** If \#AC is applicable, raise it. Always used with IEM_MEMMAP_F_ALIGN_GP.
    6148  * Users include FXSAVE & FXRSTOR. */
    6149 #define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18)
    6150 
    6151 VBOXSTRICTRC    iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
    6152                           uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
    61533162VBOXSTRICTRC    iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    61543163#ifndef IN_RING3
     
    61573166void            iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    61583167void            iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6159 VBOXSTRICTRC    iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
    6160 VBOXSTRICTRC    iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
    6161 VBOXSTRICTRC    iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT;
    6162 
    6163 void            iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr);
    6164 void            iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr);
    6165 #ifdef IEM_WITH_CODE_TLB
    6166 void            iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP;
    6167 #else
    6168 VBOXSTRICTRC    iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT;
    6169 #endif
     3168
    61703169#ifdef IEM_WITH_SETJMP
    6171 uint8_t         iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    6172 uint16_t        iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    6173 uint32_t        iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    6174 uint64_t        iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    6175 #else
    6176 VBOXSTRICTRC    iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT;
    6177 VBOXSTRICTRC    iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
    6178 VBOXSTRICTRC    iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
    6179 VBOXSTRICTRC    iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
    6180 VBOXSTRICTRC    iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
    6181 VBOXSTRICTRC    iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
    6182 VBOXSTRICTRC    iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
    6183 VBOXSTRICTRC    iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
    6184 VBOXSTRICTRC    iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
    6185 VBOXSTRICTRC    iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
    6186 VBOXSTRICTRC    iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
    6187 #endif
    6188 
    6189 VBOXSTRICTRC    iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6190 VBOXSTRICTRC    iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6191 VBOXSTRICTRC    iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6192 VBOXSTRICTRC    iemMemFetchDataU32NoAc(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6193 VBOXSTRICTRC    iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6194 VBOXSTRICTRC    iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6195 VBOXSTRICTRC    iemMemFetchDataU64NoAc(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6196 VBOXSTRICTRC    iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6197 VBOXSTRICTRC    iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6198 VBOXSTRICTRC    iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6199 VBOXSTRICTRC    iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6200 VBOXSTRICTRC    iemMemFetchDataU128NoAc(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6201 VBOXSTRICTRC    iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6202 VBOXSTRICTRC    iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6203 VBOXSTRICTRC    iemMemFetchDataU256NoAc(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6204 VBOXSTRICTRC    iemMemFetchDataU256AlignedAvx(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6205 VBOXSTRICTRC    iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
    6206                                     RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT;
    6207 #ifdef IEM_WITH_SETJMP
    6208 uint8_t         iemMemFetchDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6209 uint16_t        iemMemFetchDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6210 uint32_t        iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6211 uint32_t        iemMemFetchDataU32NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6212 uint32_t        iemMemFlatFetchDataU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6213 uint64_t        iemMemFetchDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6214 uint64_t        iemMemFetchDataU64NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6215 uint64_t        iemMemFetchDataU64AlignedU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6216 void            iemMemFetchDataR80SafeJmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6217 void            iemMemFetchDataD80SafeJmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6218 void            iemMemFetchDataU128SafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6219 void            iemMemFetchDataU128NoAcSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6220 void            iemMemFetchDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6221 void            iemMemFetchDataU256SafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6222 void            iemMemFetchDataU256NoAcSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6223 void            iemMemFetchDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6224 # if 0 /* these are inlined now */
    6225 uint8_t         iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6226 uint16_t        iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6227 uint32_t        iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6228 uint32_t        iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6229 uint64_t        iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6230 uint64_t        iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6231 void            iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6232 void            iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6233 void            iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6234 void            iemMemFetchDataU128NoAcJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6235 void            iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6236 void            iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6237 void            iemMemFetchDataU256AlignedAvxJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6238 # endif
    6239 void            iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6240 #endif
    6241 
    6242 VBOXSTRICTRC    iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6243 VBOXSTRICTRC    iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6244 VBOXSTRICTRC    iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6245 VBOXSTRICTRC    iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6246 VBOXSTRICTRC    iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT;
    6247 
    6248 VBOXSTRICTRC    iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT;
    6249 VBOXSTRICTRC    iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
    6250 VBOXSTRICTRC    iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT;
    6251 VBOXSTRICTRC    iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT;
    6252 VBOXSTRICTRC    iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
    6253 VBOXSTRICTRC    iemMemStoreDataU128NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
    6254 VBOXSTRICTRC    iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
    6255 VBOXSTRICTRC    iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
    6256 VBOXSTRICTRC    iemMemStoreDataU256NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
    6257 VBOXSTRICTRC    iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
    6258 VBOXSTRICTRC    iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
    6259 #ifdef IEM_WITH_SETJMP
    6260 void            iemMemStoreDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6261 void            iemMemStoreDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6262 void            iemMemStoreDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6263 void            iemMemStoreDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6264 void            iemMemStoreDataU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6265 void            iemMemStoreDataU128NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U pu128Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6266 void            iemMemStoreDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U pu128Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6267 void            iemMemStoreDataU256SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6268 void            iemMemStoreDataU256NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6269 void            iemMemStoreDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6270 void            iemMemStoreDataR80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTFLOAT80U pr80Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6271 void            iemMemStoreDataD80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTPBCD80U pd80Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6272 #if 0
    6273 void            iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6274 void            iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6275 void            iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6276 void            iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6277 void            iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6278 void            iemMemStoreDataNoAcU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6279 void            iemMemStoreDataU256NoAcJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6280 void            iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6281 #endif
    6282 void            iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6283 void            iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
    6284 #endif
    6285 
    6286 #ifdef IEM_WITH_SETJMP
    6287 uint8_t        *iemMemMapDataU8RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6288 uint8_t        *iemMemMapDataU8AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6289 uint8_t        *iemMemMapDataU8WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6290 uint8_t const  *iemMemMapDataU8RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6291 uint16_t       *iemMemMapDataU16RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6292 uint16_t       *iemMemMapDataU16AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6293 uint16_t       *iemMemMapDataU16WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6294 uint16_t const *iemMemMapDataU16RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6295 uint32_t       *iemMemMapDataU32RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6296 uint32_t       *iemMemMapDataU32AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6297 uint32_t       *iemMemMapDataU32WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6298 uint32_t const *iemMemMapDataU32RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6299 uint64_t       *iemMemMapDataU64RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6300 uint64_t       *iemMemMapDataU64AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6301 uint64_t       *iemMemMapDataU64WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6302 uint64_t const *iemMemMapDataU64RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6303 PRTFLOAT80U     iemMemMapDataR80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6304 PRTFLOAT80U     iemMemMapDataR80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6305 PCRTFLOAT80U    iemMemMapDataR80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6306 PRTPBCD80U      iemMemMapDataD80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6307 PRTPBCD80U      iemMemMapDataD80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6308 PCRTPBCD80U     iemMemMapDataD80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6309 PRTUINT128U     iemMemMapDataU128RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6310 PRTUINT128U     iemMemMapDataU128AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6311 PRTUINT128U     iemMemMapDataU128WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6312 PCRTUINT128U    iemMemMapDataU128RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6313 
    63143170void            iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
    63153171void            iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
     
    63193175void            iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    63203176#endif
    6321 
    6322 VBOXSTRICTRC    iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
    6323                                             void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
    6324 VBOXSTRICTRC    iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT;
    6325 VBOXSTRICTRC    iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
    6326 VBOXSTRICTRC    iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
    6327 VBOXSTRICTRC    iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT;
    6328 VBOXSTRICTRC    iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
    6329 VBOXSTRICTRC    iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
    6330 VBOXSTRICTRC    iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
    6331 VBOXSTRICTRC    iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
    6332 VBOXSTRICTRC    iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
    6333                                            void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
    6334 VBOXSTRICTRC    iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
    6335                                               void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT;
    6336 VBOXSTRICTRC    iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
    6337 VBOXSTRICTRC    iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT;
    6338 VBOXSTRICTRC    iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT;
    6339 VBOXSTRICTRC    iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT;
    6340 VBOXSTRICTRC    iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
    6341 VBOXSTRICTRC    iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
    6342 VBOXSTRICTRC    iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
    6343 
    6344 #ifdef IEM_WITH_SETJMP
    6345 void            iemMemStackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6346 void            iemMemStackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6347 void            iemMemStackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6348 void            iemMemStackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6349 void            iemMemStackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
    6350 void            iemMemStackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
    6351 void            iemMemStackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
    6352 
    6353 void            iemMemFlat32StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6354 void            iemMemFlat32StackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6355 void            iemMemFlat32StackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6356 void            iemMemFlat32StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
    6357 void            iemMemFlat32StackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
    6358 
    6359 void            iemMemFlat64StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6360 void            iemMemFlat64StackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6361 void            iemMemFlat64StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
    6362 void            iemMemFlat64StackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
    6363 
    6364 void            iemMemStoreStackU16SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6365 void            iemMemStoreStackU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6366 void            iemMemStoreStackU32SRegSafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6367 void            iemMemStoreStackU64SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
    6368 
    6369 uint16_t        iemMemFetchStackU16SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6370 uint32_t        iemMemFetchStackU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6371 uint64_t        iemMemFetchStackU64SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
    6372 
    6373 #endif
    6374 
    63753177/** @} */
    6376 
    6377 /** @name IEMAllCImpl.cpp
    6378  * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/'
    6379  * @{ */
    6380 IEM_CIMPL_PROTO_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
    6381 IEM_CIMPL_PROTO_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
    6382 IEM_CIMPL_PROTO_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
    6383 IEM_CIMPL_PROTO_0(iemCImpl_popa_16);
    6384 IEM_CIMPL_PROTO_0(iemCImpl_popa_32);
    6385 IEM_CIMPL_PROTO_0(iemCImpl_pusha_16);
    6386 IEM_CIMPL_PROTO_0(iemCImpl_pusha_32);
    6387 IEM_CIMPL_PROTO_1(iemCImpl_pushf, IEMMODE, enmEffOpSize);
    6388 IEM_CIMPL_PROTO_1(iemCImpl_popf, IEMMODE, enmEffOpSize);
    6389 IEM_CIMPL_PROTO_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
    6390 IEM_CIMPL_PROTO_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
    6391 typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
    6392 typedef FNIEMCIMPLFARBRANCH *PFNIEMCIMPLFARBRANCH;
    6393 IEM_CIMPL_PROTO_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop);
    6394 IEM_CIMPL_PROTO_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters);
    6395 IEM_CIMPL_PROTO_1(iemCImpl_leave, IEMMODE, enmEffOpSize);
    6396 IEM_CIMPL_PROTO_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt);
    6397 IEM_CIMPL_PROTO_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize);
    6398 IEM_CIMPL_PROTO_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp);
    6399 IEM_CIMPL_PROTO_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize);
    6400 IEM_CIMPL_PROTO_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize);
    6401 IEM_CIMPL_PROTO_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize);
    6402 IEM_CIMPL_PROTO_1(iemCImpl_iret, IEMMODE, enmEffOpSize);
    6403 IEM_CIMPL_PROTO_0(iemCImpl_loadall286);
    6404 IEM_CIMPL_PROTO_0(iemCImpl_syscall);
    6405 IEM_CIMPL_PROTO_1(iemCImpl_sysret, IEMMODE, enmEffOpSize);
    6406 IEM_CIMPL_PROTO_0(iemCImpl_sysenter);
    6407 IEM_CIMPL_PROTO_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize);
    6408 IEM_CIMPL_PROTO_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel);
    6409 IEM_CIMPL_PROTO_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel);
    6410 IEM_CIMPL_PROTO_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize);
    6411 IEM_CIMPL_PROTO_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize);
    6412 IEM_CIMPL_PROTO_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite);
    6413 IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar);
    6414 IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar);
    6415 IEM_CIMPL_PROTO_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
    6416 IEM_CIMPL_PROTO_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
    6417 IEM_CIMPL_PROTO_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
    6418 IEM_CIMPL_PROTO_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
    6419 IEM_CIMPL_PROTO_1(iemCImpl_lldt, uint16_t, uNewLdt);
    6420 IEM_CIMPL_PROTO_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
    6421 IEM_CIMPL_PROTO_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
    6422 IEM_CIMPL_PROTO_1(iemCImpl_ltr, uint16_t, uNewTr);
    6423 IEM_CIMPL_PROTO_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
    6424 IEM_CIMPL_PROTO_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
    6425 IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg);
    6426 IEM_CIMPL_PROTO_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
    6427 IEM_CIMPL_PROTO_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
    6428 IEM_CIMPL_PROTO_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg);
    6429 IEM_CIMPL_PROTO_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg);
    6430 IEM_CIMPL_PROTO_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst);
    6431 IEM_CIMPL_PROTO_0(iemCImpl_clts);
    6432 IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg);
    6433 IEM_CIMPL_PROTO_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg);
    6434 IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg);
    6435 IEM_CIMPL_PROTO_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg);
    6436 IEM_CIMPL_PROTO_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage);
    6437 IEM_CIMPL_PROTO_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType);
    6438 IEM_CIMPL_PROTO_0(iemCImpl_invd);
    6439 IEM_CIMPL_PROTO_0(iemCImpl_wbinvd);
    6440 IEM_CIMPL_PROTO_0(iemCImpl_rsm);
    6441 IEM_CIMPL_PROTO_0(iemCImpl_rdtsc);
    6442 IEM_CIMPL_PROTO_0(iemCImpl_rdtscp);
    6443 IEM_CIMPL_PROTO_0(iemCImpl_rdpmc);
    6444 IEM_CIMPL_PROTO_0(iemCImpl_rdmsr);
    6445 IEM_CIMPL_PROTO_0(iemCImpl_wrmsr);
    6446 IEM_CIMPL_PROTO_3(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
    6447 IEM_CIMPL_PROTO_2(iemCImpl_in_eAX_DX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
    6448 IEM_CIMPL_PROTO_3(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
    6449 IEM_CIMPL_PROTO_2(iemCImpl_out_DX_eAX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
    6450 IEM_CIMPL_PROTO_0(iemCImpl_cli);
    6451 IEM_CIMPL_PROTO_0(iemCImpl_sti);
    6452 IEM_CIMPL_PROTO_0(iemCImpl_hlt);
    6453 IEM_CIMPL_PROTO_1(iemCImpl_monitor, uint8_t, iEffSeg);
    6454 IEM_CIMPL_PROTO_0(iemCImpl_mwait);
    6455 IEM_CIMPL_PROTO_0(iemCImpl_swapgs);
    6456 IEM_CIMPL_PROTO_0(iemCImpl_cpuid);
    6457 IEM_CIMPL_PROTO_1(iemCImpl_aad, uint8_t, bImm);
    6458 IEM_CIMPL_PROTO_1(iemCImpl_aam, uint8_t, bImm);
    6459 IEM_CIMPL_PROTO_0(iemCImpl_daa);
    6460 IEM_CIMPL_PROTO_0(iemCImpl_das);
    6461 IEM_CIMPL_PROTO_0(iemCImpl_aaa);
    6462 IEM_CIMPL_PROTO_0(iemCImpl_aas);
    6463 IEM_CIMPL_PROTO_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound);
    6464 IEM_CIMPL_PROTO_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound);
    6465 IEM_CIMPL_PROTO_0(iemCImpl_xgetbv);
    6466 IEM_CIMPL_PROTO_0(iemCImpl_xsetbv);
    6467 IEM_CIMPL_PROTO_5(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
    6468                   PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags, uint8_t, bUnmapInfo);
    6469 IEM_CIMPL_PROTO_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
    6470 IEM_CIMPL_PROTO_1(iemCImpl_finit, bool, fCheckXcpts);
    6471 IEM_CIMPL_PROTO_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
    6472 IEM_CIMPL_PROTO_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
    6473 IEM_CIMPL_PROTO_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
    6474 IEM_CIMPL_PROTO_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
    6475 IEM_CIMPL_PROTO_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
    6476 IEM_CIMPL_PROTO_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
    6477 IEM_CIMPL_PROTO_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
    6478 IEM_CIMPL_PROTO_2(iemCImpl_vldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
    6479 IEM_CIMPL_PROTO_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
    6480 IEM_CIMPL_PROTO_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
    6481 IEM_CIMPL_PROTO_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
    6482 IEM_CIMPL_PROTO_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
    6483 IEM_CIMPL_PROTO_1(iemCImpl_fldcw, uint16_t, u16Fcw);
    6484 IEM_CIMPL_PROTO_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode);
    6485 IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode);
    6486 IEM_CIMPL_PROTO_2(iemCImpl_rdseed, uint8_t, iReg, IEMMODE, enmEffOpSize);
    6487 IEM_CIMPL_PROTO_2(iemCImpl_rdrand, uint8_t, iReg, IEMMODE, enmEffOpSize);
    6488 IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovps_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
    6489 IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovps_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
    6490 IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovps_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc);
    6491 IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovps_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc);
    6492 IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovd_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
    6493 IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovd_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
    6494 IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovd_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc);
    6495 IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovd_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc);
    6496 IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovpd_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
    6497 IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovpd_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
    6498 IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovpd_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc);
    6499 IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovpd_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc);
    6500 IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovq_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
    6501 IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovq_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
    6502 IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovq_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc);
    6503 IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovq_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc);
    6504 IEM_CIMPL_PROTO_2(iemCImpl_vpgather_worker_xx, uint32_t, u32PackedArgs, uint32_t, u32Disp);
    6505 
    6506 /** @} */
    6507 
    6508 /** @name IEMAllCImplStrInstr.cpp.h
    6509  * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/' -e 's/RT_CONCAT4(//' \
    6510  *           -e 's/,ADDR_SIZE)/64/g' -e 's/,OP_SIZE,/64/g' -e 's/,OP_rAX,/rax/g' IEMAllCImplStrInstr.cpp.h
    6511  * @{ */
    6512 IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr16, uint8_t, iEffSeg);
    6513 IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr16, uint8_t, iEffSeg);
    6514 IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m16);
    6515 IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m16);
    6516 IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr16, uint8_t, iEffSeg);
    6517 IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m16);
    6518 IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m16, int8_t, iEffSeg);
    6519 IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr16, bool, fIoChecked);
    6520 IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr16, bool, fIoChecked);
    6521 IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
    6522 IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
    6523 
    6524 IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr16, uint8_t, iEffSeg);
    6525 IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr16, uint8_t, iEffSeg);
    6526 IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m16);
    6527 IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m16);
    6528 IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr16, uint8_t, iEffSeg);
    6529 IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m16);
    6530 IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m16, int8_t, iEffSeg);
    6531 IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr16, bool, fIoChecked);
    6532 IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr16, bool, fIoChecked);
    6533 IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
    6534 IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
    6535 
    6536 IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr16, uint8_t, iEffSeg);
    6537 IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr16, uint8_t, iEffSeg);
    6538 IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m16);
    6539 IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m16);
    6540 IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr16, uint8_t, iEffSeg);
    6541 IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m16);
    6542 IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m16, int8_t, iEffSeg);
    6543 IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr16, bool, fIoChecked);
    6544 IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr16, bool, fIoChecked);
    6545 IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
    6546 IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
    6547 
    6548 
    6549 IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr32, uint8_t, iEffSeg);
    6550 IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr32, uint8_t, iEffSeg);
    6551 IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m32);
    6552 IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m32);
    6553 IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr32, uint8_t, iEffSeg);
    6554 IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m32);
    6555 IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m32, int8_t, iEffSeg);
    6556 IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr32, bool, fIoChecked);
    6557 IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr32, bool, fIoChecked);
    6558 IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
    6559 IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
    6560 
    6561 IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr32, uint8_t, iEffSeg);
    6562 IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr32, uint8_t, iEffSeg);
    6563 IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m32);
    6564 IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m32);
    6565 IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr32, uint8_t, iEffSeg);
    6566 IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m32);
    6567 IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m32, int8_t, iEffSeg);
    6568 IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr32, bool, fIoChecked);
    6569 IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr32, bool, fIoChecked);
    6570 IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
    6571 IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
    6572 
    6573 IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr32, uint8_t, iEffSeg);
    6574 IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr32, uint8_t, iEffSeg);
    6575 IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m32);
    6576 IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m32);
    6577 IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr32, uint8_t, iEffSeg);
    6578 IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m32);
    6579 IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m32, int8_t, iEffSeg);
    6580 IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr32, bool, fIoChecked);
    6581 IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr32, bool, fIoChecked);
    6582 IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
    6583 IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
    6584 
    6585 IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr32, uint8_t, iEffSeg);
    6586 IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr32, uint8_t, iEffSeg);
    6587 IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m32);
    6588 IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m32);
    6589 IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr32, uint8_t, iEffSeg);
    6590 IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m32);
    6591 IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m32, int8_t, iEffSeg);
    6592 IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr32, bool, fIoChecked);
    6593 IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr32, bool, fIoChecked);
    6594 IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
    6595 IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
    6596 
    6597 
    6598 IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr64, uint8_t, iEffSeg);
    6599 IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr64, uint8_t, iEffSeg);
    6600 IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m64);
    6601 IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m64);
    6602 IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr64, uint8_t, iEffSeg);
    6603 IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m64);
    6604 IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m64, int8_t, iEffSeg);
    6605 IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr64, bool, fIoChecked);
    6606 IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr64, bool, fIoChecked);
    6607 IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
    6608 IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
    6609 
    6610 IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr64, uint8_t, iEffSeg);
    6611 IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr64, uint8_t, iEffSeg);
    6612 IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m64);
    6613 IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m64);
    6614 IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr64, uint8_t, iEffSeg);
    6615 IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m64);
    6616 IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m64, int8_t, iEffSeg);
    6617 IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr64, bool, fIoChecked);
    6618 IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr64, bool, fIoChecked);
    6619 IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
    6620 IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
    6621 
    6622 IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr64, uint8_t, iEffSeg);
    6623 IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr64, uint8_t, iEffSeg);
    6624 IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m64);
    6625 IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m64);
    6626 IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr64, uint8_t, iEffSeg);
    6627 IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m64);
    6628 IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m64, int8_t, iEffSeg);
    6629 IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr64, bool, fIoChecked);
    6630 IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr64, bool, fIoChecked);
    6631 IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
    6632 IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
    6633 
    6634 IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr64, uint8_t, iEffSeg);
    6635 IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr64, uint8_t, iEffSeg);
    6636 IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m64);
    6637 IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m64);
    6638 IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr64, uint8_t, iEffSeg);
    6639 IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m64);
    6640 IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m64, int8_t, iEffSeg);
    6641 IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr64, bool, fIoChecked);
    6642 IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr64, bool, fIoChecked);
    6643 IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
    6644 IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
    6645 /** @} */
    6646 
    6647 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    6648 VBOXSTRICTRC    iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual) RT_NOEXCEPT;
    6649 VBOXSTRICTRC    iemVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr) RT_NOEXCEPT;
    6650 VBOXSTRICTRC    iemVmxVmexitInstrNeedsInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr) RT_NOEXCEPT;
    6651 VBOXSTRICTRC    iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr) RT_NOEXCEPT;
    6652 VBOXSTRICTRC    iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr)  RT_NOEXCEPT;
    6653 VBOXSTRICTRC    iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6654 VBOXSTRICTRC    iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALKFAST pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT;
    6655 VBOXSTRICTRC    iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6656 VBOXSTRICTRC    iemVmxVmexitInstrMwait(PVMCPUCC pVCpu, bool fMonitorHwArmed, uint8_t cbInstr) RT_NOEXCEPT;
    6657 VBOXSTRICTRC    iemVmxVmexitInstrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port,
    6658                                     bool fImm, uint8_t cbAccess, uint8_t cbInstr) RT_NOEXCEPT;
    6659 VBOXSTRICTRC    iemVmxVmexitInstrStrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess,
    6660                                        bool fRep, VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr) RT_NOEXCEPT;
    6661 VBOXSTRICTRC    iemVmxVmexitInstrMovDrX(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
    6662 VBOXSTRICTRC    iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
    6663 VBOXSTRICTRC    iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
    6664 VBOXSTRICTRC    iemVmxVmexitInstrMovToCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
    6665 VBOXSTRICTRC    iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
    6666 VBOXSTRICTRC    iemVmxVmexitInstrMovToCr0Cr4(PVMCPUCC pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
    6667 VBOXSTRICTRC    iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT;
    6668 VBOXSTRICTRC    iemVmxVmexitInstrLmsw(PVMCPUCC pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw,
    6669                                       RTGCPTR GCPtrEffDst, uint8_t cbInstr) RT_NOEXCEPT;
    6670 VBOXSTRICTRC    iemVmxVmexitInstrInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr) RT_NOEXCEPT;
    6671 VBOXSTRICTRC    iemVmxApicWriteEmulation(PVMCPUCC pVCpu) RT_NOEXCEPT;
    6672 VBOXSTRICTRC    iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess, size_t cbAccess, uint32_t fAccess) RT_NOEXCEPT;
    6673 uint32_t        iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg) RT_NOEXCEPT;
    6674 void            iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg) RT_NOEXCEPT;
    6675 VBOXSTRICTRC    iemVmxInvvpid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
    6676                               uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT;
    6677 bool            iemVmxIsRdmsrWrmsrInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr) RT_NOEXCEPT;
    6678 IEM_CIMPL_PROTO_0(iemCImpl_vmxoff);
    6679 IEM_CIMPL_PROTO_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon);
    6680 IEM_CIMPL_PROTO_0(iemCImpl_vmlaunch);
    6681 IEM_CIMPL_PROTO_0(iemCImpl_vmresume);
    6682 IEM_CIMPL_PROTO_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
    6683 IEM_CIMPL_PROTO_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
    6684 IEM_CIMPL_PROTO_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
    6685 IEM_CIMPL_PROTO_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64VmcsField);
    6686 IEM_CIMPL_PROTO_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64VmcsField);
    6687 IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg64, uint64_t *, pu64Dst, uint64_t, u64VmcsField);
    6688 IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg32, uint64_t *, pu32Dst, uint32_t, u32VmcsField);
    6689 IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64VmcsField);
    6690 IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32VmcsField);
    6691 IEM_CIMPL_PROTO_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType);
    6692 IEM_CIMPL_PROTO_3(iemCImpl_invept, uint8_t, iEffSeg, RTGCPTR, GCPtrInveptDesc, uint64_t, uInveptType);
    6693 IEM_CIMPL_PROTO_0(iemCImpl_vmx_pause);
    6694 #endif
    6695 
    6696 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    6697 VBOXSTRICTRC    iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) RT_NOEXCEPT;
    6698 VBOXSTRICTRC    iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2) RT_NOEXCEPT;
    6699 VBOXSTRICTRC    iemSvmHandleIOIntercept(PVMCPUCC pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
    6700                                         uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) RT_NOEXCEPT;
    6701 VBOXSTRICTRC    iemSvmHandleMsrIntercept(PVMCPUCC pVCpu, uint32_t idMsr, bool fWrite, uint8_t cbInstr) RT_NOEXCEPT;
    6702 IEM_CIMPL_PROTO_0(iemCImpl_vmrun);
    6703 IEM_CIMPL_PROTO_0(iemCImpl_vmload);
    6704 IEM_CIMPL_PROTO_0(iemCImpl_vmsave);
    6705 IEM_CIMPL_PROTO_0(iemCImpl_clgi);
    6706 IEM_CIMPL_PROTO_0(iemCImpl_stgi);
    6707 IEM_CIMPL_PROTO_0(iemCImpl_invlpga);
    6708 IEM_CIMPL_PROTO_0(iemCImpl_skinit);
    6709 IEM_CIMPL_PROTO_0(iemCImpl_svm_pause);
    6710 #endif
    6711 
    6712 IEM_CIMPL_PROTO_0(iemCImpl_vmcall);  /* vmx */
    6713 IEM_CIMPL_PROTO_0(iemCImpl_vmmcall); /* svm */
    6714 IEM_CIMPL_PROTO_1(iemCImpl_Hypercall, uint16_t, uDisOpcode); /* both */
    6715 
    6716 extern const PFNIEMOP g_apfnIemInterpretOnlyOneByteMap[256];
    6717 extern const PFNIEMOP g_apfnIemInterpretOnlyTwoByteMap[1024];
    6718 extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f3a[1024];
    6719 extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f38[1024];
    6720 extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap1[1024];
    6721 extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap2[1024];
    6722 extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap3[1024];
    6723 3178 
    6724 3179 /*
    6725 3180  * Recompiler related stuff.
    6726 3181  */
    6727 extern const PFNIEMOP g_apfnIemThreadedRecompilerOneByteMap[256];
    6728 extern const PFNIEMOP g_apfnIemThreadedRecompilerTwoByteMap[1024];
    6729 extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f3a[1024];
    6730 extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f38[1024];
    6731 extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap1[1024];
    6732 extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap2[1024];
    6733 extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap3[1024];
    6734 3182 
    67353183DECLHIDDEN(int)     iemPollTimers(PVMCC pVM, PVMCPUCC pVCpu) RT_NOEXCEPT;
     
    6769 3217 
    6770 3218 
    6771 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_Nop);
    6772 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_LogCpuState);
    6773 
    6774 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_DeferToCImpl0);
    6775 
    6776 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckIrq);
    6777 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckTimers);
    6778 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckTimersAndIrq);
    6779 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckMode);
    6780 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckHwInstrBps);
    6781 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLim);
    6782 
    6783 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes);
    6784 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodes);
    6785 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim);
    6786 
    6787 /* Branching: */
    6788 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes);
    6789 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodes);
    6790 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim);
    6791 
    6792 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb);
    6793 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb);
    6794 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim);
    6795 
    6796 /* Natural page crossing: */
    6797 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb);
    6798 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb);
    6799 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim);
    6800 
    6801 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb);
    6802 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb);
    6803 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim);
    6804 
    6805 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb);
    6806 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb);
    6807 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim);
    6808 
    6809 IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_Jump);
    6810 
    6811 bool iemThreadedCompileEmitIrqCheckBefore(PVMCPUCC pVCpu, PIEMTB pTb);
    6812 bool iemThreadedCompileBeginEmitCallsComplications(PVMCPUCC pVCpu, PIEMTB pTb);
    6813 #ifdef IEM_WITH_INTRA_TB_JUMPS
    6814 DECLHIDDEN(int)     iemThreadedCompileBackAtFirstInstruction(PVMCPU pVCpu, PIEMTB pTb) RT_NOEXCEPT;
    6815 #endif
    6816 
    6817 3219 /* Native recompiler public bits: */
    68183220
     
    6830 3232 DECLHIDDEN(int) iemNativeRecompileAttachExecMemChunkCtx(PVMCPU pVCpu, uint32_t idxChunk, struct IEMNATIVEPERCHUNKCTX const **ppCtx);
    68313233
    6832 /** Packed 32-bit argument for iemCImpl_vpgather_worker_xx. */
    6833 typedef union IEMGATHERARGS
    6834 {
    6835     /** Integer view. */
    6836     uint32_t u;
    6837     /** Bitfield view. */
    6838     struct
    6839     {
    6840         uint32_t iYRegDst       : 4; /**<  0 - XMM or YMM register number (destination) */
    6841         uint32_t iYRegIdc       : 4; /**<  4 - XMM or YMM register number (indices)     */
    6842         uint32_t iYRegMsk       : 4; /**<  8 - XMM or YMM register number (mask)        */
    6843         uint32_t iGRegBase      : 4; /**< 12 - general register number    (base ptr)    */
    6844         uint32_t iScale         : 2; /**< 16 - scale factor               (1/2/4/8)     */
    6845         uint32_t enmEffOpSize   : 2; /**< 18 - operand size               (16/32/64/--) */
    6846         uint32_t enmEffAddrMode : 2; /**< 20 - addressing  mode           (16/32/64/--) */
    6847         uint32_t iEffSeg        : 3; /**< 22 - effective segment (ES/CS/SS/DS/FS/GS)    */
    6848         uint32_t fVex256        : 1; /**< 25 - overall instruction width (128/256 bits) */
    6849         uint32_t fIdxQword      : 1; /**< 26 - individual index width     (4/8 bytes)   */
    6850         uint32_t fValQword      : 1; /**< 27 - individual value width     (4/8 bytes)   */
    6851     } s;
    6852 } IEMGATHERARGS;
    6853 AssertCompileSize(IEMGATHERARGS, sizeof(uint32_t));
     3234# ifdef VBOX_VMM_TARGET_X86
     3235#  include "VMMAll/target-x86/IEMInternal-x86.h"
     3236# elif defined(VBOX_VMM_TARGET_ARMV8)
     3237//#  include "VMMAll/target-armv8/IEMInternal-armv8.h"
     3238# endif
    68543239
    68553240#endif /* !RT_IN_ASSEMBLER - ASM-NOINC-END */
  • trunk/src/VBox/VMM/testcase/Makefile.kmk

    r107916 r108195  
    670670tstIEMAImpl_DEFS      = VBOX_VMM_TARGET_X86 $(VMM_COMMON_DEFS) IEM_WITHOUT_ASSEMBLY IEM_WITHOUT_INSTRUCTION_STATS
    671671tstIEMAImpl_SDKS      = VBoxSoftFloatR3Shared
    672 tstIEMAImpl_INCS      = ../include .
     672tstIEMAImpl_INCS      = \
     673        $(VBOX_PATH_VMM_SRC)/include \
     674        $(VBOX_PATH_VMM_SRC) \
     675        $(VBOX_PATH_VMM_SRC)/testcase
    673676tstIEMAImpl_SOURCES   = \
    674677        tstIEMAImpl.cpp \
     
    693696tstIEMAImplAsm_ASFLAGS.amd64 := -Werror
    694697tstIEMAImplAsm_ASFLAGS.x86   := -Werror
    695 tstIEMAImplAsm_INCS          := ../include .
     698tstIEMAImplAsm_INCS          := \
     699        $(VBOX_PATH_VMM_SRC)/include \
     700        $(VBOX_PATH_VMM_SRC) \
     701        $(VBOX_PATH_VMM_SRC)/testcase
    696702tstIEMAImplAsm_SOURCES        = \
    697703        tstIEMAImpl.cpp \
     
    715721tstIEMCheckMc_SOURCES   = tstIEMCheckMc.cpp
    716722tstIEMCheckMc_DEFS      = VBOX_VMM_TARGET_X86 $(VMM_COMMON_DEFS) IEM_WITHOUT_INSTRUCTION_STATS
     723tstIEMCheckMc_INCS     := \
     724        $(VBOX_PATH_VMM_SRC)/include \
     725        $(VBOX_PATH_VMM_SRC)
    717726tstIEMCheckMc_LIBS      = $(LIB_RUNTIME)
    718727ifeq ($(KBUILD_TARGET),win)
  • trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp

    r107218 r108195  
    4040#define TST_IEM_CHECK_MC    /**< For hacks.  */
    4141#define IN_TSTVMSTRUCT 1    /**< Ditto. */
    42 #include "../include/IEMInternal.h"
     42#include "IEMInternal.h"
    4343#include <VBox/vmm/vm.h>
    4444
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle · Support · Privacy / Do Not Sell My Info · Terms of Use · Trademark Policy · Automated Access Etiquette