VirtualBox

Timestamp:
May 1, 2022 10:02:17 PM
Author:
vboxsync
Message:

VMM/IEM: Split up IEMAll.cpp into a few more compilation units. bugref:9898

File:
1 copied

  • trunk/src/VBox/VMM/include/IEMMc.h

    r94751 → r94768
    11/* $Id$ */
    22/** @file
    3  * IEM - Interpreted Execution Manager - All Contexts.
     3 * IEM - Interpreted Execution Manager - IEM_MC_XXX.
    44 */
    55
     
    1616 */
    1717
    18 
    19 /** @page pg_iem    IEM - Interpreted Execution Manager
    20  *
    21  * The interpreted execution manager (IEM) is for executing short guest code
    22  * sequences that are causing too many exits / virtualization traps.  It will
    23  * also be used to interpret single instructions, thus replacing the selective
    24  * interpreters in EM and IOM.
    25  *
    26  * Design goals:
    27  *      - Relatively small footprint, although we favour speed and correctness
    28  *        over size.
    29  *      - Reasonably fast.
    30  *      - Correctly handle lock prefixed instructions.
    31  *      - Complete instruction set - eventually.
    32  *      - Refactorable into a recompiler, maybe.
    33  *      - Replace EMInterpret*.
    34  *
    35  * Using the existing disassembler has been considered; however, this is thought
    36  * to conflict with speed as the disassembler chews things a bit too much while
    37  * leaving us with a somewhat complicated state to interpret afterwards.
    38  *
    39  *
    40  * The current code is very much a work in progress. You've been warned!
    41  *
    42  *
    43  * @section sec_iem_fpu_instr   FPU Instructions
    44  *
    45  * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
    46  * same or equivalent instructions on the host FPU.  To make life easy, we also
    47  * let the FPU prioritize the unmasked exceptions for us.  This however, only
    48  * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
    49  * for FPU exception delivery, because with CR0.NE=0 there is a window where we
    50  * can trigger spurious FPU exceptions.
    51  *
    52  * The guest FPU state is not loaded into the host CPU and kept there till we
    53  * leave IEM because the calling conventions have declared an all-year open
    54  * season on much of the FPU state.  For instance an innocent looking call to
    55  * memcpy might end up using a whole bunch of XMM or MM registers if the
    56  * particular implementation finds it worthwhile.
    57  *
    58  *
    59  * @section sec_iem_logging     Logging
    60  *
    61  * The IEM code uses the "IEM" log group for the main logging. The different
    62  * logging levels/flags are generally used for the following purposes:
    63  *      - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
    64  *      - Flow (LogFlow): Basic enter/exit IEM state info.
    65  *      - Level 2 (Log2): ?
    66  *      - Level 3 (Log3): More detailed enter/exit IEM state info.
    67  *      - Level 4 (Log4): Decoding mnemonics w/ EIP.
    68  *      - Level 5 (Log5): Decoding details.
    69  *      - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
    70  *      - Level 7 (Log7): iret++ execution logging.
    71  *      - Level 8 (Log8): Memory writes.
    72  *      - Level 9 (Log9): Memory reads.
    73  *
    74  */
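As a usage note, these per-level flags are switched on at runtime through the standard VBox log-string syntax; the strings below are an illustration that assumes the usual flag letters (e = enabled, f = flow, lN = level N), not something stated in this changeset:

    /* Illustrative log strings (assumed syntax):
     *   VBOX_LOG="+iem.e.f.l4"    - errors, enter/exit flow and decode mnemonics
     *   VBOX_LOG="+iem.e.l8.l9"   - errors plus memory write/read logging
     */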
    75 
    76 //#define IEM_LOG_MEMORY_WRITES
    77 #define IEM_IMPLEMENTS_TASKSWITCH
    78 
    79 /* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
    80 #ifdef _MSC_VER
    81 # pragma warning(disable:4505)
     18#ifndef VMM_INCLUDED_SRC_include_IEMMc_h
     19#define VMM_INCLUDED_SRC_include_IEMMc_h
     20#ifndef RT_WITHOUT_PRAGMA_ONCE
     21# pragma once
    8222#endif
    83 
    84 
    85 /*********************************************************************************************************************************
    86 *   Header Files                                                                                                                 *
    87 *********************************************************************************************************************************/
    88 #define LOG_GROUP   LOG_GROUP_IEM
    89 #define VMCPU_INCL_CPUM_GST_CTX
    90 #include <VBox/vmm/iem.h>
    91 #include <VBox/vmm/cpum.h>
    92 #include <VBox/vmm/apic.h>
    93 #include <VBox/vmm/pdm.h>
    94 #include <VBox/vmm/pgm.h>
    95 #include <VBox/vmm/iom.h>
    96 #include <VBox/vmm/em.h>
    97 #include <VBox/vmm/hm.h>
    98 #include <VBox/vmm/nem.h>
    99 #include <VBox/vmm/gim.h>
    100 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    101 # include <VBox/vmm/em.h>
    102 # include <VBox/vmm/hm_svm.h>
    103 #endif
    104 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    105 # include <VBox/vmm/hmvmxinline.h>
    106 #endif
    107 #include <VBox/vmm/tm.h>
    108 #include <VBox/vmm/dbgf.h>
    109 #include <VBox/vmm/dbgftrace.h>
    110 #include "IEMInternal.h"
    111 #include <VBox/vmm/vmcc.h>
    112 #include <VBox/log.h>
    113 #include <VBox/err.h>
    114 #include <VBox/param.h>
    115 #include <VBox/dis.h>
    116 #include <VBox/disopcode.h>
    117 #include <iprt/asm-math.h>
    118 #include <iprt/assert.h>
    119 #include <iprt/string.h>
    120 #include <iprt/x86.h>
    121 
    122 
    123 /*********************************************************************************************************************************
    124 *   Structures and Typedefs                                                                                                      *
    125 *********************************************************************************************************************************/
    126 /** @typedef PFNIEMOP
    127  * Pointer to an opcode decoder function.
    128  */
    129 
    130 /** @def FNIEMOP_DEF
    131  * Define an opcode decoder function.
    132  *
    133  * We're using macros for this so that adding and removing parameters as well as
    134  * tweaking compiler specific attributes becomes easier.  See FNIEMOP_CALL
    135  *
    136  * @param   a_Name      The function name.
    137  */
    138 
    139 /** @typedef PFNIEMOPRM
    140  * Pointer to an opcode decoder function with RM byte.
    141  */
    142 
    143 /** @def FNIEMOPRM_DEF
    144  * Define an opcode decoder function with RM byte.
    145  *
    146  * We're using macros for this so that adding and removing parameters as well as
    147  * tweaking compiler specific attributes becomes easier.  See FNIEMOP_CALL_1
    148  *
    149  * @param   a_Name      The function name.
    150  */
    151 
    152 #if defined(__GNUC__) && defined(RT_ARCH_X86)
    153 typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
    154 typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
    155 # define FNIEMOP_DEF(a_Name) \
    156     IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
    157 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    158     IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
    159 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    160     IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
    161 
    162 #elif defined(_MSC_VER) && defined(RT_ARCH_X86)
    163 typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
    164 typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
    165 # define FNIEMOP_DEF(a_Name) \
    166     IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
    167 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    168     IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
    169 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    170     IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
    171 
    172 #elif defined(__GNUC__)
    173 typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
    174 typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
    175 # define FNIEMOP_DEF(a_Name) \
    176     IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
    177 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    178     IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
    179 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    180     IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
    181 
    182 #else
    183 typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
    184 typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
    185 # define FNIEMOP_DEF(a_Name) \
    186     IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
    187 # define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    188     IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
    189 # define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    190     IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
    191 
    192 #endif
    193 #define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
    194 
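To make the indirection concrete, here is a minimal sketch of how decoders are defined with these macros; the function names and bodies are invented for illustration, only the macro usage pattern comes from the definitions above:

    /* Illustrative only: FNIEMOP_DEF expands to the storage class, calling
     * convention and nothrow attribute appropriate for this compiler/arch. */
    FNIEMOP_DEF(iemOp_ExampleNop)
    {
        RT_NOREF(pVCpu);   /* a real decoder fetches operands and runs an IEM_MC block */
        return VINF_SUCCESS;
    }

    /* The RM-byte variant declares the extra argument the same way: */
    FNIEMOPRM_DEF(iemOp_ExampleGroupWorker)
    {
        RT_NOREF(pVCpu, bRm);          /* bRm is the pre-fetched ModRM byte */
        return VINF_SUCCESS;
    }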
    195 
    196 /**
    197  * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
    198  */
    199 typedef union IEMSELDESC
    200 {
    201     /** The legacy view. */
    202     X86DESC     Legacy;
    203     /** The long mode view. */
    204     X86DESC64   Long;
    205 } IEMSELDESC;
    206 /** Pointer to a selector descriptor table entry. */
    207 typedef IEMSELDESC *PIEMSELDESC;
    208 
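A brief sketch of how the two views are intended to be used; the fetch step is elided and the snippet is illustrative only:

    IEMSELDESC Desc;
    /* ... Desc filled in by iemMemFetchSelDesc() ... */
    uint8_t const uDpl = Desc.Legacy.Gen.u2Dpl;   /* the low 8 bytes are shared, */
    if (!Desc.Legacy.Gen.u1DescType)              /* ... so DPL/type read the same */
    {
        /* System descriptors (e.g. a 64-bit TSS) may need the 16-byte
           Desc.Long view for the upper base-address bits. */
    }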
    209 /**
    210  * CPU exception classes.
    211  */
    212 typedef enum IEMXCPTCLASS
    213 {
    214     IEMXCPTCLASS_BENIGN,
    215     IEMXCPTCLASS_CONTRIBUTORY,
    216     IEMXCPTCLASS_PAGE_FAULT,
    217     IEMXCPTCLASS_DOUBLE_FAULT
    218 } IEMXCPTCLASS;
    219 
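These classes encode the architectural \#DF escalation matrix; a condensed sketch of the rule follows (the benign class always nests, and a fault during \#DF delivery escalates to a triple fault instead):

    /* Illustrative condensation of the SDM double-fault table. */
    static bool exampleCausesDoubleFault(IEMXCPTCLASS enmPrev, IEMXCPTCLASS enmCur)
    {
        if (enmPrev == IEMXCPTCLASS_CONTRIBUTORY)
            return enmCur == IEMXCPTCLASS_CONTRIBUTORY;  /* contributory + contributory */
        if (enmPrev == IEMXCPTCLASS_PAGE_FAULT)
            return enmCur == IEMXCPTCLASS_CONTRIBUTORY   /* #PF + contributory */
                || enmCur == IEMXCPTCLASS_PAGE_FAULT;    /* #PF + #PF */
        return false;                                    /* benign combinations nest */
    }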
    220 
    221 /*********************************************************************************************************************************
    222 *   Defined Constants And Macros                                                                                                 *
    223 *********************************************************************************************************************************/
    224 /** @def IEM_WITH_SETJMP
    225  * Enables alternative status code handling using setjmps.
    226  *
    227  * This adds a bit of expense via the setjmp() call since it saves all the
    228  * non-volatile registers.  However, it eliminates return code checks and allows
    229  * for more optimal return value passing (return regs instead of stack buffer).
    230  */
    231 #if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
    232 # define IEM_WITH_SETJMP
    233 #endif
    234 
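A minimal sketch of the pattern this enables, not the actual IEM code: the dispatcher arms a jump buffer once, and deeply nested fetch/raise helpers longjmp() to it, so the happy path carries no status-code checks.

    #include <setjmp.h>

    static jmp_buf s_JmpBuf;     /* illustrative; IEM keeps its buffer in per-VCPU state */

    static void exampleRaiseFault(int rcStatus)
    {
        longjmp(s_JmpBuf, rcStatus);        /* unwind straight to the dispatcher */
    }

    static int exampleDispatcher(void)
    {
        int rc = setjmp(s_JmpBuf);          /* saves the non-volatile registers once */
        if (rc == 0)
        {
            /* ... decode and execute, calling helpers without checking returns ... */
            return 0;
        }
        return rc;                          /* a helper raised a fault */
    }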
    235 /** Used to shut up GCC warnings about variables that 'may be used uninitialized'
    236  * due to GCC lacking knowledge about the value range of a switch. */
    237 #define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
    238 
    239 /** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
    240 #define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
    241 
    242 /**
    243  * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
    244  * occasion.
    245  */
    246 #ifdef LOG_ENABLED
    247 # define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
    248     do { \
    249         /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
    250         return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
    251     } while (0)
    252 #else
    253 # define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
    254     return VERR_IEM_ASPECT_NOT_IMPLEMENTED
    255 #endif
    256 
    257 /**
    258  * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
    259  * occasion using the supplied logger statement.
    260  *
    261  * @param   a_LoggerArgs    What to log on failure.
    262  */
    263 #ifdef LOG_ENABLED
    264 # define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
    265     do { \
    266         LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
    267         /*LogFunc(a_LoggerArgs);*/ \
    268         return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
    269     } while (0)
    270 #else
    271 # define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
    272     return VERR_IEM_ASPECT_NOT_IMPLEMENTED
    273 #endif
    274 
    275 /**
    276  * Call an opcode decoder function.
    277  *
    278  * We're using macros for this so that adding and removing parameters can be
    279  * done as we please.  See FNIEMOP_DEF.
    280  */
    281 #define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
    282 
    283 /**
    284  * Call a common opcode decoder function taking one extra argument.
    285  *
    286  * We're using macros for this so that adding and removing parameters can be
    287  * done as we please.  See FNIEMOP_DEF_1.
    288  */
    289 #define FNIEMOP_CALL_1(a_pfn, a0)           (a_pfn)(pVCpu, a0)
    290 
    291 /**
    292  * Call a common opcode decoder function taking two extra arguments.
    293  *
    294  * We're using macros for this so that adding and removing parameters can be
    295  * done as we please.  See FNIEMOP_DEF_2.
    296  */
    297 #define FNIEMOP_CALL_2(a_pfn, a0, a1)       (a_pfn)(pVCpu, a0, a1)
    298 
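A hedged sketch tying the call wrappers to the definition macros; the ModRM fetch follows the pattern used by the real decoders, but the function names here are invented:

    FNIEMOPRM_DEF(iemOp_ExampleWorker);      /* forward declaration (illustrative) */

    FNIEMOP_DEF(iemOp_ExampleGroup)
    {
        uint8_t bRm;
        IEM_OPCODE_GET_NEXT_U8(&bRm);                     /* fetch the ModRM byte */
        return FNIEMOP_CALL_1(iemOp_ExampleWorker, bRm);  /* forward w/ one extra arg */
    }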
    299 /**
    300  * Check if we're currently executing in real or virtual 8086 mode.
    301  *
    302  * @returns @c true if it is, @c false if not.
    303  * @param   a_pVCpu         The IEM state of the current CPU.
    304  */
    305 #define IEM_IS_REAL_OR_V86_MODE(a_pVCpu)    (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
    306 
    307 /**
    308  * Check if we're currently executing in virtual 8086 mode.
    309  *
    310  * @returns @c true if it is, @c false if not.
    311  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    312  */
    313 #define IEM_IS_V86_MODE(a_pVCpu)            (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
    314 
    315 /**
    316  * Check if we're currently executing in long mode.
    317  *
    318  * @returns @c true if it is, @c false if not.
    319  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    320  */
    321 #define IEM_IS_LONG_MODE(a_pVCpu)           (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
    322 
    323 /**
    324  * Check if we're currently executing in a 64-bit code segment.
    325  *
    326  * @returns @c true if it is, @c false if not.
    327  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    328  */
    329 #define IEM_IS_64BIT_CODE(a_pVCpu)          (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
    330 
    331 /**
    332  * Check if we're currently executing in real mode.
    333  *
    334  * @returns @c true if it is, @c false if not.
    335  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    336  */
    337 #define IEM_IS_REAL_MODE(a_pVCpu)           (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
    338 
    339 /**
    340  * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
    341  * @returns PCCPUMFEATURES
    342  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    343  */
    344 #define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
    345 
    346 /**
    347  * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
    348  * @returns PCCPUMFEATURES
    349  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    350  */
    351 #define IEM_GET_HOST_CPU_FEATURES(a_pVCpu)  (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
    352 
    353 /**
    354  * Evaluates to true if we're presenting an Intel CPU to the guest.
    355  */
    356 #define IEM_IS_GUEST_CPU_INTEL(a_pVCpu)     ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
    357 
    358 /**
    359  * Evaluates to true if we're presenting an AMD CPU to the guest.
    360  */
    361 #define IEM_IS_GUEST_CPU_AMD(a_pVCpu)       ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
    362 
    363 /**
    364  * Check if the address is canonical.
    365  */
    366 #define IEM_IS_CANONICAL(a_u64Addr)         X86_IS_CANONICAL(a_u64Addr)
    367 
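For reference, an address is canonical when bits 63:48 are copies of bit 47; an equivalent formulation of the check wrapped above (illustrative only, X86_IS_CANONICAL is the authoritative one):

    static inline bool exampleIsCanonical(uint64_t u64Addr)
    {
        /* Shift the 48 significant bits to the top, sign-extend them back down;
           canonical addresses survive the round trip unchanged. */
        return (uint64_t)((int64_t)(u64Addr << 16) >> 16) == u64Addr;
    }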
    368 /**
    369  * Gets the effective VEX.VVVV value.
    370  *
    371  * The 4th bit is ignored when not executing 64-bit code.
    372  * @returns effective V-register value.
    373  * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
    374  */
    375 #define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
    376     ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
    377 
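A quick worked example of the masking above:

    /* Illustrative: VEX.VVVV = 13 (0b1101).
     *   64-bit code:  13          - all four bits are significant
     *   other modes:  13 & 7 = 5  - bit 3 is ignored                 */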
    378 /** @def IEM_USE_UNALIGNED_DATA_ACCESS
    379  * Use unaligned accesses instead of elaborate byte assembly. */
    380 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
    381 # define IEM_USE_UNALIGNED_DATA_ACCESS
    382 #endif
    383 
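A sketch of the two strategies this switch selects between; on x86/AMD64 the direct unaligned load is safe and fast, while other hosts assemble the value byte by byte (illustrative only, the real helpers live elsewhere and this assumes <string.h>):

    static inline uint16_t exampleFetchU16(uint8_t const *pb)
    {
    #ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        uint16_t u16;
        memcpy(&u16, pb, sizeof(u16));      /* compiles to a single unaligned load */
        return u16;
    #else
        return (uint16_t)pb[0] | ((uint16_t)pb[1] << 8);  /* explicit byte assembly */
    #endif
    }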
    384 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    385 
    386 /**
    387  * Check if the guest has entered VMX root operation.
    388  */
    389 # define IEM_VMX_IS_ROOT_MODE(a_pVCpu)      (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
    390 
    391 /**
    392  * Check if the guest has entered VMX non-root operation.
    393  */
    394 # define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu)  (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
    395 
    396 /**
    397  * Check if the nested-guest has the given Pin-based VM-execution control set.
    398  */
    399 # define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
    400     (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
    401 
    402 /**
    403  * Check if the nested-guest has the given Processor-based VM-execution control set.
    404  */
    405 # define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
    406     (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
    407 
    408 /**
    409  * Check if the nested-guest has the given Secondary Processor-based VM-execution
    410  * control set.
    411  */
    412 # define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
    413     (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
    414 
    415 /**
    416  * Invokes the VMX VM-exit handler for an instruction intercept.
    417  */
    418 # define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
    419     do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
    420 
    421 /**
    422  * Invokes the VMX VM-exit handler for an instruction intercept where the
    423  * instruction provides additional VM-exit information.
    424  */
    425 # define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
    426     do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
    427 
    428 /**
    429  * Invokes the VMX VM-exit handler for a task switch.
    430  */
    431 # define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
    432     do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
    433 
    434 /**
    435  * Invokes the VMX VM-exit handler for MWAIT.
    436  */
    437 # define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
    438     do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
    439 
    440 /**
    441  * Invokes the VMX VM-exit handler for EPT faults.
    442  */
    443 # define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
    444     do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)
    445 
    446 /**
    447  * Invokes the VMX VM-exit handler.
    448  */
    449 # define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
    450     do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
    451 
    452 #else
    453 # define IEM_VMX_IS_ROOT_MODE(a_pVCpu)                                          (false)
    454 # define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu)                                      (false)
    455 # define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl)                              (false)
    456 # define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl)                            (false)
    457 # define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2)                          (false)
    458 # define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr)            do { return VERR_VMX_IPE_1; } while (0)
    459 # define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr)  do { return VERR_VMX_IPE_1; } while (0)
    460 # define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr)    do { return VERR_VMX_IPE_1; } while (0)
    461 # define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr)          do { return VERR_VMX_IPE_1; } while (0)
    462 # define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr)       do { return VERR_VMX_IPE_1; } while (0)
    463 # define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual)   do { return VERR_VMX_IPE_1; } while (0)
    464 
    465 #endif
    466 
    467 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    468 /**
    469  * Check if an SVM control/instruction intercept is set.
    470  */
    471 # define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
    472     (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
    473 
    474 /**
    475  * Check if an SVM read CRx intercept is set.
    476  */
    477 # define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
    478     (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
    479 
    480 /**
    481  * Check if an SVM write CRx intercept is set.
    482  */
    483 # define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
    484     (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
    485 
    486 /**
    487  * Check if an SVM read DRx intercept is set.
    488  */
    489 # define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
    490     (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
    491 
    492 /**
    493  * Check if an SVM write DRx intercept is set.
    494  */
    495 # define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
    496     (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
    497 
    498 /**
    499  * Check if an SVM exception intercept is set.
    500  */
    501 # define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
    502     (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
    503 
    504 /**
    505  * Invokes the SVM \#VMEXIT handler for the nested-guest.
    506  */
    507 # define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
    508     do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
    509 
    510 /**
    511  * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
    512  * corresponding decode assist information.
    513  */
    514 # define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
    515     do \
    516     { \
    517         uint64_t uExitInfo1; \
    518         if (   IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
    519             && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
    520             uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
    521         else \
    522             uExitInfo1 = 0; \
    523         IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
    524     } while (0)
    525 
    526 /** Checks and handles the SVM nested-guest instruction intercept, updating
    527  *  NRIP if needed.
    528  */
    529 # define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
    530     do \
    531     { \
    532         if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
    533         { \
    534             IEM_SVM_UPDATE_NRIP(a_pVCpu); \
    535             IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
    536         } \
    537     } while (0)
    538 
    539 /** Checks and handles SVM nested-guest CR0 read intercept. */
    540 # define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
    541     do \
    542     { \
    543         if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
    544         { /* probably likely */ } \
    545         else \
    546         { \
    547             IEM_SVM_UPDATE_NRIP(a_pVCpu); \
    548             IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
    549         } \
    550     } while (0)
    551 
    552 /**
    553  * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
    554  */
    555 # define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
    556     do { \
    557         if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
    558             CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
    559     } while (0)
    560 
    561 #else
    562 # define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)                              (false)
    563 # define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                                 (false)
    564 # define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                                (false)
    565 # define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                                 (false)
    566 # define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                                (false)
    567 # define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)                                (false)
    568 # define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)             do { return VERR_SVM_IPE_1; } while (0)
    569 # define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg)            do { return VERR_SVM_IPE_1; } while (0)
    570 # define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2)   do { } while (0)
    571 # define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2)                          do { } while (0)
    572 # define IEM_SVM_UPDATE_NRIP(a_pVCpu)                                                     do { } while (0)
    573 
    574 #endif
    575 
    576 
    577 /*********************************************************************************************************************************
    578 *   Global Variables                                                                                                             *
    579 *********************************************************************************************************************************/
    580 extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
    581 
    582 
    583 /** Function table for the ADD instruction. */
    584 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
    585 {
    586     iemAImpl_add_u8,  iemAImpl_add_u8_locked,
    587     iemAImpl_add_u16, iemAImpl_add_u16_locked,
    588     iemAImpl_add_u32, iemAImpl_add_u32_locked,
    589     iemAImpl_add_u64, iemAImpl_add_u64_locked
    590 };
    591 
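Each of these tables pairs a plain worker with a LOCK-prefixed (atomic) worker for every operand size; a hedged sketch of the dispatch, where the field names (pfnNormalU32/pfnLockedU32) and the worker signature are assumptions about IEMOPBINSIZES rather than facts from this changeset:

    /* Illustrative: pick and invoke the 32-bit ADD worker, honouring LOCK. */
    bool const fLocked = false;                  /* would come from the prefix bytes */
    PFNIEMAIMPLBINU32 const pfn = fLocked ? g_iemAImpl_add.pfnLockedU32
                                          : g_iemAImpl_add.pfnNormalU32;
    uint32_t uDst    = 40;
    uint32_t fEFlags = 0;
    pfn(&uDst, 2, &fEFlags);                     /* assumed signature: dst, src, eflags */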
    592 /** Function table for the ADC instruction. */
    593 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
    594 {
    595     iemAImpl_adc_u8,  iemAImpl_adc_u8_locked,
    596     iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
    597     iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
    598     iemAImpl_adc_u64, iemAImpl_adc_u64_locked
    599 };
    600 
    601 /** Function table for the SUB instruction. */
    602 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
    603 {
    604     iemAImpl_sub_u8,  iemAImpl_sub_u8_locked,
    605     iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
    606     iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
    607     iemAImpl_sub_u64, iemAImpl_sub_u64_locked
    608 };
    609 
    610 /** Function table for the SBB instruction. */
    611 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
    612 {
    613     iemAImpl_sbb_u8,  iemAImpl_sbb_u8_locked,
    614     iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
    615     iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
    616     iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
    617 };
    618 
    619 /** Function table for the OR instruction. */
    620 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
    621 {
    622     iemAImpl_or_u8,  iemAImpl_or_u8_locked,
    623     iemAImpl_or_u16, iemAImpl_or_u16_locked,
    624     iemAImpl_or_u32, iemAImpl_or_u32_locked,
    625     iemAImpl_or_u64, iemAImpl_or_u64_locked
    626 };
    627 
    628 /** Function table for the XOR instruction. */
    629 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
    630 {
    631     iemAImpl_xor_u8,  iemAImpl_xor_u8_locked,
    632     iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
    633     iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
    634     iemAImpl_xor_u64, iemAImpl_xor_u64_locked
    635 };
    636 
    637 /** Function table for the AND instruction. */
    638 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
    639 {
    640     iemAImpl_and_u8,  iemAImpl_and_u8_locked,
    641     iemAImpl_and_u16, iemAImpl_and_u16_locked,
    642     iemAImpl_and_u32, iemAImpl_and_u32_locked,
    643     iemAImpl_and_u64, iemAImpl_and_u64_locked
    644 };
    645 
    646 /** Function table for the CMP instruction.
    647  * @remarks Making operand order ASSUMPTIONS.
    648  */
    649 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
    650 {
    651     iemAImpl_cmp_u8,  NULL,
    652     iemAImpl_cmp_u16, NULL,
    653     iemAImpl_cmp_u32, NULL,
    654     iemAImpl_cmp_u64, NULL
    655 };
    656 
    657 /** Function table for the TEST instruction.
    658  * @remarks Making operand order ASSUMPTIONS.
    659  */
    660 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
    661 {
    662     iemAImpl_test_u8,  NULL,
    663     iemAImpl_test_u16, NULL,
    664     iemAImpl_test_u32, NULL,
    665     iemAImpl_test_u64, NULL
    666 };
    667 
    668 /** Function table for the BT instruction. */
    669 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
    670 {
    671     NULL,  NULL,
    672     iemAImpl_bt_u16, NULL,
    673     iemAImpl_bt_u32, NULL,
    674     iemAImpl_bt_u64, NULL
    675 };
    676 
    677 /** Function table for the BTC instruction. */
    678 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
    679 {
    680     NULL,  NULL,
    681     iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
    682     iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
    683     iemAImpl_btc_u64, iemAImpl_btc_u64_locked
    684 };
    685 
    686 /** Function table for the BTR instruction. */
    687 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
    688 {
    689     NULL,  NULL,
    690     iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
    691     iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
    692     iemAImpl_btr_u64, iemAImpl_btr_u64_locked
    693 };
    694 
    695 /** Function table for the BTS instruction. */
    696 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
    697 {
    698     NULL,  NULL,
    699     iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
    700     iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
    701     iemAImpl_bts_u64, iemAImpl_bts_u64_locked
    702 };
    703 
    704 /** Function table for the BSF instruction. */
    705 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
    706 {
    707     NULL,  NULL,
    708     iemAImpl_bsf_u16, NULL,
    709     iemAImpl_bsf_u32, NULL,
    710     iemAImpl_bsf_u64, NULL
    711 };
    712 
    713 /** Function table for the BSF instruction, AMD EFLAGS variant. */
    714 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_amd =
    715 {
    716     NULL,  NULL,
    717     iemAImpl_bsf_u16_amd, NULL,
    718     iemAImpl_bsf_u32_amd, NULL,
    719     iemAImpl_bsf_u64_amd, NULL
    720 };
    721 
    722 /** Function table for the BSF instruction, Intel EFLAGS variant. */
    723 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_intel =
    724 {
    725     NULL,  NULL,
    726     iemAImpl_bsf_u16_intel, NULL,
    727     iemAImpl_bsf_u32_intel, NULL,
    728     iemAImpl_bsf_u64_intel, NULL
    729 };
    730 
    731 /** EFLAGS variation selection table for the BSF instruction. */
    732 IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsf_eflags[] =
    733 {
    734     &g_iemAImpl_bsf,
    735     &g_iemAImpl_bsf_intel,
    736     &g_iemAImpl_bsf_amd,
    737     &g_iemAImpl_bsf,
    738 };
    739 
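All the *_eflags tables in this file share one layout: index 0 is the generic implementation, 1 the Intel-flavoured one, 2 the AMD-flavoured one, and 3 repeats the generic entry so any two-bit selector is safe. How the selector is really derived is not shown in this changeset; the sketch below just picks it from the guest-vendor macros defined earlier:

    /* Illustrative selection only. */
    unsigned const idxFlavour = IEM_IS_GUEST_CPU_INTEL(pVCpu) ? 1
                              : IEM_IS_GUEST_CPU_AMD(pVCpu)   ? 2 : 0;
    PCIEMOPBINSIZES const pImpl = g_iemAImpl_bsf_eflags[idxFlavour & 3];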
    740 /** Function table for the BSR instruction. */
    741 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
    742 {
    743     NULL,  NULL,
    744     iemAImpl_bsr_u16, NULL,
    745     iemAImpl_bsr_u32, NULL,
    746     iemAImpl_bsr_u64, NULL
    747 };
    748 
    749 /** Function table for the BSR instruction, AMD EFLAGS variant. */
    750 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_amd =
    751 {
    752     NULL,  NULL,
    753     iemAImpl_bsr_u16_amd, NULL,
    754     iemAImpl_bsr_u32_amd, NULL,
    755     iemAImpl_bsr_u64_amd, NULL
    756 };
    757 
    758 /** Function table for the BSR instruction, Intel EFLAGS variant. */
    759 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_intel =
    760 {
    761     NULL,  NULL,
    762     iemAImpl_bsr_u16_intel, NULL,
    763     iemAImpl_bsr_u32_intel, NULL,
    764     iemAImpl_bsr_u64_intel, NULL
    765 };
    766 
    767 /** EFLAGS variation selection table for the BSR instruction. */
    768 IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsr_eflags[] =
    769 {
    770     &g_iemAImpl_bsr,
    771     &g_iemAImpl_bsr_intel,
    772     &g_iemAImpl_bsr_amd,
    773     &g_iemAImpl_bsr,
    774 };
    775 
    776 /** Function table for the IMUL instruction. */
    777 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
    778 {
    779     NULL,  NULL,
    780     iemAImpl_imul_two_u16, NULL,
    781     iemAImpl_imul_two_u32, NULL,
    782     iemAImpl_imul_two_u64, NULL
    783 };
    784 
    785 /** Function table for the IMUL instruction, AMD EFLAGS variant. */
    786 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_amd =
    787 {
    788     NULL,  NULL,
    789     iemAImpl_imul_two_u16_amd, NULL,
    790     iemAImpl_imul_two_u32_amd, NULL,
    791     iemAImpl_imul_two_u64_amd, NULL
    792 };
    793 
    794 /** Function table for the IMUL instruction, Intel EFLAGS variant. */
    795 IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_intel =
    796 {
    797     NULL,  NULL,
    798     iemAImpl_imul_two_u16_intel, NULL,
    799     iemAImpl_imul_two_u32_intel, NULL,
    800     iemAImpl_imul_two_u64_intel, NULL
    801 };
    802 
    803 /** EFLAGS variation selection table for the IMUL instruction. */
    804 IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_imul_two_eflags[] =
    805 {
    806     &g_iemAImpl_imul_two,
    807     &g_iemAImpl_imul_two_intel,
    808     &g_iemAImpl_imul_two_amd,
    809     &g_iemAImpl_imul_two,
    810 };
    811 
    812 /** EFLAGS variation selection table for the 16-bit IMUL instruction. */
    813 IEM_STATIC PFNIEMAIMPLBINU16 const g_iemAImpl_imul_two_u16_eflags[] =
    814 {
    815     iemAImpl_imul_two_u16,
    816     iemAImpl_imul_two_u16_intel,
    817     iemAImpl_imul_two_u16_amd,
    818     iemAImpl_imul_two_u16,
    819 };
    820 
    821 /** EFLAGS variation selection table for the 32-bit IMUL instruction. */
    822 IEM_STATIC PFNIEMAIMPLBINU32 const g_iemAImpl_imul_two_u32_eflags[] =
    823 {
    824     iemAImpl_imul_two_u32,
    825     iemAImpl_imul_two_u32_intel,
    826     iemAImpl_imul_two_u32_amd,
    827     iemAImpl_imul_two_u32,
    828 };
    829 
    830 /** EFLAGS variation selection table for the 64-bit IMUL instruction. */
    831 IEM_STATIC PFNIEMAIMPLBINU64 const g_iemAImpl_imul_two_u64_eflags[] =
    832 {
    833     iemAImpl_imul_two_u64,
    834     iemAImpl_imul_two_u64_intel,
    835     iemAImpl_imul_two_u64_amd,
    836     iemAImpl_imul_two_u64,
    837 };
    838 
    839 /** Group 1 /r lookup table. */
    840 IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
    841 {
    842     &g_iemAImpl_add,
    843     &g_iemAImpl_or,
    844     &g_iemAImpl_adc,
    845     &g_iemAImpl_sbb,
    846     &g_iemAImpl_and,
    847     &g_iemAImpl_sub,
    848     &g_iemAImpl_xor,
    849     &g_iemAImpl_cmp
    850 };
    851 
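Group 1 opcodes (0x80..0x83) share one opcode byte and select the actual operation from bits 5:3 of the ModRM byte; a minimal sketch of that dispatch:

    /* Illustrative: ModRM = 0x35 -> reg field (0x35 >> 3) & 7 = 6 -> XOR. */
    uint8_t const         bRm   = 0x35;
    PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];   /* &g_iemAImpl_xor */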
    852 /** Function table for the INC instruction. */
    853 IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
    854 {
    855     iemAImpl_inc_u8,  iemAImpl_inc_u8_locked,
    856     iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
    857     iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
    858     iemAImpl_inc_u64, iemAImpl_inc_u64_locked
    859 };
    860 
    861 /** Function table for the DEC instruction. */
    862 IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
    863 {
    864     iemAImpl_dec_u8,  iemAImpl_dec_u8_locked,
    865     iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
    866     iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
    867     iemAImpl_dec_u64, iemAImpl_dec_u64_locked
    868 };
    869 
    870 /** Function table for the NEG instruction. */
    871 IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
    872 {
    873     iemAImpl_neg_u8,  iemAImpl_neg_u8_locked,
    874     iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
    875     iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
    876     iemAImpl_neg_u64, iemAImpl_neg_u64_locked
    877 };
    878 
    879 /** Function table for the NOT instruction. */
    880 IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
    881 {
    882     iemAImpl_not_u8,  iemAImpl_not_u8_locked,
    883     iemAImpl_not_u16, iemAImpl_not_u16_locked,
    884     iemAImpl_not_u32, iemAImpl_not_u32_locked,
    885     iemAImpl_not_u64, iemAImpl_not_u64_locked
    886 };
    887 
    888 
    889 /** Function table for the ROL instruction. */
    890 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
    891 {
    892     iemAImpl_rol_u8,
    893     iemAImpl_rol_u16,
    894     iemAImpl_rol_u32,
    895     iemAImpl_rol_u64
    896 };
    897 
    898 /** Function table for the ROL instruction, AMD EFLAGS variant. */
    899 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_amd =
    900 {
    901     iemAImpl_rol_u8_amd,
    902     iemAImpl_rol_u16_amd,
    903     iemAImpl_rol_u32_amd,
    904     iemAImpl_rol_u64_amd
    905 };
    906 
    907 /** Function table for the ROL instruction, Intel EFLAGS variant. */
    908 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_intel =
    909 {
    910     iemAImpl_rol_u8_intel,
    911     iemAImpl_rol_u16_intel,
    912     iemAImpl_rol_u32_intel,
    913     iemAImpl_rol_u64_intel
    914 };
    915 
    916 /** EFLAGS variation selection table for the ROL instruction. */
    917 IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rol_eflags[] =
    918 {
    919     &g_iemAImpl_rol,
    920     &g_iemAImpl_rol_intel,
    921     &g_iemAImpl_rol_amd,
    922     &g_iemAImpl_rol,
    923 };
    924 
    925 
    926 /** Function table for the ROR instruction. */
    927 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
    928 {
    929     iemAImpl_ror_u8,
    930     iemAImpl_ror_u16,
    931     iemAImpl_ror_u32,
    932     iemAImpl_ror_u64
    933 };
    934 
    935 /** Function table for the ROR instruction, AMD EFLAGS variant. */
    936 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_amd =
    937 {
    938     iemAImpl_ror_u8_amd,
    939     iemAImpl_ror_u16_amd,
    940     iemAImpl_ror_u32_amd,
    941     iemAImpl_ror_u64_amd
    942 };
    943 
    944 /** Function table for the ROR instruction, Intel EFLAGS variant. */
    945 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_intel =
    946 {
    947     iemAImpl_ror_u8_intel,
    948     iemAImpl_ror_u16_intel,
    949     iemAImpl_ror_u32_intel,
    950     iemAImpl_ror_u64_intel
    951 };
    952 
    953 /** EFLAGS variation selection table for the ROR instruction. */
    954 IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_ror_eflags[] =
    955 {
    956     &g_iemAImpl_ror,
    957     &g_iemAImpl_ror_intel,
    958     &g_iemAImpl_ror_amd,
    959     &g_iemAImpl_ror,
    960 };
    961 
    962 
    963 /** Function table for the RCL instruction. */
    964 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
    965 {
    966     iemAImpl_rcl_u8,
    967     iemAImpl_rcl_u16,
    968     iemAImpl_rcl_u32,
    969     iemAImpl_rcl_u64
    970 };
    971 
    972 /** Function table for the RCL instruction, AMD EFLAGS variant. */
    973 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_amd =
    974 {
    975     iemAImpl_rcl_u8_amd,
    976     iemAImpl_rcl_u16_amd,
    977     iemAImpl_rcl_u32_amd,
    978     iemAImpl_rcl_u64_amd
    979 };
    980 
    981 /** Function table for the RCL instruction, Intel EFLAGS variant. */
    982 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_intel =
    983 {
    984     iemAImpl_rcl_u8_intel,
    985     iemAImpl_rcl_u16_intel,
    986     iemAImpl_rcl_u32_intel,
    987     iemAImpl_rcl_u64_intel
    988 };
    989 
    990 /** EFLAGS variation selection table for the RCL instruction. */
    991 IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcl_eflags[] =
    992 {
    993     &g_iemAImpl_rcl,
    994     &g_iemAImpl_rcl_intel,
    995     &g_iemAImpl_rcl_amd,
    996     &g_iemAImpl_rcl,
    997 };
    998 
    999 
    1000 /** Function table for the RCR instruction. */
    1001 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
    1002 {
    1003     iemAImpl_rcr_u8,
    1004     iemAImpl_rcr_u16,
    1005     iemAImpl_rcr_u32,
    1006     iemAImpl_rcr_u64
    1007 };
    1008 
    1009 /** Function table for the RCR instruction, AMD EFLAGS variant. */
    1010 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_amd =
    1011 {
    1012     iemAImpl_rcr_u8_amd,
    1013     iemAImpl_rcr_u16_amd,
    1014     iemAImpl_rcr_u32_amd,
    1015     iemAImpl_rcr_u64_amd
    1016 };
    1017 
    1018 /** Function table for the RCR instruction, Intel EFLAGS variant. */
    1019 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_intel =
    1020 {
    1021     iemAImpl_rcr_u8_intel,
    1022     iemAImpl_rcr_u16_intel,
    1023     iemAImpl_rcr_u32_intel,
    1024     iemAImpl_rcr_u64_intel
    1025 };
    1026 
    1027 /** EFLAGS variation selection table for the RCR instruction. */
    1028 IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcr_eflags[] =
    1029 {
    1030     &g_iemAImpl_rcr,
    1031     &g_iemAImpl_rcr_intel,
    1032     &g_iemAImpl_rcr_amd,
    1033     &g_iemAImpl_rcr,
    1034 };
    1035 
    1036 
    1037 /** Function table for the SHL instruction. */
    1038 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
    1039 {
    1040     iemAImpl_shl_u8,
    1041     iemAImpl_shl_u16,
    1042     iemAImpl_shl_u32,
    1043     iemAImpl_shl_u64
    1044 };
    1045 
    1046 /** Function table for the SHL instruction, AMD EFLAGS variant. */
    1047 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_amd =
    1048 {
    1049     iemAImpl_shl_u8_amd,
    1050     iemAImpl_shl_u16_amd,
    1051     iemAImpl_shl_u32_amd,
    1052     iemAImpl_shl_u64_amd
    1053 };
    1054 
    1055 /** Function table for the SHL instruction, Intel EFLAGS variant. */
    1056 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_intel =
    1057 {
    1058     iemAImpl_shl_u8_intel,
    1059     iemAImpl_shl_u16_intel,
    1060     iemAImpl_shl_u32_intel,
    1061     iemAImpl_shl_u64_intel
    1062 };
    1063 
    1064 /** EFLAGS variation selection table for the SHL instruction. */
    1065 IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shl_eflags[] =
    1066 {
    1067     &g_iemAImpl_shl,
    1068     &g_iemAImpl_shl_intel,
    1069     &g_iemAImpl_shl_amd,
    1070     &g_iemAImpl_shl,
    1071 };
    1072 
    1073 
    1074 /** Function table for the SHR instruction. */
    1075 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
    1076 {
    1077     iemAImpl_shr_u8,
    1078     iemAImpl_shr_u16,
    1079     iemAImpl_shr_u32,
    1080     iemAImpl_shr_u64
    1081 };
    1082 
    1083 /** Function table for the SHR instruction, AMD EFLAGS variant. */
    1084 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_amd =
    1085 {
    1086     iemAImpl_shr_u8_amd,
    1087     iemAImpl_shr_u16_amd,
    1088     iemAImpl_shr_u32_amd,
    1089     iemAImpl_shr_u64_amd
    1090 };
    1091 
    1092 /** Function table for the SHR instruction, Intel EFLAGS variant. */
    1093 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_intel =
    1094 {
    1095     iemAImpl_shr_u8_intel,
    1096     iemAImpl_shr_u16_intel,
    1097     iemAImpl_shr_u32_intel,
    1098     iemAImpl_shr_u64_intel
    1099 };
    1100 
    1101 /** EFLAGS variation selection table for the SHR instruction. */
    1102 IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shr_eflags[] =
    1103 {
    1104     &g_iemAImpl_shr,
    1105     &g_iemAImpl_shr_intel,
    1106     &g_iemAImpl_shr_amd,
    1107     &g_iemAImpl_shr,
    1108 };
    1109 
    1110 
    1111 /** Function table for the SAR instruction. */
    1112 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
    1113 {
    1114     iemAImpl_sar_u8,
    1115     iemAImpl_sar_u16,
    1116     iemAImpl_sar_u32,
    1117     iemAImpl_sar_u64
    1118 };
    1119 
    1120 /** Function table for the SAR instruction, AMD EFLAGS variant. */
    1121 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_amd =
    1122 {
    1123     iemAImpl_sar_u8_amd,
    1124     iemAImpl_sar_u16_amd,
    1125     iemAImpl_sar_u32_amd,
    1126     iemAImpl_sar_u64_amd
    1127 };
    1128 
    1129 /** Function table for the SAR instruction, Intel EFLAGS variant. */
    1130 IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_intel =
    1131 {
    1132     iemAImpl_sar_u8_intel,
    1133     iemAImpl_sar_u16_intel,
    1134     iemAImpl_sar_u32_intel,
    1135     iemAImpl_sar_u64_intel
    1136 };
    1137 
    1138 /** EFLAGS variation selection table for the SAR instruction. */
    1139 IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_sar_eflags[] =
    1140 {
    1141     &g_iemAImpl_sar,
    1142     &g_iemAImpl_sar_intel,
    1143     &g_iemAImpl_sar_amd,
    1144     &g_iemAImpl_sar,
    1145 };
    1146 
    1147 
    1148 /** Function table for the MUL instruction. */
    1149 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
    1150 {
    1151     iemAImpl_mul_u8,
    1152     iemAImpl_mul_u16,
    1153     iemAImpl_mul_u32,
    1154     iemAImpl_mul_u64
    1155 };
    1156 
    1157 /** Function table for the MUL instruction, AMD EFLAGS variation. */
    1158 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_amd =
    1159 {
    1160     iemAImpl_mul_u8_amd,
    1161     iemAImpl_mul_u16_amd,
    1162     iemAImpl_mul_u32_amd,
    1163     iemAImpl_mul_u64_amd
    1164 };
    1165 
    1166 /** Function table for the MUL instruction, Intel EFLAGS variation. */
    1167 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_intel =
    1168 {
    1169     iemAImpl_mul_u8_intel,
    1170     iemAImpl_mul_u16_intel,
    1171     iemAImpl_mul_u32_intel,
    1172     iemAImpl_mul_u64_intel
    1173 };
    1174 
    1175 /** EFLAGS variation selection table for the MUL instruction. */
    1176 IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_mul_eflags[] =
    1177 {
    1178     &g_iemAImpl_mul,
    1179     &g_iemAImpl_mul_intel,
    1180     &g_iemAImpl_mul_amd,
    1181     &g_iemAImpl_mul,
    1182 };
    1183 
    1184 /** EFLAGS variation selection table for the 8-bit MUL instruction. */
    1185 IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_mul_u8_eflags[] =
    1186 {
    1187     iemAImpl_mul_u8,
    1188     iemAImpl_mul_u8_intel,
    1189     iemAImpl_mul_u8_amd,
    1190     iemAImpl_mul_u8
    1191 };
    1192 
    1193 
    1194 /** Function table for the IMUL instruction working implicitly on rAX. */
    1195 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
    1196 {
    1197     iemAImpl_imul_u8,
    1198     iemAImpl_imul_u16,
    1199     iemAImpl_imul_u32,
    1200     iemAImpl_imul_u64
    1201 };
    1202 
    1203 /** Function table for the IMUL instruction working implicitly on rAX, AMD EFLAGS variation. */
    1204 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_amd =
    1205 {
    1206     iemAImpl_imul_u8_amd,
    1207     iemAImpl_imul_u16_amd,
    1208     iemAImpl_imul_u32_amd,
    1209     iemAImpl_imul_u64_amd
    1210 };
    1211 
    1212 /** Function table for the IMUL instruction working implicitly on rAX, Intel EFLAGS variation. */
    1213 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_intel =
    1214 {
    1215     iemAImpl_imul_u8_intel,
    1216     iemAImpl_imul_u16_intel,
    1217     iemAImpl_imul_u32_intel,
    1218     iemAImpl_imul_u64_intel
    1219 };
    1220 
    1221 /** EFLAGS variation selection table for the IMUL instruction. */
    1222 IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_imul_eflags[] =
    1223 {
    1224     &g_iemAImpl_imul,
    1225     &g_iemAImpl_imul_intel,
    1226     &g_iemAImpl_imul_amd,
    1227     &g_iemAImpl_imul,
    1228 };
    1229 
    1230 /** EFLAGS variation selection table for the 8-bit IMUL instruction. */
    1231 IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_imul_u8_eflags[] =
    1232 {
    1233     iemAImpl_imul_u8,
    1234     iemAImpl_imul_u8_intel,
    1235     iemAImpl_imul_u8_amd,
    1236     iemAImpl_imul_u8
    1237 };
    1238 
    1239 
    1240 /** Function table for the DIV instruction. */
    1241 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
    1242 {
    1243     iemAImpl_div_u8,
    1244     iemAImpl_div_u16,
    1245     iemAImpl_div_u32,
    1246     iemAImpl_div_u64
    1247 };
    1248 
    1249 /** Function table for the DIV instruction, AMD EFLAGS variation. */
    1250 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_amd =
    1251 {
    1252     iemAImpl_div_u8_amd,
    1253     iemAImpl_div_u16_amd,
    1254     iemAImpl_div_u32_amd,
    1255     iemAImpl_div_u64_amd
    1256 };
    1257 
    1258 /** Function table for the DIV instruction, Intel EFLAGS variation. */
    1259 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_intel =
    1260 {
    1261     iemAImpl_div_u8_intel,
    1262     iemAImpl_div_u16_intel,
    1263     iemAImpl_div_u32_intel,
    1264     iemAImpl_div_u64_intel
    1265 };
    1266 
    1267 /** EFLAGS variation selection table for the DIV instruction. */
    1268 IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_div_eflags[] =
    1269 {
    1270     &g_iemAImpl_div,
    1271     &g_iemAImpl_div_intel,
    1272     &g_iemAImpl_div_amd,
    1273     &g_iemAImpl_div,
    1274 };
    1275 
    1276 /** EFLAGS variation selection table for the 8-bit DIV instruction. */
    1277 IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_div_u8_eflags[] =
    1278 {
    1279     iemAImpl_div_u8,
    1280     iemAImpl_div_u8_intel,
    1281     iemAImpl_div_u8_amd,
    1282     iemAImpl_div_u8
    1283 };
    1284 
    1285 
    1286 /** Function table for the IDIV instruction. */
    1287 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
    1288 {
    1289     iemAImpl_idiv_u8,
    1290     iemAImpl_idiv_u16,
    1291     iemAImpl_idiv_u32,
    1292     iemAImpl_idiv_u64
    1293 };
    1294 
    1295 /** Function table for the IDIV instruction, AMD EFLAGS variation. */
    1296 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_amd =
    1297 {
    1298     iemAImpl_idiv_u8_amd,
    1299     iemAImpl_idiv_u16_amd,
    1300     iemAImpl_idiv_u32_amd,
    1301     iemAImpl_idiv_u64_amd
    1302 };
    1303 
    1304 /** Function table for the IDIV instruction, Intel EFLAGS variation. */
    1305 IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_intel =
    1306 {
    1307     iemAImpl_idiv_u8_intel,
    1308     iemAImpl_idiv_u16_intel,
    1309     iemAImpl_idiv_u32_intel,
    1310     iemAImpl_idiv_u64_intel
    1311 };
    1312 
    1313 /** EFLAGS variation selection table for the IDIV instruction. */
    1314 IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_idiv_eflags[] =
    1315 {
    1316     &g_iemAImpl_idiv,
    1317     &g_iemAImpl_idiv_intel,
    1318     &g_iemAImpl_idiv_amd,
    1319     &g_iemAImpl_idiv,
    1320 };
    1321 
    1322 /** EFLAGS variation selection table for the 8-bit IDIV instruction. */
    1323 IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_idiv_u8_eflags[] =
    1324 {
    1325     iemAImpl_idiv_u8,
    1326     iemAImpl_idiv_u8_intel,
    1327     iemAImpl_idiv_u8_amd,
    1328     iemAImpl_idiv_u8
    1329 };
    1330 
    1331 
    1332 /** Function table for the SHLD instruction. */
    1333 IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
    1334 {
    1335     iemAImpl_shld_u16,
    1336     iemAImpl_shld_u32,
    1337     iemAImpl_shld_u64,
    1338 };
    1339 
    1340 /** Function table for the SHLD instruction, AMD EFLAGS variation. */
    1341 IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_amd =
    1342 {
    1343     iemAImpl_shld_u16_amd,
    1344     iemAImpl_shld_u32_amd,
    1345     iemAImpl_shld_u64_amd
    1346 };
    1347 
    1348 /** Function table for the SHLD instruction, Intel EFLAGS variation. */
    1349 IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_intel =
    1350 {
    1351     iemAImpl_shld_u16_intel,
    1352     iemAImpl_shld_u32_intel,
    1353     iemAImpl_shld_u64_intel
    1354 };
    1355 
    1356 /** EFLAGS variation selection table for the SHLD instruction. */
    1357 IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shld_eflags[] =
    1358 {
    1359     &g_iemAImpl_shld,
    1360     &g_iemAImpl_shld_intel,
    1361     &g_iemAImpl_shld_amd,
    1362     &g_iemAImpl_shld
    1363 };
    1364 
    1365 /** Function table for the SHRD instruction. */
    1366 IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
    1367 {
    1368     iemAImpl_shrd_u16,
    1369     iemAImpl_shrd_u32,
    1370     iemAImpl_shrd_u64
    1371 };
    1372 
    1373 /** Function table for the SHRD instruction, AMD EFLAGS variation. */
    1374 IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_amd =
    1375 {
    1376     iemAImpl_shrd_u16_amd,
    1377     iemAImpl_shrd_u32_amd,
    1378     iemAImpl_shrd_u64_amd
    1379 };
    1380 
    1381 /** Function table for the SHRD instruction, Intel EFLAGS variation. */
    1382 IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_intel =
    1383 {
    1384     iemAImpl_shrd_u16_intel,
    1385     iemAImpl_shrd_u32_intel,
    1386     iemAImpl_shrd_u64_intel
    1387 };
    1388 
    1389 /** EFLAGS variation selection table for the SHRD instruction. */
    1390 IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shrd_eflags[] =
    1391 {
    1392     &g_iemAImpl_shrd,
    1393     &g_iemAImpl_shrd_intel,
    1394     &g_iemAImpl_shrd_amd,
    1395     &g_iemAImpl_shrd
    1396 };
    1397 
    1398 
    1399 /** Function table for the PUNPCKLBW instruction */
    1400 IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw  = { iemAImpl_punpcklbw_u64,  iemAImpl_punpcklbw_u128 };
    1402 /** Function table for the PUNPCKLWD instruction */
    1402 IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd  = { iemAImpl_punpcklwd_u64,  iemAImpl_punpcklwd_u128 };
    1403 /** Function table for the PUNPCKLDQ instruction */
    1404 IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq  = { iemAImpl_punpckldq_u64,  iemAImpl_punpckldq_u128 };
    1405 /** Function table for the PUNPCKLQDQ instruction */
    1406 IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
    1407 
    1408 /** Function table for the PUNPCKHBW instruction */
    1409 IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw  = { iemAImpl_punpckhbw_u64,  iemAImpl_punpckhbw_u128 };
    1411 /** Function table for the PUNPCKHWD instruction */
    1411 IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd  = { iemAImpl_punpckhwd_u64,  iemAImpl_punpckhwd_u128 };
    1412 /** Function table for the PUNPCKHDQ instruction */
    1413 IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq  = { iemAImpl_punpckhdq_u64,  iemAImpl_punpckhdq_u128 };
    1414 /** Function table for the PUNPCKHQDQ instruction */
    1415 IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
    1416 
    1417 /** Function table for the PXOR instruction */
    1418 IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor         = { iemAImpl_pxor_u64,       iemAImpl_pxor_u128 };
    1419 /** Function table for the PCMPEQB instruction */
    1420 IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb      = { iemAImpl_pcmpeqb_u64,    iemAImpl_pcmpeqb_u128 };
    1421 /** Function table for the PCMPEQW instruction */
    1422 IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw      = { iemAImpl_pcmpeqw_u64,    iemAImpl_pcmpeqw_u128 };
    1423 /** Function table for the PCMPEQD instruction */
    1424 IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd      = { iemAImpl_pcmpeqd_u64,    iemAImpl_pcmpeqd_u128 };
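
         /*
          * Illustrative note: each IEMOPMEDIAF2 entry above pairs a 64-bit (MMX)
          * worker with a 128-bit (SSE) one, so a single table per mnemonic serves
          * both operand widths.  A dispatch might conceptually look like this; the
          * member and parameter names are assumptions for illustration only:
          *
          *     if (fIsSse) pImpl->pfnU128(pFpuState, pu128Dst, pu128Src);
          *     else        pImpl->pfnU64(pFpuState, pu64Dst, pu64Src);
          */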
    1425 
    1426 
    1427 #if defined(IEM_LOG_MEMORY_WRITES)
    1428 /** What IEM just wrote. */
    1429 uint8_t g_abIemWrote[256];
    1430 /** How much IEM just wrote. */
    1431 size_t g_cbIemWrote;
    1432 #endif
    1433 
    1434 
    1435 /*********************************************************************************************************************************
    1436 *   Internal Functions                                                                                                           *
    1437 *********************************************************************************************************************************/
    1438 IEM_STATIC VBOXSTRICTRC     iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
    1439 IEM_STATIC VBOXSTRICTRC     iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
    1440 IEM_STATIC VBOXSTRICTRC     iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
    1441 IEM_STATIC VBOXSTRICTRC     iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
    1442 /*IEM_STATIC VBOXSTRICTRC     iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
    1443 IEM_STATIC VBOXSTRICTRC     iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
    1444 IEM_STATIC VBOXSTRICTRC     iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
    1445 IEM_STATIC VBOXSTRICTRC     iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
    1446 IEM_STATIC VBOXSTRICTRC     iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
    1447 IEM_STATIC VBOXSTRICTRC     iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
    1448 IEM_STATIC VBOXSTRICTRC     iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
    1449 IEM_STATIC VBOXSTRICTRC     iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
    1450 IEM_STATIC VBOXSTRICTRC     iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
    1451 IEM_STATIC VBOXSTRICTRC     iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
    1452 IEM_STATIC VBOXSTRICTRC     iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
    1453 IEM_STATIC VBOXSTRICTRC     iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
    1454 IEM_STATIC VBOXSTRICTRC     iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
    1455 #ifdef IEM_WITH_SETJMP
    1456 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
    1457 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
    1458 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
    1459 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
    1460 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
    1461 #endif
    1462 
    1463 IEM_STATIC VBOXSTRICTRC     iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
    1464 IEM_STATIC VBOXSTRICTRC     iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
    1465 IEM_STATIC VBOXSTRICTRC     iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
    1466 IEM_STATIC VBOXSTRICTRC     iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
    1467 IEM_STATIC VBOXSTRICTRC     iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
    1468 IEM_STATIC VBOXSTRICTRC     iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
    1469 IEM_STATIC VBOXSTRICTRC     iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
    1470 IEM_STATIC VBOXSTRICTRC     iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
    1471 IEM_STATIC VBOXSTRICTRC     iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
    1472 IEM_STATIC VBOXSTRICTRC     iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
    1473 IEM_STATIC VBOXSTRICTRC     iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
    1474 IEM_STATIC VBOXSTRICTRC     iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
    1475 IEM_STATIC VBOXSTRICTRC     iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
    1476 IEM_STATIC VBOXSTRICTRC     iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
    1477 IEM_STATIC VBOXSTRICTRC     iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
    1478 IEM_STATIC VBOXSTRICTRC     iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
    1479 DECLINLINE(uint16_t)        iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
    1480 DECLINLINE(uint64_t)        iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
    1481 
    1482 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1483 IEM_STATIC VBOXSTRICTRC     iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
    1484 IEM_STATIC VBOXSTRICTRC     iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
    1485 IEM_STATIC VBOXSTRICTRC     iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
    1486 IEM_STATIC VBOXSTRICTRC     iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
    1487 IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
    1488 IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
    1489 IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
    1490 IEM_STATIC VBOXSTRICTRC     iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr);
    1491 #endif
    1492 
    1493 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    1494 IEM_STATIC VBOXSTRICTRC     iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
    1495 IEM_STATIC VBOXSTRICTRC     iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
    1496 #endif
    1497 
    1498 
    1499 /**
    1500  * Sets the pass up status.
    1501  *
    1502  * @returns VINF_SUCCESS.
    1503  * @param   pVCpu               The cross context virtual CPU structure of the
    1504  *                              calling thread.
    1505  * @param   rcPassUp            The pass up status.  Must be informational.
    1506  *                              VINF_SUCCESS is not allowed.
    1507  */
    1508 IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
    1509 {
    1510     AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
    1511 
    1512     int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
    1513     if (rcOldPassUp == VINF_SUCCESS)
    1514         pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    1515     /* If both are EM scheduling codes, use EM priority rules. */
    1516     else if (   rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
    1517              && rcPassUp    >= VINF_EM_FIRST && rcPassUp    <= VINF_EM_LAST)
    1518     {
    1519         if (rcPassUp < rcOldPassUp)
    1520         {
    1521             Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    1522             pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    1523         }
    1524         else
    1525             Log(("IEM: rcPassUp=%Rrc  rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    1526     }
    1527     /* Override EM scheduling with specific status code. */
    1528     else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
    1529     {
    1530         Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    1531         pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    1532     }
    1533     /* Don't override specific status code, first come first served. */
    1534     else
    1535         Log(("IEM: rcPassUp=%Rrc  rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    1536     return VINF_SUCCESS;
    1537 }
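
         /*
          * Worked example (restating the rules above): if the old pass-up status is
          * VINF_SUCCESS, any informational status is recorded.  When both codes are
          * EM scheduling statuses, the numerically lower one wins; so assuming
          * hypothetical codes with VINF_EM_A < VINF_EM_B, a pending VINF_EM_B is
          * replaced by VINF_EM_A but not vice versa.  A non-EM informational status
          * overrides an EM scheduling one, and otherwise the first specific status
          * is kept (first come, first served).
          */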
    1538 
    1539 
    1540 /**
    1541  * Calculates the CPU mode.
    1542  *
    1543  * This is mainly for updating IEMCPU::enmCpuMode.
    1544  *
    1545  * @returns CPU mode.
    1546  * @param   pVCpu               The cross context virtual CPU structure of the
    1547  *                              calling thread.
    1548  */
    1549 DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
    1550 {
    1551     if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
    1552         return IEMMODE_64BIT;
    1553     if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
    1554         return IEMMODE_32BIT;
    1555     return IEMMODE_16BIT;
    1556 }
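
         /*
          * Decision summary of the checks above: 64-bit guest code (per
          * CPUMIsGuestIn64BitCodeEx) yields IEMMODE_64BIT; otherwise CS.D picks
          * IEMMODE_32BIT (D=1) or IEMMODE_16BIT (D=0).  E.g. a flat protected-mode
          * code segment with D=1 decodes as 32-bit.
          */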
    1557 
    1558 
    1559 /**
    1560  * Initializes the execution state.
    1561  *
    1562  * @param   pVCpu               The cross context virtual CPU structure of the
    1563  *                              calling thread.
    1564  * @param   fBypassHandlers     Whether to bypass access handlers.
    1565  *
    1566  * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
    1567  *          side-effects in strict builds.
    1568  */
    1569 DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
    1570 {
    1571     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    1572     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    1573     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    1574     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    1575     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    1576     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    1577     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    1578     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    1579     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    1580     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    1581 
    1582     pVCpu->iem.s.uCpl               = CPUMGetGuestCPL(pVCpu);
    1583     pVCpu->iem.s.enmCpuMode         = iemCalcCpuMode(pVCpu);
    1584 #ifdef VBOX_STRICT
    1585     pVCpu->iem.s.enmDefAddrMode     = (IEMMODE)0xfe;
    1586     pVCpu->iem.s.enmEffAddrMode     = (IEMMODE)0xfe;
    1587     pVCpu->iem.s.enmDefOpSize       = (IEMMODE)0xfe;
    1588     pVCpu->iem.s.enmEffOpSize       = (IEMMODE)0xfe;
    1589     pVCpu->iem.s.fPrefixes          = 0xfeedbeef;
    1590     pVCpu->iem.s.uRexReg            = 127;
    1591     pVCpu->iem.s.uRexB              = 127;
    1592     pVCpu->iem.s.offModRm           = 127;
    1593     pVCpu->iem.s.uRexIndex          = 127;
    1594     pVCpu->iem.s.iEffSeg            = 127;
    1595     pVCpu->iem.s.idxPrefix          = 127;
    1596     pVCpu->iem.s.uVex3rdReg         = 127;
    1597     pVCpu->iem.s.uVexLength         = 127;
    1598     pVCpu->iem.s.fEvexStuff         = 127;
    1599     pVCpu->iem.s.uFpuOpcode         = UINT16_MAX;
    1600 # ifdef IEM_WITH_CODE_TLB
    1601     pVCpu->iem.s.offInstrNextByte   = UINT16_MAX;
    1602     pVCpu->iem.s.pbInstrBuf         = NULL;
    1603     pVCpu->iem.s.cbInstrBuf         = UINT16_MAX;
    1604     pVCpu->iem.s.cbInstrBufTotal    = UINT16_MAX;
    1605     pVCpu->iem.s.offCurInstrStart   = INT16_MAX;
    1606     pVCpu->iem.s.uInstrBufPc        = UINT64_C(0xc0ffc0ffcff0c0ff);
    1607 # else
    1608     pVCpu->iem.s.offOpcode          = 127;
    1609     pVCpu->iem.s.cbOpcode           = 127;
    1610 # endif
    1611 #endif
    1612 
    1613     pVCpu->iem.s.cActiveMappings    = 0;
    1614     pVCpu->iem.s.iNextMapping       = 0;
    1615     pVCpu->iem.s.rcPassUp           = VINF_SUCCESS;
    1616     pVCpu->iem.s.fBypassHandlers    = fBypassHandlers;
    1617 #if 0
    1618 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1619     if (    CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
    1620         &&  CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
    1621     {
    1622         PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    1623         Assert(pVmcs);
    1624         RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
    1625         if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
    1626         {
    1627            int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
    1628                                                pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
    1629                                                NIL_RTR0PTR /* pvUserR0 */,  NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
    1630            AssertRC(rc);
    1631         }
    1632     }
    1633 #endif
    1634 #endif
    1635 }
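
         /*
          * Usage sketch (informal): a caller pairs this with iemUninitExec so the
          * strict-build poison values above cannot leak into a later run, roughly:
          *
          *     iemInitExec(pVCpu, false);   (fBypassHandlers = false)
          *     rcStrict = ...decode and execute something...;
          *     iemUninitExec(pVCpu);
          */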
    1636 
    1637 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    1638 /**
    1639  * Performs a minimal reinitialization of the execution state.
    1640  *
    1641  * This is intended to be used by VM-exits, SMM, LOADALL and other similar
    1642  * 'world-switch' type operations on the CPU. Currently only nested
    1643  * hardware-virtualization uses it.
    1644  *
    1645  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    1646  */
    1647 IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
    1648 {
    1649     IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
    1650     uint8_t const uCpl    = CPUMGetGuestCPL(pVCpu);
    1651 
    1652     pVCpu->iem.s.uCpl             = uCpl;
    1653     pVCpu->iem.s.enmCpuMode       = enmMode;
    1654     pVCpu->iem.s.enmDefAddrMode   = enmMode;  /** @todo check if this is correct... */
    1655     pVCpu->iem.s.enmEffAddrMode   = enmMode;
    1656     if (enmMode != IEMMODE_64BIT)
    1657     {
    1658         pVCpu->iem.s.enmDefOpSize = enmMode;  /** @todo check if this is correct... */
    1659         pVCpu->iem.s.enmEffOpSize = enmMode;
    1660     }
    1661     else
    1662     {
    1663         pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
    1664         pVCpu->iem.s.enmEffOpSize = enmMode;
    1665     }
    1666     pVCpu->iem.s.iEffSeg          = X86_SREG_DS;
    1667 #ifndef IEM_WITH_CODE_TLB
    1668     /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
    1669     pVCpu->iem.s.offOpcode        = 0;
    1670     pVCpu->iem.s.cbOpcode         = 0;
    1671 #endif
    1672     pVCpu->iem.s.rcPassUp         = VINF_SUCCESS;
    1673 }
    1674 #endif
    1675 
    1676 /**
    1677  * Counterpart to #iemInitExec that undoes evil strict-build stuff.
    1678  *
    1679  * @param   pVCpu               The cross context virtual CPU structure of the
    1680  *                              calling thread.
    1681  */
    1682 DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
    1683 {
    1684     /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
    1685 #ifdef VBOX_STRICT
    1686 # ifdef IEM_WITH_CODE_TLB
    1687     NOREF(pVCpu);
    1688 # else
    1689     pVCpu->iem.s.cbOpcode = 0;
    1690 # endif
    1691 #else
    1692     NOREF(pVCpu);
    1693 #endif
    1694 }
    1695 
    1696 
    1697 /**
    1698  * Initializes the decoder state.
    1699  *
    1700  * iemReInitDecoder is mostly a copy of this function.
    1701  *
    1702  * @param   pVCpu               The cross context virtual CPU structure of the
    1703  *                              calling thread.
    1704  * @param   fBypassHandlers     Whether to bypass access handlers.
    1705  * @param   fDisregardLock      Whether to disregard the LOCK prefix.
    1706  */
    1707 DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
    1708 {
    1709     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    1710     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    1711     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    1712     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    1713     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    1714     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    1715     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    1716     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    1717     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    1718     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    1719 
    1720     pVCpu->iem.s.uCpl               = CPUMGetGuestCPL(pVCpu);
    1721     IEMMODE enmMode = iemCalcCpuMode(pVCpu);
    1722     pVCpu->iem.s.enmCpuMode         = enmMode;
    1723     pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
    1724     pVCpu->iem.s.enmEffAddrMode     = enmMode;
    1725     if (enmMode != IEMMODE_64BIT)
    1726     {
    1727         pVCpu->iem.s.enmDefOpSize   = enmMode;  /** @todo check if this is correct... */
    1728         pVCpu->iem.s.enmEffOpSize   = enmMode;
    1729     }
    1730     else
    1731     {
    1732         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    1733         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    1734     }
    1735     pVCpu->iem.s.fPrefixes          = 0;
    1736     pVCpu->iem.s.uRexReg            = 0;
    1737     pVCpu->iem.s.uRexB              = 0;
    1738     pVCpu->iem.s.uRexIndex          = 0;
    1739     pVCpu->iem.s.idxPrefix          = 0;
    1740     pVCpu->iem.s.uVex3rdReg         = 0;
    1741     pVCpu->iem.s.uVexLength         = 0;
    1742     pVCpu->iem.s.fEvexStuff         = 0;
    1743     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
    1744 #ifdef IEM_WITH_CODE_TLB
    1745     pVCpu->iem.s.pbInstrBuf         = NULL;
    1746     pVCpu->iem.s.offInstrNextByte   = 0;
    1747     pVCpu->iem.s.offCurInstrStart   = 0;
    1748 # ifdef VBOX_STRICT
    1749     pVCpu->iem.s.cbInstrBuf         = UINT16_MAX;
    1750     pVCpu->iem.s.cbInstrBufTotal    = UINT16_MAX;
    1751     pVCpu->iem.s.uInstrBufPc        = UINT64_C(0xc0ffc0ffcff0c0ff);
    1752 # endif
    1753 #else
    1754     pVCpu->iem.s.offOpcode          = 0;
    1755     pVCpu->iem.s.cbOpcode           = 0;
    1756 #endif
    1757     pVCpu->iem.s.offModRm           = 0;
    1758     pVCpu->iem.s.cActiveMappings    = 0;
    1759     pVCpu->iem.s.iNextMapping       = 0;
    1760     pVCpu->iem.s.rcPassUp           = VINF_SUCCESS;
    1761     pVCpu->iem.s.fBypassHandlers    = fBypassHandlers;
    1762     pVCpu->iem.s.fDisregardLock     = fDisregardLock;
    1763 
    1764 #ifdef DBGFTRACE_ENABLED
    1765     switch (enmMode)
    1766     {
    1767         case IEMMODE_64BIT:
    1768             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
    1769             break;
    1770         case IEMMODE_32BIT:
    1771             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    1772             break;
    1773         case IEMMODE_16BIT:
    1774             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    1775             break;
    1776     }
    1777 #endif
    1778 }
    1779 
    1780 
    1781 /**
    1782  * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
    1783  *
    1784  * This is mostly a copy of iemInitDecoder.
    1785  *
    1786  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    1787  */
    1788 DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
    1789 {
    1790     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    1791     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    1792     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    1793     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    1794     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    1795     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    1796     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    1797     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    1798     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    1799 
    1800     pVCpu->iem.s.uCpl               = CPUMGetGuestCPL(pVCpu);   /** @todo this should be updated during execution! */
    1801     IEMMODE enmMode = iemCalcCpuMode(pVCpu);
    1802     pVCpu->iem.s.enmCpuMode         = enmMode;                  /** @todo this should be updated during execution! */
    1803     pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
    1804     pVCpu->iem.s.enmEffAddrMode     = enmMode;
    1805     if (enmMode != IEMMODE_64BIT)
    1806     {
    1807         pVCpu->iem.s.enmDefOpSize   = enmMode;  /** @todo check if this is correct... */
    1808         pVCpu->iem.s.enmEffOpSize   = enmMode;
    1809     }
    1810     else
    1811     {
    1812         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    1813         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    1814     }
    1815     pVCpu->iem.s.fPrefixes          = 0;
    1816     pVCpu->iem.s.uRexReg            = 0;
    1817     pVCpu->iem.s.uRexB              = 0;
    1818     pVCpu->iem.s.uRexIndex          = 0;
    1819     pVCpu->iem.s.idxPrefix          = 0;
    1820     pVCpu->iem.s.uVex3rdReg         = 0;
    1821     pVCpu->iem.s.uVexLength         = 0;
    1822     pVCpu->iem.s.fEvexStuff         = 0;
    1823     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
    1824 #ifdef IEM_WITH_CODE_TLB
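             /*
              * Recompute the opcode buffer window from the new RIP.  Worked example
              * (values illustrative only): with uInstrBufPc = 0x1000 and
              * cbInstrBufTotal = 0x1000, a new flat PC of 0x1234 gives off = 0x234,
              * which is inside the buffer, so the mapping is reused and up to 15
              * opcode bytes are exposed; a PC of 0x2000 or beyond falls outside and
              * forces a refetch (pbInstrBuf = NULL).
              */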
    1825     if (pVCpu->iem.s.pbInstrBuf)
    1826     {
    1827         uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
    1828                      - pVCpu->iem.s.uInstrBufPc;
    1829         if (off < pVCpu->iem.s.cbInstrBufTotal)
    1830         {
    1831             pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
    1832             pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
    1833             if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
    1834                 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
    1835             else
    1836                 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
    1837         }
    1838         else
    1839         {
    1840             pVCpu->iem.s.pbInstrBuf       = NULL;
    1841             pVCpu->iem.s.offInstrNextByte = 0;
    1842             pVCpu->iem.s.offCurInstrStart = 0;
    1843             pVCpu->iem.s.cbInstrBuf       = 0;
    1844             pVCpu->iem.s.cbInstrBufTotal  = 0;
    1845         }
    1846     }
    1847     else
    1848     {
    1849         pVCpu->iem.s.offInstrNextByte = 0;
    1850         pVCpu->iem.s.offCurInstrStart = 0;
    1851         pVCpu->iem.s.cbInstrBuf       = 0;
    1852         pVCpu->iem.s.cbInstrBufTotal  = 0;
    1853     }
    1854 #else
    1855     pVCpu->iem.s.cbOpcode           = 0;
    1856     pVCpu->iem.s.offOpcode          = 0;
    1857 #endif
    1858     pVCpu->iem.s.offModRm           = 0;
    1859     Assert(pVCpu->iem.s.cActiveMappings == 0);
    1860     pVCpu->iem.s.iNextMapping       = 0;
    1861     Assert(pVCpu->iem.s.rcPassUp   == VINF_SUCCESS);
    1862     Assert(pVCpu->iem.s.fBypassHandlers == false);
    1863 
    1864 #ifdef DBGFTRACE_ENABLED
    1865     switch (enmMode)
    1866     {
    1867         case IEMMODE_64BIT:
    1868             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
    1869             break;
    1870         case IEMMODE_32BIT:
    1871             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    1872             break;
    1873         case IEMMODE_16BIT:
    1874             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    1875             break;
    1876     }
    1877 #endif
    1878 }
    1879 
    1880 
    1881 
    1882 /**
    1883  * Prefetches opcodes the first time execution is started.
    1884  *
    1885  * @returns Strict VBox status code.
    1886  * @param   pVCpu               The cross context virtual CPU structure of the
    1887  *                              calling thread.
    1888  * @param   fBypassHandlers     Whether to bypass access handlers.
    1889  * @param   fDisregardLock      Whether to disregard LOCK prefixes.
    1890  *
    1891  * @todo    Combine fDisregardLock and fBypassHandlers into a flag parameter and
    1892  *          store them as such.
    1893  */
    1894 IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
    1895 {
    1896     iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
    1897 
    1898 #ifdef IEM_WITH_CODE_TLB
    1899     /** @todo Do ITLB lookup here. */
    1900 
    1901 #else /* !IEM_WITH_CODE_TLB */
    1902 
    1903     /*
    1904      * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
    1905      *
    1906      * First translate CS:rIP to a physical address.
    1907      */
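             /*
              * Note on the two paths below: in 64-bit mode RIP is already a linear
              * address and only needs the canonical check; in 16/32-bit modes the
              * linear address is CS.base + EIP after the limit check.  E.g. with
              * CS.base = 0x10000 and EIP = 0x1234 the fetch starts at linear
              * 0x11234 (illustrative values).
              */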
    1908     uint32_t    cbToTryRead;
    1909     RTGCPTR     GCPtrPC;
    1910     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    1911     {
    1912         cbToTryRead = GUEST_PAGE_SIZE;
    1913         GCPtrPC     = pVCpu->cpum.GstCtx.rip;
    1914         if (IEM_IS_CANONICAL(GCPtrPC))
    1915             cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
    1916         else
    1917             return iemRaiseGeneralProtectionFault0(pVCpu);
    1918     }
    1919     else
    1920     {
    1921         uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
    1922         AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    1923         if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
    1924             cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
    1925         else
    1926             return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
    1927         if (cbToTryRead) { /* likely */ }
    1928         else /* overflowed */
    1929         {
    1930             Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
    1931             cbToTryRead = UINT32_MAX;
    1932         }
    1933         GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
    1934         Assert(GCPtrPC <= UINT32_MAX);
    1935     }
    1936 
    1937     PGMPTWALK Walk;
    1938     int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
    1939     if (RT_SUCCESS(rc))
    1940         Assert(Walk.fSucceeded); /* probable. */
    1941     else
    1942     {
    1943         Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
    1944 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    1945         if (Walk.fFailed & PGM_WALKFAIL_EPT)
    1946             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
    1947 #endif
    1948         return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
    1949     }
    1950     if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
    1951     else
    1952     {
    1953         Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
    1954 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    1955         if (Walk.fFailed & PGM_WALKFAIL_EPT)
    1956             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    1957 #endif
    1958         return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    1959     }
    1960     if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
    1961     else
    1962     {
    1963         Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
    1964 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    1965         if (Walk.fFailed & PGM_WALKFAIL_EPT)
    1966             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    1967 #endif
    1968         return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    1969     }
    1970     RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
    1971     /** @todo Check reserved bits and such stuff. PGM is better at doing
    1972      *        that, so do it when implementing the guest virtual address
    1973      *        TLB... */
    1974 
    1975     /*
    1976      * Read the bytes at this address.
    1977      */
    1978     uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
    1979     if (cbToTryRead > cbLeftOnPage)
    1980         cbToTryRead = cbLeftOnPage;
    1981     if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
    1982         cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
    1983 
    1984     if (!pVCpu->iem.s.fBypassHandlers)
    1985     {
    1986         VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
    1987         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1988         { /* likely */ }
    1989         else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1990         {
    1991             Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
    1992                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
    1993             rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1994         }
    1995         else
    1996         {
    1997             Log((RT_SUCCESS(rcStrict)
    1998                  ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
    1999                  : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
    2000                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
    2001             return rcStrict;
    2002         }
    2003     }
    2004     else
    2005     {
    2006         rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
    2007         if (RT_SUCCESS(rc))
    2008         { /* likely */ }
    2009         else
    2010         {
    2011             Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
    2012                  GCPtrPC, GCPhys, cbToTryRead, rc));
    2013             return rc;
    2014         }
    2015     }
    2016     pVCpu->iem.s.cbOpcode = cbToTryRead;
    2017 #endif /* !IEM_WITH_CODE_TLB */
    2018     return VINF_SUCCESS;
    2019 }
    2020 
    2021 
    2022 /**
    2023  * Invalidates the IEM TLBs.
    2024  *
    2025  * This is called internally as well as by PGM when moving GC mappings.
    2026  *
    2028  * @param   pVCpu       The cross context virtual CPU structure of the calling
    2029  *                      thread.
    2030  * @param   fVmm        Set when PGM calls us with a remapping.
    2031  */
    2032 VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
    2033 {
    2034 #ifdef IEM_WITH_CODE_TLB
    2035     pVCpu->iem.s.cbInstrBufTotal = 0;
    2036     pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
    2037     if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
    2038     { /* very likely */ }
    2039     else
    2040     {
    2041         pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
    2042         unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
    2043         while (i-- > 0)
    2044             pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
    2045     }
    2046 #endif
    2047 
    2048 #ifdef IEM_WITH_DATA_TLB
    2049     pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
    2050     if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
    2051     { /* very likely */ }
    2052     else
    2053     {
    2054         pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
    2055         unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
    2056         while (i-- > 0)
    2057             pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
    2058     }
    2059 #endif
    2060     NOREF(pVCpu); NOREF(fVmm);
    2061 }
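
         /*
          * Informal note on the revision trick above: TLB tags are stored as
          * (GCPtr >> X86_PAGE_SHIFT) | uTlbRevision, so bumping the revision by
          * IEMTLB_REVISION_INCR makes every stored tag compare unequal without
          * touching the 256 entries; only when the revision counter wraps to zero
          * must all tags be cleared explicitly, as done above.
          */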
    2062 
    2063 
    2064 /**
    2065  * Invalidates a page in the TLBs.
    2066  *
    2067  * @param   pVCpu       The cross context virtual CPU structure of the calling
    2068  *                      thread.
    2069  * @param   GCPtr       The address of the page to invalidate.
    2070  */
    2071 VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
    2072 {
    2073 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    2074     GCPtr = GCPtr >> X86_PAGE_SHIFT;
    2075     AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
    2076     AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
    2077     uintptr_t idx = (uint8_t)GCPtr;
    2078 
    2079 # ifdef IEM_WITH_CODE_TLB
    2080     if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
    2081     {
    2082         pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
    2083         if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
    2084             pVCpu->iem.s.cbInstrBufTotal = 0;
    2085     }
    2086 # endif
    2087 
    2088 # ifdef IEM_WITH_DATA_TLB
    2089     if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
    2090         pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
    2091 # endif
    2092 #else
    2093     NOREF(pVCpu); NOREF(GCPtr);
    2094 #endif
    2095 }
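
         /*
          * Worked example for the indexing above: for GCPtr = 0x7fff12345000 the
          * page number is 0x7fff12345, the direct-mapped slot is its low byte
          * (idx = 0x45), and the entry is zapped only if its tag equals that page
          * number combined with the current TLB revision.
          */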
    2096 
    2097 
    2098 /**
    2099  * Invalidates the host physical aspects of the IEM TLBs.
    2100  *
    2101  * This is called internally as well as by PGM when moving GC mappings.
    2102  *
    2103  * @param   pVCpu       The cross context virtual CPU structure of the calling
    2104  *                      thread.
    2105  */
    2106 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
    2107 {
    2108 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    2109     /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
    2110 
    2111 # ifdef IEM_WITH_CODE_TLB
    2112     pVCpu->iem.s.cbInstrBufTotal = 0;
    2113 # endif
    2114     uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
    2115     if (uTlbPhysRev != 0)
    2116     {
    2117         pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
    2118         pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
    2119     }
    2120     else
    2121     {
    2122         pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
    2123         pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
    2124 
    2125         unsigned i;
    2126 # ifdef IEM_WITH_CODE_TLB
    2127         i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
    2128         while (i-- > 0)
    2129         {
    2130             pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3       = NULL;
    2131             pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
    2132         }
    2133 # endif
    2134 # ifdef IEM_WITH_DATA_TLB
    2135         i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
    2136         while (i-- > 0)
    2137         {
    2138             pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3       = NULL;
    2139             pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
    2140         }
    2141 # endif
    2142     }
    2143 #else
    2144     NOREF(pVCpu);
    2145 #endif
    2146 }
    2147 
    2148 
    2149 /**
    2150  * Invalidates the host physical aspects of the IEM TLBs.
    2151  *
    2152  * This is called internally as well as by PGM when moving GC mappings.
    2153  *
    2154  * @param   pVM         The cross context VM structure.
    2155  *
    2156  * @remarks Caller holds the PGM lock.
    2157  */
    2158 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
    2159 {
    2160     RT_NOREF_PV(pVM);
    2161 }
    2162 
    2163 #ifdef IEM_WITH_CODE_TLB
    2164 
    2165 /**
    2166  * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
    2167  * failure and jumping out via longjmp instead of returning.
    2168  *
    2169  * We end up here for a number of reasons:
    2170  *      - pbInstrBuf isn't yet initialized.
    2171  *      - Advancing beyond the buffer boundary (e.g. cross page).
    2172  *      - Advancing beyond the CS segment limit.
    2173  *      - Fetching from non-mappable page (e.g. MMIO).
    2174  *
    2175  * @param   pVCpu               The cross context virtual CPU structure of the
    2176  *                              calling thread.
    2177  * @param   cbDst               Number of bytes to read.
    2178  * @param   pvDst               Where to return the bytes.
    2179  *
    2180  * @todo    Make cbDst = 0 a way of initializing pbInstrBuf?
    2181  */
    2182 IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
    2183 {
    2184 #ifdef IN_RING3
    2185     for (;;)
    2186     {
    2187         Assert(cbDst <= 8);
    2188         uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
    2189 
    2190         /*
    2191          * We might have a partial buffer match, deal with that first to make the
    2192          * rest simpler.  This is the first part of the cross page/buffer case.
    2193          */
    2194         if (pVCpu->iem.s.pbInstrBuf != NULL)
    2195         {
    2196             if (offBuf < pVCpu->iem.s.cbInstrBuf)
    2197             {
    2198                 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
    2199                 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
    2200                 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
    2201 
    2202                 cbDst  -= cbCopy;
    2203                 pvDst   = (uint8_t *)pvDst + cbCopy;
    2204                 offBuf += cbCopy;
    2205                 pVCpu->iem.s.offInstrNextByte = offBuf; /* offBuf was already advanced by cbCopy above */
    2206             }
    2207         }
    2208 
    2209         /*
    2210          * Check segment limit, figuring how much we're allowed to access at this point.
    2211          *
    2212          * We will fault immediately if RIP is past the segment limit / in non-canonical
    2213          * territory.  If we do continue, there are one or more bytes to read before we
    2214          * end up in trouble and we need to do that first before faulting.
    2215          */
    2216         RTGCPTR  GCPtrFirst;
    2217         uint32_t cbMaxRead;
    2218         if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    2219         {
    2220             GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
    2221             if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
    2222             { /* likely */ }
    2223             else
    2224                 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
    2225             cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
    2226         }
    2227         else
    2228         {
    2229             GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
    2230             Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
    2231             if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
    2232             { /* likely */ }
    2233             else
    2234                 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
    2235             cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
    2236             if (cbMaxRead != 0)
    2237             { /* likely */ }
    2238             else
    2239             {
    2240                 /* Overflowed because address is 0 and limit is max. */
    2241                 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
    2242                 cbMaxRead = X86_PAGE_SIZE;
    2243             }
    2244             GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
    2245             uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
    2246             if (cbMaxRead2 < cbMaxRead)
    2247                 cbMaxRead = cbMaxRead2;
    2248             /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
    2249         }
    2250 
    2251         /*
    2252          * Get the TLB entry for this piece of code.
    2253          */
    2254         uint64_t     uTag  = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
    2255         AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
    2256         PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
    2257         if (pTlbe->uTag == uTag)
    2258         {
    2259             /* likely when executing lots of code, otherwise unlikely */
    2260 # ifdef VBOX_WITH_STATISTICS
    2261             pVCpu->iem.s.CodeTlb.cTlbHits++;
    2262 # endif
    2263         }
    2264         else
    2265         {
    2266             pVCpu->iem.s.CodeTlb.cTlbMisses++;
    2267             PGMPTWALK Walk;
    2268             int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
    2269             if (RT_FAILURE(rc))
    2270             {
    2271 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2272                 /** @todo Nested VMX: Need to handle EPT violation/misconfig here?  */
    2273                 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
    2274 # endif
    2275                 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
    2276                 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
    2277             }
    2278 
    2279             AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
    2280             Assert(Walk.fSucceeded);
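                     /*
                      * Fill the TLB entry.  The permission bits are stored inverted
                      * ("NO_xxx" semantics): ~fEffective keeps US/RW/D as deny bits
                      * and the NX bit is shifted down to bit 0 (pinned by the
                      * AssertCompile above), which seems intended to let the checks
                      * below test for denial with a single mask.
                      */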
    2281             pTlbe->uTag             = uTag;
    2282             pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D))
    2283                                     | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
    2284             pTlbe->GCPhys           = Walk.GCPhys;
    2285             pTlbe->pbMappingR3      = NULL;
    2286         }
    2287 
    2288         /*
    2289          * Check TLB page table level access flags.
    2290          */
    2291         if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
    2292         {
    2293             if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
    2294             {
    2295                 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
    2296                 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    2297             }
    2298             if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
    2299             {
    2300                 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
    2301                 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    2302             }
    2303         }
    2304 
    2305         /*
    2306          * Look up the physical page info if necessary.
    2307          */
    2308         if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
    2309         { /* not necessary */ }
    2310         else
    2311         {
    2312             AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE     == IEMTLBE_F_PG_NO_WRITE);
    2313             AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ      == IEMTLBE_F_PG_NO_READ);
    2314             AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
    2315             pTlbe->fFlagsAndPhysRev &= ~(  IEMTLBE_F_PHYS_REV
    2316                                          | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
    2317             int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
    2318                                                 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
    2319             AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
    2320         }
    2321 
    2322 # if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
    2323         /*
    2324          * Try do a direct read using the pbMappingR3 pointer.
    2325          */
    2326         if (    (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
    2327              == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
    2328         {
    2329             uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
    2330             pVCpu->iem.s.cbInstrBufTotal  = offPg + cbMaxRead;
    2331             if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
    2332             {
    2333                 pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(15, cbMaxRead);
    2334                 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
    2335             }
    2336             else
    2337             {
    2338                 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
    2339                 Assert(cbInstr < cbMaxRead);
    2340                 pVCpu->iem.s.cbInstrBuf       = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
    2341                 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
    2342             }
    2343             if (cbDst <= cbMaxRead)
    2344             {
    2345                 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
    2346                 pVCpu->iem.s.uInstrBufPc      = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
    2347                 pVCpu->iem.s.pbInstrBuf       = pTlbe->pbMappingR3;
    2348                 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
    2349                 return;
    2350             }
    2351             pVCpu->iem.s.pbInstrBuf = NULL;
    2352 
    2353             memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
    2354             pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
    2355         }
    2356         else
    2357 # endif
    2358 # if 0
    2359         /*
    2360          * If there is no special read handling, we can read a bit more and
    2361          * put it in the prefetch buffer.
    2362          */
    2363         if (   cbDst < cbMaxRead
    2364             && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
    2365         {
    2366             VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
    2367                                                 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
    2368             if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    2369             { /* likely */ }
    2370             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2371             {
    2372                 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
    2373                      GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
    2374                 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2375                 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
    2376             }
    2377             else
    2378             {
    2379                 Log((RT_SUCCESS(rcStrict)
    2380                      ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
    2381                      : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
    2382                      GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
    2383                 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    2384             }
    2385         }
    2386         /*
    2387          * Special read handling, so only read exactly what's needed.
    2388          * This is a highly unlikely scenario.
    2389          */
    2390         else
    2391 # endif
    2392         {
    2393             pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
    2394             uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
    2395             VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
    2396                                                 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
    2397             if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    2398             { /* likely */ }
    2399             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2400             {
    2401                 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
    2402                      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
    2403                 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2404                 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
    2405             }
    2406             else
    2407             {
    2408                 Log((RT_SUCCESS(rcStrict)
    2409                      ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
    2410                      : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
    2411                      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
    2412                 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    2413             }
    2414             pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
    2415             if (cbToRead == cbDst)
    2416                 return;
    2417         }
    2418 
    2419         /*
    2420          * More to read, loop.
    2421          */
    2422         cbDst -= cbMaxRead;
    2423         pvDst  = (uint8_t *)pvDst + cbMaxRead;
    2424     }
    2425 #else
    2426     RT_NOREF(pvDst, cbDst);
    2427     longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
    2428 #endif
    2429 }
    2430 
    2431 #else
    2432 
    2433 /**
    2434  * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
    2435  * exception if it fails.
    2436  *
    2437  * @returns Strict VBox status code.
    2438  * @param   pVCpu               The cross context virtual CPU structure of the
    2439  *                              calling thread.
    2440  * @param   cbMin               The minimum number of bytes relative to offOpcode
    2441  *                              that must be read.
    2442  */
    2443 IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
    2444 {
    2445     /*
    2446      * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
    2447      *
    2448      * First translate CS:rIP to a physical address.
    2449      */
    2450     uint8_t     cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
    2451     uint32_t    cbToTryRead;
    2452     RTGCPTR     GCPtrNext;
    2453     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    2454     {
    2455         cbToTryRead = GUEST_PAGE_SIZE;
    2456         GCPtrNext   = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
    2457         if (!IEM_IS_CANONICAL(GCPtrNext))
    2458             return iemRaiseGeneralProtectionFault0(pVCpu);
    2459     }
    2460     else
    2461     {
    2462         uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
    2463         Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
    2464         GCPtrNext32 += pVCpu->iem.s.cbOpcode;
    2465         if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
    2466             return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
    2467         cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
    2468         if (!cbToTryRead) /* overflowed */
    2469         {
    2470             Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
    2471             cbToTryRead = UINT32_MAX;
    2472             /** @todo check out wrapping around the code segment.  */
    2473         }
    2474         if (cbToTryRead < cbMin - cbLeft)
    2475             return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
    2476         GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
    2477     }
    2478 
    2479     /* Only read up to the end of the page, and make sure we don't read more
    2480        than the opcode buffer can hold. */
    2481     uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
    2482     if (cbToTryRead > cbLeftOnPage)
    2483         cbToTryRead = cbLeftOnPage;
    2484     if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
    2485         cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
    2486 /** @todo r=bird: Convert assertion into undefined opcode exception? */
    2487     Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
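    /* Worked example (illustrative, not part of the original source): with
       4 KiB guest pages and GCPtrNext = 0x00401ffa, the page offset is 0xffa,
       so cbLeftOnPage = 0x1000 - 0xffa = 6; a larger request gets clamped to
       those 6 bytes here and the rest is fetched by a later call. */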
    2488 
    2489     PGMPTWALK Walk;
    2490     int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
    2491     if (RT_FAILURE(rc))
    2492     {
    2493         Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
    2494 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2495         if (Walk.fFailed & PGM_WALKFAIL_EPT)
    2496             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
    2497 #endif
    2498         return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
    2499     }
    2500     if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
    2501     {
    2502         Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
    2503 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2504         if (Walk.fFailed & PGM_WALKFAIL_EPT)
    2505             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    2506 #endif
    2507         return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    2508     }
    2509     if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
    2510     {
    2511         Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
    2512 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    2513         if (Walk.fFailed & PGM_WALKFAIL_EPT)
    2514             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    2515 #endif
    2516         return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
    2517     }
    2518     RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
    2519     Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
    2520     /** @todo Check reserved bits and such stuff. PGM is better at doing
    2521      *        that, so do it when implementing the guest virtual address
    2522      *        TLB... */
    2523 
    2524     /*
    2525      * Read the bytes at this address.
    2526      *
    2527      * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
    2528      * and since PATM should only patch the start of an instruction, there
    2529      * should be no need to check again here.
    2530      */
    2531     if (!pVCpu->iem.s.fBypassHandlers)
    2532     {
    2533         VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
    2534                                             cbToTryRead, PGMACCESSORIGIN_IEM);
    2535         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    2536         { /* likely */ }
    2537         else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    2538         {
    2539             Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
    2540                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
    2541             rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    2542         }
    2543         else
    2544         {
    2545             Log((RT_SUCCESS(rcStrict)
    2546                  ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
    2547                  : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
    2548                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
    2549             return rcStrict;
    2550         }
    2551     }
    2552     else
    2553     {
    2554         rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
    2555         if (RT_SUCCESS(rc))
    2556         { /* likely */ }
    2557         else
    2558         {
    2559             Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
    2560             return rc;
    2561         }
    2562     }
    2563     pVCpu->iem.s.cbOpcode += cbToTryRead;
    2564     Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
    2565 
    2566     return VINF_SUCCESS;
    2567 }
    2568 
    2569 #endif /* !IEM_WITH_CODE_TLB */
    2570 #ifndef IEM_WITH_SETJMP
    2571 
    2572 /**
    2573  * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
    2574  *
    2575  * @returns Strict VBox status code.
    2576  * @param   pVCpu               The cross context virtual CPU structure of the
    2577  *                              calling thread.
    2578  * @param   pb                  Where to return the opcode byte.
    2579  */
    2580 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
    2581 {
    2582     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
    2583     if (rcStrict == VINF_SUCCESS)
    2584     {
    2585         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    2586         *pb = pVCpu->iem.s.abOpcode[offOpcode];
    2587         pVCpu->iem.s.offOpcode = offOpcode + 1;
    2588     }
    2589     else
    2590         *pb = 0;
    2591     return rcStrict;
    2592 }
    2593 
    2594 
    2595 /**
    2596  * Fetches the next opcode byte.
    2597  *
    2598  * @returns Strict VBox status code.
    2599  * @param   pVCpu               The cross context virtual CPU structure of the
    2600  *                              calling thread.
    2601  * @param   pu8                 Where to return the opcode byte.
    2602  */
    2603 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
    2604 {
    2605     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    2606     if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    2607     {
    2608         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
    2609         *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
    2610         return VINF_SUCCESS;
    2611     }
    2612     return iemOpcodeGetNextU8Slow(pVCpu, pu8);
    2613 }
    2614 
    2615 #else  /* IEM_WITH_SETJMP */
    2616 
    2617 /**
    2618  * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
    2619  *
    2620  * @returns The opcode byte.
    2621  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2622  */
    2623 DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
    2624 {
    2625 # ifdef IEM_WITH_CODE_TLB
    2626     uint8_t u8;
    2627     iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
    2628     return u8;
    2629 # else
    2630     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
    2631     if (rcStrict == VINF_SUCCESS)
    2632         return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
    2633     longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    2634 # endif
    2635 }
    2636 
    2637 
    2638 /**
    2639  * Fetches the next opcode byte, longjmp on error.
    2640  *
    2641  * @returns The opcode byte.
    2642  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2643  */
    2644 DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
    2645 {
    2646 # ifdef IEM_WITH_CODE_TLB
    2647     uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    2648     uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    2649     if (RT_LIKELY(   pbBuf != NULL
    2650                   && offBuf < pVCpu->iem.s.cbInstrBuf))
    2651     {
    2652         pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
    2653         return pbBuf[offBuf];
    2654     }
    2655 # else
    2656     uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
    2657     if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    2658     {
    2659         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
    2660         return pVCpu->iem.s.abOpcode[offOpcode];
    2661     }
    2662 # endif
    2663     return iemOpcodeGetNextU8SlowJmp(pVCpu);
    2664 }
    2665 
    2666 #endif /* IEM_WITH_SETJMP */
    2667 
    2668 /**
    2669  * Fetches the next opcode byte, returns automatically on failure.
    2670  *
    2671  * @param   a_pu8               Where to return the opcode byte.
    2672  * @remark Implicitly references pVCpu.
    2673  */
    2674 #ifndef IEM_WITH_SETJMP
    2675 # define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
    2676     do \
    2677     { \
    2678         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
    2679         if (rcStrict2 == VINF_SUCCESS) \
    2680         { /* likely */ } \
    2681         else \
    2682             return rcStrict2; \
    2683     } while (0)
    2684 #else
    2685 # define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
    2686 #endif /* IEM_WITH_SETJMP */
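/**
 * Usage sketch (illustrative only, not part of the original source): a
 * hypothetical instruction handler consuming an immediate byte.  It assumes
 * the usual IEM conventions, i.e. @c pVCpu is in scope and, in the non-setjmp
 * build, the enclosing function returns VBOXSTRICTRC so the macro can bail
 * out with the status code.
 *
 * @code
 *     uint8_t u8Imm;
 *     IEM_OPCODE_GET_NEXT_U8(&u8Imm);  // returns or longjmps on fetch failure
 *     // ... use u8Imm ...
 * @endcode
 */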
    2687 
    2688 
    2689 #ifndef IEM_WITH_SETJMP
    2690 /**
    2691  * Fetches the next signed byte from the opcode stream.
    2692  *
    2693  * @returns Strict VBox status code.
    2694  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2695  * @param   pi8                 Where to return the signed byte.
    2696  */
    2697 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
    2698 {
    2699     return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
    2700 }
    2701 #endif /* !IEM_WITH_SETJMP */
    2702 
    2703 
    2704 /**
    2705  * Fetches the next signed byte from the opcode stream, returning automatically
    2706  * on failure.
    2707  *
    2708  * @param   a_pi8               Where to return the signed byte.
    2709  * @remark Implicitly references pVCpu.
    2710  */
    2711 #ifndef IEM_WITH_SETJMP
    2712 # define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
    2713     do \
    2714     { \
    2715         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
    2716         if (rcStrict2 != VINF_SUCCESS) \
    2717             return rcStrict2; \
    2718     } while (0)
    2719 #else /* IEM_WITH_SETJMP */
    2720 # define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
    2721 
    2722 #endif /* IEM_WITH_SETJMP */
    2723 
    2724 #ifndef IEM_WITH_SETJMP
    2725 
    2726 /**
    2727  * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
    2728  *
    2729  * @returns Strict VBox status code.
    2730  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2731  * @param   pu16                Where to return the opcode word.
    2732  */
    2733 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
    2734 {
    2735     uint8_t      u8;
    2736     VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    2737     if (rcStrict == VINF_SUCCESS)
    2738         *pu16 = (int8_t)u8;
    2739     return rcStrict;
    2740 }
    2741 
    2742 
    2743 /**
    2744  * Fetches the next signed byte from the opcode stream, extending it to
    2745  * unsigned 16-bit.
    2746  *
    2747  * @returns Strict VBox status code.
    2748  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2749  * @param   pu16                Where to return the unsigned word.
    2750  */
    2751 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
    2752 {
    2753     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    2754     if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
    2755         return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
    2756 
    2757     *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    2758     pVCpu->iem.s.offOpcode = offOpcode + 1;
    2759     return VINF_SUCCESS;
    2760 }
    2761 
    2762 #endif /* !IEM_WITH_SETJMP */
    2763 
    2764 /**
    2765  * Fetches the next signed byte from the opcode stream and sign-extends it to
    2766  * a word, returning automatically on failure.
    2767  *
    2768  * @param   a_pu16              Where to return the word.
    2769  * @remark Implicitly references pVCpu.
    2770  */
    2771 #ifndef IEM_WITH_SETJMP
    2772 # define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
    2773     do \
    2774     { \
    2775         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
    2776         if (rcStrict2 != VINF_SUCCESS) \
    2777             return rcStrict2; \
    2778     } while (0)
    2779 #else
    2780 # define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
    2781 #endif
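/* Sign-extension example (illustrative): the byte 0x80 reads as (int8_t)-128,
   so IEM_OPCODE_GET_NEXT_S8_SX_U16 stores 0xFF80 in *a_pu16, while 0x7F
   simply becomes 0x007F. */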
    2782 
    2783 #ifndef IEM_WITH_SETJMP
    2784 
    2785 /**
    2786  * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
    2787  *
    2788  * @returns Strict VBox status code.
    2789  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2790  * @param   pu32                Where to return the opcode dword.
    2791  */
    2792 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
    2793 {
    2794     uint8_t      u8;
    2795     VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    2796     if (rcStrict == VINF_SUCCESS)
    2797         *pu32 = (int8_t)u8;
    2798     return rcStrict;
    2799 }
    2800 
    2801 
    2802 /**
    2803  * Fetches the next signed byte from the opcode stream, extending it to
    2804  * unsigned 32-bit.
    2805  *
    2806  * @returns Strict VBox status code.
    2807  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2808  * @param   pu32                Where to return the unsigned dword.
    2809  */
    2810 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
    2811 {
    2812     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    2813     if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
    2814         return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
    2815 
    2816     *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    2817     pVCpu->iem.s.offOpcode = offOpcode + 1;
    2818     return VINF_SUCCESS;
    2819 }
    2820 
    2821 #endif /* !IEM_WITH_SETJMP */
    2822 
    2823 /**
    2824  * Fetches the next signed byte from the opcode stream and sign-extends it to
    2825  * a double word, returning automatically on failure.
    2826  *
    2827  * @param   a_pu32              Where to return the double word.
    2828  * @remark Implicitly references pVCpu.
    2829  */
    2830 #ifndef IEM_WITH_SETJMP
    2831 # define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
    2832     do \
    2833     { \
    2834         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
    2835         if (rcStrict2 != VINF_SUCCESS) \
    2836             return rcStrict2; \
    2837     } while (0)
    2838 #else
    2839 # define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
    2840 #endif
    2841 
    2842 #ifndef IEM_WITH_SETJMP
    2843 
    2844 /**
    2845  * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
    2846  *
    2847  * @returns Strict VBox status code.
    2848  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2849  * @param   pu64                Where to return the opcode qword.
    2850  */
    2851 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
    2852 {
    2853     uint8_t      u8;
    2854     VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
    2855     if (rcStrict == VINF_SUCCESS)
    2856         *pu64 = (int8_t)u8;
    2857     return rcStrict;
    2858 }
    2859 
    2860 
    2861 /**
    2862  * Fetches the next signed byte from the opcode stream, extending it to
    2863  * unsigned 64-bit.
    2864  *
    2865  * @returns Strict VBox status code.
    2866  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2867  * @param   pu64                Where to return the unsigned qword.
    2868  */
    2869 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
    2870 {
    2871     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    2872     if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
    2873         return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
    2874 
    2875     *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
    2876     pVCpu->iem.s.offOpcode = offOpcode + 1;
    2877     return VINF_SUCCESS;
    2878 }
    2879 
    2880 #endif /* !IEM_WITH_SETJMP */
    2881 
    2882 
    2883 /**
    2884  * Fetches the next signed byte from the opcode stream and sign-extends it to
    2885  * a quad word, returning automatically on failure.
    2886  *
    2887  * @param   a_pu64              Where to return the quad word.
    2888  * @remark Implicitly references pVCpu.
    2889  */
    2890 #ifndef IEM_WITH_SETJMP
    2891 # define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
    2892     do \
    2893     { \
    2894         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
    2895         if (rcStrict2 != VINF_SUCCESS) \
    2896             return rcStrict2; \
    2897     } while (0)
    2898 #else
    2899 # define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
    2900 #endif
    2901 
    2902 
    2903 #ifndef IEM_WITH_SETJMP
    2904 /**
    2905  * Fetches the next opcode byte, which is a ModR/M byte, noting down its position.
    2906  *
    2907  * @returns Strict VBox status code.
    2908  * @param   pVCpu               The cross context virtual CPU structure of the
    2909  *                              calling thread.
    2910  * @param   pu8                 Where to return the ModR/M byte.
    2911  */
    2912 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
    2913 {
    2914     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    2915     pVCpu->iem.s.offModRm = offOpcode;
    2916     if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    2917     {
    2918         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
    2919         *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
    2920         return VINF_SUCCESS;
    2921     }
    2922     return iemOpcodeGetNextU8Slow(pVCpu, pu8);
    2923 }
    2924 #else  /* IEM_WITH_SETJMP */
    2925 /**
    2926  * Fetches the next opcode byte, which is a ModR/M byte, longjmp on error.
    2927  *
    2928  * @returns The ModR/M byte.
    2929  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2930  */
    2931 DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
    2932 {
    2933 # ifdef IEM_WITH_CODE_TLB
    2934     uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    2935     pVCpu->iem.s.offModRm  = offBuf;
    2936     uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    2937     if (RT_LIKELY(   pbBuf != NULL
    2938                   && offBuf < pVCpu->iem.s.cbInstrBuf))
    2939     {
    2940         pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
    2941         return pbBuf[offBuf];
    2942     }
    2943 # else
    2944     uintptr_t offOpcode   = pVCpu->iem.s.offOpcode;
    2945     pVCpu->iem.s.offModRm = offOpcode;
    2946     if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    2947     {
    2948         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
    2949         return pVCpu->iem.s.abOpcode[offOpcode];
    2950     }
    2951 # endif
    2952     return iemOpcodeGetNextU8SlowJmp(pVCpu);
    2953 }
    2954 #endif /* IEM_WITH_SETJMP */
    2955 
    2956 /**
    2957  * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
    2958  * on failure.
    2959  *
    2960  * Will note down the position of the ModR/M byte for VT-x exits.
    2961  *
    2962  * @param   a_pbRm              Where to return the RM opcode byte.
    2963  * @remark Implicitly references pVCpu.
    2964  */
    2965 #ifndef IEM_WITH_SETJMP
    2966 # define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
    2967     do \
    2968     { \
    2969         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
    2970         if (rcStrict2 == VINF_SUCCESS) \
    2971         { /* likely */ } \
    2972         else \
    2973             return rcStrict2; \
    2974     } while (0)
    2975 #else
    2976 # define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
    2977 #endif /* IEM_WITH_SETJMP */
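/**
 * Usage sketch (illustrative only, not part of the original source): fetching
 * and cracking the ModR/M byte.  Unlike IEM_OPCODE_GET_NEXT_U8 this also
 * records the byte's offset in pVCpu->iem.s.offModRm for VT-x exit handling.
 *
 * @code
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_RM(&bRm);
 *     uint8_t const uMod = bRm >> 6;         // addressing mode
 *     uint8_t const uReg = (bRm >> 3) & 7;   // register / opcode extension
 *     uint8_t const uRm  = bRm & 7;          // r/m field
 * @endcode
 */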
    2978 
    2979 
    2980 #ifndef IEM_WITH_SETJMP
    2981 
    2982 /**
    2983  * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
    2984  *
    2985  * @returns Strict VBox status code.
    2986  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    2987  * @param   pu16                Where to return the opcode word.
    2988  */
    2989 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
    2990 {
    2991     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    2992     if (rcStrict == VINF_SUCCESS)
    2993     {
    2994         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    2995 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    2996         *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    2997 # else
    2998         *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    2999 # endif
    3000         pVCpu->iem.s.offOpcode = offOpcode + 2;
    3001     }
    3002     else
    3003         *pu16 = 0;
    3004     return rcStrict;
    3005 }
    3006 
    3007 
    3008 /**
    3009  * Fetches the next opcode word.
    3010  *
    3011  * @returns Strict VBox status code.
    3012  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3013  * @param   pu16                Where to return the opcode word.
    3014  */
    3015 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
    3016 {
    3017     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    3018     if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
    3019     {
    3020         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
    3021 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3022         *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    3023 # else
    3024         *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    3025 # endif
    3026         return VINF_SUCCESS;
    3027     }
    3028     return iemOpcodeGetNextU16Slow(pVCpu, pu16);
    3029 }
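/* Byte-order note (illustrative): opcode immediates are little endian, which
   is what the RT_MAKE_U16 construction above encodes; e.g. the byte sequence
   0x34 0x12 in abOpcode yields RT_MAKE_U16(0x34, 0x12) == 0x1234.  The
   IEM_USE_UNALIGNED_DATA_ACCESS path assumes a little endian host that
   tolerates unaligned reads. */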
    3030 
    3031 #else  /* IEM_WITH_SETJMP */
    3032 
    3033 /**
    3034  * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
    3035  *
    3036  * @returns The opcode word.
    3037  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3038  */
    3039 DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
    3040 {
    3041 # ifdef IEM_WITH_CODE_TLB
    3042     uint16_t u16;
    3043     iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
    3044     return u16;
    3045 # else
    3046     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    3047     if (rcStrict == VINF_SUCCESS)
    3048     {
    3049         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    3050         pVCpu->iem.s.offOpcode += 2;
    3051 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3052         return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    3053 #  else
    3054         return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    3055 #  endif
    3056     }
    3057     longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    3058 # endif
    3059 }
    3060 
    3061 
    3062 /**
    3063  * Fetches the next opcode word, longjmp on error.
    3064  *
    3065  * @returns The opcode word.
    3066  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3067  */
    3068 DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
    3069 {
    3070 # ifdef IEM_WITH_CODE_TLB
    3071     uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    3072     uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    3073     if (RT_LIKELY(   pbBuf != NULL
    3074                   && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
    3075     {
    3076         pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
    3077 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3078         return *(uint16_t const *)&pbBuf[offBuf];
    3079 #  else
    3080         return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
    3081 #  endif
    3082     }
    3083 # else
    3084     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    3085     if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
    3086     {
    3087         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
    3088 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3089         return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    3090 #  else
    3091         return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    3092 #  endif
    3093     }
    3094 # endif
    3095     return iemOpcodeGetNextU16SlowJmp(pVCpu);
    3096 }
    3097 
    3098 #endif /* IEM_WITH_SETJMP */
    3099 
    3100 
    3101 /**
    3102  * Fetches the next opcode word, returns automatically on failure.
    3103  *
    3104  * @param   a_pu16              Where to return the opcode word.
    3105  * @remark Implicitly references pVCpu.
    3106  */
    3107 #ifndef IEM_WITH_SETJMP
    3108 # define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
    3109     do \
    3110     { \
    3111         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
    3112         if (rcStrict2 != VINF_SUCCESS) \
    3113             return rcStrict2; \
    3114     } while (0)
    3115 #else
    3116 # define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
    3117 #endif
    3118 
    3119 #ifndef IEM_WITH_SETJMP
    3120 
    3121 /**
    3122  * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
    3123  *
    3124  * @returns Strict VBox status code.
    3125  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3126  * @param   pu32                Where to return the opcode double word.
    3127  */
    3128 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
    3129 {
    3130     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    3131     if (rcStrict == VINF_SUCCESS)
    3132     {
    3133         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    3134         *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    3135         pVCpu->iem.s.offOpcode = offOpcode + 2;
    3136     }
    3137     else
    3138         *pu32 = 0;
    3139     return rcStrict;
    3140 }
    3141 
    3142 
    3143 /**
    3144  * Fetches the next opcode word, zero extending it to a double word.
    3145  *
    3146  * @returns Strict VBox status code.
    3147  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3148  * @param   pu32                Where to return the opcode double word.
    3149  */
    3150 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
    3151 {
    3152     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    3153     if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
    3154         return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
    3155 
    3156     *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    3157     pVCpu->iem.s.offOpcode = offOpcode + 2;
    3158     return VINF_SUCCESS;
    3159 }
    3160 
    3161 #endif /* !IEM_WITH_SETJMP */
    3162 
    3163 
    3164 /**
    3165  * Fetches the next opcode word and zero extends it to a double word, returns
    3166  * automatically on failure.
    3167  *
    3168  * @param   a_pu32              Where to return the opcode double word.
    3169  * @remark Implicitly references pVCpu.
    3170  */
    3171 #ifndef IEM_WITH_SETJMP
    3172 # define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
    3173     do \
    3174     { \
    3175         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
    3176         if (rcStrict2 != VINF_SUCCESS) \
    3177             return rcStrict2; \
    3178     } while (0)
    3179 #else
    3180 # define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
    3181 #endif
    3182 
    3183 #ifndef IEM_WITH_SETJMP
    3184 
    3185 /**
    3186  * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
    3187  *
    3188  * @returns Strict VBox status code.
    3189  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3190  * @param   pu64                Where to return the opcode quad word.
    3191  */
    3192 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
    3193 {
    3194     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
    3195     if (rcStrict == VINF_SUCCESS)
    3196     {
    3197         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    3198         *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    3199         pVCpu->iem.s.offOpcode = offOpcode + 2;
    3200     }
    3201     else
    3202         *pu64 = 0;
    3203     return rcStrict;
    3204 }
    3205 
    3206 
    3207 /**
    3208  * Fetches the next opcode word, zero extending it to a quad word.
    3209  *
    3210  * @returns Strict VBox status code.
    3211  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3212  * @param   pu64                Where to return the opcode quad word.
    3213  */
    3214 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
    3215 {
    3216     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    3217     if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
    3218         return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
    3219 
    3220     *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
    3221     pVCpu->iem.s.offOpcode = offOpcode + 2;
    3222     return VINF_SUCCESS;
    3223 }
    3224 
    3225 #endif /* !IEM_WITH_SETJMP */
    3226 
    3227 /**
    3228  * Fetches the next opcode word and zero extends it to a quad word, returns
    3229  * automatically on failure.
    3230  *
    3231  * @param   a_pu64              Where to return the opcode quad word.
    3232  * @remark Implicitly references pVCpu.
    3233  */
    3234 #ifndef IEM_WITH_SETJMP
    3235 # define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
    3236     do \
    3237     { \
    3238         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
    3239         if (rcStrict2 != VINF_SUCCESS) \
    3240             return rcStrict2; \
    3241     } while (0)
    3242 #else
    3243 # define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64)  (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
    3244 #endif
    3245 
    3246 
    3247 #ifndef IEM_WITH_SETJMP
    3248 /**
    3249  * Fetches the next signed word from the opcode stream.
    3250  *
    3251  * @returns Strict VBox status code.
    3252  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3253  * @param   pi16                Where to return the signed word.
    3254  */
    3255 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
    3256 {
    3257     return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
    3258 }
    3259 #endif /* !IEM_WITH_SETJMP */
    3260 
    3261 
    3262 /**
    3263  * Fetches the next signed word from the opcode stream, returning automatically
    3264  * on failure.
    3265  *
    3266  * @param   a_pi16              Where to return the signed word.
    3267  * @remark Implicitly references pVCpu.
    3268  */
    3269 #ifndef IEM_WITH_SETJMP
    3270 # define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
    3271     do \
    3272     { \
    3273         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
    3274         if (rcStrict2 != VINF_SUCCESS) \
    3275             return rcStrict2; \
    3276     } while (0)
    3277 #else
    3278 # define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
    3279 #endif
    3280 
    3281 #ifndef IEM_WITH_SETJMP
    3282 
    3283 /**
    3284  * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
    3285  *
    3286  * @returns Strict VBox status code.
    3287  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3288  * @param   pu32                Where to return the opcode dword.
    3289  */
    3290 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
    3291 {
    3292     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    3293     if (rcStrict == VINF_SUCCESS)
    3294     {
    3295         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    3296 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3297         *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    3298 # else
    3299         *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3300                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    3301                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    3302                                     pVCpu->iem.s.abOpcode[offOpcode + 3]);
    3303 # endif
    3304         pVCpu->iem.s.offOpcode = offOpcode + 4;
    3305     }
    3306     else
    3307         *pu32 = 0;
    3308     return rcStrict;
    3309 }
    3310 
    3311 
    3312 /**
    3313  * Fetches the next opcode dword.
    3314  *
    3315  * @returns Strict VBox status code.
    3316  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3317  * @param   pu32                Where to return the opcode double word.
    3318  */
    3319 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
    3320 {
    3321     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    3322     if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
    3323     {
    3324         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
    3325 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3326         *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    3327 # else
    3328         *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3329                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    3330                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    3331                                     pVCpu->iem.s.abOpcode[offOpcode + 3]);
    3332 # endif
    3333         return VINF_SUCCESS;
    3334     }
    3335     return iemOpcodeGetNextU32Slow(pVCpu, pu32);
    3336 }
    3337 
    3338 #else  /* IEM_WITH_SETJMP */
    3339 
    3340 /**
    3341  * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
    3342  *
    3343  * @returns The opcode dword.
    3344  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3345  */
    3346 DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
    3347 {
    3348 # ifdef IEM_WITH_CODE_TLB
    3349     uint32_t u32;
    3350     iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
    3351     return u32;
    3352 # else
    3353     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    3354     if (rcStrict == VINF_SUCCESS)
    3355     {
    3356         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    3357         pVCpu->iem.s.offOpcode = offOpcode + 4;
    3358 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3359         return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    3360 #  else
    3361         return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3362                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
    3363                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
    3364                                    pVCpu->iem.s.abOpcode[offOpcode + 3]);
    3365 #  endif
    3366     }
    3367     longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    3368 # endif
    3369 }
    3370 
    3371 
    3372 /**
    3373  * Fetches the next opcode dword, longjmp on error.
    3374  *
    3375  * @returns The opcode dword.
    3376  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3377  */
    3378 DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
    3379 {
    3380 # ifdef IEM_WITH_CODE_TLB
    3381     uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    3382     uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    3383     if (RT_LIKELY(   pbBuf != NULL
    3384                   && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
    3385     {
    3386         pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
    3387 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3388         return *(uint32_t const *)&pbBuf[offBuf];
    3389 #  else
    3390         return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
    3391                                    pbBuf[offBuf + 1],
    3392                                    pbBuf[offBuf + 2],
    3393                                    pbBuf[offBuf + 3]);
    3394 #  endif
    3395     }
    3396 # else
    3397     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    3398     if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
    3399     {
    3400         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
    3401 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3402         return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    3403 #  else
    3404         return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3405                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
    3406                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
    3407                                    pVCpu->iem.s.abOpcode[offOpcode + 3]);
    3408 #  endif
    3409     }
    3410 # endif
    3411     return iemOpcodeGetNextU32SlowJmp(pVCpu);
    3412 }
    3413 
    3414 #endif /* IEM_WITH_SETJMP */
    3415 
    3416 
    3417 /**
    3418  * Fetches the next opcode dword, returns automatically on failure.
    3419  *
    3420  * @param   a_pu32              Where to return the opcode dword.
    3421  * @remark Implicitly references pVCpu.
    3422  */
    3423 #ifndef IEM_WITH_SETJMP
    3424 # define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
    3425     do \
    3426     { \
    3427         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
    3428         if (rcStrict2 != VINF_SUCCESS) \
    3429             return rcStrict2; \
    3430     } while (0)
    3431 #else
    3432 # define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
    3433 #endif
    3434 
    3435 #ifndef IEM_WITH_SETJMP
    3436 
    3437 /**
    3438  * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
    3439  *
    3440  * @returns Strict VBox status code.
    3441  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3442  * @param   pu64                Where to return the opcode dword.
    3443  */
    3444 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
    3445 {
    3446     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    3447     if (rcStrict == VINF_SUCCESS)
    3448     {
    3449         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    3450         *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3451                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    3452                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    3453                                     pVCpu->iem.s.abOpcode[offOpcode + 3]);
    3454         pVCpu->iem.s.offOpcode = offOpcode + 4;
    3455     }
    3456     else
    3457         *pu64 = 0;
    3458     return rcStrict;
    3459 }
    3460 
    3461 
    3462 /**
    3463  * Fetches the next opcode dword, zero extending it to a quad word.
    3464  *
    3465  * @returns Strict VBox status code.
    3466  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3467  * @param   pu64                Where to return the opcode quad word.
    3468  */
    3469 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
    3470 {
    3471     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    3472     if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
    3473         return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
    3474 
    3475     *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3476                                 pVCpu->iem.s.abOpcode[offOpcode + 1],
    3477                                 pVCpu->iem.s.abOpcode[offOpcode + 2],
    3478                                 pVCpu->iem.s.abOpcode[offOpcode + 3]);
    3479     pVCpu->iem.s.offOpcode = offOpcode + 4;
    3480     return VINF_SUCCESS;
    3481 }
    3482 
    3483 #endif /* !IEM_WITH_SETJMP */
    3484 
    3485 
    3486 /**
    3487  * Fetches the next opcode dword and zero extends it to a quad word, returns
    3488  * automatically on failure.
    3489  *
    3490  * @param   a_pu64              Where to return the opcode quad word.
    3491  * @remark Implicitly references pVCpu.
    3492  */
    3493 #ifndef IEM_WITH_SETJMP
    3494 # define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
    3495     do \
    3496     { \
    3497         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
    3498         if (rcStrict2 != VINF_SUCCESS) \
    3499             return rcStrict2; \
    3500     } while (0)
    3501 #else
    3502 # define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
    3503 #endif
    3504 
    3505 
    3506 #ifndef IEM_WITH_SETJMP
    3507 /**
    3508  * Fetches the next signed double word from the opcode stream.
    3509  *
    3510  * @returns Strict VBox status code.
    3511  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3512  * @param   pi32                Where to return the signed double word.
    3513  */
    3514 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
    3515 {
    3516     return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
    3517 }
    3518 #endif
    3519 
    3520 /**
    3521  * Fetches the next signed double word from the opcode stream, returning
    3522  * automatically on failure.
    3523  *
    3524  * @param   a_pi32              Where to return the signed double word.
    3525  * @remark Implicitly references pVCpu.
    3526  */
    3527 #ifndef IEM_WITH_SETJMP
    3528 # define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
    3529     do \
    3530     { \
    3531         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
    3532         if (rcStrict2 != VINF_SUCCESS) \
    3533             return rcStrict2; \
    3534     } while (0)
    3535 #else
    3536 # define IEM_OPCODE_GET_NEXT_S32(a_pi32)    (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
    3537 #endif
    3538 
    3539 #ifndef IEM_WITH_SETJMP
    3540 
    3541 /**
    3542  * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
    3543  *
    3544  * @returns Strict VBox status code.
    3545  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3546  * @param   pu64                Where to return the opcode qword.
    3547  */
    3548 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
    3549 {
    3550     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
    3551     if (rcStrict == VINF_SUCCESS)
    3552     {
    3553         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    3554         *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3555                                              pVCpu->iem.s.abOpcode[offOpcode + 1],
    3556                                              pVCpu->iem.s.abOpcode[offOpcode + 2],
    3557                                              pVCpu->iem.s.abOpcode[offOpcode + 3]);
    3558         pVCpu->iem.s.offOpcode = offOpcode + 4;
    3559     }
    3560     else
    3561         *pu64 = 0;
    3562     return rcStrict;
    3563 }
    3564 
    3565 
    3566 /**
    3567  * Fetches the next opcode dword, sign extending it into a quad word.
    3568  *
    3569  * @returns Strict VBox status code.
    3570  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3571  * @param   pu64                Where to return the opcode quad word.
    3572  */
    3573 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
    3574 {
    3575     uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
    3576     if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
    3577         return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
    3578 
    3579     int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3580                                       pVCpu->iem.s.abOpcode[offOpcode + 1],
    3581                                       pVCpu->iem.s.abOpcode[offOpcode + 2],
    3582                                       pVCpu->iem.s.abOpcode[offOpcode + 3]);
    3583     *pu64 = i32;
    3584     pVCpu->iem.s.offOpcode = offOpcode + 4;
    3585     return VINF_SUCCESS;
    3586 }
    3587 
    3588 #endif /* !IEM_WITH_SETJMP */
    3589 
    3590 
    3591 /**
    3592  * Fetches the next opcode double word and sign extends it to a quad word,
    3593  * returns automatically on failure.
    3594  *
    3595  * @param   a_pu64              Where to return the opcode quad word.
    3596  * @remark Implicitly references pVCpu.
    3597  */
    3598 #ifndef IEM_WITH_SETJMP
    3599 # define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
    3600     do \
    3601     { \
    3602         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
    3603         if (rcStrict2 != VINF_SUCCESS) \
    3604             return rcStrict2; \
    3605     } while (0)
    3606 #else
    3607 # define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
    3608 #endif
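/* Extension example (illustrative): for the dword 0xFFFFFFFF,
   IEM_OPCODE_GET_NEXT_U32_ZX_U64 stores 0x00000000FFFFFFFF while
   IEM_OPCODE_GET_NEXT_S32_SX_U64 stores 0xFFFFFFFFFFFFFFFF, matching the
   sign-extended 32-bit displacements and immediates of 64-bit code. */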
    3609 
    3610 #ifndef IEM_WITH_SETJMP
    3611 
    3612 /**
    3613  * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
    3614  *
    3615  * @returns Strict VBox status code.
    3616  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3617  * @param   pu64                Where to return the opcode qword.
    3618  */
    3619 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
    3620 {
    3621     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
    3622     if (rcStrict == VINF_SUCCESS)
    3623     {
    3624         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    3625 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3626         *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    3627 # else
    3628         *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3629                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    3630                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    3631                                     pVCpu->iem.s.abOpcode[offOpcode + 3],
    3632                                     pVCpu->iem.s.abOpcode[offOpcode + 4],
    3633                                     pVCpu->iem.s.abOpcode[offOpcode + 5],
    3634                                     pVCpu->iem.s.abOpcode[offOpcode + 6],
    3635                                     pVCpu->iem.s.abOpcode[offOpcode + 7]);
    3636 # endif
    3637         pVCpu->iem.s.offOpcode = offOpcode + 8;
    3638     }
    3639     else
    3640         *pu64 = 0;
    3641     return rcStrict;
    3642 }
    3643 
    3644 
    3645 /**
    3646  * Fetches the next opcode qword.
    3647  *
    3648  * @returns Strict VBox status code.
    3649  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3650  * @param   pu64                Where to return the opcode qword.
    3651  */
    3652 DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
    3653 {
    3654     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    3655     if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
    3656     {
    3657 # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3658         *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    3659 # else
    3660         *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3661                                     pVCpu->iem.s.abOpcode[offOpcode + 1],
    3662                                     pVCpu->iem.s.abOpcode[offOpcode + 2],
    3663                                     pVCpu->iem.s.abOpcode[offOpcode + 3],
    3664                                     pVCpu->iem.s.abOpcode[offOpcode + 4],
    3665                                     pVCpu->iem.s.abOpcode[offOpcode + 5],
    3666                                     pVCpu->iem.s.abOpcode[offOpcode + 6],
    3667                                     pVCpu->iem.s.abOpcode[offOpcode + 7]);
    3668 # endif
    3669         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
    3670         return VINF_SUCCESS;
    3671     }
    3672     return iemOpcodeGetNextU64Slow(pVCpu, pu64);
    3673 }
    3674 
    3675 #else  /* IEM_WITH_SETJMP */
    3676 
    3677 /**
    3678  * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
    3679  *
    3680  * @returns The opcode qword.
    3681  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3682  */
    3683 DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
    3684 {
    3685 # ifdef IEM_WITH_CODE_TLB
    3686     uint64_t u64;
    3687     iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
    3688     return u64;
    3689 # else
    3690     VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
    3691     if (rcStrict == VINF_SUCCESS)
    3692     {
    3693         uint8_t offOpcode = pVCpu->iem.s.offOpcode;
    3694         pVCpu->iem.s.offOpcode = offOpcode + 8;
    3695 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3696         return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    3697 #  else
    3698         return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3699                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
    3700                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
    3701                                    pVCpu->iem.s.abOpcode[offOpcode + 3],
    3702                                    pVCpu->iem.s.abOpcode[offOpcode + 4],
    3703                                    pVCpu->iem.s.abOpcode[offOpcode + 5],
    3704                                    pVCpu->iem.s.abOpcode[offOpcode + 6],
    3705                                    pVCpu->iem.s.abOpcode[offOpcode + 7]);
    3706 #  endif
    3707     }
    3708     longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    3709 # endif
    3710 }
    3711 
    3712 
    3713 /**
    3714  * Fetches the next opcode qword, longjmp on error.
    3715  *
    3716  * @returns The opcode qword.
    3717  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    3718  */
    3719 DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
    3720 {
    3721 # ifdef IEM_WITH_CODE_TLB
    3722     uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    3723     uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    3724     if (RT_LIKELY(   pbBuf != NULL
    3725                   && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
    3726     {
    3727         pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
    3728 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3729         return *(uint64_t const *)&pbBuf[offBuf];
    3730 #  else
    3731         return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
    3732                                    pbBuf[offBuf + 1],
    3733                                    pbBuf[offBuf + 2],
    3734                                    pbBuf[offBuf + 3],
    3735                                    pbBuf[offBuf + 4],
    3736                                    pbBuf[offBuf + 5],
    3737                                    pbBuf[offBuf + 6],
    3738                                    pbBuf[offBuf + 7]);
    3739 #  endif
    3740     }
    3741 # else
    3742     uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    3743     if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
    3744     {
    3745         pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
    3746 #  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    3747         return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
    3748 #  else
    3749         return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
    3750                                    pVCpu->iem.s.abOpcode[offOpcode + 1],
    3751                                    pVCpu->iem.s.abOpcode[offOpcode + 2],
    3752                                    pVCpu->iem.s.abOpcode[offOpcode + 3],
    3753                                    pVCpu->iem.s.abOpcode[offOpcode + 4],
    3754                                    pVCpu->iem.s.abOpcode[offOpcode + 5],
    3755                                    pVCpu->iem.s.abOpcode[offOpcode + 6],
    3756                                    pVCpu->iem.s.abOpcode[offOpcode + 7]);
    3757 #  endif
    3758     }
    3759 # endif
    3760     return iemOpcodeGetNextU64SlowJmp(pVCpu);
    3761 }
    3762 
    3763 #endif /* IEM_WITH_SETJMP */
    3764 
    3765 /**
    3766  * Fetches the next opcode quad word, returns automatically on failure.
    3767  *
    3768  * @param   a_pu64              Where to return the opcode quad word.
    3769  * @remark Implicitly references pVCpu.
    3770  */
    3771 #ifndef IEM_WITH_SETJMP
    3772 # define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
    3773     do \
    3774     { \
    3775         VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
    3776         if (rcStrict2 != VINF_SUCCESS) \
    3777             return rcStrict2; \
    3778     } while (0)
    3779 #else
    3780 # define IEM_OPCODE_GET_NEXT_U64(a_pu64)    (*(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu))
    3781 #endif
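/**
 * Usage sketch (illustrative only): reading a full 64-bit immediate, e.g. for
 * MOV r64, imm64 (REX.W + B8+rd), under the same IEM conventions as above.
 *
 * @code
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 * @endcode
 */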
    3782 
    3783 
    3784 /** @name  Misc Worker Functions.
    3785  * @{
    3786  */
    3787 
    3788 /**
    3789  * Gets the exception class for the specified exception vector.
    3790  *
    3791  * @returns The class of the specified exception.
    3792  * @param   uVector       The exception vector.
    3793  */
    3794 IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
    3795 {
    3796     Assert(uVector <= X86_XCPT_LAST);
    3797     switch (uVector)
    3798     {
    3799         case X86_XCPT_DE:
    3800         case X86_XCPT_TS:
    3801         case X86_XCPT_NP:
    3802         case X86_XCPT_SS:
    3803         case X86_XCPT_GP:
    3804         case X86_XCPT_SX:   /* AMD only */
    3805             return IEMXCPTCLASS_CONTRIBUTORY;
    3806 
    3807         case X86_XCPT_PF:
    3808         case X86_XCPT_VE:   /* Intel only */
    3809             return IEMXCPTCLASS_PAGE_FAULT;
    3810 
    3811         case X86_XCPT_DF:
    3812             return IEMXCPTCLASS_DOUBLE_FAULT;
    3813     }
    3814     return IEMXCPTCLASS_BENIGN;
    3815 }
    3816 
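/*
 * Standalone sketch (example only, own minimal types) of the classic x86
 * double-fault matrix that iemGetXcptClass feeds: a contributory exception
 * raised while delivering a contributory one, or a page fault / contributory
 * exception raised while delivering a page fault, escalates to #DF; any
 * combination involving a benign class is handled serially.
 */
#if 0
#include <stdbool.h>
typedef enum { XCLS_BENIGN, XCLS_CONTRIBUTORY, XCLS_PAGE_FAULT } XCLS;
static bool xclsEscalatesToDoubleFault(XCLS enmPrev, XCLS enmCur)
{
    if (enmPrev == XCLS_CONTRIBUTORY)
        return enmCur == XCLS_CONTRIBUTORY;
    if (enmPrev == XCLS_PAGE_FAULT)
        return enmCur == XCLS_CONTRIBUTORY || enmCur == XCLS_PAGE_FAULT;
    return false; /* benign first event: handle the second one normally */
}
#endif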
    3817 
    3818 /**
    3819  * Evaluates how to handle an exception caused during delivery of another event
    3820  * (exception / interrupt).
    3821  *
    3822  * @returns How to handle the recursive exception.
    3823  * @param   pVCpu               The cross context virtual CPU structure of the
    3824  *                              calling thread.
    3825  * @param   fPrevFlags          The flags of the previous event.
    3826  * @param   uPrevVector         The vector of the previous event.
    3827  * @param   fCurFlags           The flags of the current exception.
    3828  * @param   uCurVector          The vector of the current exception.
    3829  * @param   pfXcptRaiseInfo     Where to store additional information about the
    3830  *                              exception condition. Optional.
    3831  */
    3832 VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
    3833                                                     uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
    3834 {
    3835     /*
    3836      * Only CPU exceptions can be raised while delivering other events; exceptions generated
    3837      * by software interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
    3838      */
    3839     AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
    3840     Assert(pVCpu); RT_NOREF(pVCpu);
    3841     Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
    3842 
    3843     IEMXCPTRAISE     enmRaise   = IEMXCPTRAISE_CURRENT_XCPT;
    3844     IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    3845     if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
    3846     {
    3847         IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
    3848         if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
    3849         {
    3850             IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
    3851             if (   enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
    3852                 && (   enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
    3853                     || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
    3854             {
    3855                 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
    3856                 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
    3857                                                                         : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
    3858                 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
    3859                       uCurVector, pVCpu->cpum.GstCtx.cr2));
    3860             }
    3861             else if (   enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
    3862                      && enmCurXcptClass  == IEMXCPTCLASS_CONTRIBUTORY)
    3863             {
    3864                 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
    3865                 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
    3866             }
    3867             else if (   enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
    3868                      && (   enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
    3869                          || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
    3870             {
    3871                 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
    3872                 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
    3873             }
    3874         }
    3875         else
    3876         {
    3877             if (uPrevVector == X86_XCPT_NMI)
    3878             {
    3879                 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
    3880                 if (uCurVector == X86_XCPT_PF)
    3881                 {
    3882                     fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
    3883                     Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
    3884                 }
    3885             }
    3886             else if (   uPrevVector == X86_XCPT_AC
    3887                      && uCurVector  == X86_XCPT_AC)
    3888             {
    3889                 enmRaise   = IEMXCPTRAISE_CPU_HANG;
    3890                 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
    3891                 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
    3892             }
    3893         }
    3894     }
    3895     else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
    3896     {
    3897         fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
    3898         if (uCurVector == X86_XCPT_PF)
    3899             fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
    3900     }
    3901     else
    3902     {
    3903         Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
    3904         fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
    3905     }
    3906 
    3907     if (pfXcptRaiseInfo)
    3908         *pfXcptRaiseInfo = fRaiseInfo;
    3909     return enmRaise;
    3910 }
    3911 
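/*
 * Hedged usage sketch (example only): a #GP raised while delivering a #PF is
 * a contributory exception on top of a page fault and is escalated to #DF by
 * the function above; flag/vector values per the definitions it documents.
 */
#if 0
    IEMXCPTRAISEINFO   fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    IEMXCPTRAISE const enmRaise   = IEMEvaluateRecursiveXcpt(pVCpu,
                                                             IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF, /* previous event */
                                                             IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, /* current exception */
                                                             &fRaiseInfo);
    Assert(enmRaise   == IEMXCPTRAISE_DOUBLE_FAULT);
    Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
#endif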
    3912 
    3913 /**
    3914  * Enters the CPU shutdown state initiated by a triple fault or other
    3915  * unrecoverable conditions.
    3916  *
    3917  * @returns Strict VBox status code.
    3918  * @param   pVCpu           The cross context virtual CPU structure of the
    3919  *                          calling thread.
    3920  */
    3921 IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
    3922 {
    3923     if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    3924         IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
    3925 
    3926     if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
    3927     {
    3928         Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
    3929         IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    3930     }
    3931 
    3932     RT_NOREF(pVCpu);
    3933     return VINF_EM_TRIPLE_FAULT;
    3934 }
    3935 
    3936 
    3937 /**
    3938  * Validates a new SS segment.
    3939  *
    3940  * @returns VBox strict status code.
    3941  * @param   pVCpu           The cross context virtual CPU structure of the
    3942  *                          calling thread.
    3943  * @param   NewSS           The new SS selector.
    3944  * @param   uCpl            The CPL to load the stack for.
    3945  * @param   pDesc           Where to return the descriptor.
    3946  */
    3947 IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
    3948 {
    3949     /* Null selectors are not allowed (we're not called for dispatching
    3950        interrupts with SS=0 in long mode). */
    3951     if (!(NewSS & X86_SEL_MASK_OFF_RPL))
    3952     {
    3953         Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
    3954         return iemRaiseTaskSwitchFault0(pVCpu);
    3955     }
    3956 
    3957     /** @todo testcase: check that the TSS.ssX RPL is checked.  Also check when. */
    3958     if ((NewSS & X86_SEL_RPL) != uCpl)
    3959     {
    3960         Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
    3961         return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
    3962     }
    3963 
    3964     /*
    3965      * Read the descriptor.
    3966      */
    3967     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
    3968     if (rcStrict != VINF_SUCCESS)
    3969         return rcStrict;
    3970 
    3971     /*
    3972      * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
    3973      */
    3974     if (!pDesc->Legacy.Gen.u1DescType)
    3975     {
    3976         Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
    3977         return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
    3978     }
    3979 
    3980     if (    (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
    3981         || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
    3982     {
    3983         Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
    3984         return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
    3985     }
    3986     if (pDesc->Legacy.Gen.u2Dpl != uCpl)
    3987     {
    3988         Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
    3989         return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
    3990     }
    3991 
    3992     /* Is it there? */
    3993     /** @todo testcase: Is this checked before the canonical / limit check below? */
    3994     if (!pDesc->Legacy.Gen.u1Present)
    3995     {
    3996         Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
    3997         return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
    3998     }
    3999 
    4000     return VINF_SUCCESS;
    4001 }
    4002 
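/*
 * Hedged usage sketch (example only): callers chain the SS validation into a
 * stack switch and simply propagate the strict status, since the #TS/#NP has
 * already been raised on failure.  'NewSS' and 'uCpl' are placeholders here.
 */
#if 0
    IEMSELDESC DescSS;
    VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uCpl, &DescSS);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... commit SS from DescSS (base, limit, attributes) ... */
#endif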
    4003 
    4004 /**
    4005  * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
    4006  * not (kind of obsolete now).
    4007  *
    4008  * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
    4009  */
    4010 #define IEMMISC_GET_EFL(a_pVCpu)            ( (a_pVCpu)->cpum.GstCtx.eflags.u  )
    4011 
    4012 /**
    4013  * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
    4014  *
    4015  * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
    4016  * @param   a_fEfl  The new EFLAGS.
    4017  */
    4018 #define IEMMISC_SET_EFL(a_pVCpu, a_fEfl)    do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
    4019 
    4020 /** @} */
    4021 
    4022 
    4023 /** @name  Raising Exceptions.
    4024  *
    4025  * @{
    4026  */
    4027 
    4028 
    4029 /**
    4030  * Loads the specified stack far pointer from the TSS.
    4031  *
    4032  * @returns VBox strict status code.
    4033  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    4034  * @param   uCpl            The CPL to load the stack for.
    4035  * @param   pSelSS          Where to return the new stack segment.
    4036  * @param   puEsp           Where to return the new stack pointer.
    4037  */
    4038 IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
    4039 {
    4040     VBOXSTRICTRC rcStrict;
    4041     Assert(uCpl < 4);
    4042 
    4043     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
    4044     switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
    4045     {
    4046         /*
    4047          * 16-bit TSS (X86TSS16).
    4048          */
    4049         case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
    4050         case X86_SEL_TYPE_SYS_286_TSS_BUSY:
    4051         {
    4052             uint32_t off = uCpl * 4 + 2;
    4053             if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
    4054             {
    4055                 /** @todo check actual access pattern here. */
    4056                 uint32_t u32Tmp = 0; /* gcc maybe... */
    4057                 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
    4058                 if (rcStrict == VINF_SUCCESS)
    4059                 {
    4060                     *puEsp  = RT_LOWORD(u32Tmp);
    4061                     *pSelSS = RT_HIWORD(u32Tmp);
    4062                     return VINF_SUCCESS;
    4063                 }
    4064             }
    4065             else
    4066             {
    4067                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
    4068                 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
    4069             }
    4070             break;
    4071         }
    4072 
    4073         /*
    4074          * 32-bit TSS (X86TSS32).
    4075          */
    4076         case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
    4077         case X86_SEL_TYPE_SYS_386_TSS_BUSY:
    4078         {
    4079             uint32_t off = uCpl * 8 + 4;
    4080             if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
    4081             {
    4082                 /** @todo check actual access pattern here. */
    4083                 uint64_t u64Tmp;
    4084                 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
    4085                 if (rcStrict == VINF_SUCCESS)
    4086                 {
    4087                     *puEsp  = u64Tmp & UINT32_MAX;
    4088                     *pSelSS = (RTSEL)(u64Tmp >> 32);
    4089                     return VINF_SUCCESS;
    4090                 }
    4091             }
    4092             else
    4093             {
    4094                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
    4095                 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
    4096             }
    4097             break;
    4098         }
    4099 
    4100         default:
    4101             AssertFailed();
    4102             rcStrict = VERR_IEM_IPE_4;
    4103             break;
    4104     }
    4105 
    4106     *puEsp  = 0; /* make gcc happy */
    4107     *pSelSS = 0; /* make gcc happy */
    4108     return rcStrict;
    4109 }
    4110 
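/*
 * Standalone illustration (example only) of the ring-stack slot arithmetic
 * above: a 16-bit TSS stores {sp,ss} pairs of 2+2 bytes from offset 2, while
 * a 32-bit TSS stores {esp,ss} pairs of 4+4 bytes from offset 4, hence the
 * 'uCpl * 4 + 2' and 'uCpl * 8 + 4' offsets in the fetches.
 */
#if 0
#include <stdio.h>
int main(void)
{
    for (unsigned uCpl = 0; uCpl < 3; uCpl++)
        printf("CPL%u: TSS16 sp@%u ss@%u | TSS32 esp@%u ss@%u\n", uCpl,
               uCpl * 4 + 2, uCpl * 4 + 4,  /* 16-bit pair */
               uCpl * 8 + 4, uCpl * 8 + 8); /* 32-bit pair */
    return 0; /* prints: CPL0: TSS16 sp@2 ss@4 | TSS32 esp@4 ss@8, etc. */
}
#endif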
    4111 
    4112 /**
    4113  * Loads the specified stack pointer from the 64-bit TSS.
    4114  *
    4115  * @returns VBox strict status code.
    4116  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    4117  * @param   uCpl            The CPL to load the stack for.
    4118  * @param   uIst            The interrupt stack table index, 0 if to use uCpl.
    4119  * @param   puRsp           Where to return the new stack pointer.
    4120  */
    4121 IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
    4122 {
    4123     Assert(uCpl < 4);
    4124     Assert(uIst < 8);
    4125     *puRsp  = 0; /* make gcc happy */
    4126 
    4127     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
    4128     AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
    4129 
    4130     uint32_t off;
    4131     if (uIst)
    4132         off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
    4133     else
    4134         off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
    4135     if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
    4136     {
    4137         Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
    4138         return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
    4139     }
    4140 
    4141     return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
    4142 }
    4143 
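/*
 * Standalone illustration (example only) of the 64-bit TSS arithmetic above:
 * per the Intel layout, RSPn lives at offset 4 + n * 8 and ISTn (1-based) at
 * 36 + (n - 1) * 8, matching the RT_UOFFSETOF() expressions in the function.
 */
#if 0
#include <stdint.h>
static uint32_t tss64StackSlotOff(uint8_t uCpl, uint8_t uIst)
{
    return uIst ? 36 + (uIst - 1) * 8u /* IST table, entries 1..7 */
                : 4  + uCpl * 8u;      /* RSP0..RSP2 */
}
/* tss64StackSlotOff(0,0) == 4, tss64StackSlotOff(2,0) == 20, tss64StackSlotOff(0,7) == 84 */
#endif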
    4144 
    4145 /**
    4146  * Adjust the CPU state according to the exception being raised.
    4147  *
    4148  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    4149  * @param   u8Vector        The exception that has been raised.
    4150  */
    4151 DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
    4152 {
    4153     switch (u8Vector)
    4154     {
    4155         case X86_XCPT_DB:
    4156             IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
    4157             pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
    4158             break;
    4159         /** @todo Read the AMD and Intel exception reference... */
    4160     }
    4161 }
    4162 
    4163 
    4164 /**
    4165  * Implements exceptions and interrupts for real mode.
    4166  *
    4167  * @returns VBox strict status code.
    4168  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    4169  * @param   cbInstr         The number of bytes to offset rIP by in the return
    4170  *                          address.
    4171  * @param   u8Vector        The interrupt / exception vector number.
    4172  * @param   fFlags          The flags.
    4173  * @param   uErr            The error value if IEM_XCPT_FLAGS_ERR is set.
    4174  * @param   uCr2            The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
    4175  */
    4176 IEM_STATIC VBOXSTRICTRC
    4177 iemRaiseXcptOrIntInRealMode(PVMCPUCC    pVCpu,
    4178                             uint8_t     cbInstr,
    4179                             uint8_t     u8Vector,
    4180                             uint32_t    fFlags,
    4181                             uint16_t    uErr,
    4182                             uint64_t    uCr2)
    4183 {
    4184     NOREF(uErr); NOREF(uCr2);
    4185     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    4186 
    4187     /*
    4188      * Read the IDT entry.
    4189      */
    4190     if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
    4191     {
    4192         Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
    4193         return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    4194     }
    4195     RTFAR16 Idte;
    4196     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
    4197     if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    4198     {
    4199         Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
    4200         return rcStrict;
    4201     }
    4202 
    4203     /*
    4204      * Push the stack frame.
    4205      */
    4206     uint16_t *pu16Frame;
    4207     uint64_t  uNewRsp;
    4208     rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
    4209     if (rcStrict != VINF_SUCCESS)
    4210         return rcStrict;
    4211 
    4212     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
    4213 #if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
    4214     AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
    4215     if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
    4216         fEfl |= UINT16_C(0xf000);
    4217 #endif
    4218     pu16Frame[2] = (uint16_t)fEfl;
    4219     pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
    4220     pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
    4221     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
    4222     if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    4223         return rcStrict;
    4224 
    4225     /*
    4226      * Load the vector address into cs:ip and make exception specific state
    4227      * adjustments.
    4228      */
    4229     pVCpu->cpum.GstCtx.cs.Sel           = Idte.sel;
    4230     pVCpu->cpum.GstCtx.cs.ValidSel      = Idte.sel;
    4231     pVCpu->cpum.GstCtx.cs.fFlags        = CPUMSELREG_FLAGS_VALID;
    4232     pVCpu->cpum.GstCtx.cs.u64Base       = (uint32_t)Idte.sel << 4;
    4233     /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
    4234     pVCpu->cpum.GstCtx.rip              = Idte.off;
    4235     fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
    4236     IEMMISC_SET_EFL(pVCpu, fEfl);
    4237 
    4238     /** @todo do we actually do this in real mode? */
    4239     if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
    4240         iemRaiseXcptAdjustState(pVCpu, u8Vector);
    4241 
    4242     return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
    4243 }
    4244 
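/*
 * Standalone illustration (example only) of the real-mode dispatch above: the
 * IVT entry for vector N is a 4-byte offset:segment pair at IDTR.base + N*4,
 * and the 6-byte frame is pushed as FLAGS, CS, IP with IP at the lowest
 * address, mirroring the pu16Frame[2]/[1]/[0] assignments.
 */
#if 0
#include <stdio.h>
int main(void)
{
    unsigned const uVector = 0x21; /* e.g. the classic DOS services vector */
    printf("IVT entry at %#x..%#x; frame: [sp+0]=IP [sp+2]=CS [sp+4]=FLAGS\n",
           uVector * 4u, uVector * 4u + 3u);
    return 0;
}
#endif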
    4245 
    4246 /**
    4247  * Loads a NULL data selector into a segment register when coming from V8086 mode.
    4248  *
    4249  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    4250  * @param   pSReg           Pointer to the segment register.
    4251  */
    4252 IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
    4253 {
    4254     pSReg->Sel      = 0;
    4255     pSReg->ValidSel = 0;
    4256     if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    4257     {
    4258         /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
    4259         pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
    4260         pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
    4261     }
    4262     else
    4263     {
    4264         pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    4265         /** @todo check this on AMD-V */
    4266         pSReg->u64Base  = 0;
    4267         pSReg->u32Limit = 0;
    4268     }
    4269 }
    4270 
    4271 
    4272 /**
    4273  * Loads a segment selector during a task switch in V8086 mode.
    4274  *
    4275  * @param   pSReg           Pointer to the segment register.
    4276  * @param   uSel            The selector value to load.
    4277  */
    4278 IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
    4279 {
    4280     /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
    4281     pSReg->Sel      = uSel;
    4282     pSReg->ValidSel = uSel;
    4283     pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    4284     pSReg->u64Base  = uSel << 4;
    4285     pSReg->u32Limit = 0xffff;
    4286     pSReg->Attr.u   = 0xf3;
    4287 }
    4288 
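/*
 * Standalone check (example only) of the V8086 hidden-state rule above:
 * base = selector * 16 with a fixed 64 KiB limit, e.g. selector 0xB800
 * yields linear base 0xB8000 (the VGA text buffer).
 */
#if 0
#include <stdint.h>
#include <assert.h>
int main(void)
{
    uint16_t const uSel = 0xB800;
    assert(((uint32_t)uSel << 4) == 0xB8000);
    return 0;
}
#endif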
    4289 
    4290 /**
    4291  * Loads a NULL data selector into a selector register, both the hidden and
    4292  * visible parts, in protected mode.
    4293  *
    4294  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    4295  * @param   pSReg               Pointer to the segment register.
    4296  * @param   uRpl                The RPL.
    4297  */
    4298 IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
    4299 {
    4300     /** @todo Testcase: write a testcase checking what happens when loading a NULL
    4301      *        data selector in protected mode. */
    4302     pSReg->Sel      = uRpl;
    4303     pSReg->ValidSel = uRpl;
    4304     pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    4305     if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    4306     {
    4307         /* VT-x (Intel 3960x) observed doing something like this. */
    4308         pSReg->Attr.u   = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
    4309         pSReg->u32Limit = UINT32_MAX;
    4310         pSReg->u64Base  = 0;
    4311     }
    4312     else
    4313     {
    4314         pSReg->Attr.u   = X86DESCATTR_UNUSABLE;
    4315         pSReg->u32Limit = 0;
    4316         pSReg->u64Base  = 0;
    4317     }
    4318 }
    4319 
    4320 
    4321 /**
    4322  * Loads a segment selector during a task switch in protected mode.
    4323  *
    4324  * In this task switch scenario, we would throw \#TS exceptions rather than
    4325  * \#GPs.
    4326  *
    4327  * @returns VBox strict status code.
    4328  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    4329  * @param   pSReg           Pointer to the segment register.
    4330  * @param   uSel            The new selector value.
    4331  *
    4332  * @remarks This does _not_ handle CS or SS.
    4333  * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
    4334  */
    4335 IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
    4336 {
    4337     Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
    4338 
    4339     /* Null data selector. */
    4340     if (!(uSel & X86_SEL_MASK_OFF_RPL))
    4341     {
    4342         iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
    4343         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    4344         CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
    4345         return VINF_SUCCESS;
    4346     }
    4347 
    4348     /* Fetch the descriptor. */
    4349     IEMSELDESC Desc;
    4350     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
    4351     if (rcStrict != VINF_SUCCESS)
    4352     {
    4353         Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
    4354              VBOXSTRICTRC_VAL(rcStrict)));
    4355         return rcStrict;
    4356     }
    4357 
    4358     /* Must be a data segment or readable code segment. */
    4359     if (   !Desc.Legacy.Gen.u1DescType
    4360         || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
    4361     {
    4362         Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
    4363              Desc.Legacy.Gen.u4Type));
    4364         return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
    4365     }
    4366 
    4367     /* Check privileges for data segments and non-conforming code segments. */
    4368     if (   (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
    4369         != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
    4370     {
    4371         /* The RPL and the new CPL must be less than or equal to the DPL. */
    4372         if (   (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
    4373             || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
    4374         {
    4375             Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
    4376                  uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
    4377             return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
    4378         }
    4379     }
    4380 
    4381     /* Is it there? */
    4382     if (!Desc.Legacy.Gen.u1Present)
    4383     {
    4384         Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
    4385         return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
    4386     }
    4387 
    4388     /* The base and limit. */
    4389     uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
    4390     uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
    4391 
    4392     /*
    4393      * Ok, everything checked out fine. Now set the accessed bit before
    4394      * committing the result into the registers.
    4395      */
    4396     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    4397     {
    4398         rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
    4399         if (rcStrict != VINF_SUCCESS)
    4400             return rcStrict;
    4401         Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    4402     }
    4403 
    4404     /* Commit */
    4405     pSReg->Sel      = uSel;
    4406     pSReg->Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
    4407     pSReg->u32Limit = cbLimit;
    4408     pSReg->u64Base  = u64Base;  /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
    4409     pSReg->ValidSel = uSel;
    4410     pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
    4411     if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    4412         pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
    4413 
    4414     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    4415     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
    4416     return VINF_SUCCESS;
    4417 }
    4418 
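/*
 * Standalone sketch (example only) of the privilege rule enforced above for
 * data segments and non-conforming code segments: both the selector RPL and
 * the new CPL must be <= DPL, otherwise a #TS is raised.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
static bool taskSwitchDataSelPrivOk(uint8_t uRpl, uint8_t uCpl, uint8_t uDpl)
{
    return uRpl <= uDpl && uCpl <= uDpl;
}
/* taskSwitchDataSelPrivOk(3, 3, 3) -> true; taskSwitchDataSelPrivOk(3, 0, 0) -> false */
#endif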
    4419 
    4420 /**
    4421  * Performs a task switch.
    4422  *
    4423  * If the task switch is the result of a JMP, CALL or IRET instruction, the
    4424  * caller is responsible for performing the necessary checks (like DPL, TSS
    4425  * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
    4426  * reference for JMP, CALL, IRET.
    4427  *
    4428  * If the task switch is due to a software interrupt or hardware exception,
    4429  * the caller is responsible for validating the TSS selector and descriptor. See
    4430  * Intel Instruction reference for INT n.
    4431  *
    4432  * @returns VBox strict status code.
    4433  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    4434  * @param   enmTaskSwitch   The cause of the task switch.
    4435  * @param   uNextEip        The EIP effective after the task switch.
    4436  * @param   fFlags          The flags, see IEM_XCPT_FLAGS_XXX.
    4437  * @param   uErr            The error value if IEM_XCPT_FLAGS_ERR is set.
    4438  * @param   uCr2            The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
    4439  * @param   SelTSS          The TSS selector of the new task.
    4440  * @param   pNewDescTSS     Pointer to the new TSS descriptor.
    4441  */
    4442 IEM_STATIC VBOXSTRICTRC
    4443 iemTaskSwitch(PVMCPUCC        pVCpu,
    4444               IEMTASKSWITCH   enmTaskSwitch,
    4445               uint32_t        uNextEip,
    4446               uint32_t        fFlags,
    4447               uint16_t        uErr,
    4448               uint64_t        uCr2,
    4449               RTSEL           SelTSS,
    4450               PIEMSELDESC     pNewDescTSS)
    4451 {
    4452     Assert(!IEM_IS_REAL_MODE(pVCpu));
    4453     Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
    4454     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    4455 
    4456     uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
    4457     Assert(   uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
    4458            || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
    4459            || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
    4460            || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
    4461 
    4462     bool const fIsNewTSS386 = (   uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
    4463                                || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
    4464 
    4465     Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
    4466          fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
    4467 
    4468     /* Update CR2 in case it's a page-fault. */
    4469     /** @todo This should probably be done much earlier in IEM/PGM. See
    4470      *        @bugref{5653#c49}. */
    4471     if (fFlags & IEM_XCPT_FLAGS_CR2)
    4472         pVCpu->cpum.GstCtx.cr2 = uCr2;
    4473 
    4474     /*
    4475      * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
    4476      * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
    4477      */
    4478     uint32_t const uNewTSSLimit    = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
    4479     uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
    4480     if (uNewTSSLimit < uNewTSSLimitMin)
    4481     {
    4482         Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
    4483              enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
    4484         return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
    4485     }
    4486 
    4487     /*
    4488      * Task switches in VMX non-root mode always cause a task-switch VM-exit.
    4489      * The new TSS must have been read and validated (DPL, limits etc.) before a
    4490      * task-switch VM-exit commences.
    4491      *
    4492      * See Intel spec. 25.4.2 "Treatment of Task Switches".
    4493      */
    4494     if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    4495     {
    4496         Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
    4497         IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
    4498     }
    4499 
    4500     /*
    4501      * The SVM nested-guest intercept for task-switch takes priority over all exceptions
    4502      * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
    4503      */
    4504     if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
    4505     {
    4506         uint32_t const uExitInfo1 = SelTSS;
    4507         uint32_t       uExitInfo2 = uErr;
    4508         switch (enmTaskSwitch)
    4509         {
    4510             case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
    4511             case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
    4512             default: break;
    4513         }
    4514         if (fFlags & IEM_XCPT_FLAGS_ERR)
    4515             uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
    4516         if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
    4517             uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
    4518 
    4519         Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
    4520         IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
    4521         RT_NOREF2(uExitInfo1, uExitInfo2);
    4522     }
    4523 
    4524     /*
    4525      * Check the current TSS limit. The last field written to the current TSS during the
    4526      * task switch is 2 bytes at offset 0x5C (32-bit) and 2 bytes at offset 0x28 (16-bit).
    4527      * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
    4528      *
    4529      * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
    4530      * end up with smaller than "legal" TSS limits.
    4531      */
    4532     uint32_t const uCurTSSLimit    = pVCpu->cpum.GstCtx.tr.u32Limit;
    4533     uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
    4534     if (uCurTSSLimit < uCurTSSLimitMin)
    4535     {
    4536         Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
    4537              enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
    4538         return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
    4539     }
    4540 
    4541     /*
    4542      * Verify that the new TSS can be accessed and map it. Map only the required contents
    4543      * and not the entire TSS.
    4544      */
    4545     void           *pvNewTSS;
    4546     uint32_t  const cbNewTSS    = uNewTSSLimitMin + 1;
    4547     RTGCPTR   const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
    4548     AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
    4549     /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
    4550      *        not perform correct translation if this happens. See Intel spec. 7.2.1
    4551      *        "Task-State Segment". */
    4552     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
    4553     if (rcStrict != VINF_SUCCESS)
    4554     {
    4555         Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
    4556              cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
    4557         return rcStrict;
    4558     }
    4559 
    4560     /*
    4561      * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
    4562      */
    4563     uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
    4564     if (   enmTaskSwitch == IEMTASKSWITCH_JUMP
    4565         || enmTaskSwitch == IEMTASKSWITCH_IRET)
    4566     {
    4567         PX86DESC pDescCurTSS;
    4568         rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
    4569                              pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
    4570         if (rcStrict != VINF_SUCCESS)
    4571         {
    4572             Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
    4573                  enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
    4574             return rcStrict;
    4575         }
    4576 
    4577         pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
    4578         rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
    4579         if (rcStrict != VINF_SUCCESS)
    4580         {
    4581             Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
    4582                  enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
    4583             return rcStrict;
    4584         }
    4585 
    4586         /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
    4587         if (enmTaskSwitch == IEMTASKSWITCH_IRET)
    4588         {
    4589             Assert(   uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
    4590                    || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
    4591             u32EFlags &= ~X86_EFL_NT;
    4592         }
    4593     }
    4594 
    4595     /*
    4596      * Save the CPU state into the current TSS.
    4597      */
    4598     RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
    4599     if (GCPtrNewTSS == GCPtrCurTSS)
    4600     {
    4601         Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
    4602         Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
    4603              pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
    4604              pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
    4605              pVCpu->cpum.GstCtx.ldtr.Sel));
    4606     }
    4607     if (fIsNewTSS386)
    4608     {
    4609         /*
    4610          * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
    4611          * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
    4612          */
    4613         void          *pvCurTSS32;
    4614         uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
    4615         uint32_t const cbCurTSS  = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
    4616         AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
    4617         rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
    4618         if (rcStrict != VINF_SUCCESS)
    4619         {
    4620             Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
    4621                  enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
    4622             return rcStrict;
    4623         }
    4624 
    4625         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
    4626         PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
    4627         pCurTSS32->eip    = uNextEip;
    4628         pCurTSS32->eflags = u32EFlags;
    4629         pCurTSS32->eax    = pVCpu->cpum.GstCtx.eax;
    4630         pCurTSS32->ecx    = pVCpu->cpum.GstCtx.ecx;
    4631         pCurTSS32->edx    = pVCpu->cpum.GstCtx.edx;
    4632         pCurTSS32->ebx    = pVCpu->cpum.GstCtx.ebx;
    4633         pCurTSS32->esp    = pVCpu->cpum.GstCtx.esp;
    4634         pCurTSS32->ebp    = pVCpu->cpum.GstCtx.ebp;
    4635         pCurTSS32->esi    = pVCpu->cpum.GstCtx.esi;
    4636         pCurTSS32->edi    = pVCpu->cpum.GstCtx.edi;
    4637         pCurTSS32->es     = pVCpu->cpum.GstCtx.es.Sel;
    4638         pCurTSS32->cs     = pVCpu->cpum.GstCtx.cs.Sel;
    4639         pCurTSS32->ss     = pVCpu->cpum.GstCtx.ss.Sel;
    4640         pCurTSS32->ds     = pVCpu->cpum.GstCtx.ds.Sel;
    4641         pCurTSS32->fs     = pVCpu->cpum.GstCtx.fs.Sel;
    4642         pCurTSS32->gs     = pVCpu->cpum.GstCtx.gs.Sel;
    4643 
    4644         rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
    4645         if (rcStrict != VINF_SUCCESS)
    4646         {
    4647             Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
    4648                  VBOXSTRICTRC_VAL(rcStrict)));
    4649             return rcStrict;
    4650         }
    4651     }
    4652     else
    4653     {
    4654         /*
    4655          * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
    4656          */
    4657         void          *pvCurTSS16;
    4658         uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
    4659         uint32_t const cbCurTSS  = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
    4660         AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
    4661         rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
    4662         if (rcStrict != VINF_SUCCESS)
    4663         {
    4664             Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
    4665                  enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
    4666             return rcStrict;
    4667         }
    4668 
    4669         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
    4670         PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
    4671         pCurTSS16->ip    = uNextEip;
    4672         pCurTSS16->flags = u32EFlags;
    4673         pCurTSS16->ax    = pVCpu->cpum.GstCtx.ax;
    4674         pCurTSS16->cx    = pVCpu->cpum.GstCtx.cx;
    4675         pCurTSS16->dx    = pVCpu->cpum.GstCtx.dx;
    4676         pCurTSS16->bx    = pVCpu->cpum.GstCtx.bx;
    4677         pCurTSS16->sp    = pVCpu->cpum.GstCtx.sp;
    4678         pCurTSS16->bp    = pVCpu->cpum.GstCtx.bp;
    4679         pCurTSS16->si    = pVCpu->cpum.GstCtx.si;
    4680         pCurTSS16->di    = pVCpu->cpum.GstCtx.di;
    4681         pCurTSS16->es    = pVCpu->cpum.GstCtx.es.Sel;
    4682         pCurTSS16->cs    = pVCpu->cpum.GstCtx.cs.Sel;
    4683         pCurTSS16->ss    = pVCpu->cpum.GstCtx.ss.Sel;
    4684         pCurTSS16->ds    = pVCpu->cpum.GstCtx.ds.Sel;
    4685 
    4686         rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
    4687         if (rcStrict != VINF_SUCCESS)
    4688         {
    4689             Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
    4690                  VBOXSTRICTRC_VAL(rcStrict)));
    4691             return rcStrict;
    4692         }
    4693     }
    4694 
    4695     /*
    4696      * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
    4697      */
    4698     if (   enmTaskSwitch == IEMTASKSWITCH_CALL
    4699         || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
    4700     {
    4701         /* Whether it's a 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
    4702         PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
    4703         pNewTSS->selPrev  = pVCpu->cpum.GstCtx.tr.Sel;
    4704     }
    4705 
    4706     /*
    4707      * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
    4708      * it's done further below with error handling (e.g. CR3 changes will go through PGM).
    4709      */
    4710     uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
    4711     uint16_t uNewES,  uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
    4712     bool     fNewDebugTrap;
    4713     if (fIsNewTSS386)
    4714     {
    4715         PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
    4716         uNewCr3       = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
    4717         uNewEip       = pNewTSS32->eip;
    4718         uNewEflags    = pNewTSS32->eflags;
    4719         uNewEax       = pNewTSS32->eax;
    4720         uNewEcx       = pNewTSS32->ecx;
    4721         uNewEdx       = pNewTSS32->edx;
    4722         uNewEbx       = pNewTSS32->ebx;
    4723         uNewEsp       = pNewTSS32->esp;
    4724         uNewEbp       = pNewTSS32->ebp;
    4725         uNewEsi       = pNewTSS32->esi;
    4726         uNewEdi       = pNewTSS32->edi;
    4727         uNewES        = pNewTSS32->es;
    4728         uNewCS        = pNewTSS32->cs;
    4729         uNewSS        = pNewTSS32->ss;
    4730         uNewDS        = pNewTSS32->ds;
    4731         uNewFS        = pNewTSS32->fs;
    4732         uNewGS        = pNewTSS32->gs;
    4733         uNewLdt       = pNewTSS32->selLdt;
    4734         fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
    4735     }
    4736     else
    4737     {
    4738         PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
    4739         uNewCr3       = 0;
    4740         uNewEip       = pNewTSS16->ip;
    4741         uNewEflags    = pNewTSS16->flags;
    4742         uNewEax       = UINT32_C(0xffff0000) | pNewTSS16->ax;
    4743         uNewEcx       = UINT32_C(0xffff0000) | pNewTSS16->cx;
    4744         uNewEdx       = UINT32_C(0xffff0000) | pNewTSS16->dx;
    4745         uNewEbx       = UINT32_C(0xffff0000) | pNewTSS16->bx;
    4746         uNewEsp       = UINT32_C(0xffff0000) | pNewTSS16->sp;
    4747         uNewEbp       = UINT32_C(0xffff0000) | pNewTSS16->bp;
    4748         uNewEsi       = UINT32_C(0xffff0000) | pNewTSS16->si;
    4749         uNewEdi       = UINT32_C(0xffff0000) | pNewTSS16->di;
    4750         uNewES        = pNewTSS16->es;
    4751         uNewCS        = pNewTSS16->cs;
    4752         uNewSS        = pNewTSS16->ss;
    4753         uNewDS        = pNewTSS16->ds;
    4754         uNewFS        = 0;
    4755         uNewGS        = 0;
    4756         uNewLdt       = pNewTSS16->selLdt;
    4757         fNewDebugTrap = false;
    4758     }
    4759 
    4760     if (GCPtrNewTSS == GCPtrCurTSS)
    4761         Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
    4762              uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
    4763 
    4764     /*
    4765      * We're done accessing the new TSS.
    4766      */
    4767     rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
    4768     if (rcStrict != VINF_SUCCESS)
    4769     {
    4770         Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
    4771         return rcStrict;
    4772     }
    4773 
    4774     /*
    4775      * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
    4776      */
    4777     if (enmTaskSwitch != IEMTASKSWITCH_IRET)
    4778     {
    4779         rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
    4780                              pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
    4781         if (rcStrict != VINF_SUCCESS)
    4782         {
    4783             Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
    4784                  enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
    4785             return rcStrict;
    4786         }
    4787 
    4788         /* Check that the descriptor indicates the new TSS is available (not busy). */
    4789         AssertMsg(   pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
    4790                   || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
    4791                      ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
    4792 
    4793         pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
    4794         rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
    4795         if (rcStrict != VINF_SUCCESS)
    4796         {
    4797             Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
    4798                  enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
    4799             return rcStrict;
    4800         }
    4801     }
    4802 
    4803     /*
    4804      * From this point on, we're technically in the new task.  Exceptions raised while switching
    4805      * are deferred until the task switch completes, but are delivered before any instruction in the new task executes.
    4806      */
    4807     pVCpu->cpum.GstCtx.tr.Sel      = SelTSS;
    4808     pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
    4809     pVCpu->cpum.GstCtx.tr.fFlags   = CPUMSELREG_FLAGS_VALID;
    4810     pVCpu->cpum.GstCtx.tr.Attr.u   = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
    4811     pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
    4812     pVCpu->cpum.GstCtx.tr.u64Base  = X86DESC_BASE(&pNewDescTSS->Legacy);
    4813     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
    4814 
    4815     /* Set the busy bit in TR. */
    4816     pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
    4817 
    4818     /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
    4819     if (   enmTaskSwitch == IEMTASKSWITCH_CALL
    4820         || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
    4821     {
    4822         uNewEflags |= X86_EFL_NT;
    4823     }
    4824 
    4825     pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL;     /** @todo Should we clear DR7.LE bit too? */
    4826     pVCpu->cpum.GstCtx.cr0   |= X86_CR0_TS;
    4827     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
    4828 
    4829     pVCpu->cpum.GstCtx.eip    = uNewEip;
    4830     pVCpu->cpum.GstCtx.eax    = uNewEax;
    4831     pVCpu->cpum.GstCtx.ecx    = uNewEcx;
    4832     pVCpu->cpum.GstCtx.edx    = uNewEdx;
    4833     pVCpu->cpum.GstCtx.ebx    = uNewEbx;
    4834     pVCpu->cpum.GstCtx.esp    = uNewEsp;
    4835     pVCpu->cpum.GstCtx.ebp    = uNewEbp;
    4836     pVCpu->cpum.GstCtx.esi    = uNewEsi;
    4837     pVCpu->cpum.GstCtx.edi    = uNewEdi;
    4838 
    4839     uNewEflags &= X86_EFL_LIVE_MASK;
    4840     uNewEflags |= X86_EFL_RA1_MASK;
    4841     IEMMISC_SET_EFL(pVCpu, uNewEflags);
    4842 
    4843     /*
    4844      * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
    4845      * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
    4846      * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
    4847      */
    4848     pVCpu->cpum.GstCtx.es.Sel       = uNewES;
    4849     pVCpu->cpum.GstCtx.es.Attr.u   &= ~X86DESCATTR_P;
    4850 
    4851     pVCpu->cpum.GstCtx.cs.Sel       = uNewCS;
    4852     pVCpu->cpum.GstCtx.cs.Attr.u   &= ~X86DESCATTR_P;
    4853 
    4854     pVCpu->cpum.GstCtx.ss.Sel       = uNewSS;
    4855     pVCpu->cpum.GstCtx.ss.Attr.u   &= ~X86DESCATTR_P;
    4856 
    4857     pVCpu->cpum.GstCtx.ds.Sel       = uNewDS;
    4858     pVCpu->cpum.GstCtx.ds.Attr.u   &= ~X86DESCATTR_P;
    4859 
    4860     pVCpu->cpum.GstCtx.fs.Sel       = uNewFS;
    4861     pVCpu->cpum.GstCtx.fs.Attr.u   &= ~X86DESCATTR_P;
    4862 
    4863     pVCpu->cpum.GstCtx.gs.Sel       = uNewGS;
    4864     pVCpu->cpum.GstCtx.gs.Attr.u   &= ~X86DESCATTR_P;
    4865     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
    4866 
    4867     pVCpu->cpum.GstCtx.ldtr.Sel     = uNewLdt;
    4868     pVCpu->cpum.GstCtx.ldtr.fFlags  = CPUMSELREG_FLAGS_STALE;
    4869     pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
    4870     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
    4871 
    4872     if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    4873     {
    4874         pVCpu->cpum.GstCtx.es.Attr.u   |= X86DESCATTR_UNUSABLE;
    4875         pVCpu->cpum.GstCtx.cs.Attr.u   |= X86DESCATTR_UNUSABLE;
    4876         pVCpu->cpum.GstCtx.ss.Attr.u   |= X86DESCATTR_UNUSABLE;
    4877         pVCpu->cpum.GstCtx.ds.Attr.u   |= X86DESCATTR_UNUSABLE;
    4878         pVCpu->cpum.GstCtx.fs.Attr.u   |= X86DESCATTR_UNUSABLE;
    4879         pVCpu->cpum.GstCtx.gs.Attr.u   |= X86DESCATTR_UNUSABLE;
    4880         pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
    4881     }
    4882 
    4883     /*
    4884      * Switch CR3 for the new task.
    4885      */
    4886     if (   fIsNewTSS386
    4887         && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
    4888     {
    4889         /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
    4890         int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
    4891         AssertRCSuccessReturn(rc, rc);
    4892 
    4893         /* Inform PGM. */
    4894         /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
    4895         rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
    4896         AssertRCReturn(rc, rc);
    4897         /* ignore informational status codes */
    4898 
    4899         CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
    4900     }
    4901 
    4902     /*
    4903      * Switch LDTR for the new task.
    4904      */
    4905     if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
    4906         iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
    4907     else
    4908     {
    4909         Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);   /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
    4910 
    4911         IEMSELDESC DescNewLdt;
    4912         rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
    4913         if (rcStrict != VINF_SUCCESS)
    4914         {
    4915             Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
    4916                  uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
    4917             return rcStrict;
    4918         }
    4919         if (   !DescNewLdt.Legacy.Gen.u1Present
    4920             ||  DescNewLdt.Legacy.Gen.u1DescType
    4921             ||  DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
    4922         {
    4923             Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
    4924                  uNewLdt, DescNewLdt.Legacy.u));
    4925             return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
    4926         }
    4927 
    4928         pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
    4929         pVCpu->cpum.GstCtx.ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
    4930         pVCpu->cpum.GstCtx.ldtr.u64Base  = X86DESC_BASE(&DescNewLdt.Legacy);
    4931         pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
    4932         pVCpu->cpum.GstCtx.ldtr.Attr.u   = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
    4933         if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    4934             pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
    4935         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    4936     }
    4937 
    4938     IEMSELDESC DescSS;
    4939     if (IEM_IS_V86_MODE(pVCpu))
    4940     {
    4941         pVCpu->iem.s.uCpl = 3;
    4942         iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
    4943         iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
    4944         iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
    4945         iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
    4946         iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
    4947         iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
    4948 
    4949         /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
    4950         DescSS.Legacy.u = 0;
    4951         DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
    4952         DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
    4953         DescSS.Legacy.Gen.u16BaseLow  = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
    4954         DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
    4955         DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
    4956         DescSS.Legacy.Gen.u4Type      = X86_SEL_TYPE_RW_ACC;
    4957         DescSS.Legacy.Gen.u2Dpl       = 3;
    4958     }
    4959     else
    4960     {
    4961         uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
    4962 
    4963         /*
    4964          * Load the stack segment for the new task.
    4965          */
    4966         if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
    4967         {
    4968             Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
    4969             return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
    4970         }
    4971 
    4972         /* Fetch the descriptor. */
    4973         rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
    4974         if (rcStrict != VINF_SUCCESS)
    4975         {
    4976             Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
    4977                  VBOXSTRICTRC_VAL(rcStrict)));
    4978             return rcStrict;
    4979         }
    4980 
    4981         /* SS must be a data segment and writable. */
    4982         if (    !DescSS.Legacy.Gen.u1DescType
    4983             ||  (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
    4984             || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
    4985         {
    4986             Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
    4987                  uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
    4988             return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
    4989         }
    4990 
    4991         /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
    4992         if (   (uNewSS & X86_SEL_RPL) != uNewCpl
    4993             || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
    4994         {
    4995             Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
    4996                  uNewCpl));
    4997             return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
    4998         }
    4999 
    5000         /* Is it there? */
    5001         if (!DescSS.Legacy.Gen.u1Present)
    5002         {
    5003             Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
    5004             return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
    5005         }
    5006 
    5007         uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
    5008         uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
    5009 
    5010         /* Set the accessed bit before committing the result into SS. */
    5011         if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    5012         {
    5013             rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
    5014             if (rcStrict != VINF_SUCCESS)
    5015                 return rcStrict;
    5016             DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    5017         }
    5018 
    5019         /* Commit SS. */
    5020         pVCpu->cpum.GstCtx.ss.Sel      = uNewSS;
    5021         pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
    5022         pVCpu->cpum.GstCtx.ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
    5023         pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
    5024         pVCpu->cpum.GstCtx.ss.u64Base  = u64Base;
    5025         pVCpu->cpum.GstCtx.ss.fFlags   = CPUMSELREG_FLAGS_VALID;
    5026         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    5027 
    5028         /* CPL has changed, update IEM before loading rest of segments. */
    5029         pVCpu->iem.s.uCpl = uNewCpl;
    5030 
    5031         /*
    5032          * Load the data segments for the new task.
    5033          */
    5034         rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
    5035         if (rcStrict != VINF_SUCCESS)
    5036             return rcStrict;
    5037         rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
    5038         if (rcStrict != VINF_SUCCESS)
    5039             return rcStrict;
    5040         rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
    5041         if (rcStrict != VINF_SUCCESS)
    5042             return rcStrict;
    5043         rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
    5044         if (rcStrict != VINF_SUCCESS)
    5045             return rcStrict;
    5046 
    5047         /*
    5048          * Load the code segment for the new task.
    5049          */
    5050         if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
    5051         {
    5052             Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
    5053             return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
    5054         }
    5055 
    5056         /* Fetch the descriptor. */
    5057         IEMSELDESC DescCS;
    5058         rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
    5059         if (rcStrict != VINF_SUCCESS)
    5060         {
    5061             Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
    5062             return rcStrict;
    5063         }
    5064 
    5065         /* CS must be a code segment. */
    5066         if (   !DescCS.Legacy.Gen.u1DescType
    5067             || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
    5068         {
    5069             Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
    5070                  DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
    5071             return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
    5072         }
    5073 
    5074         /* For conforming CS, DPL must be less than or equal to the RPL. */
    5075         if (   (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
    5076             && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
    5077         {
     5078             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
    5079                  DescCS.Legacy.Gen.u2Dpl));
    5080             return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
    5081         }
    5082 
    5083         /* For non-conforming CS, DPL must match RPL. */
    5084         if (   !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
    5085             && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
    5086         {
     5087             Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
    5088                  DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
    5089             return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
    5090         }
    5091 
    5092         /* Is it there? */
    5093         if (!DescCS.Legacy.Gen.u1Present)
    5094         {
    5095             Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
    5096             return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
    5097         }
    5098 
    5099         cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
    5100         u64Base = X86DESC_BASE(&DescCS.Legacy);
    5101 
    5102         /* Set the accessed bit before committing the result into CS. */
    5103         if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    5104         {
    5105             rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
    5106             if (rcStrict != VINF_SUCCESS)
    5107                 return rcStrict;
    5108             DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    5109         }
    5110 
    5111         /* Commit CS. */
    5112         pVCpu->cpum.GstCtx.cs.Sel      = uNewCS;
    5113         pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
    5114         pVCpu->cpum.GstCtx.cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
    5115         pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
    5116         pVCpu->cpum.GstCtx.cs.u64Base  = u64Base;
    5117         pVCpu->cpum.GstCtx.cs.fFlags   = CPUMSELREG_FLAGS_VALID;
    5118         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    5119     }
    5120 
    5121     /** @todo Debug trap. */
    5122     if (fIsNewTSS386 && fNewDebugTrap)
    5123         Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
    5124 
    5125     /*
    5126      * Construct the error code masks based on what caused this task switch.
    5127      * See Intel Instruction reference for INT.
    5128      */
    5129     uint16_t uExt;
    5130     if (   enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
    5131         && (   !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
    5132             ||  (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
    5133     {
    5134         uExt = 1;
    5135     }
    5136     else
    5137         uExt = 0;
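              /* Reference note: a protected-mode error code is laid out as bits [15:3]
                 = selector/vector index, bit 2 = TI (selector is in the LDT), bit 1
                 = IDT (index refers to the IDT) and bit 0 = EXT (external event).
                 uExt supplies that EXT bit below whenever the event was not caused
                 by a software interrupt instruction. */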
    5138 
    5139     /*
    5140      * Push any error code on to the new stack.
    5141      */
    5142     if (fFlags & IEM_XCPT_FLAGS_ERR)
    5143     {
    5144         Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
    5145         uint32_t      cbLimitSS    = X86DESC_LIMIT_G(&DescSS.Legacy);
    5146         uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
    5147 
    5148         /* Check that there is sufficient space on the stack. */
    5149         /** @todo Factor out segment limit checking for normal/expand down segments
    5150          *        into a separate function. */
    5151         if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
    5152         {
    5153             if (   pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
    5154                 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
    5155             {
    5156                 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
    5157                 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
    5158                      pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
    5159                 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
    5160             }
    5161         }
    5162         else
    5163         {
    5164             if (   pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
    5165                 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
    5166             {
    5167                 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
    5168                      pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
    5169                 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
    5170             }
    5171         }
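                  /* Example (expand-down): for a 16-bit stack segment with limit 0x0fff
                     the valid offsets are 0x1000..0xffff, i.e. the limit is the lowest
                     invalid offset - hence the inverted checks above. */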
     5172 
    5174         if (fIsNewTSS386)
    5175             rcStrict = iemMemStackPushU32(pVCpu, uErr);
    5176         else
    5177             rcStrict = iemMemStackPushU16(pVCpu, uErr);
    5178         if (rcStrict != VINF_SUCCESS)
    5179         {
    5180             Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
    5181                  fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
    5182             return rcStrict;
    5183         }
    5184     }
    5185 
    5186     /* Check the new EIP against the new CS limit. */
    5187     if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
    5188     {
    5189         Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
    5190              pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
    5191         /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
    5192         return iemRaiseGeneralProtectionFault(pVCpu, uExt);
    5193     }
    5194 
    5195     Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
    5196          pVCpu->cpum.GstCtx.ss.Sel));
    5197     return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
    5198 }
    5199 
    5200 
    5201 /**
    5202  * Implements exceptions and interrupts for protected mode.
    5203  *
    5204  * @returns VBox strict status code.
    5205  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    5206  * @param   cbInstr         The number of bytes to offset rIP by in the return
    5207  *                          address.
    5208  * @param   u8Vector        The interrupt / exception vector number.
    5209  * @param   fFlags          The flags.
    5210  * @param   uErr            The error value if IEM_XCPT_FLAGS_ERR is set.
    5211  * @param   uCr2            The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
    5212  */
    5213 IEM_STATIC VBOXSTRICTRC
    5214 iemRaiseXcptOrIntInProtMode(PVMCPUCC      pVCpu,
    5215                             uint8_t     cbInstr,
    5216                             uint8_t     u8Vector,
    5217                             uint32_t    fFlags,
    5218                             uint16_t    uErr,
    5219                             uint64_t    uCr2)
    5220 {
    5221     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    5222 
    5223     /*
    5224      * Read the IDT entry.
    5225      */
    5226     if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
    5227     {
    5228         Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
    5229         return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    5230     }
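              /* Reference note: error codes for faults sourced from the IDT set bit 1
                 and put the vector number in the index field, e.g. for vector 0x0e:
                     X86_TRAP_ERR_IDT | ((uint16_t)0x0e << X86_TRAP_ERR_SEL_SHIFT) == 0x72 */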
    5231     X86DESC Idte;
    5232     VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
    5233                                               pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
    5234     if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    5235     {
    5236         Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
    5237         return rcStrict;
    5238     }
    5239     Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
    5240          u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
    5241          Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
    5242 
    5243     /*
    5244      * Check the descriptor type, DPL and such.
    5245      * ASSUMES this is done in the same order as described for call-gate calls.
    5246      */
    5247     if (Idte.Gate.u1DescType)
    5248     {
    5249         Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
    5250         return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    5251     }
    5252     bool     fTaskGate   = false;
    5253     uint8_t  f32BitGate  = true;
    5254     uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
    5255     switch (Idte.Gate.u4Type)
    5256     {
    5257         case X86_SEL_TYPE_SYS_UNDEFINED:
    5258         case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
    5259         case X86_SEL_TYPE_SYS_LDT:
    5260         case X86_SEL_TYPE_SYS_286_TSS_BUSY:
    5261         case X86_SEL_TYPE_SYS_286_CALL_GATE:
    5262         case X86_SEL_TYPE_SYS_UNDEFINED2:
    5263         case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
    5264         case X86_SEL_TYPE_SYS_UNDEFINED3:
    5265         case X86_SEL_TYPE_SYS_386_TSS_BUSY:
    5266         case X86_SEL_TYPE_SYS_386_CALL_GATE:
    5267         case X86_SEL_TYPE_SYS_UNDEFINED4:
    5268         {
    5269             /** @todo check what actually happens when the type is wrong...
    5270              *        esp. call gates. */
    5271             Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
    5272             return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    5273         }
    5274 
    5275         case X86_SEL_TYPE_SYS_286_INT_GATE:
    5276             f32BitGate = false;
    5277             RT_FALL_THRU();
    5278         case X86_SEL_TYPE_SYS_386_INT_GATE:
    5279             fEflToClear |= X86_EFL_IF;
    5280             break;
    5281 
    5282         case X86_SEL_TYPE_SYS_TASK_GATE:
    5283             fTaskGate = true;
    5284 #ifndef IEM_IMPLEMENTS_TASKSWITCH
    5285             IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
    5286 #endif
    5287             break;
    5288 
     5289         case X86_SEL_TYPE_SYS_286_TRAP_GATE:
     5290             f32BitGate = false;
                      RT_FALL_THRU();
    5291         case X86_SEL_TYPE_SYS_386_TRAP_GATE:
    5292             break;
    5293 
    5294         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    5295     }
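              /* Note: only interrupt gates add X86_EFL_IF to fEflToClear, so their
                 handlers start with interrupts disabled; trap gates leave IF as the
                 interrupted context had it.  TF, NT, RF and VM are cleared either way. */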
    5296 
    5297     /* Check DPL against CPL if applicable. */
    5298     if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
    5299     {
    5300         if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
    5301         {
    5302             Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
    5303             return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    5304         }
    5305     }
    5306 
    5307     /* Is it there? */
    5308     if (!Idte.Gate.u1Present)
    5309     {
    5310         Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
    5311         return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    5312     }
    5313 
    5314     /* Is it a task-gate? */
    5315     if (fTaskGate)
    5316     {
    5317         /*
    5318          * Construct the error code masks based on what caused this task switch.
    5319          * See Intel Instruction reference for INT.
    5320          */
    5321         uint16_t const uExt     = (    (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
    5322                                    && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
    5323         uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
    5324         RTSEL          SelTSS   = Idte.Gate.u16Sel;
    5325 
    5326         /*
    5327          * Fetch the TSS descriptor in the GDT.
    5328          */
    5329         IEMSELDESC DescTSS;
    5330         rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
    5331         if (rcStrict != VINF_SUCCESS)
    5332         {
    5333             Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
    5334                  VBOXSTRICTRC_VAL(rcStrict)));
    5335             return rcStrict;
    5336         }
    5337 
    5338         /* The TSS descriptor must be a system segment and be available (not busy). */
    5339         if (   DescTSS.Legacy.Gen.u1DescType
    5340             || (   DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
    5341                 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
    5342         {
    5343             Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
    5344                  u8Vector, SelTSS, DescTSS.Legacy.au64));
    5345             return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
    5346         }
    5347 
    5348         /* The TSS must be present. */
    5349         if (!DescTSS.Legacy.Gen.u1Present)
    5350         {
    5351             Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
    5352             return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
    5353         }
    5354 
    5355         /* Do the actual task switch. */
    5356         return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
    5357                              (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
    5358                              fFlags, uErr, uCr2, SelTSS, &DescTSS);
    5359     }
    5360 
    5361     /* A null CS is bad. */
    5362     RTSEL NewCS = Idte.Gate.u16Sel;
    5363     if (!(NewCS & X86_SEL_MASK_OFF_RPL))
    5364     {
    5365         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
    5366         return iemRaiseGeneralProtectionFault0(pVCpu);
    5367     }
    5368 
    5369     /* Fetch the descriptor for the new CS. */
    5370     IEMSELDESC DescCS;
    5371     rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
    5372     if (rcStrict != VINF_SUCCESS)
    5373     {
    5374         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
    5375         return rcStrict;
    5376     }
    5377 
    5378     /* Must be a code segment. */
    5379     if (!DescCS.Legacy.Gen.u1DescType)
    5380     {
    5381         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
    5382         return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    5383     }
    5384     if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
    5385     {
    5386         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
    5387         return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    5388     }
    5389 
    5390     /* Don't allow lowering the privilege level. */
    5391     /** @todo Does the lowering of privileges apply to software interrupts
    5392      *        only?  This has bearings on the more-privileged or
    5393      *        same-privilege stack behavior further down.  A testcase would
    5394      *        be nice. */
    5395     if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
    5396     {
    5397         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
    5398              u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
    5399         return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    5400     }
    5401 
    5402     /* Make sure the selector is present. */
    5403     if (!DescCS.Legacy.Gen.u1Present)
    5404     {
    5405         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
    5406         return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
    5407     }
    5408 
    5409     /* Check the new EIP against the new CS limit. */
    5410     uint32_t const uNewEip =    Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
    5411                              || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
    5412                            ? Idte.Gate.u16OffsetLow
    5413                            : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
    5414     uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
    5415     if (uNewEip > cbLimitCS)
    5416     {
    5417         Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
    5418              u8Vector, uNewEip, cbLimitCS, NewCS));
    5419         return iemRaiseGeneralProtectionFault(pVCpu, 0);
    5420     }
    5421     Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
    5422 
    5423     /* Calc the flag image to push. */
    5424     uint32_t        fEfl    = IEMMISC_GET_EFL(pVCpu);
    5425     if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
    5426         fEfl &= ~X86_EFL_RF;
    5427     else
    5428         fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
    5429 
    5430     /* From V8086 mode only go to CPL 0. */
    5431     uint8_t const   uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
    5432                             ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
    5433     if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
    5434     {
    5435         Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
    5436         return iemRaiseGeneralProtectionFault(pVCpu, 0);
    5437     }
    5438 
    5439     /*
    5440      * If the privilege level changes, we need to get a new stack from the TSS.
    5441      * This in turns means validating the new SS and ESP...
    5442      */
    5443     if (uNewCpl != pVCpu->iem.s.uCpl)
    5444     {
    5445         RTSEL    NewSS;
    5446         uint32_t uNewEsp;
    5447         rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
    5448         if (rcStrict != VINF_SUCCESS)
    5449             return rcStrict;
    5450 
    5451         IEMSELDESC DescSS;
    5452         rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
    5453         if (rcStrict != VINF_SUCCESS)
    5454             return rcStrict;
    5455         /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
    5456         if (!DescSS.Legacy.Gen.u1DefBig)
    5457         {
    5458             Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
    5459             uNewEsp = (uint16_t)uNewEsp;
    5460         }
    5461 
    5462         Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
    5463 
    5464         /* Check that there is sufficient space for the stack frame. */
    5465         uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
    5466         uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
    5467                                    ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
    5468                                    : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
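                  /* Worked example: a 32-bit gate (f32BitGate=1) without V86 pushes
                     EIP, CS, EFLAGS, ESP and SS, i.e. 10 << 1 = 20 bytes, or 24 with
                     an error code; the V86 variants append ES, DS, FS and GS for
                     36/40 bytes.  A 16-bit gate halves all of these. */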
    5469 
    5470         if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
    5471         {
    5472             if (   uNewEsp - 1 > cbLimitSS
    5473                 || uNewEsp < cbStackFrame)
    5474             {
    5475                 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
    5476                      u8Vector, NewSS, uNewEsp, cbStackFrame));
    5477                 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
    5478             }
    5479         }
    5480         else
    5481         {
    5482             if (   uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
    5483                 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
    5484             {
    5485                 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
    5486                      u8Vector, NewSS, uNewEsp, cbStackFrame));
    5487                 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
    5488             }
    5489         }
    5490 
    5491         /*
    5492          * Start making changes.
    5493          */
    5494 
    5495         /* Set the new CPL so that stack accesses use it. */
    5496         uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
    5497         pVCpu->iem.s.uCpl = uNewCpl;
    5498 
    5499         /* Create the stack frame. */
    5500         RTPTRUNION uStackFrame;
    5501         rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
    5502                              uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
    5503         if (rcStrict != VINF_SUCCESS)
    5504             return rcStrict;
    5505         void * const pvStackFrame = uStackFrame.pv;
    5506         if (f32BitGate)
    5507         {
    5508             if (fFlags & IEM_XCPT_FLAGS_ERR)
    5509                 *uStackFrame.pu32++ = uErr;
    5510             uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
    5511             uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
    5512             uStackFrame.pu32[2] = fEfl;
    5513             uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
    5514             uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
    5515             Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
    5516             if (fEfl & X86_EFL_VM)
    5517             {
    5518                 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
    5519                 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
    5520                 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
    5521                 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
    5522                 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
    5523             }
    5524         }
    5525         else
    5526         {
    5527             if (fFlags & IEM_XCPT_FLAGS_ERR)
    5528                 *uStackFrame.pu16++ = uErr;
    5529             uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
    5530             uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
    5531             uStackFrame.pu16[2] = fEfl;
    5532             uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
    5533             uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
    5534             Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
    5535             if (fEfl & X86_EFL_VM)
    5536             {
    5537                 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
    5538                 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
    5539                 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
    5540                 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
    5541                 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
    5542             }
    5543         }
    5544         rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
    5545         if (rcStrict != VINF_SUCCESS)
    5546             return rcStrict;
    5547 
    5548         /* Mark the selectors 'accessed' (hope this is the correct time). */
     5549         /** @todo testcase: exactly _when_ are the accessed bits set - before or
    5550          *        after pushing the stack frame? (Write protect the gdt + stack to
    5551          *        find out.) */
    5552         if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    5553         {
    5554             rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
    5555             if (rcStrict != VINF_SUCCESS)
    5556                 return rcStrict;
    5557             DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    5558         }
    5559 
    5560         if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    5561         {
    5562             rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
    5563             if (rcStrict != VINF_SUCCESS)
    5564                 return rcStrict;
    5565             DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    5566         }
    5567 
    5568         /*
     5569          * Start committing the register changes (joins with the DPL=CPL branch).
    5570          */
    5571         pVCpu->cpum.GstCtx.ss.Sel            = NewSS;
    5572         pVCpu->cpum.GstCtx.ss.ValidSel       = NewSS;
    5573         pVCpu->cpum.GstCtx.ss.fFlags         = CPUMSELREG_FLAGS_VALID;
    5574         pVCpu->cpum.GstCtx.ss.u32Limit       = cbLimitSS;
    5575         pVCpu->cpum.GstCtx.ss.u64Base        = X86DESC_BASE(&DescSS.Legacy);
    5576         pVCpu->cpum.GstCtx.ss.Attr.u         = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
    5577         /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
    5578          *        16-bit handler, the high word of ESP remains unchanged (i.e. only
    5579          *        SP is loaded).
    5580          *  Need to check the other combinations too:
    5581          *      - 16-bit TSS, 32-bit handler
    5582          *      - 32-bit TSS, 16-bit handler */
    5583         if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
    5584             pVCpu->cpum.GstCtx.sp            = (uint16_t)(uNewEsp - cbStackFrame);
    5585         else
    5586             pVCpu->cpum.GstCtx.rsp           = uNewEsp - cbStackFrame;
    5587 
    5588         if (fEfl & X86_EFL_VM)
    5589         {
    5590             iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
    5591             iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
    5592             iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
    5593             iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
    5594         }
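                  /* Note: when the interrupted code was in V86 mode, the data selector
                     registers are nulled here so the protected-mode handler cannot
                     inadvertently reuse the V86 segment values. */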
    5595     }
    5596     /*
     5597      * Same privilege, no stack change, and a smaller stack frame.
    5598      */
    5599     else
    5600     {
    5601         uint64_t        uNewRsp;
    5602         RTPTRUNION      uStackFrame;
    5603         uint8_t const   cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
    5604         rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
    5605         if (rcStrict != VINF_SUCCESS)
    5606             return rcStrict;
    5607         void * const pvStackFrame = uStackFrame.pv;
    5608 
    5609         if (f32BitGate)
    5610         {
    5611             if (fFlags & IEM_XCPT_FLAGS_ERR)
    5612                 *uStackFrame.pu32++ = uErr;
    5613             uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
    5614             uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
    5615             uStackFrame.pu32[2] = fEfl;
    5616         }
    5617         else
    5618         {
    5619             if (fFlags & IEM_XCPT_FLAGS_ERR)
    5620                 *uStackFrame.pu16++ = uErr;
    5621             uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
    5622             uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
    5623             uStackFrame.pu16[2] = fEfl;
    5624         }
    5625         rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
    5626         if (rcStrict != VINF_SUCCESS)
    5627             return rcStrict;
    5628 
    5629         /* Mark the CS selector as 'accessed'. */
    5630         if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    5631         {
    5632             rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
    5633             if (rcStrict != VINF_SUCCESS)
    5634                 return rcStrict;
    5635             DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    5636         }
    5637 
    5638         /*
    5639          * Start committing the register changes (joins with the other branch).
    5640          */
    5641         pVCpu->cpum.GstCtx.rsp = uNewRsp;
    5642     }
    5643 
    5644     /* ... register committing continues. */
    5645     pVCpu->cpum.GstCtx.cs.Sel            = (NewCS & ~X86_SEL_RPL) | uNewCpl;
    5646     pVCpu->cpum.GstCtx.cs.ValidSel       = (NewCS & ~X86_SEL_RPL) | uNewCpl;
    5647     pVCpu->cpum.GstCtx.cs.fFlags         = CPUMSELREG_FLAGS_VALID;
    5648     pVCpu->cpum.GstCtx.cs.u32Limit       = cbLimitCS;
    5649     pVCpu->cpum.GstCtx.cs.u64Base        = X86DESC_BASE(&DescCS.Legacy);
    5650     pVCpu->cpum.GstCtx.cs.Attr.u         = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
    5651 
    5652     pVCpu->cpum.GstCtx.rip               = uNewEip;  /* (The entire register is modified, see pe16_32 bs3kit tests.) */
    5653     fEfl &= ~fEflToClear;
    5654     IEMMISC_SET_EFL(pVCpu, fEfl);
    5655 
    5656     if (fFlags & IEM_XCPT_FLAGS_CR2)
    5657         pVCpu->cpum.GstCtx.cr2 = uCr2;
    5658 
    5659     if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
    5660         iemRaiseXcptAdjustState(pVCpu, u8Vector);
    5661 
    5662     return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
    5663 }
    5664 
    5665 
    5666 /**
    5667  * Implements exceptions and interrupts for long mode.
    5668  *
    5669  * @returns VBox strict status code.
    5670  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    5671  * @param   cbInstr         The number of bytes to offset rIP by in the return
    5672  *                          address.
    5673  * @param   u8Vector        The interrupt / exception vector number.
    5674  * @param   fFlags          The flags.
    5675  * @param   uErr            The error value if IEM_XCPT_FLAGS_ERR is set.
    5676  * @param   uCr2            The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
    5677  */
    5678 IEM_STATIC VBOXSTRICTRC
    5679 iemRaiseXcptOrIntInLongMode(PVMCPUCC      pVCpu,
    5680                             uint8_t     cbInstr,
    5681                             uint8_t     u8Vector,
    5682                             uint32_t    fFlags,
    5683                             uint16_t    uErr,
    5684                             uint64_t    uCr2)
    5685 {
    5686     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    5687 
    5688     /*
    5689      * Read the IDT entry.
    5690      */
    5691     uint16_t offIdt = (uint16_t)u8Vector << 4;
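              /* Long-mode IDT entries are 16 bytes each (hence the << 4); the gate is
                 fetched below as two separate qwords. */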
    5692     if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
    5693     {
    5694         Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
    5695         return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    5696     }
    5697     X86DESC64 Idte;
    5698 #ifdef _MSC_VER /* Shut up silly compiler warning. */
    5699     Idte.au64[0] = 0;
    5700     Idte.au64[1] = 0;
    5701 #endif
    5702     VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
    5703     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    5704         rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
    5705     if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    5706     {
    5707         Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
    5708         return rcStrict;
    5709     }
    5710     Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
    5711          u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
    5712          Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
    5713 
    5714     /*
    5715      * Check the descriptor type, DPL and such.
    5716      * ASSUMES this is done in the same order as described for call-gate calls.
    5717      */
    5718     if (Idte.Gate.u1DescType)
    5719     {
    5720         Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
    5721         return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    5722     }
    5723     uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
    5724     switch (Idte.Gate.u4Type)
    5725     {
    5726         case AMD64_SEL_TYPE_SYS_INT_GATE:
    5727             fEflToClear |= X86_EFL_IF;
    5728             break;
    5729         case AMD64_SEL_TYPE_SYS_TRAP_GATE:
    5730             break;
    5731 
    5732         default:
    5733             Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
    5734             return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    5735     }
    5736 
    5737     /* Check DPL against CPL if applicable. */
    5738     if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
    5739     {
    5740         if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
    5741         {
    5742             Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
    5743             return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    5744         }
    5745     }
    5746 
    5747     /* Is it there? */
    5748     if (!Idte.Gate.u1Present)
    5749     {
    5750         Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
    5751         return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
    5752     }
    5753 
    5754     /* A null CS is bad. */
    5755     RTSEL NewCS = Idte.Gate.u16Sel;
    5756     if (!(NewCS & X86_SEL_MASK_OFF_RPL))
    5757     {
    5758         Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
    5759         return iemRaiseGeneralProtectionFault0(pVCpu);
    5760     }
    5761 
    5762     /* Fetch the descriptor for the new CS. */
    5763     IEMSELDESC DescCS;
    5764     rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
    5765     if (rcStrict != VINF_SUCCESS)
    5766     {
    5767         Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
    5768         return rcStrict;
    5769     }
    5770 
    5771     /* Must be a 64-bit code segment. */
    5772     if (!DescCS.Long.Gen.u1DescType)
    5773     {
    5774         Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
    5775         return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    5776     }
    5777     if (   !DescCS.Long.Gen.u1Long
    5778         || DescCS.Long.Gen.u1DefBig
    5779         || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
    5780     {
    5781         Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
    5782              u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
    5783         return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    5784     }
    5785 
    5786     /* Don't allow lowering the privilege level.  For non-conforming CS
    5787        selectors, the CS.DPL sets the privilege level the trap/interrupt
    5788        handler runs at.  For conforming CS selectors, the CPL remains
    5789        unchanged, but the CS.DPL must be <= CPL. */
    5790     /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
    5791      *        when CPU in Ring-0. Result \#GP?  */
    5792     if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
    5793     {
    5794         Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
    5795              u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
    5796         return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
    5797     }
    5798 
    5799 
    5800     /* Make sure the selector is present. */
    5801     if (!DescCS.Legacy.Gen.u1Present)
    5802     {
    5803         Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
    5804         return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
    5805     }
    5806 
    5807     /* Check that the new RIP is canonical. */
    5808     uint64_t const uNewRip = Idte.Gate.u16OffsetLow
    5809                            | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
    5810                            | ((uint64_t)Idte.Gate.u32OffsetTop  << 32);
    5811     if (!IEM_IS_CANONICAL(uNewRip))
    5812     {
    5813         Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
    5814         return iemRaiseGeneralProtectionFault0(pVCpu);
    5815     }
    5816 
    5817     /*
    5818      * If the privilege level changes or if the IST isn't zero, we need to get
    5819      * a new stack from the TSS.
    5820      */
    5821     uint64_t        uNewRsp;
    5822     uint8_t const   uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
    5823                             ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
    5824     if (   uNewCpl != pVCpu->iem.s.uCpl
    5825         || Idte.Gate.u3IST != 0)
    5826     {
    5827         rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
    5828         if (rcStrict != VINF_SUCCESS)
    5829             return rcStrict;
    5830     }
    5831     else
    5832         uNewRsp = pVCpu->cpum.GstCtx.rsp;
    5833     uNewRsp &= ~(uint64_t)0xf;
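              /* In long mode the CPU aligns the new stack pointer on a 16-byte boundary
                 before pushing the frame (AMD64 interrupt/exception delivery). */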
    5834 
    5835     /*
    5836      * Calc the flag image to push.
    5837      */
    5838     uint32_t        fEfl    = IEMMISC_GET_EFL(pVCpu);
    5839     if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
    5840         fEfl &= ~X86_EFL_RF;
    5841     else
    5842         fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
    5843 
    5844     /*
    5845      * Start making changes.
    5846      */
    5847     /* Set the new CPL so that stack accesses use it. */
    5848     uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
    5849     pVCpu->iem.s.uCpl = uNewCpl;
    5850 
    5851     /* Create the stack frame. */
    5852     uint32_t   cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
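              /* The 64-bit frame always holds the full five qwords - SS, RSP, RFLAGS, CS
                 and RIP - plus an optional error code, even when neither the privilege
                 level nor the stack changes. */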
    5853     RTPTRUNION uStackFrame;
    5854     rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
    5855                          uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
    5856     if (rcStrict != VINF_SUCCESS)
    5857         return rcStrict;
    5858     void * const pvStackFrame = uStackFrame.pv;
    5859 
    5860     if (fFlags & IEM_XCPT_FLAGS_ERR)
    5861         *uStackFrame.pu64++ = uErr;
    5862     uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
    5863     uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
    5864     uStackFrame.pu64[2] = fEfl;
    5865     uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
    5866     uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
    5867     rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
    5868     if (rcStrict != VINF_SUCCESS)
    5869         return rcStrict;
    5870 
     5871     /* Mark the CS selector 'accessed' (hope this is the correct time). */
     5872     /** @todo testcase: exactly _when_ are the accessed bits set - before or
    5873      *        after pushing the stack frame? (Write protect the gdt + stack to
    5874      *        find out.) */
    5875     if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    5876     {
    5877         rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
    5878         if (rcStrict != VINF_SUCCESS)
    5879             return rcStrict;
    5880         DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    5881     }
    5882 
    5883     /*
     5884      * Start committing the register changes.
    5885      */
    5886     /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
    5887      *        hidden registers when interrupting 32-bit or 16-bit code! */
    5888     if (uNewCpl != uOldCpl)
    5889     {
    5890         pVCpu->cpum.GstCtx.ss.Sel        = 0 | uNewCpl;
    5891         pVCpu->cpum.GstCtx.ss.ValidSel   = 0 | uNewCpl;
    5892         pVCpu->cpum.GstCtx.ss.fFlags     = CPUMSELREG_FLAGS_VALID;
    5893         pVCpu->cpum.GstCtx.ss.u32Limit   = UINT32_MAX;
    5894         pVCpu->cpum.GstCtx.ss.u64Base    = 0;
    5895         pVCpu->cpum.GstCtx.ss.Attr.u     = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
    5896     }
    5897     pVCpu->cpum.GstCtx.rsp           = uNewRsp - cbStackFrame;
    5898     pVCpu->cpum.GstCtx.cs.Sel        = (NewCS & ~X86_SEL_RPL) | uNewCpl;
    5899     pVCpu->cpum.GstCtx.cs.ValidSel   = (NewCS & ~X86_SEL_RPL) | uNewCpl;
    5900     pVCpu->cpum.GstCtx.cs.fFlags     = CPUMSELREG_FLAGS_VALID;
    5901     pVCpu->cpum.GstCtx.cs.u32Limit   = X86DESC_LIMIT_G(&DescCS.Legacy);
    5902     pVCpu->cpum.GstCtx.cs.u64Base    = X86DESC_BASE(&DescCS.Legacy);
    5903     pVCpu->cpum.GstCtx.cs.Attr.u     = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
    5904     pVCpu->cpum.GstCtx.rip           = uNewRip;
    5905 
    5906     fEfl &= ~fEflToClear;
    5907     IEMMISC_SET_EFL(pVCpu, fEfl);
    5908 
    5909     if (fFlags & IEM_XCPT_FLAGS_CR2)
    5910         pVCpu->cpum.GstCtx.cr2 = uCr2;
    5911 
    5912     if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
    5913         iemRaiseXcptAdjustState(pVCpu, u8Vector);
    5914 
    5915     return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
    5916 }
    5917 
    5918 
    5919 /**
    5920  * Implements exceptions and interrupts.
    5921  *
     5922  * All exceptions and interrupts go thru this function!
    5923  *
    5924  * @returns VBox strict status code.
    5925  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    5926  * @param   cbInstr         The number of bytes to offset rIP by in the return
    5927  *                          address.
    5928  * @param   u8Vector        The interrupt / exception vector number.
    5929  * @param   fFlags          The flags.
    5930  * @param   uErr            The error value if IEM_XCPT_FLAGS_ERR is set.
    5931  * @param   uCr2            The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
    5932  */
    5933 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
    5934 iemRaiseXcptOrInt(PVMCPUCC    pVCpu,
    5935                   uint8_t     cbInstr,
    5936                   uint8_t     u8Vector,
    5937                   uint32_t    fFlags,
    5938                   uint16_t    uErr,
    5939                   uint64_t    uCr2)
    5940 {
    5941     /*
    5942      * Get all the state that we might need here.
    5943      */
    5944     IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    5945     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
    5946 
    5947 #ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
    5948     /*
    5949      * Flush prefetch buffer
    5950      */
    5951     pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
    5952 #endif
    5953 
    5954     /*
    5955      * Perform the V8086 IOPL check and upgrade the fault without nesting.
    5956      */
    5957     if (   pVCpu->cpum.GstCtx.eflags.Bits.u1VM
    5958         && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
    5959         && (fFlags & (  IEM_XCPT_FLAGS_T_SOFT_INT
    5960                       | IEM_XCPT_FLAGS_BP_INSTR
    5961                       | IEM_XCPT_FLAGS_ICEBP_INSTR
    5962                       | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
    5963         && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
    5964     {
    5965         Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
    5966         fFlags   = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
    5967         u8Vector = X86_XCPT_GP;
    5968         uErr     = 0;
    5969     }
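              /* Reference note: with CR0.PE=1, EFLAGS.VM=1 and IOPL < 3, INT n raises
                 #GP(0) rather than going through the IDT (leaving the VME redirection
                 case aside), which is what the upgrade above models. */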
    5970 #ifdef DBGFTRACE_ENABLED
    5971     RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
    5972                       pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
    5973                       pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
    5974 #endif
    5975 
    5976     /*
    5977      * Evaluate whether NMI blocking should be in effect.
    5978      * Normally, NMI blocking is in effect whenever we inject an NMI.
    5979      */
    5980     bool fBlockNmi;
    5981     if (   u8Vector == X86_XCPT_NMI
    5982         && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
    5983         fBlockNmi = true;
    5984     else
    5985         fBlockNmi = false;
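              /* Reference note: delivering an NMI inhibits further NMIs until the handler
                 executes IRET; VMCPU_FF_BLOCK_NMIS models that and is set further down,
                 though fBlockNmi may be cleared again by the VMX virtual-NMI case below. */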
    5986 
    5987 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    5988     if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    5989     {
    5990         VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
    5991         if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
    5992             return rcStrict0;
    5993 
    5994         /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
    5995         if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
    5996         {
    5997             Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
    5998             fBlockNmi = false;
    5999         }
    6000     }
    6001 #endif
    6002 
    6003 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    6004     if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
    6005     {
    6006         /*
    6007          * If the event is being injected as part of VMRUN, it isn't subject to event
    6008          * intercepts in the nested-guest. However, secondary exceptions that occur
    6009          * during injection of any event -are- subject to exception intercepts.
    6010          *
    6011          * See AMD spec. 15.20 "Event Injection".
    6012          */
    6013         if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
    6014             pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
    6015         else
    6016         {
    6017             /*
    6018              * Check and handle if the event being raised is intercepted.
    6019              */
    6020             VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
    6021             if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
    6022                 return rcStrict0;
    6023         }
    6024     }
    6025 #endif
    6026 
    6027     /*
    6028      * Set NMI blocking if necessary.
    6029      */
    6030     if (   fBlockNmi
    6031         && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
    6032         VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
    6033 
    6034     /*
    6035      * Do recursion accounting.
    6036      */
    6037     uint8_t const  uPrevXcpt = pVCpu->iem.s.uCurXcpt;
    6038     uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
    6039     if (pVCpu->iem.s.cXcptRecursions == 0)
    6040         Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
    6041              u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
    6042     else
    6043     {
    6044         Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
    6045              u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
    6046              pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
    6047 
    6048         if (pVCpu->iem.s.cXcptRecursions >= 4)
    6049         {
    6050 #ifdef DEBUG_bird
    6051             AssertFailed();
    6052 #endif
    6053             IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
    6054         }
    6055 
    6056         /*
    6057          * Evaluate the sequence of recurring events.
    6058          */
    6059         IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
    6060                                                          NULL /* pXcptRaiseInfo */);
    6061         if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
    6062         { /* likely */ }
    6063         else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
    6064         {
    6065             Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
    6066             fFlags   = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
    6067             u8Vector = X86_XCPT_DF;
    6068             uErr     = 0;
    6069 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    6070             /* VMX nested-guest #DF intercept needs to be checked here. */
    6071             if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
    6072             {
    6073                 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
    6074                 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
    6075                     return rcStrict0;
    6076             }
    6077 #endif
    6078             /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
    6079             if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
    6080                 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    6081         }
    6082         else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
    6083         {
    6084             Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
    6085             return iemInitiateCpuShutdown(pVCpu);
    6086         }
    6087         else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
    6088         {
    6089             /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
    6090             Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
    6091             if (   !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
    6092                 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    6093                 return VERR_EM_GUEST_CPU_HANG;
    6094         }
    6095         else
    6096         {
    6097             AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
    6098                              enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
    6099             return VERR_IEM_IPE_9;
    6100         }
    6101 
    6102         /*
     6103          * The 'EXT' bit is set when an exception occurs during delivery of an external
     6104          * event (such as an interrupt or an earlier exception)[1]. The privileged
     6105          * software exception (INT1) also sets the EXT bit[2]. For exceptions generated by
     6106          * software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
    6107          *
    6108          * [1] - Intel spec. 6.13 "Error Code"
    6109          * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
    6110          * [3] - Intel Instruction reference for INT n.
    6111          */
    6112         if (   (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
    6113             && (fFlags & IEM_XCPT_FLAGS_ERR)
    6114             && u8Vector != X86_XCPT_PF
    6115             && u8Vector != X86_XCPT_DF)
    6116         {
    6117             uErr |= X86_TRAP_ERR_EXTERNAL;
    6118         }
    6119     }
    6120 
    6121     pVCpu->iem.s.cXcptRecursions++;
    6122     pVCpu->iem.s.uCurXcpt    = u8Vector;
    6123     pVCpu->iem.s.fCurXcpt    = fFlags;
    6124     pVCpu->iem.s.uCurXcptErr = uErr;
    6125     pVCpu->iem.s.uCurXcptCr2 = uCr2;
    6126 
    6127     /*
    6128      * Extensive logging.
    6129      */
    6130 #if defined(LOG_ENABLED) && defined(IN_RING3)
    6131     if (LogIs3Enabled())
    6132     {
    6133         IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
    6134         PVM     pVM = pVCpu->CTX_SUFF(pVM);
    6135         char    szRegs[4096];
    6136         DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
    6137                         "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
    6138                         "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
    6139                         "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
    6140                         "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
    6141                         "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
    6142                         "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
    6143                         "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
    6144                         "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
    6145                         "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
    6146                         "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
    6147                         "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
    6148                         "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
    6149                         "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
    6150                         "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim}  idtr=%016VR{idtr_base}:%04VR{idtr_lim}  rflags=%08VR{rflags}\n"
    6151                         "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
    6152                         "tr  ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
    6153                         "    sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
    6154                         "        efer=%016VR{efer}\n"
    6155                         "         pat=%016VR{pat}\n"
    6156                         "     sf_mask=%016VR{sf_mask}\n"
    6157                         "krnl_gs_base=%016VR{krnl_gs_base}\n"
    6158                         "       lstar=%016VR{lstar}\n"
    6159                         "        star=%016VR{star} cstar=%016VR{cstar}\n"
    6160                         "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
    6161                         );
    6162 
    6163         char szInstr[256];
    6164         DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
    6165                            DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
    6166                            szInstr, sizeof(szInstr), NULL);
    6167         Log3(("%s%s\n", szRegs, szInstr));
    6168     }
    6169 #endif /* LOG_ENABLED */
    6170 
    6171     /*
    6172      * Call the mode specific worker function.
    6173      */
    6174     VBOXSTRICTRC    rcStrict;
    6175     if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
    6176         rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
    6177     else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
    6178         rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
    6179     else
    6180         rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
    6181 
    6182     /* Flush the prefetch buffer. */
    6183 #ifdef IEM_WITH_CODE_TLB
    6184     pVCpu->iem.s.pbInstrBuf = NULL;
    6185 #else
    6186     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    6187 #endif
    6188 
    6189     /*
    6190      * Unwind.
    6191      */
    6192     pVCpu->iem.s.cXcptRecursions--;
    6193     pVCpu->iem.s.uCurXcpt = uPrevXcpt;
    6194     pVCpu->iem.s.fCurXcpt = fPrevXcpt;
    6195     Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
    6196          VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
    6197          pVCpu->iem.s.cXcptRecursions + 1));
    6198     return rcStrict;
    6199 }
    6200 
    6201 #ifdef IEM_WITH_SETJMP
    6202 /**
    6203  * See iemRaiseXcptOrInt.  Will not return.
    6204  */
    6205 IEM_STATIC DECL_NO_RETURN(void)
     6206 iemRaiseXcptOrIntJmp(PVMCPUCC    pVCpu,
    6207                      uint8_t     cbInstr,
    6208                      uint8_t     u8Vector,
    6209                      uint32_t    fFlags,
    6210                      uint16_t    uErr,
    6211                      uint64_t    uCr2)
    6212 {
    6213     VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
    6214     longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    6215 }
    6216 #endif
    6217 
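          /*
           * Note: the iemRaiseXxx helpers below are thin, non-inlined wrappers around
           * iemRaiseXcptOrInt that bake in the vector number and flags.  An illustrative
           * (hypothetical) use from an instruction implementation could look like this,
           * assuming a decoded divisor in uDivisor:
           *
           * @code
           *    if (uDivisor == 0)
           *        return iemRaiseDivideError(pVCpu); // -> #DE via iemRaiseXcptOrInt
           * @endcode
           */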
    6218 
    6219 /** \#DE - 00.  */
    6220 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
    6221 {
    6222     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    6223 }
    6224 
    6225 
    6226 /** \#DB - 01.
     6227  * @note This automatically clears DR7.GD.  */
    6228 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
    6229 {
    6230     /** @todo set/clear RF. */
    6231     pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
    6232     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    6233 }
    6234 
    6235 
    6236 /** \#BR - 05.  */
    6237 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
    6238 {
    6239     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    6240 }
    6241 
    6242 
    6243 /** \#UD - 06.  */
    6244 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
    6245 {
    6246     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    6247 }
    6248 
    6249 
    6250 /** \#NM - 07.  */
    6251 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
    6252 {
    6253     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    6254 }
    6255 
    6256 
    6257 /** \#TS(err) - 0a.  */
    6258 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
    6259 {
    6260     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
    6261 }
    6262 
    6263 
    6264 /** \#TS(tr) - 0a.  */
    6265 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
    6266 {
    6267     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
    6268                              pVCpu->cpum.GstCtx.tr.Sel, 0);
    6269 }
    6270 
    6271 
    6272 /** \#TS(0) - 0a.  */
    6273 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
    6274 {
    6275     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
    6276                              0, 0);
    6277 }
    6278 
    6279 
     6280 /** \#TS(sel) - 0a.  */
    6281 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
    6282 {
    6283     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
    6284                              uSel & X86_SEL_MASK_OFF_RPL, 0);
    6285 }
    6286 
    6287 
    6288 /** \#NP(err) - 0b.  */
    6289 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
    6290 {
    6291     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
    6292 }
    6293 
    6294 
    6295 /** \#NP(sel) - 0b.  */
    6296 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
    6297 {
    6298     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
    6299                              uSel & ~X86_SEL_RPL, 0);
    6300 }
    6301 
    6302 
     6303 /** \#SS(sel) - 0c.  */
    6304 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
    6305 {
    6306     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
    6307                              uSel & ~X86_SEL_RPL, 0);
    6308 }
    6309 
    6310 
    6311 /** \#SS(err) - 0c.  */
    6312 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
    6313 {
    6314     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
    6315 }
    6316 
    6317 
    6318 /** \#GP(n) - 0d.  */
    6319 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
    6320 {
    6321     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
    6322 }
    6323 
    6324 
    6325 /** \#GP(0) - 0d.  */
    6326 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
    6327 {
    6328     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    6329 }
    6330 
    6331 #ifdef IEM_WITH_SETJMP
    6332 /** \#GP(0) - 0d.  */
    6333 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
    6334 {
    6335     iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    6336 }
    6337 #endif
    6338 
    6339 
    6340 /** \#GP(sel) - 0d.  */
    6341 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
    6342 {
    6343     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
    6344                              Sel & ~X86_SEL_RPL, 0);
    6345 }
    6346 
    6347 
    6348 /** \#GP(0) - 0d.  */
    6349 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
    6350 {
    6351     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    6352 }
    6353 
    6354 
    6355 /** \#GP(sel) - 0d.  */
    6356 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
    6357 {
    6358     NOREF(iSegReg); NOREF(fAccess);
    6359     return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
    6360                              IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    6361 }
    6362 
    6363 #ifdef IEM_WITH_SETJMP
    6364 /** \#GP(sel) - 0d, longjmp.  */
    6365 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
    6366 {
    6367     NOREF(iSegReg); NOREF(fAccess);
    6368     iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
    6369                          IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    6370 }
    6371 #endif
    6372 
    6373 /** \#GP(sel) - 0d.  */
    6374 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
    6375 {
    6376     NOREF(Sel);
    6377     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    6378 }
    6379 
    6380 #ifdef IEM_WITH_SETJMP
    6381 /** \#GP(sel) - 0d, longjmp.  */
    6382 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
    6383 {
    6384     NOREF(Sel);
    6385     iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    6386 }
    6387 #endif
    6388 
    6389 
    6390 /** \#GP(sel) - 0d.  */
    6391 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
    6392 {
    6393     NOREF(iSegReg); NOREF(fAccess);
    6394     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    6395 }
    6396 
    6397 #ifdef IEM_WITH_SETJMP
    6398 /** \#GP(sel) - 0d, longjmp.  */
    6399 DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
    6400                                                                                   uint32_t fAccess)
    6401 {
    6402     NOREF(iSegReg); NOREF(fAccess);
    6403     iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
    6404 }
    6405 #endif
    6406 
    6407 
    6408 /** \#PF(n) - 0e.  */
    6409 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
    6410 {
    6411     uint16_t uErr;
    6412     switch (rc)
    6413     {
    6414         case VERR_PAGE_NOT_PRESENT:
    6415         case VERR_PAGE_TABLE_NOT_PRESENT:
    6416         case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
    6417         case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
    6418             uErr = 0;
    6419             break;
    6420 
    6421         default:
    6422             AssertMsgFailed(("%Rrc\n", rc));
    6423             RT_FALL_THRU();
    6424         case VERR_ACCESS_DENIED:
    6425             uErr = X86_TRAP_PF_P;
    6426             break;
    6427 
    6428         /** @todo reserved  */
    6429     }
    6430 
    6431     if (pVCpu->iem.s.uCpl == 3)
    6432         uErr |= X86_TRAP_PF_US;
    6433 
    6434     if (   (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
    6435         && (   (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
    6436             && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
    6437         uErr |= X86_TRAP_PF_ID;
    6438 
     6439 #if 0 /* This is so much nonsense, really.  Why was it done like that? */
    6440     /* Note! RW access callers reporting a WRITE protection fault, will clear
    6441              the READ flag before calling.  So, read-modify-write accesses (RW)
    6442              can safely be reported as READ faults. */
    6443     if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
    6444         uErr |= X86_TRAP_PF_RW;
    6445 #else
    6446     if (fAccess & IEM_ACCESS_TYPE_WRITE)
    6447     {
    6448         /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
    6449         /// (regardless of outcome of the comparison in the latter case).
    6450         //if (!(fAccess & IEM_ACCESS_TYPE_READ))
    6451             uErr |= X86_TRAP_PF_RW;
    6452     }
    6453 #endif
    6454 
    6455     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
    6456                              uErr, GCPtrWhere);
    6457 }
    6458 
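          /*
           * Worked example (illustrative, not from the source): a ring-3 write to a
           * present, write-protected page arrives here with rc=VERR_ACCESS_DENIED and
           * IEM_ACCESS_TYPE_WRITE in fAccess, so the error code pushed for the #PF is
           * X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW.
           */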
    6459 #ifdef IEM_WITH_SETJMP
    6460 /** \#PF(n) - 0e, longjmp.  */
    6461 IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
    6462 {
    6463     longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
    6464 }
    6465 #endif
    6466 
    6467 
    6468 /** \#MF(0) - 10.  */
    6469 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
    6470 {
    6471     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    6472 }
    6473 
    6474 
    6475 /** \#AC(0) - 11.  */
    6476 DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
    6477 {
    6478     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    6479 }
    6480 
    6481 
    6482 /**
    6483  * Macro for calling iemCImplRaiseDivideError().
    6484  *
    6485  * This enables us to add/remove arguments and force different levels of
    6486  * inlining as we wish.
    6487  *
    6488  * @return  Strict VBox status code.
    6489  */
    6490 #define IEMOP_RAISE_DIVIDE_ERROR()          IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
    6491 IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
    6492 {
    6493     NOREF(cbInstr);
    6494     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    6495 }
    6496 
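          /*
           * A hypothetical decoder deferring to the C implementation above (sketch only;
           * iemOp_StubbedDiv is an invented name):
           *
           * @code
           *    FNIEMOP_DEF(iemOp_StubbedDiv)
           *    {
           *        return IEMOP_RAISE_DIVIDE_ERROR(); // expands to IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
           *    }
           * @endcode
           */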
    6497 
    6498 /**
    6499  * Macro for calling iemCImplRaiseInvalidLockPrefix().
    6500  *
    6501  * This enables us to add/remove arguments and force different levels of
    6502  * inlining as we wish.
    6503  *
    6504  * @return  Strict VBox status code.
    6505  */
    6506 #define IEMOP_RAISE_INVALID_LOCK_PREFIX()   IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
    6507 IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
    6508 {
    6509     NOREF(cbInstr);
    6510     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    6511 }
    6512 
    6513 
    6514 /**
    6515  * Macro for calling iemCImplRaiseInvalidOpcode().
    6516  *
    6517  * This enables us to add/remove arguments and force different levels of
    6518  * inlining as we wish.
    6519  *
    6520  * @return  Strict VBox status code.
    6521  */
    6522 #define IEMOP_RAISE_INVALID_OPCODE()        IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
    6523 IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
    6524 {
    6525     NOREF(cbInstr);
    6526     return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
    6527 }
    6528 
    6529 
    6530 /** @}  */
    6531 
    6532 
    6533 /*
    6534  *
     6535  * Helper routines.
     6536  * Helper routines.
     6537  * Helper routines.
    6538  *
    6539  */
    6540 
    6541 /**
    6542  * Recalculates the effective operand size.
    6543  *
    6544  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6545  */
    6546 IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
    6547 {
    6548     switch (pVCpu->iem.s.enmCpuMode)
    6549     {
    6550         case IEMMODE_16BIT:
    6551             pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
    6552             break;
    6553         case IEMMODE_32BIT:
    6554             pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
    6555             break;
    6556         case IEMMODE_64BIT:
    6557             switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
    6558             {
    6559                 case 0:
    6560                     pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
    6561                     break;
    6562                 case IEM_OP_PRF_SIZE_OP:
    6563                     pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
    6564                     break;
    6565                 case IEM_OP_PRF_SIZE_REX_W:
    6566                 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
    6567                     pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
    6568                     break;
    6569             }
    6570             break;
    6571         default:
    6572             AssertFailed();
    6573     }
    6574 }
    6575 
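          /*
           * Worked example (illustrative): in IEMMODE_32BIT a 66h prefix sets
           * IEM_OP_PRF_SIZE_OP and the effective operand size flips to IEMMODE_16BIT;
           * in IEMMODE_64BIT a REX.W prefix (IEM_OP_PRF_SIZE_REX_W) takes precedence
           * over 66h, so both prefixes together still yield IEMMODE_64BIT.
           */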
    6576 
    6577 /**
    6578  * Sets the default operand size to 64-bit and recalculates the effective
    6579  * operand size.
    6580  *
    6581  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6582  */
    6583 IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
    6584 {
    6585     Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
    6586     pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
    6587     if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
    6588         pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
    6589     else
    6590         pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
    6591 }
    6592 
    6593 
    6594 /*
    6595  *
    6596  * Common opcode decoders.
    6597  * Common opcode decoders.
    6598  * Common opcode decoders.
    6599  *
    6600  */
    6601 //#include <iprt/mem.h>
    6602 
    6603 /**
    6604  * Used to add extra details about a stub case.
    6605  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    6606  */
    6607 IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
    6608 {
    6609 #if defined(LOG_ENABLED) && defined(IN_RING3)
    6610     PVM  pVM = pVCpu->CTX_SUFF(pVM);
    6611     char szRegs[4096];
    6612     DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
    6613                     "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
    6614                     "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
    6615                     "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
    6616                     "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
    6617                     "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
    6618                     "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
    6619                     "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
    6620                     "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
    6621                     "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
    6622                     "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
    6623                     "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
    6624                     "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
    6625                     "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
    6626                     "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim}  idtr=%016VR{idtr_base}:%04VR{idtr_lim}  rflags=%08VR{rflags}\n"
    6627                     "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
    6628                     "tr  ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
    6629                     "    sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
    6630                     "        efer=%016VR{efer}\n"
    6631                     "         pat=%016VR{pat}\n"
    6632                     "     sf_mask=%016VR{sf_mask}\n"
    6633                     "krnl_gs_base=%016VR{krnl_gs_base}\n"
    6634                     "       lstar=%016VR{lstar}\n"
    6635                     "        star=%016VR{star} cstar=%016VR{cstar}\n"
    6636                     "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
    6637                     );
    6638 
    6639     char szInstr[256];
    6640     DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
    6641                        DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
    6642                        szInstr, sizeof(szInstr), NULL);
    6643 
    6644     RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
    6645 #else
     6646     RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip); /* .Sel: %04x wants the selector, not the CPUMSELREG struct. */
    6647 #endif
    6648 }
    6649 
    6650 /**
    6651  * Complains about a stub.
    6652  *
    6653  * Providing two versions of this macro, one for daily use and one for use when
    6654  * working on IEM.
    6655  */
    6656 #if 0
    6657 # define IEMOP_BITCH_ABOUT_STUB() \
    6658     do { \
    6659         RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
    6660         iemOpStubMsg2(pVCpu); \
    6661         RTAssertPanic(); \
    6662     } while (0)
    6663 #else
    6664 # define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
    6665 #endif
    6666 
    6667 /** Stubs an opcode. */
    6668 #define FNIEMOP_STUB(a_Name) \
    6669     FNIEMOP_DEF(a_Name) \
    6670     { \
    6671         RT_NOREF_PV(pVCpu); \
    6672         IEMOP_BITCH_ABOUT_STUB(); \
    6673         return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
    6674     } \
    6675     typedef int ignore_semicolon
    6676 
    6677 /** Stubs an opcode. */
    6678 #define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
    6679     FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    6680     { \
    6681         RT_NOREF_PV(pVCpu); \
    6682         RT_NOREF_PV(a_Name0); \
    6683         IEMOP_BITCH_ABOUT_STUB(); \
    6684         return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
    6685     } \
    6686     typedef int ignore_semicolon
    6687 
    6688 /** Stubs an opcode which currently should raise \#UD. */
    6689 #define FNIEMOP_UD_STUB(a_Name) \
    6690     FNIEMOP_DEF(a_Name) \
    6691     { \
    6692         Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
    6693         return IEMOP_RAISE_INVALID_OPCODE(); \
    6694     } \
    6695     typedef int ignore_semicolon
    6696 
    6697 /** Stubs an opcode which currently should raise \#UD. */
    6698 #define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
    6699     FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    6700     { \
    6701         RT_NOREF_PV(pVCpu); \
    6702         RT_NOREF_PV(a_Name0); \
    6703         Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
    6704         return IEMOP_RAISE_INVALID_OPCODE(); \
    6705     } \
    6706     typedef int ignore_semicolon
    6707 
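          /*
           * Illustrative use of the stub macros (the opcode names are invented):
           *
           * @code
           *    FNIEMOP_STUB(iemOp_example_stub);       // logs and returns VERR_IEM_INSTR_NOT_IMPLEMENTED
           *    FNIEMOP_UD_STUB(iemOp_example_ud_stub); // raises #UD via IEMOP_RAISE_INVALID_OPCODE
           * @endcode
           */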
    6708 
    6709 
    6710 /** @name   Register Access.
    6711  * @{
    6712  */
    6713 
    6714 /**
    6715  * Gets a reference (pointer) to the specified hidden segment register.
    6716  *
    6717  * @returns Hidden register reference.
    6718  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6719  * @param   iSegReg             The segment register.
    6720  */
    6721 IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
    6722 {
    6723     Assert(iSegReg < X86_SREG_COUNT);
    6724     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    6725     PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
    6726 
    6727     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    6728     return pSReg;
    6729 }
    6730 
    6731 
    6732 /**
    6733  * Ensures that the given hidden segment register is up to date.
    6734  *
    6735  * @returns Hidden register reference.
    6736  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6737  * @param   pSReg               The segment register.
    6738  */
    6739 IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
    6740 {
    6741     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    6742     NOREF(pVCpu);
    6743     return pSReg;
    6744 }
    6745 
    6746 
    6747 /**
    6748  * Gets a reference (pointer) to the specified segment register (the selector
    6749  * value).
    6750  *
    6751  * @returns Pointer to the selector variable.
    6752  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6753  * @param   iSegReg             The segment register.
    6754  */
    6755 DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
    6756 {
    6757     Assert(iSegReg < X86_SREG_COUNT);
    6758     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    6759     return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
    6760 }
    6761 
    6762 
    6763 /**
    6764  * Fetches the selector value of a segment register.
    6765  *
    6766  * @returns The selector value.
    6767  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6768  * @param   iSegReg             The segment register.
    6769  */
    6770 DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
    6771 {
    6772     Assert(iSegReg < X86_SREG_COUNT);
    6773     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    6774     return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
    6775 }
    6776 
    6777 
    6778 /**
    6779  * Fetches the base address value of a segment register.
    6780  *
     6781  * @returns The base address value.
    6782  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6783  * @param   iSegReg             The segment register.
    6784  */
    6785 DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
    6786 {
    6787     Assert(iSegReg < X86_SREG_COUNT);
    6788     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    6789     return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
    6790 }
    6791 
    6792 
    6793 /**
    6794  * Gets a reference (pointer) to the specified general purpose register.
    6795  *
    6796  * @returns Register reference.
    6797  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6798  * @param   iReg                The general purpose register.
    6799  */
    6800 DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
    6801 {
    6802     Assert(iReg < 16);
    6803     return &pVCpu->cpum.GstCtx.aGRegs[iReg];
    6804 }
    6805 
    6806 
    6807 /**
    6808  * Gets a reference (pointer) to the specified 8-bit general purpose register.
    6809  *
    6810  * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
    6811  *
    6812  * @returns Register reference.
    6813  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6814  * @param   iReg                The register.
    6815  */
    6816 DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
    6817 {
    6818     if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
    6819     {
    6820         Assert(iReg < 16);
    6821         return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
    6822     }
    6823     /* high 8-bit register. */
    6824     Assert(iReg < 8);
    6825     return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
    6826 }
    6827 
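          /*
           * Example (illustrative): without any REX prefix, encodings 4..7 select the
           * high byte registers AH/CH/DH/BH, i.e. &aGRegs[iReg & 3].bHi; with a REX
           * prefix present the same encodings select SPL/BPL/SIL/DIL via
           * &aGRegs[iReg].u8 instead.
           */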
    6828 
    6829 /**
    6830  * Gets a reference (pointer) to the specified 16-bit general purpose register.
    6831  *
    6832  * @returns Register reference.
    6833  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6834  * @param   iReg                The register.
    6835  */
    6836 DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
    6837 {
    6838     Assert(iReg < 16);
    6839     return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
    6840 }
    6841 
    6842 
    6843 /**
    6844  * Gets a reference (pointer) to the specified 32-bit general purpose register.
    6845  *
    6846  * @returns Register reference.
    6847  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6848  * @param   iReg                The register.
    6849  */
    6850 DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
    6851 {
    6852     Assert(iReg < 16);
    6853     return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
    6854 }
    6855 
    6856 
    6857 /**
    6858  * Gets a reference (pointer) to the specified 64-bit general purpose register.
    6859  *
    6860  * @returns Register reference.
    6861  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6862  * @param   iReg                The register.
    6863  */
    6864 DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
    6865 {
     6866     Assert(iReg < 16);
    6867     return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
    6868 }
    6869 
    6870 
    6871 /**
    6872  * Gets a reference (pointer) to the specified segment register's base address.
    6873  *
    6874  * @returns Segment register base address reference.
    6875  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     6876  * @param   iSegReg             The segment register.
    6877  */
    6878 DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
    6879 {
    6880     Assert(iSegReg < X86_SREG_COUNT);
    6881     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    6882     return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
    6883 }
    6884 
    6885 
    6886 /**
     6887  * Fetches the value of an 8-bit general purpose register.
    6888  *
    6889  * @returns The register value.
    6890  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6891  * @param   iReg                The register.
    6892  */
    6893 DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
    6894 {
    6895     return *iemGRegRefU8(pVCpu, iReg);
    6896 }
    6897 
    6898 
    6899 /**
    6900  * Fetches the value of a 16-bit general purpose register.
    6901  *
    6902  * @returns The register value.
    6903  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6904  * @param   iReg                The register.
    6905  */
    6906 DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
    6907 {
    6908     Assert(iReg < 16);
    6909     return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
    6910 }
    6911 
    6912 
    6913 /**
    6914  * Fetches the value of a 32-bit general purpose register.
    6915  *
    6916  * @returns The register value.
    6917  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6918  * @param   iReg                The register.
    6919  */
    6920 DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
    6921 {
    6922     Assert(iReg < 16);
    6923     return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
    6924 }
    6925 
    6926 
    6927 /**
    6928  * Fetches the value of a 64-bit general purpose register.
    6929  *
    6930  * @returns The register value.
    6931  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6932  * @param   iReg                The register.
    6933  */
    6934 DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
    6935 {
    6936     Assert(iReg < 16);
    6937     return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
    6938 }
    6939 
    6940 
    6941 /**
     6942  * Adds an 8-bit signed jump offset to RIP/EIP/IP.
    6943  *
    6944  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    6945  * segment limit.
    6946  *
    6947  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    6948  * @param   offNextInstr        The offset of the next instruction.
    6949  */
    6950 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
    6951 {
    6952     switch (pVCpu->iem.s.enmEffOpSize)
    6953     {
    6954         case IEMMODE_16BIT:
    6955         {
    6956             uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
    6957             if (   uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
    6958                 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
    6959                 return iemRaiseGeneralProtectionFault0(pVCpu);
    6960             pVCpu->cpum.GstCtx.rip = uNewIp;
    6961             break;
    6962         }
    6963 
    6964         case IEMMODE_32BIT:
    6965         {
    6966             Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
    6967             Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
    6968 
    6969             uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
    6970             if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
    6971                 return iemRaiseGeneralProtectionFault0(pVCpu);
    6972             pVCpu->cpum.GstCtx.rip = uNewEip;
    6973             break;
    6974         }
    6975 
    6976         case IEMMODE_64BIT:
    6977         {
    6978             Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
    6979 
    6980             uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
    6981             if (!IEM_IS_CANONICAL(uNewRip))
    6982                 return iemRaiseGeneralProtectionFault0(pVCpu);
    6983             pVCpu->cpum.GstCtx.rip = uNewRip;
    6984             break;
    6985         }
    6986 
    6987         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    6988     }
    6989 
    6990     pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
    6991 
    6992 #ifndef IEM_WITH_CODE_TLB
    6993     /* Flush the prefetch buffer. */
    6994     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    6995 #endif
    6996 
    6997     return VINF_SUCCESS;
    6998 }
    6999 
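          /*
           * Hypothetical branch-taken path of a Jcc rel8 implementation, assuming the
           * decoded 8-bit displacement is in i8Imm (fConditionMet is an invented flag):
           *
           * @code
           *    if (fConditionMet)
           *        return iemRegRipRelativeJumpS8(pVCpu, i8Imm);
           *    iemRegUpdateRipAndClearRF(pVCpu); // not taken: just advance RIP
           *    return VINF_SUCCESS;
           * @endcode
           */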
    7000 
    7001 /**
    7002  * Adds a 16-bit signed jump offset to RIP/EIP/IP.
    7003  *
    7004  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    7005  * segment limit.
    7006  *
    7007  * @returns Strict VBox status code.
    7008  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7009  * @param   offNextInstr        The offset of the next instruction.
    7010  */
    7011 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
    7012 {
    7013     Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
    7014 
    7015     uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
    7016     if (   uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
    7017         && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
    7018         return iemRaiseGeneralProtectionFault0(pVCpu);
     7019     /** @todo Test 16-bit jump in 64-bit mode. Possible?  */
    7020     pVCpu->cpum.GstCtx.rip = uNewIp;
    7021     pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
    7022 
    7023 #ifndef IEM_WITH_CODE_TLB
    7024     /* Flush the prefetch buffer. */
    7025     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    7026 #endif
    7027 
    7028     return VINF_SUCCESS;
    7029 }
    7030 
    7031 
    7032 /**
    7033  * Adds a 32-bit signed jump offset to RIP/EIP/IP.
    7034  *
    7035  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    7036  * segment limit.
    7037  *
    7038  * @returns Strict VBox status code.
    7039  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7040  * @param   offNextInstr        The offset of the next instruction.
    7041  */
    7042 IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
    7043 {
    7044     Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
    7045 
    7046     if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
    7047     {
    7048         Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
    7049 
    7050         uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
    7051         if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
    7052             return iemRaiseGeneralProtectionFault0(pVCpu);
    7053         pVCpu->cpum.GstCtx.rip = uNewEip;
    7054     }
    7055     else
    7056     {
    7057         Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
    7058 
    7059         uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
    7060         if (!IEM_IS_CANONICAL(uNewRip))
    7061             return iemRaiseGeneralProtectionFault0(pVCpu);
    7062         pVCpu->cpum.GstCtx.rip = uNewRip;
    7063     }
    7064     pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
    7065 
    7066 #ifndef IEM_WITH_CODE_TLB
    7067     /* Flush the prefetch buffer. */
    7068     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    7069 #endif
    7070 
    7071     return VINF_SUCCESS;
    7072 }
    7073 
    7074 
    7075 /**
    7076  * Performs a near jump to the specified address.
    7077  *
    7078  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    7079  * segment limit.
    7080  *
    7081  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7082  * @param   uNewRip             The new RIP value.
    7083  */
    7084 IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
    7085 {
    7086     switch (pVCpu->iem.s.enmEffOpSize)
    7087     {
    7088         case IEMMODE_16BIT:
    7089         {
    7090             Assert(uNewRip <= UINT16_MAX);
    7091             if (   uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
    7092                 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
    7093                 return iemRaiseGeneralProtectionFault0(pVCpu);
    7094             /** @todo Test 16-bit jump in 64-bit mode.  */
    7095             pVCpu->cpum.GstCtx.rip = uNewRip;
    7096             break;
    7097         }
    7098 
    7099         case IEMMODE_32BIT:
    7100         {
    7101             Assert(uNewRip <= UINT32_MAX);
    7102             Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
    7103             Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
    7104 
    7105             if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
    7106                 return iemRaiseGeneralProtectionFault0(pVCpu);
    7107             pVCpu->cpum.GstCtx.rip = uNewRip;
    7108             break;
    7109         }
    7110 
    7111         case IEMMODE_64BIT:
    7112         {
    7113             Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
    7114 
    7115             if (!IEM_IS_CANONICAL(uNewRip))
    7116                 return iemRaiseGeneralProtectionFault0(pVCpu);
    7117             pVCpu->cpum.GstCtx.rip = uNewRip;
    7118             break;
    7119         }
    7120 
    7121         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    7122     }
    7123 
    7124     pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
    7125 
    7126 #ifndef IEM_WITH_CODE_TLB
    7127     /* Flush the prefetch buffer. */
    7128     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    7129 #endif
    7130 
    7131     return VINF_SUCCESS;
    7132 }
    7133 
    7134 
    7135 /**
    7136  * Get the address of the top of the stack.
    7137  *
    7138  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7139  */
    7140 DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
    7141 {
    7142     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    7143         return pVCpu->cpum.GstCtx.rsp;
    7144     if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
    7145         return pVCpu->cpum.GstCtx.esp;
    7146     return pVCpu->cpum.GstCtx.sp;
    7147 }
    7148 
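          /*
           * Example (illustrative): with a 16-bit stack segment (SS.B=0 outside 64-bit
           * mode) only SP is significant, so this returns the zero-extended 16-bit SP;
           * SS.B=1 selects ESP, and 64-bit mode always uses the full RSP.
           */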
    7149 
    7150 /**
    7151  * Updates the RIP/EIP/IP to point to the next instruction.
    7152  *
    7153  * This function leaves the EFLAGS.RF flag alone.
    7154  *
    7155  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7156  * @param   cbInstr             The number of bytes to add.
    7157  */
    7158 IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
    7159 {
    7160     switch (pVCpu->iem.s.enmCpuMode)
    7161     {
    7162         case IEMMODE_16BIT:
    7163             Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
    7164             pVCpu->cpum.GstCtx.eip += cbInstr;
    7165             pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
    7166             break;
    7167 
    7168         case IEMMODE_32BIT:
    7169             pVCpu->cpum.GstCtx.eip += cbInstr;
    7170             Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
    7171             break;
    7172 
    7173         case IEMMODE_64BIT:
    7174             pVCpu->cpum.GstCtx.rip += cbInstr;
    7175             break;
    7176         default: AssertFailed();
    7177     }
    7178 }
    7179 
    7180 
    7181 #if 0
    7182 /**
    7183  * Updates the RIP/EIP/IP to point to the next instruction.
    7184  *
    7185  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7186  */
    7187 IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
    7188 {
    7189     return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
    7190 }
    7191 #endif
    7192 
    7193 
    7194 
    7195 /**
    7196  * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
    7197  *
    7198  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7199  * @param   cbInstr             The number of bytes to add.
    7200  */
    7201 IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
    7202 {
    7203     pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
    7204 
    7205     AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
    7206 #if ARCH_BITS >= 64
    7207     static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
    7208     Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
    7209     pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
    7210 #else
    7211     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    7212         pVCpu->cpum.GstCtx.rip += cbInstr;
    7213     else
    7214         pVCpu->cpum.GstCtx.eip += cbInstr;
    7215 #endif
    7216 }
    7217 
    7218 
    7219 /**
    7220  * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
    7221  *
    7222  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7223  */
    7224 IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
    7225 {
    7226     return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
    7227 }
    7228 
    7229 
    7230 /**
    7231  * Adds to the stack pointer.
    7232  *
    7233  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7234  * @param   cbToAdd             The number of bytes to add (8-bit!).
    7235  */
    7236 DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
    7237 {
    7238     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    7239         pVCpu->cpum.GstCtx.rsp += cbToAdd;
    7240     else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
    7241         pVCpu->cpum.GstCtx.esp += cbToAdd;
    7242     else
    7243         pVCpu->cpum.GstCtx.sp  += cbToAdd;
    7244 }
    7245 
    7246 
    7247 /**
    7248  * Subtracts from the stack pointer.
    7249  *
    7250  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7251  * @param   cbToSub             The number of bytes to subtract (8-bit!).
    7252  */
    7253 DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
    7254 {
    7255     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    7256         pVCpu->cpum.GstCtx.rsp -= cbToSub;
    7257     else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
    7258         pVCpu->cpum.GstCtx.esp -= cbToSub;
    7259     else
    7260         pVCpu->cpum.GstCtx.sp  -= cbToSub;
    7261 }
    7262 
    7263 
    7264 /**
    7265  * Adds to the temporary stack pointer.
    7266  *
    7267  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7268  * @param   pTmpRsp             The temporary SP/ESP/RSP to update.
    7269  * @param   cbToAdd             The number of bytes to add (16-bit).
    7270  */
    7271 DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
    7272 {
    7273     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    7274         pTmpRsp->u           += cbToAdd;
    7275     else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
    7276         pTmpRsp->DWords.dw0  += cbToAdd;
    7277     else
    7278         pTmpRsp->Words.w0    += cbToAdd;
    7279 }
    7280 
    7281 
    7282 /**
    7283  * Subtracts from the temporary stack pointer.
    7284  *
    7285  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7286  * @param   pTmpRsp             The temporary SP/ESP/RSP to update.
    7287  * @param   cbToSub             The number of bytes to subtract.
     7288  * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
     7289  *          expecting that.
    7290  */
    7291 DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
    7292 {
    7293     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    7294         pTmpRsp->u          -= cbToSub;
    7295     else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
    7296         pTmpRsp->DWords.dw0 -= cbToSub;
    7297     else
    7298         pTmpRsp->Words.w0   -= cbToSub;
    7299 }
    7300 
    7301 
    7302 /**
    7303  * Calculates the effective stack address for a push of the specified size as
    7304  * well as the new RSP value (upper bits may be masked).
    7305  *
    7306  * @returns Effective stack address for the push.
    7307  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7308  * @param   cbItem              The size of the stack item to push.
    7309  * @param   puNewRsp            Where to return the new RSP value.
    7310  */
    7311 DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
    7312 {
    7313     RTUINT64U   uTmpRsp;
    7314     RTGCPTR     GCPtrTop;
    7315     uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
    7316 
    7317     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    7318         GCPtrTop = uTmpRsp.u            -= cbItem;
    7319     else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
    7320         GCPtrTop = uTmpRsp.DWords.dw0   -= cbItem;
    7321     else
    7322         GCPtrTop = uTmpRsp.Words.w0     -= cbItem;
    7323     *puNewRsp = uTmpRsp.u;
    7324     return GCPtrTop;
    7325 }
    7326 
    7327 
    7328 /**
    7329  * Gets the current stack pointer and calculates the value after a pop of the
    7330  * specified size.
    7331  *
    7332  * @returns Current stack pointer.
    7333  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7334  * @param   cbItem              The size of the stack item to pop.
    7335  * @param   puNewRsp            Where to return the new RSP value.
    7336  */
    7337 DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
    7338 {
    7339     RTUINT64U   uTmpRsp;
    7340     RTGCPTR     GCPtrTop;
    7341     uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
    7342 
    7343     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    7344     {
    7345         GCPtrTop = uTmpRsp.u;
    7346         uTmpRsp.u += cbItem;
    7347     }
    7348     else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
    7349     {
    7350         GCPtrTop = uTmpRsp.DWords.dw0;
    7351         uTmpRsp.DWords.dw0 += cbItem;
    7352     }
    7353     else
    7354     {
    7355         GCPtrTop = uTmpRsp.Words.w0;
    7356         uTmpRsp.Words.w0 += cbItem;
    7357     }
    7358     *puNewRsp = uTmpRsp.u;
    7359     return GCPtrTop;
    7360 }
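
A hedged usage sketch of how a caller would combine iemRegGetRspForPush with a store and a final commit; the iemMemStoreDataU32 call stands in for whichever store routine the real push implementation uses, and u32Value is assumed to hold the operand:

uint64_t     uNewRsp;
RTGCPTR      GCPtrTop = iemRegGetRspForPush(pVCpu, 4 /*cbItem*/, &uNewRsp);
VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, X86_SREG_SS, GCPtrTop, u32Value);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;                  /* a faulting store leaves RSP unchanged */
pVCpu->cpum.GstCtx.rsp = uNewRsp;     /* commit only after the store succeeded */

The design point is that the ForPush/ForPop helpers only calculate: the guest RSP is committed separately, so a #PF or #SS raised by the store does not clobber the stack pointer.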
    7361 
    7362 
    7363 /**
    7364  * Calculates the effective stack address for a push of the specified size as
    7365  * well as the new temporary RSP value (upper bits may be masked).
    7366  *
    7367  * @returns Effective stack address for the push.
    7368  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7369  * @param   pTmpRsp             The temporary stack pointer.  This is updated.
    7370  * @param   cbItem              The size of the stack item to push.
    7371  */
    7372 DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
    7373 {
    7374     RTGCPTR GCPtrTop;
    7375 
    7376     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    7377         GCPtrTop = pTmpRsp->u          -= cbItem;
    7378     else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
    7379         GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
    7380     else
    7381         GCPtrTop = pTmpRsp->Words.w0   -= cbItem;
    7382     return GCPtrTop;
    7383 }
    7384 
    7385 
    7386 /**
    7387  * Gets the effective stack address for a pop of the specified size and
    7388  * calculates and updates the temporary RSP.
    7389  *
    7390  * @returns Current stack pointer.
    7391  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7392  * @param   pTmpRsp             The temporary stack pointer.  This is updated.
    7393  * @param   cbItem              The size of the stack item to pop.
    7394  */
    7395 DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
    7396 {
    7397     RTGCPTR GCPtrTop;
    7398     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    7399     {
    7400         GCPtrTop = pTmpRsp->u;
    7401         pTmpRsp->u          += cbItem;
    7402     }
    7403     else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
    7404     {
    7405         GCPtrTop = pTmpRsp->DWords.dw0;
    7406         pTmpRsp->DWords.dw0 += cbItem;
    7407     }
    7408     else
    7409     {
    7410         GCPtrTop = pTmpRsp->Words.w0;
    7411         pTmpRsp->Words.w0   += cbItem;
    7412     }
    7413     return GCPtrTop;
    7414 }
    7415 
    7416 /** @}  */
    7417 
    7418 
    7419 /** @name   FPU access and helpers.
    7420  *
    7421  * @{
    7422  */
    7423 
    7424 
    7425 /**
    7426  * Hook for preparing to use the host FPU.
    7427  *
    7428  * This is necessary in ring-0 and raw-mode context (nop in ring-3).
    7429  *
    7430  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7431  */
    7432 DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
    7433 {
    7434 #ifdef IN_RING3
    7435     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
    7436 #else
    7437     CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
    7438 #endif
    7439     IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
    7440 }
    7441 
    7442 
    7443 /**
    7444  * Hook for preparing to use the host FPU for SSE.
    7445  *
    7446  * This is necessary in ring-0 and raw-mode context (nop in ring-3).
    7447  *
    7448  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7449  */
    7450 DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
    7451 {
    7452     iemFpuPrepareUsage(pVCpu);
    7453 }
    7454 
    7455 
    7456 /**
    7457  * Hook for preparing to use the host FPU for AVX.
    7458  *
    7459  * This is necessary in ring-0 and raw-mode context (nop in ring-3).
    7460  *
    7461  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7462  */
    7463 DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
    7464 {
    7465     iemFpuPrepareUsage(pVCpu);
    7466 }
    7467 
    7468 
    7469 /**
    7470  * Hook for actualizing the guest FPU state before the interpreter reads it.
    7471  *
    7472  * This is necessary in ring-0 and raw-mode context (nop in ring-3).
    7473  *
    7474  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7475  */
    7476 DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
    7477 {
    7478 #ifdef IN_RING3
    7479     NOREF(pVCpu);
    7480 #else
    7481     CPUMRZFpuStateActualizeForRead(pVCpu);
    7482 #endif
    7483     IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
    7484 }
    7485 
    7486 
    7487 /**
    7488  * Hook for actualizing the guest FPU state before the interpreter changes it.
    7489  *
    7490  * This is necessary in ring-0 and raw-mode context (nop in ring-3).
    7491  *
    7492  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7493  */
    7494 DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
    7495 {
    7496 #ifdef IN_RING3
    7497     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
    7498 #else
    7499     CPUMRZFpuStateActualizeForChange(pVCpu);
    7500 #endif
    7501     IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
    7502 }
    7503 
    7504 
    7505 /**
    7506  * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
    7507  * only.
    7508  *
    7509  * This is necessary in ring-0 and raw-mode context (nop in ring-3).
    7510  *
    7511  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7512  */
    7513 DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
    7514 {
    7515 #if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
    7516     NOREF(pVCpu);
    7517 #else
    7518     CPUMRZFpuStateActualizeSseForRead(pVCpu);
    7519 #endif
    7520     IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
    7521 }
    7522 
    7523 
    7524 /**
    7525  * Hook for actualizing the guest XMM0..15 and MXCSR register state for
    7526  * read+write.
    7527  *
    7528  * This is necessary in ring-0 and raw-mode context (nop in ring-3).
    7529  *
    7530  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7531  */
    7532 DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
    7533 {
    7534 #if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
    7535     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
    7536 #else
    7537     CPUMRZFpuStateActualizeForChange(pVCpu);
    7538 #endif
    7539     IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
    7540 
    7541     /* Make sure any changes are loaded the next time around. */
    7542     pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
    7543 }
    7544 
    7545 
    7546 /**
    7547  * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
    7548  * only.
    7549  *
    7550  * This is necessary in ring-0 and raw-mode context (nop in ring-3).
    7551  *
    7552  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7553  */
    7554 DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
    7555 {
    7556 #ifdef IN_RING3
    7557     NOREF(pVCpu);
    7558 #else
    7559     CPUMRZFpuStateActualizeAvxForRead(pVCpu);
    7560 #endif
    7561     IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
    7562 }
    7563 
    7564 
    7565 /**
    7566  * Hook for actualizing the guest YMM0..15 and MXCSR register state for
    7567  * read+write.
    7568  *
    7569  * This is necessary in ring-0 and raw-mode context (nop in ring-3).
    7570  *
    7571  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7572  */
    7573 DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
    7574 {
    7575 #ifdef IN_RING3
    7576     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
    7577 #else
    7578     CPUMRZFpuStateActualizeForChange(pVCpu);
    7579 #endif
    7580     IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
    7581 
    7582     /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
    7583     pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
    7584 }
    7585 
    7586 
    7587 /**
    7588  * Stores a QNaN value into a FPU register.
    7589  *
    7590  * @param   pReg                Pointer to the register.
    7591  */
    7592 DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
    7593 {
    7594     pReg->au32[0] = UINT32_C(0x00000000);
    7595     pReg->au32[1] = UINT32_C(0xc0000000);
    7596     pReg->au16[4] = UINT16_C(0xffff);
    7597 }
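
Spelled out, the three stores above assemble the 80-bit "real indefinite" QNaN:

/*  au16[4] = 0xffff      -> sign = 1, exponent = 0x7fff (all ones)
 *  au32[1] = 0xc0000000  -> mantissa bits 63..32 (integer bit + top fraction bit)
 *  au32[0] = 0x00000000  -> mantissa bits 31..0
 *
 * i.e. sign 1, exponent 0x7fff, mantissa 0xC000000000000000 - the value the
 * FPU itself produces for masked invalid-operation exceptions. */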
    7598 
    7599 
    7600 /**
    7601  * Updates the FOP, FPU.CS and FPUIP registers.
    7602  *
    7603  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7604  * @param   pFpuCtx             The FPU context.
    7605  */
    7606 DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
    7607 {
    7608     Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
    7609     pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
    7610     /** @todo x87.CS and FPUIP need to be kept separately. */
    7611     if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
    7612     {
    7613         /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
    7614          *        are handled in real mode, based on the fnsave and fnstenv images. */
    7615         pFpuCtx->CS    = 0;
    7616         pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
    7617     }
    7618     else if (!IEM_IS_LONG_MODE(pVCpu))
    7619     {
    7620         pFpuCtx->CS    = pVCpu->cpum.GstCtx.cs.Sel;
    7621         pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
    7622     }
    7623     else
    7624         *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
    7625 }
    7626 
    7627 
    7628 /**
    7629  * Updates the x87.DS and FPUDP registers.
    7630  *
    7631  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7632  * @param   pFpuCtx             The FPU context.
    7633  * @param   iEffSeg             The effective segment register.
    7634  * @param   GCPtrEff            The effective address relative to @a iEffSeg.
    7635  */
    7636 DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
    7637 {
    7638     RTSEL sel;
    7639     switch (iEffSeg)
    7640     {
    7641         case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
    7642         case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
    7643         case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
    7644         case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
    7645         case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
    7646         case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
    7647         default:
    7648             AssertMsgFailed(("%d\n", iEffSeg));
    7649             sel = pVCpu->cpum.GstCtx.ds.Sel;
    7650     }
    7651     /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
    7652     if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
    7653     {
    7654         pFpuCtx->DS    = 0;
    7655         pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
    7656     }
    7657     else if (!IEM_IS_LONG_MODE(pVCpu))
    7658     {
    7659         pFpuCtx->DS    = sel;
    7660         pFpuCtx->FPUDP = GCPtrEff;
    7661     }
    7662     else
    7663         *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
    7664 }
    7665 
    7666 
    7667 /**
    7668  * Rotates the stack registers in the push direction.
    7669  *
    7670  * @param   pFpuCtx             The FPU context.
    7671  * @remarks This is a complete waste of time, but fxsave stores the registers in
    7672  *          stack order.
    7673  */
    7674 DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
    7675 {
    7676     RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
    7677     pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
    7678     pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
    7679     pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
    7680     pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
    7681     pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
    7682     pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
    7683     pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
    7684     pFpuCtx->aRegs[0].r80 = r80Tmp;
    7685 }
    7686 
    7687 
    7688 /**
    7689  * Rotates the stack registers in the pop direction.
    7690  *
    7691  * @param   pFpuCtx             The FPU context.
    7692  * @remarks This is a complete waste of time, but fxsave stores the registers in
    7693  *          stack order.
    7694  */
    7695 DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
    7696 {
    7697     RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
    7698     pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
    7699     pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
    7700     pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
    7701     pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
    7702     pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
    7703     pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
    7704     pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
    7705     pFpuCtx->aRegs[7].r80 = r80Tmp;
    7706 }
    7707 
    7708 
    7709 /**
    7710  * Updates FSW and pushes a FPU result onto the FPU stack if no pending
    7711  * exception prevents it.
    7712  *
    7713  * @param   pResult             The FPU operation result to push.
    7714  * @param   pFpuCtx             The FPU context.
    7715  */
    7716 IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
    7717 {
    7718     /* Update FSW and bail if there are pending exceptions afterwards. */
    7719     uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
    7720     fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
    7721     if (   (fFsw             & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
    7722         & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
    7723     {
    7724         pFpuCtx->FSW = fFsw;
    7725         return;
    7726     }
    7727 
    7728     uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
    7729     if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
    7730     {
    7731         /* All is fine, push the actual value. */
    7732         pFpuCtx->FTW |= RT_BIT(iNewTop);
    7733         pFpuCtx->aRegs[7].r80 = pResult->r80Result;
    7734     }
    7735     else if (pFpuCtx->FCW & X86_FCW_IM)
    7736     {
    7737         /* Masked stack overflow, push QNaN. */
    7738         fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
    7739         iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
    7740     }
    7741     else
    7742     {
    7743         /* Raise stack overflow, don't push anything. */
    7744         pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
    7745         pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
    7746         return;
    7747     }
    7748 
    7749     fFsw &= ~X86_FSW_TOP_MASK;
    7750     fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
    7751     pFpuCtx->FSW = fFsw;
    7752 
    7753     iemFpuRotateStackPush(pFpuCtx);
    7754 }
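
The TOP arithmetic above (and in iemFpuMaybePopOne below) is plain modulo-8: adding 7 to the 3-bit TOP field subtracts 1, and adding 9 (after masking) adds 1. A small standalone self-check, assuming nothing beyond standard C:

#include <assert.h>

static void myCheckTopMath(void)
{
    for (unsigned iTop = 0; iTop < 8; iTop++)
    {
        assert(((iTop + 7) & 7) == (iTop + 8 - 1) % 8);  /* push: TOP - 1 (mod 8) */
        assert(((iTop + 9) & 7) == (iTop + 1) % 8);      /* pop:  TOP + 1 (mod 8) */
    }
}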
    7755 
    7756 
    7757 /**
    7758  * Stores a result in a FPU register and updates the FSW and FTW.
    7759  *
    7760  * @param   pFpuCtx             The FPU context.
    7761  * @param   pResult             The result to store.
    7762  * @param   iStReg              Which FPU register to store it in.
    7763  */
    7764 IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
    7765 {
    7766     Assert(iStReg < 8);
    7767     uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
    7768     pFpuCtx->FSW &= ~X86_FSW_C_MASK;
    7769     pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
    7770     pFpuCtx->FTW |= RT_BIT(iReg);
    7771     pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
    7772 }
    7773 
    7774 
    7775 /**
    7776  * Only updates the FPU status word (FSW) with the result of the current
    7777  * instruction.
    7778  *
    7779  * @param   pFpuCtx             The FPU context.
    7780  * @param   u16FSW              The FSW output of the current instruction.
    7781  */
    7782 IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
    7783 {
    7784     pFpuCtx->FSW &= ~X86_FSW_C_MASK;
    7785     pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
    7786 }
    7787 
    7788 
    7789 /**
    7790  * Pops one item off the FPU stack if no pending exception prevents it.
    7791  *
    7792  * @param   pFpuCtx             The FPU context.
    7793  */
    7794 IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
    7795 {
    7796     /* Check pending exceptions. */
    7797     uint16_t uFSW = pFpuCtx->FSW;
    7798     if (   (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
    7799         & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
    7800         return;
    7801 
    7802     /* TOP++. */
    7803     uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
    7804     uFSW &= ~X86_FSW_TOP_MASK;
    7805     uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
    7806     pFpuCtx->FSW = uFSW;
    7807 
    7808     /* Mark the previous ST0 as empty. */
    7809     iOldTop >>= X86_FSW_TOP_SHIFT;
    7810     pFpuCtx->FTW &= ~RT_BIT(iOldTop);
    7811 
    7812     /* Rotate the registers. */
    7813     iemFpuRotateStackPop(pFpuCtx);
    7814 }
    7815 
    7816 
    7817 /**
    7818  * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
    7819  *
    7820  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7821  * @param   pResult             The FPU operation result to push.
    7822  */
    7823 IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
    7824 {
    7825     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    7826     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    7827     iemFpuMaybePushResult(pResult, pFpuCtx);
    7828 }
    7829 
    7830 
    7831 /**
    7832  * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
    7833  * and sets FPUDP and FPUDS.
    7834  *
    7835  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7836  * @param   pResult             The FPU operation result to push.
    7837  * @param   iEffSeg             The effective segment register.
    7838  * @param   GCPtrEff            The effective address relative to @a iEffSeg.
    7839  */
    7840 IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
    7841 {
    7842     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    7843     iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    7844     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    7845     iemFpuMaybePushResult(pResult, pFpuCtx);
    7846 }
    7847 
    7848 
    7849 /**
    7850  * Replace ST0 with the first value and push the second onto the FPU stack,
    7851  * unless a pending exception prevents it.
    7852  *
    7853  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7854  * @param   pResult             The FPU operation result to store and push.
    7855  */
    7856 IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
    7857 {
    7858     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    7859     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    7860 
    7861     /* Update FSW and bail if there are pending exceptions afterwards. */
    7862     uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
    7863     fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
    7864     if (   (fFsw             & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
    7865         & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
    7866     {
    7867         pFpuCtx->FSW = fFsw;
    7868         return;
    7869     }
    7870 
    7871     uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
    7872     if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
    7873     {
    7874         /* All is fine, push the actual value. */
    7875         pFpuCtx->FTW |= RT_BIT(iNewTop);
    7876         pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
    7877         pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
    7878     }
    7879     else if (pFpuCtx->FCW & X86_FCW_IM)
    7880     {
    7881         /* Masked stack overflow, push QNaN. */
    7882         fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
    7883         iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
    7884         iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
    7885     }
    7886     else
    7887     {
    7888         /* Raise stack overflow, don't push anything. */
    7889         pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
    7890         pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
    7891         return;
    7892     }
    7893 
    7894     fFsw &= ~X86_FSW_TOP_MASK;
    7895     fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
    7896     pFpuCtx->FSW = fFsw;
    7897 
    7898     iemFpuRotateStackPush(pFpuCtx);
    7899 }
    7900 
    7901 
    7902 /**
    7903  * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
    7904  * FOP.
    7905  *
    7906  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7907  * @param   pResult             The result to store.
    7908  * @param   iStReg              Which FPU register to store it in.
    7909  */
    7910 IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
    7911 {
    7912     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    7913     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    7914     iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
    7915 }
    7916 
    7917 
    7918 /**
    7919  * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
    7920  * FOP, and then pops the stack.
    7921  *
    7922  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7923  * @param   pResult             The result to store.
    7924  * @param   iStReg              Which FPU register to store it in.
    7925  */
    7926 IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
    7927 {
    7928     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    7929     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    7930     iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
    7931     iemFpuMaybePopOne(pFpuCtx);
    7932 }
    7933 
    7934 
    7935 /**
    7936  * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
    7937  * FPUDP, and FPUDS.
    7938  *
    7939  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7940  * @param   pResult             The result to store.
    7941  * @param   iStReg              Which FPU register to store it in.
    7942  * @param   iEffSeg             The effective memory operand selector register.
    7943  * @param   GCPtrEff            The effective memory operand offset.
    7944  */
    7945 IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
    7946                                            uint8_t iEffSeg, RTGCPTR GCPtrEff)
    7947 {
    7948     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    7949     iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    7950     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    7951     iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
    7952 }
    7953 
    7954 
    7955 /**
    7956  * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
    7957  * FPUDP, and FPUDS, and then pops the stack.
    7958  *
    7959  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7960  * @param   pResult             The result to store.
    7961  * @param   iStReg              Which FPU register to store it in.
    7962  * @param   iEffSeg             The effective memory operand selector register.
    7963  * @param   GCPtrEff            The effective memory operand offset.
    7964  */
    7965 IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
    7966                                                   uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
    7967 {
    7968     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    7969     iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    7970     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    7971     iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
    7972     iemFpuMaybePopOne(pFpuCtx);
    7973 }
    7974 
    7975 
    7976 /**
    7977  * Updates the FOP, FPUIP, and FPUCS.  For FNOP.
    7978  *
    7979  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7980  */
    7981 IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
    7982 {
    7983     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    7984     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    7985 }
    7986 
    7987 
    7988 /**
    7989  * Marks the specified stack register as free (for FFREE).
    7990  *
    7991  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    7992  * @param   iStReg              The register to free.
    7993  */
    7994 IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
    7995 {
    7996     Assert(iStReg < 8);
    7997     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    7998     uint8_t     iReg    = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
    7999     pFpuCtx->FTW &= ~RT_BIT(iReg);
    8000 }
    8001 
    8002 
    8003 /**
    8004  * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
    8005  *
    8006  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8007  */
    8008 IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
    8009 {
    8010     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8011     uint16_t    uFsw    = pFpuCtx->FSW;
    8012     uint16_t    uTop    = uFsw & X86_FSW_TOP_MASK;
    8013     uTop  = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
    8014     uFsw &= ~X86_FSW_TOP_MASK;
    8015     uFsw |= uTop;
    8016     pFpuCtx->FSW = uFsw;
    8017 }
    8018 
    8019 
    8020 /**
    8021  * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
    8022  *
    8023  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8024  */
    8025 IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
    8026 {
    8027     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8028     uint16_t    uFsw    = pFpuCtx->FSW;
    8029     uint16_t    uTop    = uFsw & X86_FSW_TOP_MASK;
    8030     uTop  = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
    8031     uFsw &= ~X86_FSW_TOP_MASK;
    8032     uFsw |= uTop;
    8033     pFpuCtx->FSW = uFsw;
    8034 }
    8035 
    8036 
    8037 /**
    8038  * Updates the FSW, FOP, FPUIP, and FPUCS.
    8039  *
    8040  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8041  * @param   u16FSW              The FSW from the current instruction.
    8042  */
    8043 IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
    8044 {
    8045     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8046     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8047     iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
    8048 }
    8049 
    8050 
    8051 /**
    8052  * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
    8053  *
    8054  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8055  * @param   u16FSW              The FSW from the current instruction.
    8056  */
    8057 IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
    8058 {
    8059     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8060     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8061     iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
    8062     iemFpuMaybePopOne(pFpuCtx);
    8063 }
    8064 
    8065 
    8066 /**
    8067  * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
    8068  *
    8069  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8070  * @param   u16FSW              The FSW from the current instruction.
    8071  * @param   iEffSeg             The effective memory operand selector register.
    8072  * @param   GCPtrEff            The effective memory operand offset.
    8073  */
    8074 IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
    8075 {
    8076     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8077     iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    8078     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8079     iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
    8080 }
    8081 
    8082 
    8083 /**
    8084  * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
    8085  *
    8086  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8087  * @param   u16FSW              The FSW from the current instruction.
    8088  */
    8089 IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
    8090 {
    8091     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8092     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8093     iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
    8094     iemFpuMaybePopOne(pFpuCtx);
    8095     iemFpuMaybePopOne(pFpuCtx);
    8096 }
    8097 
    8098 
    8099 /**
    8100  * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
    8101  *
    8102  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8103  * @param   u16FSW              The FSW from the current instruction.
    8104  * @param   iEffSeg             The effective memory operand selector register.
    8105  * @param   GCPtrEff            The effective memory operand offset.
    8106  */
    8107 IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
    8108 {
    8109     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8110     iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    8111     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8112     iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
    8113     iemFpuMaybePopOne(pFpuCtx);
    8114 }
    8115 
    8116 
    8117 /**
    8118  * Worker routine for raising an FPU stack underflow exception.
    8119  *
    8120  * @param   pFpuCtx             The FPU context.
    8121  * @param   iStReg              The stack register being accessed.
    8122  */
    8123 IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
    8124 {
    8125     Assert(iStReg < 8 || iStReg == UINT8_MAX);
    8126     if (pFpuCtx->FCW & X86_FCW_IM)
    8127     {
    8128         /* Masked underflow. */
    8129         pFpuCtx->FSW &= ~X86_FSW_C_MASK;
    8130         pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
    8131         uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
    8132         if (iStReg != UINT8_MAX)
    8133         {
    8134             pFpuCtx->FTW |= RT_BIT(iReg);
    8135             iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
    8136         }
    8137     }
    8138     else
    8139     {
    8140         pFpuCtx->FSW &= ~X86_FSW_C_MASK;
    8141         pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
    8142     }
    8143 }
    8144 
    8145 
    8146 /**
    8147  * Raises a FPU stack underflow exception.
    8148  *
    8149  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8150  * @param   iStReg              The destination register that should be loaded
    8151  *                              with QNaN if \#IS is not masked. Specify
    8152  *                              UINT8_MAX if none (like for fcom).
    8153  */
    8154 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
    8155 {
    8156     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8157     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8158     iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
    8159 }
    8160 
    8161 
    8162 DECL_NO_INLINE(IEM_STATIC, void)
    8163 iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
    8164 {
    8165     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8166     iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    8167     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8168     iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
    8169 }
    8170 
    8171 
    8172 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
    8173 {
    8174     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8175     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8176     iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
    8177     iemFpuMaybePopOne(pFpuCtx);
    8178 }
    8179 
    8180 
    8181 DECL_NO_INLINE(IEM_STATIC, void)
    8182 iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
    8183 {
    8184     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8185     iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    8186     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8187     iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
    8188     iemFpuMaybePopOne(pFpuCtx);
    8189 }
    8190 
    8191 
    8192 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
    8193 {
    8194     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8195     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8196     iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
    8197     iemFpuMaybePopOne(pFpuCtx);
    8198     iemFpuMaybePopOne(pFpuCtx);
    8199 }
    8200 
    8201 
    8202 DECL_NO_INLINE(IEM_STATIC, void)
    8203 iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
    8204 {
    8205     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8206     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8207 
    8208     if (pFpuCtx->FCW & X86_FCW_IM)
    8209     {
    8210         /* Masked underflow - Push QNaN. */
    8211         uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
    8212         pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
    8213         pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
    8214         pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
    8215         pFpuCtx->FTW |= RT_BIT(iNewTop);
    8216         iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
    8217         iemFpuRotateStackPush(pFpuCtx);
    8218     }
    8219     else
    8220     {
    8221         /* Exception pending - don't change TOP or the register stack. */
    8222         pFpuCtx->FSW &= ~X86_FSW_C_MASK;
    8223         pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
    8224     }
    8225 }
    8226 
    8227 
    8228 DECL_NO_INLINE(IEM_STATIC, void)
    8229 iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
    8230 {
    8231     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8232     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8233 
    8234     if (pFpuCtx->FCW & X86_FCW_IM)
    8235     {
    8236         /* Masked underflow - Push QNaN. */
    8237         uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
    8238         pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
    8239         pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
    8240         pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
    8241         pFpuCtx->FTW |= RT_BIT(iNewTop);
    8242         iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
    8243         iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
    8244         iemFpuRotateStackPush(pFpuCtx);
    8245     }
    8246     else
    8247     {
    8248         /* Exception pending - don't change TOP or the register stack. */
    8249         pFpuCtx->FSW &= ~X86_FSW_C_MASK;
    8250         pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
    8251     }
    8252 }
    8253 
    8254 
    8255 /**
    8256  * Worker routine for raising an FPU stack overflow exception on a push.
    8257  *
    8258  * @param   pFpuCtx             The FPU context.
    8259  */
    8260 IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
    8261 {
    8262     if (pFpuCtx->FCW & X86_FCW_IM)
    8263     {
    8264         /* Masked overflow. */
    8265         uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
    8266         pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
    8267         pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
    8268         pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
    8269         pFpuCtx->FTW |= RT_BIT(iNewTop);
    8270         iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
    8271         iemFpuRotateStackPush(pFpuCtx);
    8272     }
    8273     else
    8274     {
    8275         /* Exception pending - don't change TOP or the register stack. */
    8276         pFpuCtx->FSW &= ~X86_FSW_C_MASK;
    8277         pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
    8278     }
    8279 }
    8280 
    8281 
    8282 /**
    8283  * Raises a FPU stack overflow exception on a push.
    8284  *
    8285  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8286  */
    8287 DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
    8288 {
    8289     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8290     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8291     iemFpuStackPushOverflowOnly(pFpuCtx);
    8292 }
    8293 
    8294 
    8295 /**
    8296  * Raises a FPU stack overflow exception on a push with a memory operand.
    8297  *
    8298  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8299  * @param   iEffSeg             The effective memory operand selector register.
    8300  * @param   GCPtrEff            The effective memory operand offset.
    8301  */
    8302 DECL_NO_INLINE(IEM_STATIC, void)
    8303 iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
    8304 {
    8305     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8306     iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
    8307     iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
    8308     iemFpuStackPushOverflowOnly(pFpuCtx);
    8309 }
    8310 
    8311 
    8312 IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
    8313 {
    8314     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8315     uint16_t    iReg    = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
    8316     if (pFpuCtx->FTW & RT_BIT(iReg))
    8317         return VINF_SUCCESS;
    8318     return VERR_NOT_FOUND;
    8319 }
    8320 
    8321 
    8322 IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
    8323 {
    8324     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8325     uint16_t    iReg    = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
    8326     if (pFpuCtx->FTW & RT_BIT(iReg))
    8327     {
    8328         *ppRef = &pFpuCtx->aRegs[iStReg].r80;
    8329         return VINF_SUCCESS;
    8330     }
    8331     return VERR_NOT_FOUND;
    8332 }
    8333 
    8334 
    8335 IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
    8336                                         uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
    8337 {
    8338     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8339     uint16_t    iTop    = X86_FSW_TOP_GET(pFpuCtx->FSW);
    8340     uint16_t    iReg0   = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
    8341     uint16_t    iReg1   = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
    8342     if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
    8343     {
    8344         *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
    8345         *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
    8346         return VINF_SUCCESS;
    8347     }
    8348     return VERR_NOT_FOUND;
    8349 }
    8350 
    8351 
    8352 IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
    8353 {
    8354     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    8355     uint16_t    iTop    = X86_FSW_TOP_GET(pFpuCtx->FSW);
    8356     uint16_t    iReg0   = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
    8357     uint16_t    iReg1   = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
    8358     if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
    8359     {
    8360         *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
    8361         return VINF_SUCCESS;
    8362     }
    8363     return VERR_NOT_FOUND;
    8364 }
    8365 
    8366 
    8367 /**
    8368  * Updates the FPU exception status after FCW is changed.
    8369  *
    8370  * @param   pFpuCtx             The FPU context.
    8371  */
    8372 IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
    8373 {
    8374     uint16_t u16Fsw = pFpuCtx->FSW;
    8375     if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
    8376         u16Fsw |= X86_FSW_ES | X86_FSW_B;
    8377     else
    8378         u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
    8379     pFpuCtx->FSW = u16Fsw;
    8380 }
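
An illustrative call sequence, using only the X86_FSW_*/X86_FCW_* constants already used throughout this file:

pFpuCtx->FSW |= X86_FSW_ZE;            /* a divide-by-zero exception is pending */
pFpuCtx->FCW &= ~X86_FCW_ZM;           /* ... and #Z is unmasked */
iemFpuRecalcExceptionStatus(pFpuCtx);  /* -> FSW.ES and FSW.B become set */

pFpuCtx->FCW |= X86_FCW_ZM;            /* guest masks #Z (e.g. via FLDCW) */
iemFpuRecalcExceptionStatus(pFpuCtx);  /* -> FSW.ES and FSW.B are cleared again */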
    8381 
    8382 
    8383 /**
    8384  * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
    8385  *
    8386  * @returns The full FTW.
    8387  * @param   pFpuCtx             The FPU context.
    8388  */
    8389 IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
    8390 {
    8391     uint8_t const   u8Ftw  = (uint8_t)pFpuCtx->FTW;
    8392     uint16_t        u16Ftw = 0;
    8393     unsigned const  iTop   = X86_FSW_TOP_GET(pFpuCtx->FSW);
    8394     for (unsigned iSt = 0; iSt < 8; iSt++)
    8395     {
    8396         unsigned const iReg = (iSt + iTop) & 7;
    8397         if (!(u8Ftw & RT_BIT(iReg)))
    8398             u16Ftw |= 3 << (iReg * 2); /* empty */
    8399         else
    8400         {
    8401             uint16_t uTag;
    8402             PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
    8403             if (pr80Reg->s.uExponent == 0x7fff)
    8404                 uTag = 2; /* Exponent is all 1's => Special. */
    8405             else if (pr80Reg->s.uExponent == 0x0000)
    8406             {
    8407                 if (pr80Reg->s.uMantissa == 0x0000)
    8408                     uTag = 1; /* All bits are zero => Zero. */
    8409                 else
    8410                     uTag = 2; /* Must be special. */
    8411             }
    8412             else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
    8413                 uTag = 0; /* Valid. */
    8414             else
    8415                 uTag = 2; /* Must be special. */
    8416 
    8417             u16Ftw |= uTag << (iReg * 2);
    8418         }
    8419     }
    8420 
    8421     return u16Ftw;
    8422 }
    8423 
    8424 
    8425 /**
    8426  * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
    8427  *
    8428  * @returns The compressed FTW.
    8429  * @param   u16FullFtw      The full FTW to convert.
    8430  */
    8431 IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
    8432 {
    8433     uint8_t u8Ftw = 0;
    8434     for (unsigned i = 0; i < 8; i++)
    8435     {
    8436         if ((u16FullFtw & 3) != 3 /*empty*/)
    8437             u8Ftw |= RT_BIT(i);
    8438         u16FullFtw >>= 2;
    8439     }
    8440 
    8441     return u8Ftw;
    8442 }
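
The conversion is lossy in one direction only; a sketch of the roundtrip as the FNSAVE/FRSTOR paths would use it:

/* Expand direction (FNSAVE/FNSTENV): classify each occupied register. */
uint16_t u16FullFtw = iemFpuCalcFullFtw(pFpuCtx);

/* Compress direction (FRSTOR/FLDENV): only empty vs. not-empty survives;
   the valid/zero/special distinction is recomputed from the register
   contents the next time the full form is needed. */
pFpuCtx->FTW = iemFpuCompressFtw(u16FullFtw);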
    8443 
    8444 /** @}  */
    8445 
    8446 
    8447 /** @name   Memory access.
    8448  *
    8449  * @{
    8450  */
    8451 
    8452 
    8453 /**
    8454  * Updates the IEMCPU::cbWritten counter if applicable.
    8455  *
    8456  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8457  * @param   fAccess             The access being accounted for.
    8458  * @param   cbMem               The access size.
    8459  */
    8460 DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
    8461 {
    8462     if (   (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
    8463         || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
    8464         pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
    8465 }
    8466 
    8467 
    8468 /**
    8469  * Checks if the given segment can be written to, raising the appropriate
    8470  * exception if not.
    8471  *
    8472  * @returns VBox strict status code.
    8473  *
    8474  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8475  * @param   pHid                Pointer to the hidden register.
    8476  * @param   iSegReg             The register number.
    8477  * @param   pu64BaseAddr        Where to return the base address to use for the
    8478  *                              segment. (In 64-bit code it may differ from the
    8479  *                              base in the hidden segment.)
    8480  */
    8481 IEM_STATIC VBOXSTRICTRC
    8482 iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
    8483 {
    8484     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    8485 
    8486     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    8487         *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
    8488     else
    8489     {
    8490         if (!pHid->Attr.n.u1Present)
    8491         {
    8492             uint16_t    uSel = iemSRegFetchU16(pVCpu, iSegReg);
    8493             AssertRelease(uSel == 0);
    8494             Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
    8495             return iemRaiseGeneralProtectionFault0(pVCpu);
    8496         }
    8497 
    8498         if (   (   (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
    8499                 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
    8500             &&  pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
    8501             return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
    8502         *pu64BaseAddr = pHid->u64Base;
    8503     }
    8504     return VINF_SUCCESS;
    8505 }
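
A hypothetical caller fragment (the string instruction implementations follow roughly this pattern) showing how the returned base address is meant to be used:

uint64_t     uBaseAddr;
VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pVCpu, iemSRegGetHid(pVCpu, X86_SREG_ES),
                                                    X86_SREG_ES, &uBaseAddr);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;
/* ... subsequent stores in the loop target uBaseAddr + rDI ... */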
    8506 
    8507 
    8508 /**
    8509  * Checks if the given segment can be read from, raising the appropriate
    8510  * exception if not.
    8511  *
    8512  * @returns VBox strict status code.
    8513  *
    8514  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8515  * @param   pHid                Pointer to the hidden register.
    8516  * @param   iSegReg             The register number.
    8517  * @param   pu64BaseAddr        Where to return the base address to use for the
    8518  *                              segment. (In 64-bit code it may differ from the
    8519  *                              base in the hidden segment.)
    8520  */
    8521 IEM_STATIC VBOXSTRICTRC
    8522 iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
    8523 {
    8524     IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    8525 
    8526     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    8527         *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
    8528     else
    8529     {
    8530         if (!pHid->Attr.n.u1Present)
    8531         {
    8532             uint16_t    uSel = iemSRegFetchU16(pVCpu, iSegReg);
    8533             AssertRelease(uSel == 0);
    8534             Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
    8535             return iemRaiseGeneralProtectionFault0(pVCpu);
    8536         }
    8537 
    8538         if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
    8539             return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
    8540         *pu64BaseAddr = pHid->u64Base;
    8541     }
    8542     return VINF_SUCCESS;
    8543 }
    8544 
    8545 
    8546 /**
    8547  * Applies the segment limit, base and attributes.
    8548  *
    8549  * This may raise a \#GP or \#SS.
    8550  *
    8551  * @returns VBox strict status code.
    8552  *
    8553  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8554  * @param   fAccess             The kind of access which is being performed.
    8555  * @param   iSegReg             The index of the segment register to apply.
    8556  *                              This is UINT8_MAX if none (for IDT, GDT, LDT,
    8557  *                              TSS, ++).
    8558  * @param   cbMem               The access size.
    8559  * @param   pGCPtrMem           Pointer to the guest memory address to apply
    8560  *                              segmentation to.  Input and output parameter.
    8561  */
    8562 IEM_STATIC VBOXSTRICTRC
    8563 iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
    8564 {
    8565     if (iSegReg == UINT8_MAX)
    8566         return VINF_SUCCESS;
    8567 
    8568     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    8569     PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
    8570     switch (pVCpu->iem.s.enmCpuMode)
    8571     {
    8572         case IEMMODE_16BIT:
    8573         case IEMMODE_32BIT:
    8574         {
    8575             RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
    8576             RTGCPTR32 GCPtrLast32  = GCPtrFirst32 + (uint32_t)cbMem - 1;
    8577 
    8578             if (   pSel->Attr.n.u1Present
    8579                 && !pSel->Attr.n.u1Unusable)
    8580             {
    8581                 Assert(pSel->Attr.n.u1DescType);
    8582                 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
    8583                 {
    8584                     if (   (fAccess & IEM_ACCESS_TYPE_WRITE)
    8585                         && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
    8586                         return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
    8587 
    8588                     if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
    8589                     {
    8590                         /** @todo CPL check. */
    8591                     }
    8592 
    8593                     /*
    8594                      * There are two kinds of data selectors, normal and expand down.
    8595                      */
    8596                     if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
    8597                     {
    8598                         if (   GCPtrFirst32 > pSel->u32Limit
    8599                             || GCPtrLast32  > pSel->u32Limit) /* yes, in real mode too (since 80286). */
    8600                             return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
    8601                     }
    8602                     else
    8603                     {
    8604                        /*
    8605                         * The upper boundary is defined by the B bit, not the G bit!
    8606                         */
    8607                        if (   GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
    8608                            || GCPtrLast32  > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
    8609                           return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
    8610                     }
    8611                     *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
    8612                 }
    8613                 else
    8614                 {
    8615 
    8616                     /*
    8617                      * Code selector and usually be used to read thru, writing is
    8618                      * Code selectors can usually be used to read through; writing is
    8619                      * only permitted in real and V8086 mode.
    8620                     if (   (   (fAccess & IEM_ACCESS_TYPE_WRITE)
    8621                             || (   (fAccess & IEM_ACCESS_TYPE_READ)
    8622                                && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
    8623                         && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
    8624                         return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
    8625 
    8626                     if (   GCPtrFirst32 > pSel->u32Limit
    8627                         || GCPtrLast32  > pSel->u32Limit) /* yes, in real mode too (since 80286). */
    8628                         return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
    8629 
    8630                     if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
    8631                     {
    8632                         /** @todo CPL check. */
    8633                     }
    8634 
    8635                     *pGCPtrMem  = GCPtrFirst32 += (uint32_t)pSel->u64Base;
    8636                 }
    8637             }
    8638             else
    8639                 return iemRaiseGeneralProtectionFault0(pVCpu);
    8640             return VINF_SUCCESS;
    8641         }
    8642 
    8643         case IEMMODE_64BIT:
    8644         {
    8645             RTGCPTR GCPtrMem = *pGCPtrMem;
    8646             if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
    8647                 *pGCPtrMem = GCPtrMem + pSel->u64Base;
    8648 
    8649             Assert(cbMem >= 1);
    8650             if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
    8651                 return VINF_SUCCESS;
    8652             /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
    8653              *        4.12.2 "Data Limit Checks in 64-bit Mode". */
    8654             return iemRaiseGeneralProtectionFault0(pVCpu);
    8655         }
    8656 
    8657         default:
    8658             AssertFailedReturn(VERR_IEM_IPE_7);
    8659     }
    8660 }
    8661 
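/*
 * A minimal standalone sketch of the expand-up vs. expand-down limit checks
 * done above, since the two cases are easy to get backwards.  The helper name
 * and the flat uint32_t inputs are assumptions made for illustration only;
 * they are not part of the IEM interfaces.
 */
static bool exampleDataSegRangeOk(uint32_t uFirst, uint32_t uLast,
                                  uint32_t u32Limit, bool fExpandDown, bool fDefBig)
{
    if (!fExpandDown)
        return uFirst <= u32Limit       /* expand-up: both ends must lie */
            && uLast  <= u32Limit;      /* at or below the limit. */
    /* Expand-down: the limit is a *lower* bound and the upper bound comes
       from the B bit (4 GiB - 1 when set, 64 KiB - 1 when clear). */
    return uFirst >  u32Limit
        && uLast  <= (fDefBig ? UINT32_MAX : UINT32_C(0xffff));
}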
    8662 
    8663 /**
    8664  * Translates a virtual address to a physical address and checks if we can
    8665  * access the page as specified.
    8666  *
    8667  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8668  * @param   GCPtrMem            The virtual address.
    8669  * @param   fAccess             The intended access.
    8670  * @param   pGCPhysMem          Where to return the physical address.
    8671  */
    8672 IEM_STATIC VBOXSTRICTRC
    8673 iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
    8674 {
    8675     /** @todo Need a different PGM interface here.  We're currently using
    8676      *        generic / REM interfaces. this won't cut it for R0. */
    8677     /** @todo If/when PGM handles paged real-mode, we can remove the hack in
    8678      *        iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
    8679      *        here. */
    8680     PGMPTWALK Walk;
    8681     int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
    8682     if (RT_FAILURE(rc))
    8683     {
    8684         Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
    8685         /** @todo Check unassigned memory in unpaged mode. */
    8686         /** @todo Reserved bits in page tables. Requires new PGM interface. */
    8687 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    8688         if (Walk.fFailed & PGM_WALKFAIL_EPT)
    8689             IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
    8690 #endif
    8691         *pGCPhysMem = NIL_RTGCPHYS;
    8692         return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
    8693     }
    8694 
    8695     /* If the page is writable and does not have the no-exec bit set, all
    8696        access is allowed.  Otherwise we'll have to check more carefully... */
    8697     if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
    8698     {
    8699         /* Write to read only memory? */
    8700         if (   (fAccess & IEM_ACCESS_TYPE_WRITE)
    8701             && !(Walk.fEffective & X86_PTE_RW)
    8702             && (   (    pVCpu->iem.s.uCpl == 3
    8703                     && !(fAccess & IEM_ACCESS_WHAT_SYS))
    8704                 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
    8705         {
    8706             Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
    8707             *pGCPhysMem = NIL_RTGCPHYS;
    8708 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    8709             if (Walk.fFailed & PGM_WALKFAIL_EPT)
    8710                 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    8711 #endif
    8712             return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
    8713         }
    8714 
    8715         /* Kernel memory accessed by userland? */
    8716         if (   !(Walk.fEffective & X86_PTE_US)
    8717             && pVCpu->iem.s.uCpl == 3
    8718             && !(fAccess & IEM_ACCESS_WHAT_SYS))
    8719         {
    8720             Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
    8721             *pGCPhysMem = NIL_RTGCPHYS;
    8722 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    8723             if (Walk.fFailed & PGM_WALKFAIL_EPT)
    8724                 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    8725 #endif
    8726             return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
    8727         }
    8728 
    8729         /* Executing non-executable memory? */
    8730         if (   (fAccess & IEM_ACCESS_TYPE_EXEC)
    8731             && (Walk.fEffective & X86_PTE_PAE_NX)
    8732             && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
    8733         {
    8734             Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
    8735             *pGCPhysMem = NIL_RTGCPHYS;
    8736 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    8737             if (Walk.fFailed & PGM_WALKFAIL_EPT)
    8738                 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
    8739 #endif
    8740             return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
    8741                                      VERR_ACCESS_DENIED);
    8742         }
    8743     }
    8744 
    8745     /*
    8746      * Set the dirty / access flags.
    8747      * ASSUMES this is set when the address is translated rather than on commit...
    8748      */
    8749     /** @todo testcase: check when A and D bits are actually set by the CPU.  */
    8750     uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
    8751     if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
    8752     {
    8753         int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
    8754         AssertRC(rc2);
    8755         /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
    8756         Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
    8757     }
    8758 
    8759     RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
    8760     *pGCPhysMem = GCPhys;
    8761     return VINF_SUCCESS;
    8762 }
    8763 
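/*
 * A condensed sketch of the permission logic above.  The fast path rests on
 * one observation: a page with RW=1, US=1 and NX=0 can satisfy any access,
 * so only other pages need the three #PF checks.  The helper name and the
 * flat bool inputs are invented for illustration; fUserAccess stands for
 * (uCpl == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS)).
 */
static bool examplePageAccessOk(bool fWrite, bool fExec, bool fUserAccess,
                                bool fPteRW, bool fPteUS, bool fPteNX,
                                bool fCr0Wp, bool fEferNxe)
{
    if (fWrite && !fPteRW && (fUserAccess || fCr0Wp))
        return false;   /* write to a read-only page -> #PF */
    if (fUserAccess && !fPteUS)
        return false;   /* user-mode access to a supervisor page -> #PF */
    if (fExec && fPteNX && fEferNxe)
        return false;   /* instruction fetch from a no-execute page -> #PF */
    return true;        /* A/D bit maintenance follows in the real code. */
}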
    8764 
    8765 
    8766 /**
    8767  * Maps a physical page.
    8768  *
    8769  * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
    8770  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8771  * @param   GCPhysMem           The physical address.
    8772  * @param   fAccess             The intended access.
    8773  * @param   ppvMem              Where to return the mapping address.
    8774  * @param   pLock               The PGM lock.
    8775  */
    8776 IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
    8777 {
    8778 #ifdef IEM_LOG_MEMORY_WRITES
    8779     if (fAccess & IEM_ACCESS_TYPE_WRITE)
    8780         return VERR_PGM_PHYS_TLB_CATCH_ALL;
    8781 #endif
    8782 
    8783     /** @todo This API may require some improving later.  A private deal with PGM
    8784      *        regarding locking and unlocking needs to be struck.  A couple of TLBs
    8785      *        living in PGM, but with publicly accessible inlined access methods
    8786      *        could perhaps be an even better solution. */
    8787     int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
    8788                                   GCPhysMem,
    8789                                   RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
    8790                                   pVCpu->iem.s.fBypassHandlers,
    8791                                   ppvMem,
    8792                                   pLock);
    8793     /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
    8794     AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
    8795 
    8796     return rc;
    8797 }
    8798 
    8799 
    8800 /**
    8801  * Unmap a page previously mapped by iemMemPageMap.
    8802  *
    8803  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8804  * @param   GCPhysMem           The physical address.
    8805  * @param   fAccess             The intended access.
    8806  * @param   pvMem               What iemMemPageMap returned.
    8807  * @param   pLock               The PGM lock.
    8808  */
    8809 DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
    8810 {
    8811     NOREF(pVCpu);
    8812     NOREF(GCPhysMem);
    8813     NOREF(fAccess);
    8814     NOREF(pvMem);
    8815     PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
    8816 }
    8817 
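/*
 * A minimal sketch of how the two helpers above are meant to be paired: the
 * lock keeps PGM from remapping the physical page while it is being accessed.
 * The helper name is invented for illustration and the read access type is an
 * assumption.
 */
static int exampleReadPhysByte(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pbDst)
{
    void           *pvMem;
    PGMPAGEMAPLOCK  Lock;
    int rc = iemMemPageMap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, &pvMem, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbDst = *(uint8_t const *)pvMem;   /* access only while the lock is held */
        iemMemPageUnmap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, pvMem, &Lock);
    }
    return rc;
}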
    8818 
    8819 /**
    8820  * Looks up a memory mapping entry.
    8821  *
    8822  * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
    8823  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    8824  * @param   pvMem           The memory address.
    8825  * @param   fAccess         The access type and origin flags to match.
    8826  */
    8827 DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
    8828 {
    8829     Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
    8830     fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
    8831     if (   pVCpu->iem.s.aMemMappings[0].pv == pvMem
    8832         && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    8833         return 0;
    8834     if (   pVCpu->iem.s.aMemMappings[1].pv == pvMem
    8835         && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    8836         return 1;
    8837     if (   pVCpu->iem.s.aMemMappings[2].pv == pvMem
    8838         && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    8839         return 2;
    8840     return VERR_NOT_FOUND;
    8841 }
    8842 
    8843 
    8844 /**
    8845  * Finds a free memory mapping entry when using iNextMapping doesn't work.
    8846  *
    8847  * @returns Memory mapping index, 1024 on failure.
    8848  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    8849  */
    8850 IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
    8851 {
    8852     /*
    8853      * The easy case.
    8854      */
    8855     if (pVCpu->iem.s.cActiveMappings == 0)
    8856     {
    8857         pVCpu->iem.s.iNextMapping = 1;
    8858         return 0;
    8859     }
    8860 
    8861     /* There should be enough mappings for all instructions. */
    8862     AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
    8863 
    8864     for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
    8865         if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
    8866             return i;
    8867 
    8868     AssertFailedReturn(1024);
    8869 }
    8870 
    8871 
    8872 /**
    8873  * Commits a bounce buffer that needs writing back and unmaps it.
    8874  *
    8875  * @returns Strict VBox status code.
    8876  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    8877  * @param   iMemMap         The index of the buffer to commit.
    8878  * @param   fPostponeFail   Whether we can postpone write failures to ring-3.
    8879  *                          Always false in ring-3, obviously.
    8880  */
    8881 IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
    8882 {
    8883     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    8884     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    8885 #ifdef IN_RING3
    8886     Assert(!fPostponeFail);
    8887     RT_NOREF_PV(fPostponeFail);
    8888 #endif
    8889 
    8890     /*
    8891      * Do the writing.
    8892      */
    8893     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    8894     if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
    8895     {
    8896         uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    8897         uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    8898         uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    8899         if (!pVCpu->iem.s.fBypassHandlers)
    8900         {
    8901             /*
    8902              * Carefully and efficiently dealing with access handler return
    8903              * codes makes this a little bloated.
    8904              */
    8905             VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
    8906                                                  pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    8907                                                  pbBuf,
    8908                                                  cbFirst,
    8909                                                  PGMACCESSORIGIN_IEM);
    8910             if (rcStrict == VINF_SUCCESS)
    8911             {
    8912                 if (cbSecond)
    8913                 {
    8914                     rcStrict = PGMPhysWrite(pVM,
    8915                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    8916                                             pbBuf + cbFirst,
    8917                                             cbSecond,
    8918                                             PGMACCESSORIGIN_IEM);
    8919                     if (rcStrict == VINF_SUCCESS)
    8920                     { /* nothing */ }
    8921                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    8922                     {
    8923                         Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
    8924                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    8925                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    8926                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    8927                     }
    8928 #ifndef IN_RING3
    8929                     else if (fPostponeFail)
    8930                     {
    8931                         Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    8932                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    8933                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    8934                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    8935                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    8936                         return iemSetPassUpStatus(pVCpu, rcStrict);
    8937                     }
    8938 #endif
    8939                     else
    8940                     {
    8941                         Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    8942                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    8943                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    8944                         return rcStrict;
    8945                     }
    8946                 }
    8947             }
    8948             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    8949             {
    8950                 if (!cbSecond)
    8951                 {
    8952                     Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
    8953                          pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    8954                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    8955                 }
    8956                 else
    8957                 {
    8958                     VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
    8959                                                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    8960                                                           pbBuf + cbFirst,
    8961                                                           cbSecond,
    8962                                                           PGMACCESSORIGIN_IEM);
    8963                     if (rcStrict2 == VINF_SUCCESS)
    8964                     {
    8965                         Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
    8966                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    8967                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    8968                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    8969                     }
    8970                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    8971                     {
    8972                         Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
    8973                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    8974                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    8975                         PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    8976                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    8977                     }
    8978 #ifndef IN_RING3
    8979                     else if (fPostponeFail)
    8980                     {
    8981                         Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    8982                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    8983                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    8984                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    8985                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    8986                         return iemSetPassUpStatus(pVCpu, rcStrict);
    8987                     }
    8988 #endif
    8989                     else
    8990                     {
    8991                         Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    8992                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    8993                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    8994                         return rcStrict2;
    8995                     }
    8996                 }
    8997             }
    8998 #ifndef IN_RING3
    8999             else if (fPostponeFail)
    9000             {
    9001                 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    9002                      pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    9003                      pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    9004                 if (!cbSecond)
    9005                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
    9006                 else
    9007                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
    9008                 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    9009                 return iemSetPassUpStatus(pVCpu, rcStrict);
    9010             }
    9011 #endif
    9012             else
    9013             {
    9014                 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    9015                      pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    9016                      pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    9017                 return rcStrict;
    9018             }
    9019         }
    9020         else
    9021         {
    9022             /*
    9023              * No access handlers, much simpler.
    9024              */
    9025             int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
    9026             if (RT_SUCCESS(rc))
    9027             {
    9028                 if (cbSecond)
    9029                 {
    9030                     rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
    9031                     if (RT_SUCCESS(rc))
    9032                     { /* likely */ }
    9033                     else
    9034                     {
    9035                         Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    9036                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    9037                              pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
    9038                         return rc;
    9039                     }
    9040                 }
    9041             }
    9042             else
    9043             {
    9044                 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    9045                      pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
    9046                      pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    9047                 return rc;
    9048             }
    9049         }
    9050     }
    9051 
    9052 #if defined(IEM_LOG_MEMORY_WRITES)
    9053     Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    9054          RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
    9055     if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
    9056         Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    9057              RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
    9058              &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
    9059 
    9060     size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    9061     g_cbIemWrote = cbWrote;
    9062     memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
    9063 #endif
    9064 
    9065     /*
    9066      * Free the mapping entry.
    9067      */
    9068     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    9069     Assert(pVCpu->iem.s.cActiveMappings != 0);
    9070     pVCpu->iem.s.cActiveMappings--;
    9071     return VINF_SUCCESS;
    9072 }
    9073 
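/*
 * Stripped of the status-code plumbing, the commit above has this simple
 * two-part shape: write the first chunk, then the optional second chunk of a
 * page-crossing access.  All names are invented for illustration; pfnWrite
 * stands in for PGMPhysWrite / PGMPhysSimpleWriteGCPhys.
 */
static int exampleCommitBounceBuffer(const uint8_t *pbBuf,
                                     RTGCPHYS GCPhysFirst, uint16_t cbFirst,
                                     RTGCPHYS GCPhysSecond, uint16_t cbSecond,
                                     int (*pfnWrite)(RTGCPHYS, const void *, size_t))
{
    int rc = pfnWrite(GCPhysFirst, pbBuf, cbFirst);             /* first page part */
    if (rc == VINF_SUCCESS && cbSecond)
        rc = pfnWrite(GCPhysSecond, pbBuf + cbFirst, cbSecond); /* second page part */
    return rc;                                  /* caller then frees the map entry */
}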
    9074 
    9075 /**
    9076  * iemMemMap worker that deals with a request crossing pages.
    9077  */
    9078 IEM_STATIC VBOXSTRICTRC
    9079 iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
    9080 {
    9081     /*
    9082      * Do the address translations.
    9083      */
    9084     RTGCPHYS GCPhysFirst;
    9085     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
    9086     if (rcStrict != VINF_SUCCESS)
    9087         return rcStrict;
    9088 
    9089     RTGCPHYS GCPhysSecond;
    9090     rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
    9091                                                  fAccess, &GCPhysSecond);
    9092     if (rcStrict != VINF_SUCCESS)
    9093         return rcStrict;
    9094     GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    9095 
    9096     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    9097 
    9098     /*
    9099      * Read in the current memory content if it's a read, execute or partial
    9100      * write access.
    9101      */
    9102     uint8_t        *pbBuf        = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    9103     uint32_t const  cbFirstPage  = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
    9104     uint32_t const  cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
    9105 
    9106     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    9107     {
    9108         if (!pVCpu->iem.s.fBypassHandlers)
    9109         {
    9110             /*
    9111              * Must carefully deal with access handler status codes here,
    9112              * which makes the code a bit bloated.
    9113              */
    9114             rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
    9115             if (rcStrict == VINF_SUCCESS)
    9116             {
    9117                 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    9118                 if (rcStrict == VINF_SUCCESS)
    9119                 { /*likely */ }
    9120                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    9121                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    9122                 else
    9123                 {
    9124                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
    9125                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    9126                     return rcStrict;
    9127                 }
    9128             }
    9129             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    9130             {
    9131                 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    9132                 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    9133                 {
    9134                     PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    9135                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    9136                 }
    9137                 else
    9138                 {
    9139                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
    9140                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
    9141                     return rcStrict2;
    9142                 }
    9143             }
    9144             else
    9145             {
    9146                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    9147                      GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    9148                 return rcStrict;
    9149             }
    9150         }
    9151         else
    9152         {
    9153             /*
    9154              * No informational status codes here, much more straightforward.
    9155              */
    9156             int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
    9157             if (RT_SUCCESS(rc))
    9158             {
    9159                 Assert(rc == VINF_SUCCESS);
    9160                 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
    9161                 if (RT_SUCCESS(rc))
    9162                     Assert(rc == VINF_SUCCESS);
    9163                 else
    9164                 {
    9165                     Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
    9166                     return rc;
    9167                 }
    9168             }
    9169             else
    9170             {
    9171                 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
    9172                 return rc;
    9173             }
    9174         }
    9175     }
    9176 #ifdef VBOX_STRICT
    9177     else
    9178         memset(pbBuf, 0xcc, cbMem);
    9179     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    9180         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    9181 #endif
    9182 
    9183     /*
    9184      * Commit the bounce buffer entry.
    9185      */
    9186     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    9187     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = GCPhysSecond;
    9188     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbFirstPage;
    9189     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = (uint16_t)cbSecondPage;
    9190     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = false;
    9191     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    9192     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    9193     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    9194     pVCpu->iem.s.cActiveMappings++;
    9195 
    9196     iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
    9197     *ppvMem = pbBuf;
    9198     return VINF_SUCCESS;
    9199 }
    9200 
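/*
 * The split sizes used above follow directly from the page geometry.  A
 * minimal sketch, with invented names, assuming the access really does cross
 * the page boundary (the caller has already checked that).
 */
static void exampleSplitCrossPage(RTGCPTR GCPtr, uint32_t cbMem,
                                  uint32_t *pcbFirst, uint32_t *pcbSecond)
{
    *pcbFirst  = GUEST_PAGE_SIZE - (uint32_t)(GCPtr & GUEST_PAGE_OFFSET_MASK);
    *pcbSecond = cbMem - *pcbFirst;
    /* E.g. with 4 KiB pages, an 8 byte access at offset 0xffa splits into
       6 bytes on the first page (0x1000 - 0xffa) and 2 on the second. */
}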
    9201 
    9202 /**
    9203  * iemMemMap worker that deals with iemMemPageMap failures.
    9204  */
    9205 IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
    9206                                                   RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
    9207 {
    9208     /*
    9209      * Filter out conditions we can handle and the ones which shouldn't happen.
    9210      */
    9211     if (   rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
    9212         && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
    9213         && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
    9214     {
    9215         AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
    9216         return rcMap;
    9217     }
    9218     pVCpu->iem.s.cPotentialExits++;
    9219 
    9220     /*
    9221      * Read in the current memory content if it's a read, execute or partial
    9222      * write access.
    9223      */
    9224     uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    9225     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    9226     {
    9227         if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
    9228             memset(pbBuf, 0xff, cbMem);
    9229         else
    9230         {
    9231             int rc;
    9232             if (!pVCpu->iem.s.fBypassHandlers)
    9233             {
    9234                 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
    9235                 if (rcStrict == VINF_SUCCESS)
    9236                 { /* nothing */ }
    9237                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    9238                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    9239                 else
    9240                 {
    9241                     Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    9242                          GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    9243                     return rcStrict;
    9244                 }
    9245             }
    9246             else
    9247             {
    9248                 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
    9249                 if (RT_SUCCESS(rc))
    9250                 { /* likely */ }
    9251                 else
    9252                 {
    9253                     Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
    9254                          GCPhysFirst, rc));
    9255                     return rc;
    9256                 }
    9257             }
    9258         }
    9259     }
    9260 #ifdef VBOX_STRICT
    9261     else
    9262         memset(pbBuf, 0xcc, cbMem);
    9263 #endif
    9264 #ifdef VBOX_STRICT
    9265     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    9266         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    9267 #endif
    9268 
    9269     /*
    9270      * Commit the bounce buffer entry.
    9271      */
    9272     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    9273     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = NIL_RTGCPHYS;
    9274     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbMem;
    9275     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = 0;
    9276     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
    9277     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    9278     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    9279     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    9280     pVCpu->iem.s.cActiveMappings++;
    9281 
    9282     iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
    9283     *ppvMem = pbBuf;
    9284     return VINF_SUCCESS;
    9285 }
    9286 
    9287 
    9288 
    9289 /**
    9290  * Maps the specified guest memory for the given kind of access.
    9291  *
    9292  * This may be using bounce buffering of the memory if it's crossing a page
    9293  * boundary or if there is an access handler installed for any of it.  Because
    9294  * of lock prefix guarantees, we're in for some extra clutter when this
    9295  * happens.
    9296  *
    9297  * This may raise a \#GP, \#SS, \#PF or \#AC.
    9298  *
    9299  * @returns VBox strict status code.
    9300  *
    9301  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9302  * @param   ppvMem              Where to return the pointer to the mapped
    9303  *                              memory.
    9304  * @param   cbMem               The number of bytes to map.  This is usually 1,
    9305  *                              2, 4, 6, 8, 12, 16, 32 or 512.  When used by
    9306  *                              string operations it can be up to a page.
    9307  * @param   iSegReg             The index of the segment register to use for
    9308  *                              this access.  The base and limits are checked.
    9309  *                              Use UINT8_MAX to indicate that no segmentation
    9310  *                              is required (for IDT, GDT and LDT accesses).
    9311  * @param   GCPtrMem            The address of the guest memory.
    9312  * @param   fAccess             How the memory is being accessed.  The
    9313  *                              IEM_ACCESS_TYPE_XXX bit is used to figure out
    9314  *                              how to map the memory, while the
    9315  *                              IEM_ACCESS_WHAT_XXX bit is used when raising
    9316  *                              exceptions.
    9317  */
    9318 IEM_STATIC VBOXSTRICTRC
    9319 iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
    9320 {
    9321     /*
    9322      * Check the input and figure out which mapping entry to use.
    9323      */
    9324     Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
    9325     Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
    9326     Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
    9327 
    9328     unsigned iMemMap = pVCpu->iem.s.iNextMapping;
    9329     if (   iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    9330         || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
    9331     {
    9332         iMemMap = iemMemMapFindFree(pVCpu);
    9333         AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
    9334                               ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
    9335                                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
    9336                                pVCpu->iem.s.aMemMappings[2].fAccess),
    9337                               VERR_IEM_IPE_9);
    9338     }
    9339 
    9340     /*
    9341      * Map the memory, checking that we can actually access it.  If something
    9342      * slightly complicated happens, fall back on bounce buffering.
    9343      */
    9344     VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
    9345     if (rcStrict != VINF_SUCCESS)
    9346         return rcStrict;
    9347 
    9348     if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem > GUEST_PAGE_SIZE) /* Crossing a page boundary? */
    9349         return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
    9350 
    9351     RTGCPHYS GCPhysFirst;
    9352     rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
    9353     if (rcStrict != VINF_SUCCESS)
    9354         return rcStrict;
    9355 
    9356     if (fAccess & IEM_ACCESS_TYPE_WRITE)
    9357         Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
    9358     if (fAccess & IEM_ACCESS_TYPE_READ)
    9359         Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
    9360 
    9361     void *pvMem;
    9362     rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    9363     if (rcStrict != VINF_SUCCESS)
    9364         return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
    9365 
    9366     /*
    9367      * Fill in the mapping table entry.
    9368      */
    9369     pVCpu->iem.s.aMemMappings[iMemMap].pv      = pvMem;
    9370     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
    9371     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    9372     pVCpu->iem.s.cActiveMappings++;
    9373 
    9374     iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
    9375     *ppvMem = pvMem;
    9376 
    9377     return VINF_SUCCESS;
    9378 }
    9379 
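/*
 * A sketch of the canonical map/access/commit sequence the fetch and store
 * helpers below are built on.  The helper name is invented for illustration,
 * and IEM_ACCESS_DATA_W is assumed to be the data-write counterpart of the
 * IEM_ACCESS_DATA_R flag used by the fetch helpers.
 */
static VBOXSTRICTRC exampleStoreU32(PVMCPUCC pVCpu, uint8_t iSegReg,
                                    RTGCPTR GCPtrMem, uint32_t u32Value)
{
    uint32_t    *pu32Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
                                      iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;    /* hits the direct mapping or the bounce buffer */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
    }
    return rcStrict;
}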
    9380 
    9381 /**
    9382  * Commits the guest memory if bounce buffered and unmaps it.
    9383  *
    9384  * @returns Strict VBox status code.
    9385  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9386  * @param   pvMem               The mapping.
    9387  * @param   fAccess             The kind of access.
    9388  */
    9389 IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
    9390 {
    9391     int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
    9392     AssertReturn(iMemMap >= 0, iMemMap);
    9393 
    9394     /* If it's bounce buffered, we may need to write back the buffer. */
    9395     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    9396     {
    9397         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    9398             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    9399     }
    9400     /* Otherwise unlock it. */
    9401     else
    9402         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    9403 
    9404     /* Free the entry. */
    9405     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    9406     Assert(pVCpu->iem.s.cActiveMappings != 0);
    9407     pVCpu->iem.s.cActiveMappings--;
    9408     return VINF_SUCCESS;
    9409 }
    9410 
    9411 #ifdef IEM_WITH_SETJMP
    9412 
    9413 /**
    9414  * Maps the specified guest memory for the given kind of access, longjmp on
    9415  * error.
    9416  *
    9417  * This may be using bounce buffering of the memory if it's crossing a page
    9418  * boundary or if there is an access handler installed for any of it.  Because
    9419  * of lock prefix guarantees, we're in for some extra clutter when this
    9420  * happens.
    9421  *
    9422  * This may raise a \#GP, \#SS, \#PF or \#AC.
    9423  *
    9424  * @returns Pointer to the mapped memory.
    9425  *
    9426  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9427  * @param   cbMem               The number of bytes to map.  This is usually 1,
    9428  *                              2, 4, 6, 8, 12, 16, 32 or 512.  When used by
    9429  *                              string operations it can be up to a page.
    9430  * @param   iSegReg             The index of the segment register to use for
    9431  *                              this access.  The base and limits are checked.
    9432  *                              Use UINT8_MAX to indicate that no segmentation
    9433  *                              is required (for IDT, GDT and LDT accesses).
    9434  * @param   GCPtrMem            The address of the guest memory.
    9435  * @param   fAccess             How the memory is being accessed.  The
    9436  *                              IEM_ACCESS_TYPE_XXX bit is used to figure out
    9437  *                              how to map the memory, while the
    9438  *                              IEM_ACCESS_WHAT_XXX bit is used when raising
    9439  *                              exceptions.
    9440  */
    9441 IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
    9442 {
    9443     /*
    9444      * Check the input and figure out which mapping entry to use.
    9445      */
    9446     Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
    9447     Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
    9448     Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
    9449 
    9450     unsigned iMemMap = pVCpu->iem.s.iNextMapping;
    9451     if (   iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    9452         || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
    9453     {
    9454         iMemMap = iemMemMapFindFree(pVCpu);
    9455         AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
    9456                             ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
    9457                              pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
    9458                              pVCpu->iem.s.aMemMappings[2].fAccess),
    9459                             longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
    9460     }
    9461 
    9462     /*
    9463      * Map the memory, checking that we can actually access it.  If something
    9464      * slightly complicated happens, fall back on bounce buffering.
    9465      */
    9466     VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
    9467     if (rcStrict == VINF_SUCCESS) { /*likely*/ }
    9468     else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    9469 
    9470     /* Crossing a page boundary? */
    9471     if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
    9472     { /* No (likely). */ }
    9473     else
    9474     {
    9475         void *pvMem;
    9476         rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
    9477         if (rcStrict == VINF_SUCCESS)
    9478             return pvMem;
    9479         longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    9480     }
    9481 
    9482     RTGCPHYS GCPhysFirst;
    9483     rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
    9484     if (rcStrict == VINF_SUCCESS) { /*likely*/ }
    9485     else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    9486 
    9487     if (fAccess & IEM_ACCESS_TYPE_WRITE)
    9488         Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
    9489     if (fAccess & IEM_ACCESS_TYPE_READ)
    9490         Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
    9491 
    9492     void *pvMem;
    9493     rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    9494     if (rcStrict == VINF_SUCCESS)
    9495     { /* likely */ }
    9496     else
    9497     {
    9498         rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
    9499         if (rcStrict == VINF_SUCCESS)
    9500             return pvMem;
    9501         longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    9502     }
    9503 
    9504     /*
    9505      * Fill in the mapping table entry.
    9506      */
    9507     pVCpu->iem.s.aMemMappings[iMemMap].pv      = pvMem;
    9508     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
    9509     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    9510     pVCpu->iem.s.cActiveMappings++;
    9511 
    9512     iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
    9513     return pvMem;
    9514 }
    9515 
    9516 
    9517 /**
    9518  * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
    9519  *
    9520  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9521  * @param   pvMem               The mapping.
    9522  * @param   fAccess             The kind of access.
    9523  */
    9524 IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
    9525 {
    9526     int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
    9527     AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
    9528 
    9529     /* If it's bounce buffered, we may need to write back the buffer. */
    9530     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    9531     {
    9532         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    9533         {
    9534             VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    9535             if (rcStrict == VINF_SUCCESS)
    9536                 return;
    9537             longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    9538         }
    9539     }
    9540     /* Otherwise unlock it. */
    9541     else
    9542         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    9543 
    9544     /* Free the entry. */
    9545     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    9546     Assert(pVCpu->iem.s.cActiveMappings != 0);
    9547     pVCpu->iem.s.cActiveMappings--;
    9548 }
    9549 
    9550 #endif /* IEM_WITH_SETJMP */
    9551 
    9552 #ifndef IN_RING3
    9553 /**
    9554  * Commits the guest memory if bounce buffered and unmaps it; if any bounce
    9555  * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
    9556  *
    9557  * Allows the instruction to be completed and retired, while the IEM user will
    9558  * return to ring-3 immediately afterwards and do the postponed writes there.
    9559  *
    9560  * @returns VBox status code (no strict statuses).  Caller must check
    9561  *          VMCPU_FF_IEM before repeating string instructions and similar stuff.
    9562  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9563  * @param   pvMem               The mapping.
    9564  * @param   fAccess             The kind of access.
    9565  */
    9566 IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
    9567 {
    9568     int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
    9569     AssertReturn(iMemMap >= 0, iMemMap);
    9570 
    9571     /* If it's bounce buffered, we may need to write back the buffer. */
    9572     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    9573     {
    9574         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    9575             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
    9576     }
    9577     /* Otherwise unlock it. */
    9578     else
    9579         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    9580 
    9581     /* Free the entry. */
    9582     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    9583     Assert(pVCpu->iem.s.cActiveMappings != 0);
    9584     pVCpu->iem.s.cActiveMappings--;
    9585     return VINF_SUCCESS;
    9586 }
    9587 #endif
    9588 
    9589 
    9590 /**
    9591  * Rolls back mappings, releasing page locks and such.
    9592  *
    9593  * The caller shall only call this after checking cActiveMappings.
    9594  *
    9596  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    9597  */
    9598 IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
    9599 {
    9600     Assert(pVCpu->iem.s.cActiveMappings > 0);
    9601 
    9602     uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    9603     while (iMemMap-- > 0)
    9604     {
    9605         uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
    9606         if (fAccess != IEM_ACCESS_INVALID)
    9607         {
    9608             AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
    9609             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    9610             if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
    9611                 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    9612             AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
    9613                       ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
    9614                        iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
    9615                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
    9616             pVCpu->iem.s.cActiveMappings--;
    9617         }
    9618     }
    9619 }
    9620 
    9621 
    9622 /**
    9623  * Fetches a data byte.
    9624  *
    9625  * @returns Strict VBox status code.
    9626  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9627  * @param   pu8Dst              Where to return the byte.
    9628  * @param   iSegReg             The index of the segment register to use for
    9629  *                              this access.  The base and limits are checked.
    9630  * @param   GCPtrMem            The address of the guest memory.
    9631  */
    9632 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    9633 {
    9634     /* The lazy approach for now... */
    9635     uint8_t const *pu8Src;
    9636     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    9637     if (rc == VINF_SUCCESS)
    9638     {
    9639         *pu8Dst = *pu8Src;
    9640         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
    9641     }
    9642     return rc;
    9643 }
    9644 
    9645 
    9646 #ifdef IEM_WITH_SETJMP
    9647 /**
    9648  * Fetches a data byte, longjmp on error.
    9649  *
    9650  * @returns The byte.
    9651  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9652  * @param   iSegReg             The index of the segment register to use for
    9653  *                              this access.  The base and limits are checked.
    9654  * @param   GCPtrMem            The address of the guest memory.
    9655  */
    9656 DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
    9657 {
    9658     /* The lazy approach for now... */
    9659     uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    9660     uint8_t const  bRet   = *pu8Src;
    9661     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
    9662     return bRet;
    9663 }
    9664 #endif /* IEM_WITH_SETJMP */
    9665 
    9666 
    9667 /**
    9668  * Fetches a data word.
    9669  *
    9670  * @returns Strict VBox status code.
    9671  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9672  * @param   pu16Dst             Where to return the word.
    9673  * @param   iSegReg             The index of the segment register to use for
    9674  *                              this access.  The base and limits are checked.
    9675  * @param   GCPtrMem            The address of the guest memory.
    9676  */
    9677 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    9678 {
    9679     /* The lazy approach for now... */
    9680     uint16_t const *pu16Src;
    9681     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    9682     if (rc == VINF_SUCCESS)
    9683     {
    9684         *pu16Dst = *pu16Src;
    9685         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
    9686     }
    9687     return rc;
    9688 }
    9689 
    9690 
    9691 #ifdef IEM_WITH_SETJMP
    9692 /**
    9693  * Fetches a data word, longjmp on error.
    9694  *
    9695  * @returns The word.
    9696  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9697  * @param   iSegReg             The index of the segment register to use for
    9698  *                              this access.  The base and limits are checked.
    9699  * @param   GCPtrMem            The address of the guest memory.
    9700  */
    9701 DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
    9702 {
    9703     /* The lazy approach for now... */
    9704     uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    9705     uint16_t const u16Ret = *pu16Src;
    9706     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
    9707     return u16Ret;
    9708 }
    9709 #endif
    9710 
    9711 
    9712 /**
    9713  * Fetches a data dword.
    9714  *
    9715  * @returns Strict VBox status code.
    9716  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9717  * @param   pu32Dst             Where to return the dword.
    9718  * @param   iSegReg             The index of the segment register to use for
    9719  *                              this access.  The base and limits are checked.
    9720  * @param   GCPtrMem            The address of the guest memory.
    9721  */
    9722 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    9723 {
    9724     /* The lazy approach for now... */
    9725     uint32_t const *pu32Src;
    9726     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    9727     if (rc == VINF_SUCCESS)
    9728     {
    9729         *pu32Dst = *pu32Src;
    9730         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
    9731     }
    9732     return rc;
    9733 }
    9734 
    9735 
    9736 /**
    9737  * Fetches a data dword and zero extends it to a qword.
    9738  *
    9739  * @returns Strict VBox status code.
    9740  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9741  * @param   pu64Dst             Where to return the qword.
    9742  * @param   iSegReg             The index of the segment register to use for
    9743  *                              this access.  The base and limits are checked.
    9744  * @param   GCPtrMem            The address of the guest memory.
    9745  */
    9746 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    9747 {
    9748     /* The lazy approach for now... */
    9749     uint32_t const *pu32Src;
    9750     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    9751     if (rc == VINF_SUCCESS)
    9752     {
    9753         *pu64Dst = *pu32Src;
    9754         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
    9755     }
    9756     return rc;
    9757 }
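
The zero extension above happens implicitly in C because the 32-bit source is unsigned. A minimal illustration (hypothetical helpers; contrast with iemMemFetchDataS32SxU64 further down, which goes through int32_t to get sign extension instead):

/* Illustration only. */
static uint64_t iemExampleZx(uint32_t u32) { return u32; }                    /* 0x80000000 -> 0x0000000080000000 */
static uint64_t iemExampleSx(int32_t  i32) { return (uint64_t)(int64_t)i32; } /* 0x80000000 -> 0xffffffff80000000 */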
    9758 
    9759 
    9760 #ifdef IEM_WITH_SETJMP
    9761 
    9762 IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
    9763 {
    9764     Assert(cbMem >= 1);
    9765     Assert(iSegReg < X86_SREG_COUNT);
    9766 
    9767     /*
    9768      * 64-bit mode is simpler.
    9769      */
    9770     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    9771     {
    9772         if (iSegReg >= X86_SREG_FS)
    9773         {
    9774             IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    9775             PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
    9776             GCPtrMem += pSel->u64Base;
    9777         }
    9778 
    9779         if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
    9780             return GCPtrMem;
    9781     }
    9782     /*
    9783      * 16-bit and 32-bit segmentation.
    9784      */
    9785     else
    9786     {
    9787         IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    9788         PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
    9789         if (      (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
    9790                == X86DESCATTR_P /* data, expand up */
    9791             ||    (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
    9792                == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
    9793         {
    9794             /* expand up */
    9795             uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
    9796             if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
    9797                           && GCPtrLast32 >= (uint32_t)GCPtrMem))
    9798                 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
    9799         }
    9800         else if (   (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
    9801                  == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
    9802         {
    9803             /* expand down */
    9804             uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
    9805             if (RT_LIKELY(   (uint32_t)GCPtrMem >  pSel->u32Limit
    9806                           && GCPtrLast32        <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
    9807                           && GCPtrLast32 > (uint32_t)GCPtrMem))
    9808                 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
    9809         }
    9810         else
    9811             iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
    9812         iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
    9813     }
    9814     iemRaiseGeneralProtectionFault0Jmp(pVCpu);
    9815 }
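
A worked example may make the two limit checks above easier to follow. This standalone sketch (illustrative only, slightly simplified from the branches above) states the validity condition for each growth direction in terms of the last byte of the access:

/* Illustration only: validity of a cb-byte data access at segment offset off. */
static bool iemExampleIsDataAccessValid(uint32_t off, uint32_t cb, uint32_t uLimit, bool fExpandDown, bool fBig)
{
    uint32_t const offLast = off + cb - 1;                        /* last byte of the access */
    if (!fExpandDown)
        return offLast <= uLimit                                  /* expand up: whole access at or below the limit */
            && offLast >= off;                                    /* ... and no 32-bit wraparound */
    return off > uLimit                                           /* expand down: whole access above the limit */
        && offLast <= (fBig ? UINT32_MAX : UINT32_C(0xffff))      /* ... below the segment top (64K or 4G) */
        && offLast >= off;                                        /* ... and no wraparound */
}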
    9816 
    9817 
    9818 IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
    9819 {
    9820     Assert(cbMem >= 1);
    9821     Assert(iSegReg < X86_SREG_COUNT);
    9822 
    9823     /*
    9824      * 64-bit mode is simpler.
    9825      */
    9826     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    9827     {
    9828         if (iSegReg >= X86_SREG_FS)
    9829         {
    9830             IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    9831             PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
    9832             GCPtrMem += pSel->u64Base;
    9833         }
    9834 
    9835         if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
    9836             return GCPtrMem;
    9837     }
    9838     /*
    9839      * 16-bit and 32-bit segmentation.
    9840      */
    9841     else
    9842     {
    9843         IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
    9844         PCPUMSELREGHID pSel           = iemSRegGetHid(pVCpu, iSegReg);
    9845         uint32_t const fRelevantAttrs = pSel->Attr.u & (  X86DESCATTR_P     | X86DESCATTR_UNUSABLE
    9846                                                         | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
    9847         if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
    9848         {
    9849             /* expand up */
    9850             uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
    9851             if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
    9852                           && GCPtrLast32 >= (uint32_t)GCPtrMem))
    9853                 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
    9854         }
    9855         else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
    9856         {
    9857             /* expand down */
    9858             uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
    9859             if (RT_LIKELY(   (uint32_t)GCPtrMem >  pSel->u32Limit
    9860                           && GCPtrLast32        <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
    9861                           && GCPtrLast32 > (uint32_t)GCPtrMem))
    9862                 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
    9863         }
    9864         else
    9865             iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
    9866         iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
    9867     }
    9868     iemRaiseGeneralProtectionFault0Jmp(pVCpu);
    9869 }
    9870 
    9871 
    9872 /**
    9873  * Fetches a data dword, longjmp on error, fallback/safe version.
    9874  *
    9875  * @returns The dword.
    9876  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9877  * @param   iSegReg             The index of the segment register to use for
    9878  *                              this access.  The base and limits are checked.
    9879  * @param   GCPtrMem            The address of the guest memory.
    9880  */
    9881 IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
    9882 {
    9883     uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    9884     uint32_t const  u32Ret  = *pu32Src;
    9885     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
    9886     return u32Ret;
    9887 }
    9888 
    9889 
    9890 /**
    9891  * Fetches a data dword, longjmp on error.
    9892  *
    9893  * @returns The dword.
    9894  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9895  * @param   iSegReg             The index of the segment register to use for
    9896  *                              this access.  The base and limits are checked.
    9897  * @param   GCPtrMem            The address of the guest memory.
    9898  */
    9899 DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
    9900 {
    9901 # ifdef IEM_WITH_DATA_TLB
    9902     RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
    9903     if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
    9904     {
    9905         /// @todo more later.
    9906     }
    9907 
    9908     return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
    9909 # else
    9910     /* The lazy approach. */
    9911     uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    9912     uint32_t const  u32Ret  = *pu32Src;
    9913     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
    9914     return u32Ret;
    9915 # endif
    9916 }
    9917 #endif
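
The RT_LIKELY gate in the TLB stub above checks that the access does not cross a page boundary, since one TLB entry can only translate one page; everything else still goes through the safe fallback. Boiled down to a predicate (illustrative helper, not part of the changeset):

/* Illustration only. */
static bool iemExampleFitsInOnePage(RTGCPTR GCPtrEff, size_t cbAccess)
{
    /* The page offset must leave room for all cbAccess bytes within the 4K page. */
    return (GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - cbAccess;
}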
    9918 
    9919 
    9920 #ifdef SOME_UNUSED_FUNCTION
    9921 /**
    9922  * Fetches a data dword and sign extends it to a qword.
    9923  *
    9924  * @returns Strict VBox status code.
    9925  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9926  * @param   pu64Dst             Where to return the sign extended value.
    9927  * @param   iSegReg             The index of the segment register to use for
    9928  *                              this access.  The base and limits are checked.
    9929  * @param   GCPtrMem            The address of the guest memory.
    9930  */
    9931 IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    9932 {
    9933     /* The lazy approach for now... */
    9934     int32_t const *pi32Src;
    9935     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    9936     if (rc == VINF_SUCCESS)
    9937     {
    9938         *pu64Dst = *pi32Src;
    9939         rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
    9940     }
    9941 #ifdef __GNUC__ /* warning: GCC may be a royal pain */
    9942     else
    9943         *pu64Dst = 0;
    9944 #endif
    9945     return rc;
    9946 }
    9947 #endif
    9948 
    9949 
    9950 /**
    9951  * Fetches a data qword.
    9952  *
    9953  * @returns Strict VBox status code.
    9954  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9955  * @param   pu64Dst             Where to return the qword.
    9956  * @param   iSegReg             The index of the segment register to use for
    9957  *                              this access.  The base and limits are checked.
    9958  * @param   GCPtrMem            The address of the guest memory.
    9959  */
    9960 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    9961 {
    9962     /* The lazy approach for now... */
    9963     uint64_t const *pu64Src;
    9964     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    9965     if (rc == VINF_SUCCESS)
    9966     {
    9967         *pu64Dst = *pu64Src;
    9968         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
    9969     }
    9970     return rc;
    9971 }
    9972 
    9973 
    9974 #ifdef IEM_WITH_SETJMP
    9975 /**
    9976  * Fetches a data qword, longjmp on error.
    9977  *
    9978  * @returns The qword.
    9979  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    9980  * @param   iSegReg             The index of the segment register to use for
    9981  *                              this access.  The base and limits are checked.
    9982  * @param   GCPtrMem            The address of the guest memory.
    9983  */
    9984 DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
    9985 {
    9986     /* The lazy approach for now... */
    9987     uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    9988     uint64_t const u64Ret = *pu64Src;
    9989     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
    9990     return u64Ret;
    9991 }
    9992 #endif
    9993 
    9994 
    9995 /**
    9996  * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
    9997  *
    9998  * @returns Strict VBox status code.
    9999  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10000  * @param   pu64Dst             Where to return the qword.
    10001  * @param   iSegReg             The index of the segment register to use for
    10002  *                              this access.  The base and limits are checked.
    10003  * @param   GCPtrMem            The address of the guest memory.
    10004  */
    10005 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10006 {
    10007     /* The lazy approach for now... */
    10008     /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
    10009     if (RT_UNLIKELY(GCPtrMem & 15))
    10010         return iemRaiseGeneralProtectionFault0(pVCpu);
    10011 
    10012     uint64_t const *pu64Src;
    10013     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10014     if (rc == VINF_SUCCESS)
    10015     {
    10016         *pu64Dst = *pu64Src;
    10017         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
    10018     }
    10019     return rc;
    10020 }
    10021 
    10022 
    10023 #ifdef IEM_WITH_SETJMP
    10024 /**
    10025  * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
    10026  *
    10027  * @returns The qword.
    10028  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10029  * @param   iSegReg             The index of the segment register to use for
    10030  *                              this access.  The base and limits are checked.
    10031  * @param   GCPtrMem            The address of the guest memory.
    10032  */
    10033 DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10034 {
    10035     /* The lazy approach for now... */
    10036     /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
    10037     if (RT_LIKELY(!(GCPtrMem & 15)))
    10038     {
    10039         uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10040         uint64_t const u64Ret = *pu64Src;
    10041         iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
    10042         return u64Ret;
    10043     }
    10044 
    10045     VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
    10046     longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
    10047 }
    10048 #endif
    10049 
    10050 
    10051 /**
    10052  * Fetches a data tword.
    10053  *
    10054  * @returns Strict VBox status code.
    10055  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10056  * @param   pr80Dst             Where to return the tword.
    10057  * @param   iSegReg             The index of the segment register to use for
    10058  *                              this access.  The base and limits are checked.
    10059  * @param   GCPtrMem            The address of the guest memory.
    10060  */
    10061 IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10062 {
    10063     /* The lazy approach for now... */
    10064     PCRTFLOAT80U pr80Src;
    10065     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10066     if (rc == VINF_SUCCESS)
    10067     {
    10068         *pr80Dst = *pr80Src;
    10069         rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
    10070     }
    10071     return rc;
    10072 }
    10073 
    10074 
    10075 #ifdef IEM_WITH_SETJMP
    10076 /**
    10077  * Fetches a data tword, longjmp on error.
    10078  *
    10079  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10080  * @param   pr80Dst             Where to return the tword.
    10081  * @param   iSegReg             The index of the segment register to use for
    10082  *                              this access.  The base and limits are checked.
    10083  * @param   GCPtrMem            The address of the guest memory.
    10084  */
    10085 DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10086 {
    10087     /* The lazy approach for now... */
    10088     PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10089     *pr80Dst = *pr80Src;
    10090     iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
    10091 }
    10092 #endif
    10093 
    10094 
    10095 /**
    10096  * Fetches a data tword.
    10097  *
    10098  * @returns Strict VBox status code.
    10099  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10100  * @param   pd80Dst             Where to return the tword.
    10101  * @param   iSegReg             The index of the segment register to use for
    10102  *                              this access.  The base and limits are checked.
    10103  * @param   GCPtrMem            The address of the guest memory.
    10104  */
    10105 IEM_STATIC VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10106 {
    10107     /* The lazy approach for now... */
    10108     PCRTPBCD80U pd80Src;
    10109     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10110     if (rc == VINF_SUCCESS)
    10111     {
    10112         *pd80Dst = *pd80Src;
    10113         rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
    10114     }
    10115     return rc;
    10116 }
    10117 
    10118 
    10119 #ifdef IEM_WITH_SETJMP
    10120 /**
    10121  * Fetches a data tword, longjmp on error.
    10122  *
    10123  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10124  * @param   pd80Dst             Where to return the tword.
    10125  * @param   iSegReg             The index of the segment register to use for
    10126  *                              this access.  The base and limits are checked.
    10127  * @param   GCPtrMem            The address of the guest memory.
    10128  */
    10129 DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10130 {
    10131     /* The lazy approach for now... */
    10132     PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10133     *pd80Dst = *pd80Src;
    10134     iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
    10135 }
    10136 #endif
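
For reference, the tword fetched here is the x87 packed BCD format used by FBLD/FBSTP: bytes 0 through 8 hold 18 packed BCD digits, low digits first, and bit 7 of byte 9 is the sign, so +1234 is stored as 34 12 00 00 00 00 00 00 00 00. A hypothetical digit extractor (illustration only):

/* Illustration only. */
static uint8_t iemExamplePbcdDigit(uint8_t const *pabPbcd, unsigned iDigit) /* iDigit: 0..17, 0 = ones */
{
    uint8_t const bPair = pabPbcd[iDigit / 2];        /* two BCD digits per byte */
    return iDigit & 1 ? bPair >> 4 : bPair & 0xf;     /* odd digits live in the high nibble */
}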
    10137 
    10138 
    10139 /**
    10140  * Fetches a data dqword (double qword), generally SSE related.
    10141  *
    10142  * @returns Strict VBox status code.
    10143  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10144  * @param   pu128Dst            Where to return the dqword.
    10145  * @param   iSegReg             The index of the segment register to use for
    10146  *                              this access.  The base and limits are checked.
    10147  * @param   GCPtrMem            The address of the guest memory.
    10148  */
    10149 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10150 {
    10151     /* The lazy approach for now... */
    10152     PCRTUINT128U pu128Src;
    10153     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10154     if (rc == VINF_SUCCESS)
    10155     {
    10156         pu128Dst->au64[0] = pu128Src->au64[0];
    10157         pu128Dst->au64[1] = pu128Src->au64[1];
    10158         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
    10159     }
    10160     return rc;
    10161 }
    10162 
    10163 
    10164 #ifdef IEM_WITH_SETJMP
    10165 /**
    10166  * Fetches a data dqword (double qword), generally SSE related.
    10167  *
    10168  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10169  * @param   pu128Dst            Where to return the dqword.
    10170  * @param   iSegReg             The index of the segment register to use for
    10171  *                              this access.  The base and limits are checked.
    10172  * @param   GCPtrMem            The address of the guest memory.
    10173  */
    10174 IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10175 {
    10176     /* The lazy approach for now... */
    10177     PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10178     pu128Dst->au64[0] = pu128Src->au64[0];
    10179     pu128Dst->au64[1] = pu128Src->au64[1];
    10180     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
    10181 }
    10182 #endif
    10183 
    10184 
    10185 /**
    10186  * Fetches a data dqword (double qword) at an aligned address, generally SSE
    10187  * related.
    10188  *
    10189  * Raises \#GP(0) if not aligned.
    10190  *
    10191  * @returns Strict VBox status code.
    10192  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10193  * @param   pu128Dst            Where to return the dqword.
    10194  * @param   iSegReg             The index of the segment register to use for
    10195  *                              this access.  The base and limits are checked.
    10196  * @param   GCPtrMem            The address of the guest memory.
    10197  */
    10198 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10199 {
    10200     /* The lazy approach for now... */
    10201     /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
    10202     if (   (GCPtrMem & 15)
    10203         && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
    10204         return iemRaiseGeneralProtectionFault0(pVCpu);
    10205 
    10206     PCRTUINT128U pu128Src;
    10207     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10208     if (rc == VINF_SUCCESS)
    10209     {
    10210         pu128Dst->au64[0] = pu128Src->au64[0];
    10211         pu128Dst->au64[1] = pu128Src->au64[1];
    10212         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
    10213     }
    10214     return rc;
    10215 }
    10216 
    10217 
    10218 #ifdef IEM_WITH_SETJMP
    10219 /**
    10220  * Fetches a data dqword (double qword) at an aligned address, generally SSE
    10221  * related, longjmp on error.
    10222  *
    10223  * Raises \#GP(0) if not aligned.
    10224  *
    10225  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10226  * @param   pu128Dst            Where to return the dqword.
    10227  * @param   iSegReg             The index of the segment register to use for
    10228  *                              this access.  The base and limits are checked.
    10229  * @param   GCPtrMem            The address of the guest memory.
    10230  */
    10231 DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10232 {
    10233     /* The lazy approach for now... */
    10234     /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
    10235     if (   (GCPtrMem & 15) == 0
    10236         || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
    10237     {
    10238         PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10239         pu128Dst->au64[0] = pu128Src->au64[0];
    10240         pu128Dst->au64[1] = pu128Src->au64[1];
    10241         iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
    10242         return;
    10243     }
    10244 
    10245     VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
    10246     longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    10247 }
    10248 #endif
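
The MXCSR.MM test in the two aligned SSE variants above encodes AMD's misaligned SSE mode: on CPUs that support it, setting MXCSR.MM suppresses the #GP(0) normally raised for unaligned 16-byte accesses. Reduced to a predicate (illustration only):

/* Illustration only: when the aligned SSE variants above raise #GP(0). */
static bool iemExampleSseAlignmentFaults(RTGCPTR GCPtrMem, uint32_t fMxCsr)
{
    return (GCPtrMem & 15)              /* not 16-byte aligned ... */
        && !(fMxCsr & X86_MXCSR_MM);    /* ... and misaligned SSE mode is off -> raise #GP(0) */
}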
    10249 
    10250 
    10251 /**
    10252  * Fetches a data oword (octo word), generally AVX related.
    10253  *
    10254  * @returns Strict VBox status code.
    10255  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10256  * @param   pu256Dst            Where to return the oword.
    10257  * @param   iSegReg             The index of the segment register to use for
    10258  *                              this access.  The base and limits are checked.
    10259  * @param   GCPtrMem            The address of the guest memory.
    10260  */
    10261 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10262 {
    10263     /* The lazy approach for now... */
    10264     PCRTUINT256U pu256Src;
    10265     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10266     if (rc == VINF_SUCCESS)
    10267     {
    10268         pu256Dst->au64[0] = pu256Src->au64[0];
    10269         pu256Dst->au64[1] = pu256Src->au64[1];
    10270         pu256Dst->au64[2] = pu256Src->au64[2];
    10271         pu256Dst->au64[3] = pu256Src->au64[3];
    10272         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
    10273     }
    10274     return rc;
    10275 }
    10276 
    10277 
    10278 #ifdef IEM_WITH_SETJMP
    10279 /**
    10280  * Fetches a data oword (octo word), generally AVX related.
    10281  *
    10282  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10283  * @param   pu256Dst            Where to return the oword.
    10284  * @param   iSegReg             The index of the segment register to use for
    10285  *                              this access.  The base and limits are checked.
    10286  * @param   GCPtrMem            The address of the guest memory.
    10287  */
    10288 IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10289 {
    10290     /* The lazy approach for now... */
    10291     PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10292     pu256Dst->au64[0] = pu256Src->au64[0];
    10293     pu256Dst->au64[1] = pu256Src->au64[1];
    10294     pu256Dst->au64[2] = pu256Src->au64[2];
    10295     pu256Dst->au64[3] = pu256Src->au64[3];
    10296     iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
    10297 }
    10298 #endif
    10299 
    10300 
    10301 /**
    10302  * Fetches a data oword (octo word) at an aligned address, generally AVX
    10303  * related.
    10304  *
    10305  * Raises \#GP(0) if not aligned.
    10306  *
    10307  * @returns Strict VBox status code.
    10308  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10309  * @param   pu256Dst            Where to return the oword.
    10310  * @param   iSegReg             The index of the segment register to use for
    10311  *                              this access.  The base and limits are checked.
    10312  * @param   GCPtrMem            The address of the guest memory.
    10313  */
    10314 IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10315 {
    10316     /* The lazy approach for now... */
    10317     /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
    10318     if (GCPtrMem & 31)
    10319         return iemRaiseGeneralProtectionFault0(pVCpu);
    10320 
    10321     PCRTUINT256U pu256Src;
    10322     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10323     if (rc == VINF_SUCCESS)
    10324     {
    10325         pu256Dst->au64[0] = pu256Src->au64[0];
    10326         pu256Dst->au64[1] = pu256Src->au64[1];
    10327         pu256Dst->au64[2] = pu256Src->au64[2];
    10328         pu256Dst->au64[3] = pu256Src->au64[3];
    10329         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
    10330     }
    10331     return rc;
    10332 }
    10333 
    10334 
    10335 #ifdef IEM_WITH_SETJMP
    10336 /**
    10337  * Fetches a data oword (octo word) at an aligned address, generally AVX
    10338  * related, longjmp on error.
    10339  *
    10340  * Raises \#GP(0) if not aligned.
    10341  *
    10342  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10343  * @param   pu256Dst            Where to return the oword.
    10344  * @param   iSegReg             The index of the segment register to use for
    10345  *                              this access.  The base and limits are checked.
    10346  * @param   GCPtrMem            The address of the guest memory.
    10347  */
    10348 DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10349 {
    10350     /* The lazy approach for now... */
    10351     /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
    10352     if ((GCPtrMem & 31) == 0)
    10353     {
    10354         PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
    10355         pu256Dst->au64[0] = pu256Src->au64[0];
    10356         pu256Dst->au64[1] = pu256Src->au64[1];
    10357         pu256Dst->au64[2] = pu256Src->au64[2];
    10358         pu256Dst->au64[3] = pu256Src->au64[3];
    10359         iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
    10360         return;
    10361     }
    10362 
    10363     VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
    10364     longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    10365 }
    10366 #endif
    10367 
    10368 
    10369 
    10370 /**
    10371  * Fetches a descriptor register (lgdt, lidt).
    10372  *
    10373  * @returns Strict VBox status code.
    10374  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10375  * @param   pcbLimit            Where to return the limit.
    10376  * @param   pGCPtrBase          Where to return the base.
    10377  * @param   iSegReg             The index of the segment register to use for
    10378  *                              this access.  The base and limits are checked.
    10379  * @param   GCPtrMem            The address of the guest memory.
    10380  * @param   enmOpSize           The effective operand size.
    10381  */
    10382 IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
    10383                                             RTGCPTR GCPtrMem, IEMMODE enmOpSize)
    10384 {
    10385     /*
    10386      * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
    10387      * little special:
    10388      *      - The two reads are done separately.
    10389  *      - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
    10390      *      - We suspect the 386 to actually commit the limit before the base in
    10391      *        some cases (search for 386 in  bs3CpuBasic2_lidt_lgdt_One).  We
    10392  *        don't try to emulate this eccentric behavior, because it's not well
    10393      *        enough understood and rather hard to trigger.
    10394      *      - The 486 seems to do a dword limit read when the operand size is 32-bit.
    10395      */
    10396     VBOXSTRICTRC rcStrict;
    10397     if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    10398     {
    10399         rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
    10400         if (rcStrict == VINF_SUCCESS)
    10401             rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
    10402     }
    10403     else
    10404     {
    10405         uint32_t uTmp = 0; /* (zeroed to silence a Visual C++ may-be-used-uninitialized warning) */
    10406         if (enmOpSize == IEMMODE_32BIT)
    10407         {
    10408             if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
    10409             {
    10410                 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
    10411                 if (rcStrict == VINF_SUCCESS)
    10412                     rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
    10413             }
    10414             else
    10415             {
    10416                 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
    10417                 if (rcStrict == VINF_SUCCESS)
    10418                 {
    10419                     *pcbLimit = (uint16_t)uTmp;
    10420                     rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
    10421                 }
    10422             }
    10423             if (rcStrict == VINF_SUCCESS)
    10424                 *pGCPtrBase = uTmp;
    10425         }
    10426         else
    10427         {
    10428             rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
    10429             if (rcStrict == VINF_SUCCESS)
    10430             {
    10431                 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
    10432                 if (rcStrict == VINF_SUCCESS)
    10433                     *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
    10434             }
    10435         }
    10436     }
    10437     return rcStrict;
    10438 }
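
The operand decoded above is the usual 6-byte (10-byte in 64-bit mode) pseudo-descriptor: a 16-bit limit followed by a 24-bit, 32-bit or 64-bit base at offset 2. A hypothetical packed struct for the 32-bit layout makes the two reads concrete:

/* Illustration only, hypothetical struct. */
#pragma pack(1)
typedef struct EXAMPLEXDTR32
{
    uint16_t cbLimit;   /* bytes 0..1: descriptor table limit */
    uint32_t uBase;     /* bytes 2..5: linear base; only the low 24 bits are used with a 16-bit operand size */
} EXAMPLEXDTR32;
#pragma pack()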
    10439 
    10440 
    10441 
    10442 /**
    10443  * Stores a data byte.
    10444  *
    10445  * @returns Strict VBox status code.
    10446  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10447  * @param   iSegReg             The index of the segment register to use for
    10448  *                              this access.  The base and limits are checked.
    10449  * @param   GCPtrMem            The address of the guest memory.
    10450  * @param   u8Value             The value to store.
    10451  */
    10452 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
    10453 {
    10454     /* The lazy approach for now... */
    10455     uint8_t *pu8Dst;
    10456     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10457     if (rc == VINF_SUCCESS)
    10458     {
    10459         *pu8Dst = u8Value;
    10460         rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
    10461     }
    10462     return rc;
    10463 }
    10464 
    10465 
    10466 #ifdef IEM_WITH_SETJMP
    10467 /**
    10468  * Stores a data byte, longjmp on error.
    10469  *
    10470  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10471  * @param   iSegReg             The index of the segment register to use for
    10472  *                              this access.  The base and limits are checked.
    10473  * @param   GCPtrMem            The address of the guest memory.
    10474  * @param   u8Value             The value to store.
    10475  */
    10476 IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
    10477 {
    10478     /* The lazy approach for now... */
    10479     uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10480     *pu8Dst = u8Value;
    10481     iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
    10482 }
    10483 #endif
    10484 
    10485 
    10486 /**
    10487  * Stores a data word.
    10488  *
    10489  * @returns Strict VBox status code.
    10490  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10491  * @param   iSegReg             The index of the segment register to use for
    10492  *                              this access.  The base and limits are checked.
    10493  * @param   GCPtrMem            The address of the guest memory.
    10494  * @param   u16Value            The value to store.
    10495  */
    10496 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
    10497 {
    10498     /* The lazy approach for now... */
    10499     uint16_t *pu16Dst;
    10500     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10501     if (rc == VINF_SUCCESS)
    10502     {
    10503         *pu16Dst = u16Value;
    10504         rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
    10505     }
    10506     return rc;
    10507 }
    10508 
    10509 
    10510 #ifdef IEM_WITH_SETJMP
    10511 /**
    10512  * Stores a data word, longjmp on error.
    10513  *
    10514  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10515  * @param   iSegReg             The index of the segment register to use for
    10516  *                              this access.  The base and limits are checked.
    10517  * @param   GCPtrMem            The address of the guest memory.
    10518  * @param   u16Value            The value to store.
    10519  */
    10520 IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
    10521 {
    10522     /* The lazy approach for now... */
    10523     uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10524     *pu16Dst = u16Value;
    10525     iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
    10526 }
    10527 #endif
    10528 
    10529 
    10530 /**
    10531  * Stores a data dword.
    10532  *
    10533  * @returns Strict VBox status code.
    10534  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10535  * @param   iSegReg             The index of the segment register to use for
    10536  *                              this access.  The base and limits are checked.
    10537  * @param   GCPtrMem            The address of the guest memory.
    10538  * @param   u32Value            The value to store.
    10539  */
    10540 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
    10541 {
    10542     /* The lazy approach for now... */
    10543     uint32_t *pu32Dst;
    10544     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10545     if (rc == VINF_SUCCESS)
    10546     {
    10547         *pu32Dst = u32Value;
    10548         rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
    10549     }
    10550     return rc;
    10551 }
    10552 
    10553 
    10554 #ifdef IEM_WITH_SETJMP
    10555 /**
    10556  * Stores a data dword, longjmp on error.
    10557  *
    10559  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10560  * @param   iSegReg             The index of the segment register to use for
    10561  *                              this access.  The base and limits are checked.
    10562  * @param   GCPtrMem            The address of the guest memory.
    10563  * @param   u32Value            The value to store.
    10564  */
    10565 IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
    10566 {
    10567     /* The lazy approach for now... */
    10568     uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10569     *pu32Dst = u32Value;
    10570     iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
    10571 }
    10572 #endif
    10573 
    10574 
    10575 /**
    10576  * Stores a data qword.
    10577  *
    10578  * @returns Strict VBox status code.
    10579  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10580  * @param   iSegReg             The index of the segment register to use for
    10581  *                              this access.  The base and limits are checked.
    10582  * @param   GCPtrMem            The address of the guest memory.
    10583  * @param   u64Value            The value to store.
    10584  */
    10585 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
    10586 {
    10587     /* The lazy approach for now... */
    10588     uint64_t *pu64Dst;
    10589     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10590     if (rc == VINF_SUCCESS)
    10591     {
    10592         *pu64Dst = u64Value;
    10593         rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
    10594     }
    10595     return rc;
    10596 }
    10597 
    10598 
    10599 #ifdef IEM_WITH_SETJMP
    10600 /**
    10601  * Stores a data qword, longjmp on error.
    10602  *
    10603  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10604  * @param   iSegReg             The index of the segment register to use for
    10605  *                              this access.  The base and limits are checked.
    10606  * @param   GCPtrMem            The address of the guest memory.
    10607  * @param   u64Value            The value to store.
    10608  */
    10609 IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
    10610 {
    10611     /* The lazy approach for now... */
    10612     uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10613     *pu64Dst = u64Value;
    10614     iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
    10615 }
    10616 #endif
    10617 
    10618 
    10619 /**
    10620  * Stores a data dqword.
    10621  *
    10622  * @returns Strict VBox status code.
    10623  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10624  * @param   iSegReg             The index of the segment register to use for
    10625  *                              this access.  The base and limits are checked.
    10626  * @param   GCPtrMem            The address of the guest memory.
    10627  * @param   u128Value           The value to store.
    10628  */
    10629 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
    10630 {
    10631     /* The lazy approach for now... */
    10632     PRTUINT128U pu128Dst;
    10633     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10634     if (rc == VINF_SUCCESS)
    10635     {
    10636         pu128Dst->au64[0] = u128Value.au64[0];
    10637         pu128Dst->au64[1] = u128Value.au64[1];
    10638         rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
    10639     }
    10640     return rc;
    10641 }
    10642 
    10643 
    10644 #ifdef IEM_WITH_SETJMP
    10645 /**
    10646  * Stores a data dqword, longjmp on error.
    10647  *
    10648  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10649  * @param   iSegReg             The index of the segment register to use for
    10650  *                              this access.  The base and limits are checked.
    10651  * @param   GCPtrMem            The address of the guest memory.
    10652  * @param   u128Value           The value to store.
    10653  */
    10654 IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
    10655 {
    10656     /* The lazy approach for now... */
    10657     PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10658     pu128Dst->au64[0] = u128Value.au64[0];
    10659     pu128Dst->au64[1] = u128Value.au64[1];
    10660     iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
    10661 }
    10662 #endif
    10663 
    10664 
    10665 /**
    10666  * Stores a data dqword, SSE aligned.
    10667  *
    10668  * @returns Strict VBox status code.
    10669  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10670  * @param   iSegReg             The index of the segment register to use for
    10671  *                              this access.  The base and limits are checked.
    10672  * @param   GCPtrMem            The address of the guest memory.
    10673  * @param   u128Value           The value to store.
    10674  */
    10675 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
    10676 {
    10677     /* The lazy approach for now... */
    10678     if (   (GCPtrMem & 15)
    10679         && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
    10680         return iemRaiseGeneralProtectionFault0(pVCpu);
    10681 
    10682     PRTUINT128U pu128Dst;
    10683     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10684     if (rc == VINF_SUCCESS)
    10685     {
    10686         pu128Dst->au64[0] = u128Value.au64[0];
    10687         pu128Dst->au64[1] = u128Value.au64[1];
    10688         rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
    10689     }
    10690     return rc;
    10691 }
    10692 
    10693 
    10694 #ifdef IEM_WITH_SETJMP
    10695 /**
    10696  * Stores a data dqword, SSE aligned, longjmp on error.
    10697  *
    10699  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10700  * @param   iSegReg             The index of the segment register to use for
    10701  *                              this access.  The base and limits are checked.
    10702  * @param   GCPtrMem            The address of the guest memory.
    10703  * @param   u128Value           The value to store.
    10704  */
    10705 DECL_NO_INLINE(IEM_STATIC, void)
    10706 iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
    10707 {
    10708     /* The lazy approach for now... */
    10709     if (   (GCPtrMem & 15) == 0
    10710         || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
    10711     {
    10712         PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10713         pu128Dst->au64[0] = u128Value.au64[0];
    10714         pu128Dst->au64[1] = u128Value.au64[1];
    10715         iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
    10716         return;
    10717     }
    10718 
    10719     VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
    10720     longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    10721 }
    10722 #endif
    10723 
    10724 
    10725 /**
    10726  * Stores a data oword (octo word).
    10727  *
    10728  * @returns Strict VBox status code.
    10729  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10730  * @param   iSegReg             The index of the segment register to use for
    10731  *                              this access.  The base and limits are checked.
    10732  * @param   GCPtrMem            The address of the guest memory.
    10733  * @param   pu256Value          Pointer to the value to store.
    10734  */
    10735 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
    10736 {
    10737     /* The lazy approach for now... */
    10738     PRTUINT256U pu256Dst;
    10739     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10740     if (rc == VINF_SUCCESS)
    10741     {
    10742         pu256Dst->au64[0] = pu256Value->au64[0];
    10743         pu256Dst->au64[1] = pu256Value->au64[1];
    10744         pu256Dst->au64[2] = pu256Value->au64[2];
    10745         pu256Dst->au64[3] = pu256Value->au64[3];
    10746         rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
    10747     }
    10748     return rc;
    10749 }
    10750 
    10751 
    10752 #ifdef IEM_WITH_SETJMP
    10753 /**
    10754  * Stores a data oword (octo word), longjmp on error.
    10755  *
    10756  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10757  * @param   iSegReg             The index of the segment register to use for
    10758  *                              this access.  The base and limits are checked.
    10759  * @param   GCPtrMem            The address of the guest memory.
    10760  * @param   pu256Value          Pointer to the value to store.
    10761  */
    10762 IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
    10763 {
    10764     /* The lazy approach for now... */
    10765     PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10766     pu256Dst->au64[0] = pu256Value->au64[0];
    10767     pu256Dst->au64[1] = pu256Value->au64[1];
    10768     pu256Dst->au64[2] = pu256Value->au64[2];
    10769     pu256Dst->au64[3] = pu256Value->au64[3];
    10770     iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
    10771 }
    10772 #endif
    10773 
    10774 
    10775 /**
    10776  * Stores a data oword (octo word), AVX aligned.
    10777  *
    10778  * @returns Strict VBox status code.
    10779  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10780  * @param   iSegReg             The index of the segment register to use for
    10781  *                              this access.  The base and limits are checked.
    10782  * @param   GCPtrMem            The address of the guest memory.
    10783  * @param   pu256Value          Pointer to the value to store.
    10784  */
    10785 IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
    10786 {
    10787     /* The lazy approach for now... */
    10788     if (GCPtrMem & 31)
    10789         return iemRaiseGeneralProtectionFault0(pVCpu);
    10790 
    10791     PRTUINT256U pu256Dst;
    10792     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10793     if (rc == VINF_SUCCESS)
    10794     {
    10795         pu256Dst->au64[0] = pu256Value->au64[0];
    10796         pu256Dst->au64[1] = pu256Value->au64[1];
    10797         pu256Dst->au64[2] = pu256Value->au64[2];
    10798         pu256Dst->au64[3] = pu256Value->au64[3];
    10799         rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
    10800     }
    10801     return rc;
    10802 }
    10803 
    10804 
    10805 #ifdef IEM_WITH_SETJMP
    10806 /**
    10807  * Stores a data oword (octo word), AVX aligned, longjmp on error.
    10808  *
    10810  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10811  * @param   iSegReg             The index of the segment register to use for
    10812  *                              this access.  The base and limits are checked.
    10813  * @param   GCPtrMem            The address of the guest memory.
    10814  * @param   pu256Value          Pointer to the value to store.
    10815  */
    10816 DECL_NO_INLINE(IEM_STATIC, void)
    10817 iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
    10818 {
    10819     /* The lazy approach for now... */
    10820     if ((GCPtrMem & 31) == 0)
    10821     {
    10822         PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    10823         pu256Dst->au64[0] = pu256Value->au64[0];
    10824         pu256Dst->au64[1] = pu256Value->au64[1];
    10825         pu256Dst->au64[2] = pu256Value->au64[2];
    10826         pu256Dst->au64[3] = pu256Value->au64[3];
    10827         iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
    10828         return;
    10829     }
    10830 
    10831     VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
    10832     longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
    10833 }
    10834 #endif
    10835 
    10836 
    10837 /**
    10838  * Stores a descriptor register (sgdt, sidt).
    10839  *
    10840  * @returns Strict VBox status code.
    10841  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10842  * @param   cbLimit             The limit.
    10843  * @param   GCPtrBase           The base address.
    10844  * @param   iSegReg             The index of the segment register to use for
    10845  *                              this access.  The base and limits are checked.
    10846  * @param   GCPtrMem            The address of the guest memory.
    10847  */
    10848 IEM_STATIC VBOXSTRICTRC
    10849 iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
    10850 {
    10851     /*
     10852      * The SIDT and SGDT instructions actually store the data using two
     10853      * independent writes.  The instructions do not respond to operand-size prefixes.
    10854      */
    10855     VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
    10856     if (rcStrict == VINF_SUCCESS)
    10857     {
    10858         if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
    10859             rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
    10860                                           IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
    10861                                           ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
    10862         else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
    10863             rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
    10864         else
    10865             rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
    10866     }
    10867     return rcStrict;
    10868 }
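/* Illustrative layout note (not part of the build): the xDTR image written by
   the function above is a 16-bit limit immediately followed by the base:

        offset 0:  limit (16 bits)
        offset 2:  base  (32 bits for 16/32-bit operands, 64 bits in long mode;
                          the top byte is forced to 0xff for 286-level CPUs
                          with a 16-bit operand, matching the code above) */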
    10869 
    10870 
    10871 /**
    10872  * Pushes a word onto the stack.
    10873  *
    10874  * @returns Strict VBox status code.
    10875  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10876  * @param   u16Value            The value to push.
    10877  */
    10878 IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
    10879 {
     10880     /* Decrement the stack pointer. */
    10881     uint64_t    uNewRsp;
    10882     RTGCPTR     GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
    10883 
    10884     /* Write the word the lazy way. */
    10885     uint16_t *pu16Dst;
    10886     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    10887     if (rc == VINF_SUCCESS)
    10888     {
    10889         *pu16Dst = u16Value;
    10890         rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
    10891     }
    10892 
     10893     /* Commit the new RSP value unless an access handler made trouble. */
    10894     if (rc == VINF_SUCCESS)
    10895         pVCpu->cpum.GstCtx.rsp = uNewRsp;
    10896 
    10897     return rc;
    10898 }
    10899 
    10900 
    10901 /**
    10902  * Pushes a dword onto the stack.
    10903  *
    10904  * @returns Strict VBox status code.
    10905  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10906  * @param   u32Value            The value to push.
    10907  */
    10908 IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
    10909 {
     10910     /* Decrement the stack pointer. */
    10911     uint64_t    uNewRsp;
    10912     RTGCPTR     GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
    10913 
    10914     /* Write the dword the lazy way. */
    10915     uint32_t *pu32Dst;
    10916     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    10917     if (rc == VINF_SUCCESS)
    10918     {
    10919         *pu32Dst = u32Value;
    10920         rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
    10921     }
    10922 
     10923     /* Commit the new RSP value unless an access handler made trouble. */
    10924     if (rc == VINF_SUCCESS)
    10925         pVCpu->cpum.GstCtx.rsp = uNewRsp;
    10926 
    10927     return rc;
    10928 }
    10929 
    10930 
    10931 /**
    10932  * Pushes a dword segment register value onto the stack.
    10933  *
    10934  * @returns Strict VBox status code.
    10935  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10936  * @param   u32Value            The value to push.
    10937  */
    10938 IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
    10939 {
     10940     /* Decrement the stack pointer. */
    10941     uint64_t    uNewRsp;
    10942     RTGCPTR     GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
    10943 
     10944     /* The Intel docs talk about zero extending the selector register
     10945        value.  My actual Intel CPU here might be zero extending the value,
     10946        but it still only writes the lower word... */
     10947     /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
     10948      * happens when crossing a page boundary: is the high word checked
     10949      * for write accessibility or not? Probably it is.  What about segment limits?
     10950      * It appears this behavior is also shared with trap error codes.
     10951      *
     10952      * Docs indicate the behavior changed somewhere around the Pentium or Pentium
     10953      * Pro. Check on ancient hardware to see when it actually did change. */
    10954     uint16_t *pu16Dst;
    10955     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
    10956     if (rc == VINF_SUCCESS)
    10957     {
    10958         *pu16Dst = (uint16_t)u32Value;
    10959         rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
    10960     }
    10961 
     10962     /* Commit the new RSP value unless an access handler made trouble. */
    10963     if (rc == VINF_SUCCESS)
    10964         pVCpu->cpum.GstCtx.rsp = uNewRsp;
    10965 
    10966     return rc;
    10967 }
    10968 
    10969 
    10970 /**
    10971  * Pushes a qword onto the stack.
    10972  *
    10973  * @returns Strict VBox status code.
    10974  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    10975  * @param   u64Value            The value to push.
    10976  */
    10977 IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
    10978 {
     10979     /* Decrement the stack pointer. */
    10980     uint64_t    uNewRsp;
    10981     RTGCPTR     GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
    10982 
     10983     /* Write the qword the lazy way. */
    10984     uint64_t *pu64Dst;
    10985     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    10986     if (rc == VINF_SUCCESS)
    10987     {
    10988         *pu64Dst = u64Value;
    10989         rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
    10990     }
    10991 
     10992     /* Commit the new RSP value unless an access handler made trouble. */
    10993     if (rc == VINF_SUCCESS)
    10994         pVCpu->cpum.GstCtx.rsp = uNewRsp;
    10995 
    10996     return rc;
    10997 }
    10998 
    10999 
    11000 /**
    11001  * Pops a word from the stack.
    11002  *
    11003  * @returns Strict VBox status code.
    11004  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11005  * @param   pu16Value           Where to store the popped value.
    11006  */
    11007 IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
    11008 {
    11009     /* Increment the stack pointer. */
    11010     uint64_t    uNewRsp;
    11011     RTGCPTR     GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
    11012 
     11013     /* Read the word the lazy way. */
    11014     uint16_t const *pu16Src;
    11015     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    11016     if (rc == VINF_SUCCESS)
    11017     {
    11018         *pu16Value = *pu16Src;
    11019         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
    11020 
    11021         /* Commit the new RSP value. */
    11022         if (rc == VINF_SUCCESS)
    11023             pVCpu->cpum.GstCtx.rsp = uNewRsp;
    11024     }
    11025 
    11026     return rc;
    11027 }
    11028 
    11029 
    11030 /**
    11031  * Pops a dword from the stack.
    11032  *
    11033  * @returns Strict VBox status code.
    11034  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11035  * @param   pu32Value           Where to store the popped value.
    11036  */
    11037 IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
    11038 {
    11039     /* Increment the stack pointer. */
    11040     uint64_t    uNewRsp;
    11041     RTGCPTR     GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
    11042 
     11043     /* Read the dword the lazy way. */
    11044     uint32_t const *pu32Src;
    11045     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    11046     if (rc == VINF_SUCCESS)
    11047     {
    11048         *pu32Value = *pu32Src;
    11049         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
    11050 
    11051         /* Commit the new RSP value. */
    11052         if (rc == VINF_SUCCESS)
    11053             pVCpu->cpum.GstCtx.rsp = uNewRsp;
    11054     }
    11055 
    11056     return rc;
    11057 }
    11058 
    11059 
    11060 /**
    11061  * Pops a qword from the stack.
    11062  *
    11063  * @returns Strict VBox status code.
    11064  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11065  * @param   pu64Value           Where to store the popped value.
    11066  */
    11067 IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
    11068 {
    11069     /* Increment the stack pointer. */
    11070     uint64_t    uNewRsp;
    11071     RTGCPTR     GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
    11072 
     11073     /* Read the qword the lazy way. */
    11074     uint64_t const *pu64Src;
    11075     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    11076     if (rc == VINF_SUCCESS)
    11077     {
    11078         *pu64Value = *pu64Src;
    11079         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
    11080 
    11081         /* Commit the new RSP value. */
    11082         if (rc == VINF_SUCCESS)
    11083             pVCpu->cpum.GstCtx.rsp = uNewRsp;
    11084     }
    11085 
    11086     return rc;
    11087 }
    11088 
    11089 
    11090 /**
    11091  * Pushes a word onto the stack, using a temporary stack pointer.
    11092  *
    11093  * @returns Strict VBox status code.
    11094  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11095  * @param   u16Value            The value to push.
    11096  * @param   pTmpRsp             Pointer to the temporary stack pointer.
    11097  */
    11098 IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
    11099 {
     11100     /* Decrement the stack pointer. */
    11101     RTUINT64U   NewRsp = *pTmpRsp;
    11102     RTGCPTR     GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
    11103 
    11104     /* Write the word the lazy way. */
    11105     uint16_t *pu16Dst;
    11106     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    11107     if (rc == VINF_SUCCESS)
    11108     {
    11109         *pu16Dst = u16Value;
    11110         rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
    11111     }
    11112 
     11113     /* Commit the new RSP value unless an access handler made trouble. */
    11114     if (rc == VINF_SUCCESS)
    11115         *pTmpRsp = NewRsp;
    11116 
    11117     return rc;
    11118 }
    11119 
    11120 
    11121 /**
    11122  * Pushes a dword onto the stack, using a temporary stack pointer.
    11123  *
    11124  * @returns Strict VBox status code.
    11125  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11126  * @param   u32Value            The value to push.
    11127  * @param   pTmpRsp             Pointer to the temporary stack pointer.
    11128  */
    11129 IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
    11130 {
     11131     /* Decrement the stack pointer. */
    11132     RTUINT64U   NewRsp = *pTmpRsp;
    11133     RTGCPTR     GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
    11134 
     11135     /* Write the dword the lazy way. */
    11136     uint32_t *pu32Dst;
    11137     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    11138     if (rc == VINF_SUCCESS)
    11139     {
    11140         *pu32Dst = u32Value;
    11141         rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
    11142     }
    11143 
     11144     /* Commit the new RSP value unless an access handler made trouble. */
    11145     if (rc == VINF_SUCCESS)
    11146         *pTmpRsp = NewRsp;
    11147 
    11148     return rc;
    11149 }
    11150 
    11151 
    11152 /**
     11153  * Pushes a qword onto the stack, using a temporary stack pointer.
    11154  *
    11155  * @returns Strict VBox status code.
    11156  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11157  * @param   u64Value            The value to push.
    11158  * @param   pTmpRsp             Pointer to the temporary stack pointer.
    11159  */
    11160 IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
    11161 {
     11162     /* Decrement the stack pointer. */
    11163     RTUINT64U   NewRsp = *pTmpRsp;
    11164     RTGCPTR     GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
    11165 
     11166     /* Write the qword the lazy way. */
    11167     uint64_t *pu64Dst;
    11168     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    11169     if (rc == VINF_SUCCESS)
    11170     {
    11171         *pu64Dst = u64Value;
    11172         rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
    11173     }
    11174 
     11175     /* Commit the new RSP value unless an access handler made trouble. */
    11176     if (rc == VINF_SUCCESS)
    11177         *pTmpRsp = NewRsp;
    11178 
    11179     return rc;
    11180 }
    11181 
    11182 
    11183 /**
    11184  * Pops a word from the stack, using a temporary stack pointer.
    11185  *
    11186  * @returns Strict VBox status code.
    11187  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11188  * @param   pu16Value           Where to store the popped value.
    11189  * @param   pTmpRsp             Pointer to the temporary stack pointer.
    11190  */
    11191 IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
    11192 {
    11193     /* Increment the stack pointer. */
    11194     RTUINT64U   NewRsp = *pTmpRsp;
    11195     RTGCPTR     GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
    11196 
     11197     /* Read the word the lazy way. */
    11198     uint16_t const *pu16Src;
    11199     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    11200     if (rc == VINF_SUCCESS)
    11201     {
    11202         *pu16Value = *pu16Src;
    11203         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
    11204 
    11205         /* Commit the new RSP value. */
    11206         if (rc == VINF_SUCCESS)
    11207             *pTmpRsp = NewRsp;
    11208     }
    11209 
    11210     return rc;
    11211 }
    11212 
    11213 
    11214 /**
    11215  * Pops a dword from the stack, using a temporary stack pointer.
    11216  *
    11217  * @returns Strict VBox status code.
    11218  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11219  * @param   pu32Value           Where to store the popped value.
    11220  * @param   pTmpRsp             Pointer to the temporary stack pointer.
    11221  */
    11222 IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
    11223 {
    11224     /* Increment the stack pointer. */
    11225     RTUINT64U   NewRsp = *pTmpRsp;
    11226     RTGCPTR     GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
    11227 
     11228     /* Read the dword the lazy way. */
    11229     uint32_t const *pu32Src;
    11230     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    11231     if (rc == VINF_SUCCESS)
    11232     {
    11233         *pu32Value = *pu32Src;
    11234         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
    11235 
    11236         /* Commit the new RSP value. */
    11237         if (rc == VINF_SUCCESS)
    11238             *pTmpRsp = NewRsp;
    11239     }
    11240 
    11241     return rc;
    11242 }
    11243 
    11244 
    11245 /**
    11246  * Pops a qword from the stack, using a temporary stack pointer.
    11247  *
    11248  * @returns Strict VBox status code.
    11249  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11250  * @param   pu64Value           Where to store the popped value.
    11251  * @param   pTmpRsp             Pointer to the temporary stack pointer.
    11252  */
    11253 IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
    11254 {
    11255     /* Increment the stack pointer. */
    11256     RTUINT64U   NewRsp = *pTmpRsp;
    11257     RTGCPTR     GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
    11258 
     11259     /* Read the qword the lazy way. */
    11260     uint64_t const *pu64Src;
    11261     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    11262     if (rcStrict == VINF_SUCCESS)
    11263     {
    11264         *pu64Value = *pu64Src;
    11265         rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
    11266 
    11267         /* Commit the new RSP value. */
    11268         if (rcStrict == VINF_SUCCESS)
    11269             *pTmpRsp = NewRsp;
    11270     }
    11271 
    11272     return rcStrict;
    11273 }
    11274 
    11275 
    11276 /**
    11277  * Begin a special stack push (used by interrupt, exceptions and such).
    11278  *
    11279  * This will raise \#SS or \#PF if appropriate.
    11280  *
    11281  * @returns Strict VBox status code.
    11282  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11283  * @param   cbMem               The number of bytes to push onto the stack.
    11284  * @param   ppvMem              Where to return the pointer to the stack memory.
    11285  *                              As with the other memory functions this could be
    11286  *                              direct access or bounce buffered access, so
     11287  *                              don't commit register state until the commit call
    11288  *                              succeeds.
    11289  * @param   puNewRsp            Where to return the new RSP value.  This must be
    11290  *                              passed unchanged to
    11291  *                              iemMemStackPushCommitSpecial().
    11292  */
    11293 IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
    11294 {
    11295     Assert(cbMem < UINT8_MAX);
    11296     RTGCPTR     GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
    11297     return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
    11298 }
    11299 
    11300 
    11301 /**
    11302  * Commits a special stack push (started by iemMemStackPushBeginSpecial).
    11303  *
    11304  * This will update the rSP.
    11305  *
    11306  * @returns Strict VBox status code.
    11307  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11308  * @param   pvMem               The pointer returned by
    11309  *                              iemMemStackPushBeginSpecial().
    11310  * @param   uNewRsp             The new RSP value returned by
    11311  *                              iemMemStackPushBeginSpecial().
    11312  */
    11313 IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
    11314 {
    11315     VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
    11316     if (rcStrict == VINF_SUCCESS)
    11317         pVCpu->cpum.GstCtx.rsp = uNewRsp;
    11318     return rcStrict;
    11319 }
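/* Minimal usage sketch for the special-push API above (illustrative only;
   uValueToPush is a made-up variable and error handling is abbreviated):

       void        *pvStackMem;
       uint64_t     uNewRsp;
       VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint64_t),
                                                           &pvStackMem, &uNewRsp);
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;
       *(uint64_t *)pvStackMem = uValueToPush;
       rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackMem, uNewRsp);

   The commit call unmaps the buffer and, on success, writes uNewRsp to
   CPUMCTX::rsp. */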
    11320 
    11321 
    11322 /**
    11323  * Begin a special stack pop (used by iret, retf and such).
    11324  *
    11325  * This will raise \#SS or \#PF if appropriate.
    11326  *
    11327  * @returns Strict VBox status code.
    11328  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11329  * @param   cbMem               The number of bytes to pop from the stack.
    11330  * @param   ppvMem              Where to return the pointer to the stack memory.
    11331  * @param   puNewRsp            Where to return the new RSP value.  This must be
    11332  *                              assigned to CPUMCTX::rsp manually some time
    11333  *                              after iemMemStackPopDoneSpecial() has been
    11334  *                              called.
    11335  */
    11336 IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
    11337 {
    11338     Assert(cbMem < UINT8_MAX);
    11339     RTGCPTR     GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
    11340     return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    11341 }
    11342 
    11343 
    11344 /**
    11345  * Continue a special stack pop (used by iret and retf).
    11346  *
    11347  * This will raise \#SS or \#PF if appropriate.
    11348  *
    11349  * @returns Strict VBox status code.
    11350  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11351  * @param   cbMem               The number of bytes to pop from the stack.
    11352  * @param   ppvMem              Where to return the pointer to the stack memory.
    11353  * @param   puNewRsp            Where to return the new RSP value.  This must be
    11354  *                              assigned to CPUMCTX::rsp manually some time
    11355  *                              after iemMemStackPopDoneSpecial() has been
    11356  *                              called.
    11357  */
    11358 IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
    11359 {
    11360     Assert(cbMem < UINT8_MAX);
    11361     RTUINT64U   NewRsp;
    11362     NewRsp.u = *puNewRsp;
    11363     RTGCPTR     GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
    11364     *puNewRsp = NewRsp.u;
    11365     return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
    11366 }
    11367 
    11368 
    11369 /**
    11370  * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
    11371  * iemMemStackPopContinueSpecial).
    11372  *
    11373  * The caller will manually commit the rSP.
    11374  *
    11375  * @returns Strict VBox status code.
    11376  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11377  * @param   pvMem               The pointer returned by
    11378  *                              iemMemStackPopBeginSpecial() or
    11379  *                              iemMemStackPopContinueSpecial().
    11380  */
    11381 IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
    11382 {
    11383     return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
    11384 }
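/* Matching pop-side sketch (illustrative only; uPopped is a made-up variable).
   Note that unlike the push case, RSP must be committed manually by the
   caller after the done call, as documented above:

       void const  *pvStackMem;
       uint64_t     uNewRsp;
       VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint64_t),
                                                          &pvStackMem, &uNewRsp);
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;
       uint64_t const uPopped = *(uint64_t const *)pvStackMem;
       rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvStackMem);
       if (rcStrict == VINF_SUCCESS)
           pVCpu->cpum.GstCtx.rsp = uNewRsp;
*/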
    11385 
    11386 
    11387 /**
    11388  * Fetches a system table byte.
    11389  *
    11390  * @returns Strict VBox status code.
    11391  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11392  * @param   pbDst               Where to return the byte.
    11393  * @param   iSegReg             The index of the segment register to use for
    11394  *                              this access.  The base and limits are checked.
    11395  * @param   GCPtrMem            The address of the guest memory.
    11396  */
    11397 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    11398 {
    11399     /* The lazy approach for now... */
    11400     uint8_t const *pbSrc;
    11401     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
    11402     if (rc == VINF_SUCCESS)
    11403     {
    11404         *pbDst = *pbSrc;
    11405         rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
    11406     }
    11407     return rc;
    11408 }
    11409 
    11410 
    11411 /**
    11412  * Fetches a system table word.
    11413  *
    11414  * @returns Strict VBox status code.
    11415  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11416  * @param   pu16Dst             Where to return the word.
    11417  * @param   iSegReg             The index of the segment register to use for
    11418  *                              this access.  The base and limits are checked.
    11419  * @param   GCPtrMem            The address of the guest memory.
    11420  */
    11421 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    11422 {
    11423     /* The lazy approach for now... */
    11424     uint16_t const *pu16Src;
    11425     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
    11426     if (rc == VINF_SUCCESS)
    11427     {
    11428         *pu16Dst = *pu16Src;
    11429         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
    11430     }
    11431     return rc;
    11432 }
    11433 
    11434 
    11435 /**
    11436  * Fetches a system table dword.
    11437  *
    11438  * @returns Strict VBox status code.
    11439  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11440  * @param   pu32Dst             Where to return the dword.
    11441  * @param   iSegReg             The index of the segment register to use for
    11442  *                              this access.  The base and limits are checked.
    11443  * @param   GCPtrMem            The address of the guest memory.
    11444  */
    11445 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    11446 {
    11447     /* The lazy approach for now... */
    11448     uint32_t const *pu32Src;
    11449     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
    11450     if (rc == VINF_SUCCESS)
    11451     {
    11452         *pu32Dst = *pu32Src;
    11453         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
    11454     }
    11455     return rc;
    11456 }
    11457 
    11458 
    11459 /**
    11460  * Fetches a system table qword.
    11461  *
    11462  * @returns Strict VBox status code.
    11463  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11464  * @param   pu64Dst             Where to return the qword.
    11465  * @param   iSegReg             The index of the segment register to use for
    11466  *                              this access.  The base and limits are checked.
    11467  * @param   GCPtrMem            The address of the guest memory.
    11468  */
    11469 IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
    11470 {
    11471     /* The lazy approach for now... */
    11472     uint64_t const *pu64Src;
    11473     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
    11474     if (rc == VINF_SUCCESS)
    11475     {
    11476         *pu64Dst = *pu64Src;
    11477         rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
    11478     }
    11479     return rc;
    11480 }
    11481 
    11482 
    11483 /**
     11484  * Fetches a descriptor table entry with a caller-specified error code.
    11485  *
    11486  * @returns Strict VBox status code.
    11487  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11488  * @param   pDesc               Where to return the descriptor table entry.
     11489  * @param   uSel                The selector whose table entry to fetch.
    11490  * @param   uXcpt               The exception to raise on table lookup error.
    11491  * @param   uErrorCode          The error code associated with the exception.
    11492  */
    11493 IEM_STATIC VBOXSTRICTRC
    11494 iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
    11495 {
    11496     AssertPtr(pDesc);
    11497     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
    11498 
    11499     /** @todo did the 286 require all 8 bytes to be accessible? */
    11500     /*
    11501      * Get the selector table base and check bounds.
    11502      */
    11503     RTGCPTR GCPtrBase;
    11504     if (uSel & X86_SEL_LDT)
    11505     {
    11506         if (   !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
    11507             || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
    11508         {
    11509             Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
    11510                  uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
    11511             return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
    11512                                      uErrorCode, 0);
    11513         }
    11514 
    11515         Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
    11516         GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
    11517     }
    11518     else
    11519     {
    11520         if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
    11521         {
    11522             Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
    11523             return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
    11524                                      uErrorCode, 0);
    11525         }
    11526         GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
    11527     }
    11528 
    11529     /*
    11530      * Read the legacy descriptor and maybe the long mode extensions if
    11531      * required.
    11532      */
    11533     VBOXSTRICTRC rcStrict;
    11534     if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
    11535         rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
    11536     else
    11537     {
    11538         rcStrict     = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
    11539         if (rcStrict == VINF_SUCCESS)
    11540             rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
    11541         if (rcStrict == VINF_SUCCESS)
    11542             rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
    11543         if (rcStrict == VINF_SUCCESS)
    11544             pDesc->Legacy.au16[3] = 0;
    11545         else
    11546             return rcStrict;
    11547     }
    11548 
    11549     if (rcStrict == VINF_SUCCESS)
    11550     {
    11551         if (   !IEM_IS_LONG_MODE(pVCpu)
    11552             || pDesc->Legacy.Gen.u1DescType)
    11553             pDesc->Long.au64[1] = 0;
    11554         else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
    11555             rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
    11556         else
    11557         {
    11558             Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
    11559             /** @todo is this the right exception? */
    11560             return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
    11561         }
    11562     }
    11563     return rcStrict;
    11564 }
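/* Worked example (illustrative): for uSel = 0x002b the TI bit (X86_SEL_LDT)
   is clear and the RPL is 3, so the descriptor is read from the GDT at
   GCPtrBase + (0x002b & X86_SEL_MASK) = GCPtrBase + 0x28, i.e. GDT entry 5. */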
    11565 
    11566 
    11567 /**
    11568  * Fetches a descriptor table entry.
    11569  *
    11570  * @returns Strict VBox status code.
    11571  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11572  * @param   pDesc               Where to return the descriptor table entry.
     11573  * @param   uSel                The selector whose table entry to fetch.
    11574  * @param   uXcpt               The exception to raise on table lookup error.
    11575  */
    11576 IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
    11577 {
    11578     return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
    11579 }
    11580 
    11581 
    11582 /**
     11583  * Fakes a long mode stack descriptor for SS = 0.
    11584  *
    11585  * @param   pDescSs             Where to return the fake stack descriptor.
    11586  * @param   uDpl                The DPL we want.
    11587  */
    11588 IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
    11589 {
    11590     pDescSs->Long.au64[0] = 0;
    11591     pDescSs->Long.au64[1] = 0;
    11592     pDescSs->Long.Gen.u4Type     = X86_SEL_TYPE_RW_ACC;
    11593     pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
    11594     pDescSs->Long.Gen.u2Dpl      = uDpl;
    11595     pDescSs->Long.Gen.u1Present  = 1;
    11596     pDescSs->Long.Gen.u1Long     = 1;
    11597 }
    11598 
    11599 
    11600 /**
    11601  * Marks the selector descriptor as accessed (only non-system descriptors).
    11602  *
     11603  * This function ASSUMES that iemMemFetchSelDesc has been called previously and
    11604  * will therefore skip the limit checks.
    11605  *
    11606  * @returns Strict VBox status code.
    11607  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    11608  * @param   uSel                The selector.
    11609  */
    11610 IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
    11611 {
    11612     /*
    11613      * Get the selector table base and calculate the entry address.
    11614      */
    11615     RTGCPTR GCPtr = uSel & X86_SEL_LDT
    11616                   ? pVCpu->cpum.GstCtx.ldtr.u64Base
    11617                   : pVCpu->cpum.GstCtx.gdtr.pGdt;
    11618     GCPtr += uSel & X86_SEL_MASK;
    11619 
    11620     /*
    11621      * ASMAtomicBitSet will assert if the address is misaligned, so do some
     11622      * ugly stuff to avoid this.  This will make sure it's an atomic access
     11623      * as well as more or less remove any question about 8-bit or 32-bit accesses.
    11624      */
    11625     VBOXSTRICTRC        rcStrict;
    11626     uint32_t volatile  *pu32;
    11627     if ((GCPtr & 3) == 0)
    11628     {
     11629         /* The normal case: map the 32 bits around the accessed bit (bit 40). */
    11630         GCPtr += 2 + 2;
    11631         rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
    11632         if (rcStrict != VINF_SUCCESS)
    11633             return rcStrict;
     11634         ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
    11635     }
    11636     else
    11637     {
    11638         /* The misaligned GDT/LDT case, map the whole thing. */
    11639         rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
    11640         if (rcStrict != VINF_SUCCESS)
    11641             return rcStrict;
    11642         switch ((uintptr_t)pu32 & 3)
    11643         {
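            /* The accessed flag lives in bit 40 of the 8-byte descriptor
               (byte 5, bit 0).  pu32 points at the start of the mapped
               descriptor; each case below picks a 4-byte aligned byte base
               near it and adjusts the bit index so that the very same
               descriptor bit is targeted regardless of the misalignment. */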
    11644             case 0: ASMAtomicBitSet(pu32,                         40 + 0 -  0); break;
    11645             case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
    11646             case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
    11647             case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 -  8); break;
    11648         }
    11649     }
    11650 
    11651     return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
    11652 }
    11653 
    11654 /** @} */
    11655 
    11656 
    11657 /*
    11658  * Include the C/C++ implementation of instruction.
    11659  */
    11660 #include "IEMAllCImpl.cpp.h"
    11661 
    1166223
    1166324
     
    130071368/** @}  */
    130081369
    13009 
    13010 /** @name   Opcode Debug Helpers.
    13011  * @{
    13012  */
    13013 #ifdef VBOX_WITH_STATISTICS
    13014 # ifdef IN_RING3
    13015 #  define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsR3.a_Stats += 1; } while (0)
    13016 # else
    13017 #  define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsRZ.a_Stats += 1; } while (0)
    13018 # endif
    13019 #else
    13020 # define IEMOP_INC_STATS(a_Stats) do { } while (0)
    13021 #endif
    13022 
    13023 #ifdef DEBUG
    13024 # define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
    13025     do { \
    13026         IEMOP_INC_STATS(a_Stats); \
    13027         Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
    13028               pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
    13029     } while (0)
    13030 
    13031 # define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
    13032     do { \
    13033         IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
    13034         (void)RT_CONCAT(IEMOPFORM_, a_Form); \
    13035         (void)RT_CONCAT(OP_,a_Upper); \
    13036         (void)(a_fDisHints); \
    13037         (void)(a_fIemHints); \
    13038     } while (0)
    13039 
    13040 # define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
    13041     do { \
    13042         IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
    13043         (void)RT_CONCAT(IEMOPFORM_, a_Form); \
    13044         (void)RT_CONCAT(OP_,a_Upper); \
    13045         (void)RT_CONCAT(OP_PARM_,a_Op1); \
    13046         (void)(a_fDisHints); \
    13047         (void)(a_fIemHints); \
    13048     } while (0)
    13049 
    13050 # define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
    13051     do { \
    13052         IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
    13053         (void)RT_CONCAT(IEMOPFORM_, a_Form); \
    13054         (void)RT_CONCAT(OP_,a_Upper); \
    13055         (void)RT_CONCAT(OP_PARM_,a_Op1); \
    13056         (void)RT_CONCAT(OP_PARM_,a_Op2); \
    13057         (void)(a_fDisHints); \
    13058         (void)(a_fIemHints); \
    13059     } while (0)
    13060 
    13061 # define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
    13062     do { \
    13063         IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
    13064         (void)RT_CONCAT(IEMOPFORM_, a_Form); \
    13065         (void)RT_CONCAT(OP_,a_Upper); \
    13066         (void)RT_CONCAT(OP_PARM_,a_Op1); \
    13067         (void)RT_CONCAT(OP_PARM_,a_Op2); \
    13068         (void)RT_CONCAT(OP_PARM_,a_Op3); \
    13069         (void)(a_fDisHints); \
    13070         (void)(a_fIemHints); \
    13071     } while (0)
    13072 
    13073 # define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
    13074     do { \
    13075         IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
    13076         (void)RT_CONCAT(IEMOPFORM_, a_Form); \
    13077         (void)RT_CONCAT(OP_,a_Upper); \
    13078         (void)RT_CONCAT(OP_PARM_,a_Op1); \
    13079         (void)RT_CONCAT(OP_PARM_,a_Op2); \
    13080         (void)RT_CONCAT(OP_PARM_,a_Op3); \
    13081         (void)RT_CONCAT(OP_PARM_,a_Op4); \
    13082         (void)(a_fDisHints); \
    13083         (void)(a_fIemHints); \
    13084     } while (0)
    13085 
    13086 #else
    13087 # define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
    13088 
    13089 # define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
    13090          IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
    13091 # define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
    13092          IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
    13093 # define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
    13094          IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
    13095 # define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
    13096          IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
    13097 # define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
    13098          IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
    13099 
    13100 #endif
    13101 
    13102 #define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
    13103     IEMOP_MNEMONIC0EX(a_Lower, \
    13104                       #a_Lower, \
    13105                       a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
    13106 #define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
    13107     IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
    13108                       #a_Lower " " #a_Op1, \
    13109                       a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
    13110 #define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
    13111     IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
    13112                       #a_Lower " " #a_Op1 "," #a_Op2, \
    13113                       a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
    13114 #define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
    13115     IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
    13116                       #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
    13117                       a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
    13118 #define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
    13119     IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
    13120                       #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
    13121                       a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
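/* Hypothetical decoder usage (illustrative only; the a_Form/a_Upper/a_Op*
   tokens must name existing IEMOPFORM_*, OP_* and OP_PARM_* definitions):

       FNIEMOP_DEF(iemOp_example)
       {
           IEMOP_MNEMONIC2(RM, XCHG, xchg, Ev, Gv, DISOPTYPE_HARMLESS, 0);
           ...
       }

   This bumps the xchg_Ev_Gv statistics counter (when VBOX_WITH_STATISTICS is
   defined) and, in DEBUG builds, logs the decoded mnemonic via Log4 and
   compile-time references the IEMOPFORM_RM, OP_XCHG, OP_PARM_Ev and
   OP_PARM_Gv tokens so that typos fail the build. */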
    13122 
    13123 /** @} */
    13124 
    13125 
    13126 /** @name   Opcode Helpers.
    13127  * @{
    13128  */
    13129 
    13130 #ifdef IN_RING3
    13131 # define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
    13132     do { \
    13133         if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
    13134         else \
    13135         { \
    13136             (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
    13137             return IEMOP_RAISE_INVALID_OPCODE(); \
    13138         } \
    13139     } while (0)
    13140 #else
    13141 # define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
    13142     do { \
    13143         if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
    13144         else return IEMOP_RAISE_INVALID_OPCODE(); \
    13145     } while (0)
    13146 #endif
    13147 
    13148 /** The instruction requires a 186 or later. */
    13149 #if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
    13150 # define IEMOP_HLP_MIN_186() do { } while (0)
    13151 #else
    13152 # define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
    13153 #endif
    13154 
    13155 /** The instruction requires a 286 or later. */
    13156 #if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
    13157 # define IEMOP_HLP_MIN_286() do { } while (0)
    13158 #else
    13159 # define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
    13160 #endif
    13161 
    13162 /** The instruction requires a 386 or later. */
    13163 #if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
    13164 # define IEMOP_HLP_MIN_386() do { } while (0)
    13165 #else
    13166 # define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
    13167 #endif
    13168 
    13169 /** The instruction requires a 386 or later if the given expression is true. */
    13170 #if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
    13171 # define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
    13172 #else
    13173 # define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
    13174 #endif
    13175 
    13176 /** The instruction requires a 486 or later. */
    13177 #if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
    13178 # define IEMOP_HLP_MIN_486() do { } while (0)
    13179 #else
    13180 # define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
    13181 #endif
    13182 
    13183 /** The instruction requires a Pentium (586) or later. */
    13184 #if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
    13185 # define IEMOP_HLP_MIN_586() do { } while (0)
    13186 #else
    13187 # define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
    13188 #endif
    13189 
    13190 /** The instruction requires a PentiumPro (686) or later. */
    13191 #if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
    13192 # define IEMOP_HLP_MIN_686() do { } while (0)
    13193 #else
    13194 # define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
    13195 #endif
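/* Hypothetical usage sketch (illustrative only): an opcode routine for an
   instruction introduced with the 386 would start out along these lines:

       FNIEMOP_DEF(iemOp_example)
       {
           IEMOP_HLP_MIN_386();
           ...
       }

   When IEM_CFG_TARGET_CPU is 386 or later the check compiles to nothing;
   otherwise it raises an invalid-opcode exception at runtime when the
   configured target CPU is too old. */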
    13196 
    13197 
    13198 /** The instruction raises an \#UD in real and V8086 mode. */
    13199 #define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
    13200     do \
    13201     { \
    13202         if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
    13203         else return IEMOP_RAISE_INVALID_OPCODE(); \
    13204     } while (0)
    13205 
    13206 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
     13207 /** This instruction raises an \#UD in real and V8086 mode, and in long mode
     13208  *  when not using a 64-bit code segment (applicable to all VMX instructions
     13209  *  except VMCALL).
    13210  */
    13211 #define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
    13212     do \
    13213     { \
    13214         if (   !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
    13215             && (  !IEM_IS_LONG_MODE(pVCpu) \
    13216                 || IEM_IS_64BIT_CODE(pVCpu))) \
    13217         { /* likely */ } \
    13218         else \
    13219         { \
    13220             if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
    13221             { \
    13222                 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
    13223                 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
    13224                 return IEMOP_RAISE_INVALID_OPCODE(); \
    13225             } \
    13226             if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
    13227             { \
    13228                 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
    13229                 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
    13230                 return IEMOP_RAISE_INVALID_OPCODE(); \
    13231             } \
    13232         } \
    13233     } while (0)
    13234 
    13235 /** The instruction can only be executed in VMX operation (VMX root mode and
    13236  * non-root mode).
    13237  *
    13238  *  @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
    13239  */
    13240 # define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
    13241     do \
    13242     { \
    13243         if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
    13244         else \
    13245         { \
    13246             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
    13247             Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
    13248             return IEMOP_RAISE_INVALID_OPCODE(); \
    13249         } \
    13250     } while (0)
    13251 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    13252 
    13253 /** The instruction is not available in 64-bit mode, throw \#UD if we're in
    13254  * 64-bit mode. */
    13255 #define IEMOP_HLP_NO_64BIT() \
    13256     do \
    13257     { \
    13258         if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
    13259             return IEMOP_RAISE_INVALID_OPCODE(); \
    13260     } while (0)
    13261 
    13262 /** The instruction is only available in 64-bit mode, throw \#UD if we're not in
    13263  * 64-bit mode. */
    13264 #define IEMOP_HLP_ONLY_64BIT() \
    13265     do \
    13266     { \
    13267         if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
    13268             return IEMOP_RAISE_INVALID_OPCODE(); \
    13269     } while (0)
    13270 
    13271 /** The instruction defaults to 64-bit operand size if 64-bit mode. */
    13272 #define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
    13273     do \
    13274     { \
    13275         if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
    13276             iemRecalEffOpSize64Default(pVCpu); \
    13277     } while (0)
    13278 
    13279 /** The instruction has 64-bit operand size if 64-bit mode. */
    13280 #define IEMOP_HLP_64BIT_OP_SIZE() \
    13281     do \
    13282     { \
    13283         if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
    13284             pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
    13285     } while (0)
    13286 
     13287 /** Only a REX prefix immediately preceding the first opcode byte takes
     13288  * effect. This macro helps ensure this, as well as log bad guest code.  */
    13289 #define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
    13290     do \
    13291     { \
    13292         if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
    13293         { \
    13294             Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
    13295             pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
    13296             pVCpu->iem.s.uRexB     = 0; \
    13297             pVCpu->iem.s.uRexIndex = 0; \
    13298             pVCpu->iem.s.uRexReg   = 0; \
    13299             iemRecalEffOpSize(pVCpu); \
    13300         } \
    13301     } while (0)
    13302 
    13303 /**
    13304  * Done decoding.
    13305  */
    13306 #define IEMOP_HLP_DONE_DECODING() \
    13307     do \
    13308     { \
     13309         /* nothing for now, maybe later... */
    13310     } while (0)
    13311 
    13312 /**
    13313  * Done decoding, raise \#UD exception if lock prefix present.
    13314  */
    13315 #define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
    13316     do \
    13317     { \
    13318         if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
    13319         { /* likely */ } \
    13320         else \
    13321             return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
    13322     } while (0)
    13323 
    13324 
    13325 /**
    13326  * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
    13327  * repnz or size prefixes are present, or if in real or v8086 mode.
    13328  */
    13329 #define IEMOP_HLP_DONE_VEX_DECODING() \
    13330     do \
    13331     { \
    13332         if (RT_LIKELY(   !(  pVCpu->iem.s.fPrefixes \
    13333                            & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
    13334                       && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
    13335         { /* likely */ } \
    13336         else \
    13337             return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
    13338     } while (0)
    13339 
    13340 /**
     13341  * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
     13342  * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L != 0.
    13343  */
    13344 #define IEMOP_HLP_DONE_VEX_DECODING_L0() \
    13345     do \
    13346     { \
    13347         if (RT_LIKELY(   !(  pVCpu->iem.s.fPrefixes \
    13348                            & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
    13349                       && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
    13350                       && pVCpu->iem.s.uVexLength == 0)) \
    13351         { /* likely */ } \
    13352         else \
    13353             return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
    13354     } while (0)
    13355 
    13356 
    13357 /**
    13358  * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
    13359  * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
    13360  * register 0, or if in real or v8086 mode.
    13361  */
    13362 #define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
    13363     do \
    13364     { \
    13365         if (RT_LIKELY(   !(  pVCpu->iem.s.fPrefixes \
    13366                            & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
    13367                       && !pVCpu->iem.s.uVex3rdReg \
    13368                       && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
    13369         { /* likely */ } \
    13370         else \
    13371             return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
    13372     } while (0)
    13373 
    13374 /**
    13375  * Done decoding VEX, no V, L=0.
    13376  * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
    13377  * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
    13378  */
    13379 #define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
    13380     do \
    13381     { \
    13382         if (RT_LIKELY(   !(  pVCpu->iem.s.fPrefixes \
    13383                            & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
    13384                       && pVCpu->iem.s.uVexLength == 0 \
    13385                       && pVCpu->iem.s.uVex3rdReg == 0 \
    13386                       && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
    13387         { /* likely */ } \
    13388         else \
    13389             return IEMOP_RAISE_INVALID_OPCODE(); \
    13390     } while (0)
    13391 
    13392 #define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
    13393     do \
    13394     { \
    13395         if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
    13396         { /* likely */ } \
    13397         else \
    13398         { \
    13399             NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
    13400             return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
    13401         } \
    13402     } while (0)
    13403 #define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
    13404     do \
    13405     { \
    13406         if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
    13407         { /* likely */ } \
    13408         else \
    13409         { \
    13410             NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
    13411             return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
    13412         } \
    13413     } while (0)
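/* Usage sketch for the _NL_ helpers: the extra arguments are disassembler
   metadata that is referenced only via NOREF on the \#UD path so builds stay
   warning-free; the argument values below are placeholders, not taken from an
   actual opcode table:

       IEMOP_HLP_DECODED_NL_1(OP_SGDT, IEMOPFORM_M, OP_PARM_Ms, DISOPTYPE_HARMLESS);
 */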
    13414 
    13415 /**
    13416  * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
    13417  * are present.
    13418  */
    13419 #define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
    13420     do \
    13421     { \
    13422         if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
    13423         { /* likely */ } \
    13424         else \
    13425             return IEMOP_RAISE_INVALID_OPCODE(); \
    13426     } while (0)
    13427 
    13428 /**
    13429  * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
    13430  * prefixes are present.
    13431  */
    13432 #define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
    13433     do \
    13434     { \
    13435         if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
    13436         { /* likely */ } \
    13437         else \
    13438             return IEMOP_RAISE_INVALID_OPCODE(); \
    13439     } while (0)
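/* Usage sketch (hypothetical handler): an instruction form that accepts
   neither a 66h operand-size override nor F2h/F3h prefixes would typically
   check right after fetching the ModRM byte:

       uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
       IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES();
 */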
    13440 
    13441 
    13442 /**
    13443  * Calculates the effective address of a ModR/M memory operand.
    13444  *
    13445  * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
    13446  *
    13447  * @return  Strict VBox status code.
    13448  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    13449  * @param   bRm                 The ModRM byte.
    13450  * @param   cbImm               The size of any immediate following the
    13451  *                              effective address opcode bytes. Important for
    13452  *                              RIP relative addressing.
    13453  * @param   pGCPtrEff           Where to return the effective address.
    13454  */
    13455 IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
    13456 {
    13457     Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
    13458 # define SET_SS_DEF() \
    13459     do \
    13460     { \
    13461         if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
    13462             pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
    13463     } while (0)
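    /* Note: BP/SP-based addressing defaults to the stack segment on x86;
       SET_SS_DEF applies that default only when no explicit segment override
       prefix was decoded (IEM_OP_PRF_SEG_MASK). */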
    13464 
    13465     if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
    13466     {
    13467 /** @todo Check the effective address size crap! */
    13468         if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
    13469         {
    13470             uint16_t u16EffAddr;
    13471 
    13472             /* Handle the disp16 form with no registers first. */
    13473             if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
    13474                 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
    13475             else
    13476             {
    13477                 /* Get the displacement. */
    13478                 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    13479                 {
    13480                     case 0:  u16EffAddr = 0;                             break;
    13481                     case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
    13482                     case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
    13483                     default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
    13484                 }
    13485 
    13486                 /* Add the base and index registers to the disp. */
    13487                 switch (bRm & X86_MODRM_RM_MASK)
    13488                 {
    13489                     case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
    13490                     case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
    13491                     case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
    13492                     case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
    13493                     case 4: u16EffAddr += pVCpu->cpum.GstCtx.si;            break;
    13494                     case 5: u16EffAddr += pVCpu->cpum.GstCtx.di;            break;
    13495                     case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp;            SET_SS_DEF(); break;
    13496                     case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx;            break;
    13497                 }
    13498             }
    13499 
    13500             *pGCPtrEff = u16EffAddr;
    13501         }
    13502         else
    13503         {
    13504             Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    13505             uint32_t u32EffAddr;
    13506 
    13507             /* Handle the disp32 form with no registers first. */
    13508             if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    13509                 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
    13510             else
    13511             {
    13512                 /* Get the register (or SIB) value. */
    13513                 switch ((bRm & X86_MODRM_RM_MASK))
    13514                 {
    13515                     case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
    13516                     case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
    13517                     case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
    13518                     case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
    13519                     case 4: /* SIB */
    13520                     {
    13521                         uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
    13522 
    13523                         /* Get the index and scale it. */
    13524                         switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
    13525                         {
    13526                             case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
    13527                             case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
    13528                             case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
    13529                             case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
    13530                             case 4: u32EffAddr = 0; /*none */ break;
    13531                             case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
    13532                             case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
    13533                             case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    13534                             IEM_NOT_REACHED_DEFAULT_CASE_RET();
    13535                         }
    13536                         u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    13537 
    13538                         /* add base */
    13539                         switch (bSib & X86_SIB_BASE_MASK)
    13540                         {
    13541                             case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
    13542                             case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
    13543                             case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
    13544                             case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
    13545                             case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
    13546                             case 5:
    13547                                 if ((bRm & X86_MODRM_MOD_MASK) != 0)
    13548                                 {
    13549                                     u32EffAddr += pVCpu->cpum.GstCtx.ebp;
    13550                                     SET_SS_DEF();
    13551                                 }
    13552                                 else
    13553                                 {
    13554                                     uint32_t u32Disp;
    13555                                     IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    13556                                     u32EffAddr += u32Disp;
    13557                                 }
    13558                                 break;
    13559                             case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
    13560                             case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
    13561                             IEM_NOT_REACHED_DEFAULT_CASE_RET();
    13562                         }
    13563                         break;
    13564                     }
    13565                     case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
    13566                     case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
    13567                     case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    13568                     IEM_NOT_REACHED_DEFAULT_CASE_RET();
    13569                 }
    13570 
    13571                 /* Get and add the displacement. */
    13572                 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    13573                 {
    13574                     case 0:
    13575                         break;
    13576                     case 1:
    13577                     {
    13578                         int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
    13579                         u32EffAddr += i8Disp;
    13580                         break;
    13581                     }
    13582                     case 2:
    13583                     {
    13584                         uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    13585                         u32EffAddr += u32Disp;
    13586                         break;
    13587                     }
    13588                     default:
    13589                         AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
    13590                 }
    13591 
    13592             }
    13593             if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
    13594                 *pGCPtrEff = u32EffAddr;
    13595             else
    13596             {
    13597                 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
    13598                 *pGCPtrEff = u32EffAddr & UINT16_MAX;
    13599             }
    13600         }
    13601     }
    13602     else
    13603     {
    13604         uint64_t u64EffAddr;
    13605 
    13606         /* Handle the rip+disp32 form with no registers first. */
    13607         if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    13608         {
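            /* Note: the disp32 is relative to the start of the *next*
               instruction, so on top of RIP we add the bytes decoded so far
               (IEM_GET_INSTR_LEN) plus the size of any trailing immediate
               (cbImm) that hasn't been fetched yet. */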
    13609             IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
    13610             u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
    13611         }
    13612         else
    13613         {
    13614             /* Get the register (or SIB) value. */
    13615             switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
    13616             {
    13617                 case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
    13618                 case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
    13619                 case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
    13620                 case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
    13621                 case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
    13622                 case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
    13623                 case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
    13624                 case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
    13625                 case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
    13626                 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
    13627                 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
    13628                 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
    13629                 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
    13630                 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
    13631                 /* SIB */
    13632                 case 4:
    13633                 case 12:
    13634                 {
    13635                     uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
    13636 
    13637                     /* Get the index and scale it. */
    13638                     switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
    13639                     {
    13640                         case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
    13641                         case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
    13642                         case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
    13643                         case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
    13644                         case  4: u64EffAddr = 0; /*none */ break;
    13645                         case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
    13646                         case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
    13647                         case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
    13648                         case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
    13649                         case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
    13650                         case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
    13651                         case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
    13652                         case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
    13653                         case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
    13654                         case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
    13655                         case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
    13656                         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    13657                     }
    13658                     u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    13659 
    13660                     /* add base */
    13661                     switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
    13662                     {
    13663                         case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
    13664                         case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
    13665                         case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
    13666                         case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
    13667                         case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
    13668                         case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
    13669                         case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
    13670                         case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
    13671                         case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
    13672                         case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
    13673                         case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
    13674                         case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
    13675                         case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
    13676                         case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
    13677                         /* complicated encodings */
    13678                         case 5:
    13679                         case 13:
    13680                             if ((bRm & X86_MODRM_MOD_MASK) != 0)
    13681                             {
    13682                                 if (!pVCpu->iem.s.uRexB)
    13683                                 {
    13684                                     u64EffAddr += pVCpu->cpum.GstCtx.rbp;
    13685                                     SET_SS_DEF();
    13686                                 }
    13687                                 else
    13688                                     u64EffAddr += pVCpu->cpum.GstCtx.r13;
    13689                             }
    13690                             else
    13691                             {
    13692                                 uint32_t u32Disp;
    13693                                 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    13694                                 u64EffAddr += (int32_t)u32Disp;
    13695                             }
    13696                             break;
    13697                         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    13698                     }
    13699                     break;
    13700                 }
    13701                 IEM_NOT_REACHED_DEFAULT_CASE_RET();
    13702             }
    13703 
    13704             /* Get and add the displacement. */
    13705             switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    13706             {
    13707                 case 0:
    13708                     break;
    13709                 case 1:
    13710                 {
    13711                     int8_t i8Disp;
    13712                     IEM_OPCODE_GET_NEXT_S8(&i8Disp);
    13713                     u64EffAddr += i8Disp;
    13714                     break;
    13715                 }
    13716                 case 2:
    13717                 {
    13718                     uint32_t u32Disp;
    13719                     IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    13720                     u64EffAddr += (int32_t)u32Disp;
    13721                     break;
    13722                 }
    13723                 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
    13724             }
    13725 
    13726         }
    13727 
    13728         if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
    13729             *pGCPtrEff = u64EffAddr;
    13730         else
    13731         {
    13732             Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    13733             *pGCPtrEff = u64EffAddr & UINT32_MAX;
    13734         }
    13735     }
    13736 
    13737     Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
    13738     return VINF_SUCCESS;
    13739 }
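/* Worked example (32-bit mode): bRm=0x44 decodes as mod=01, rm=100b, so a SIB
   byte and a disp8 follow.  With bSib=0x58 (scale=01b -> index*2, index=EBX,
   base=EAX) and disp8=0x10, the function returns eax + ebx*2 + 0x10, and the
   default segment stays DS since neither EBP nor ESP was involved. */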
    13740 
    13741 
    13742 /**
    13743  * Calculates the effective address of a ModR/M memory operand.
    13744  *
    13745  * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
    13746  *
    13747  * @return  Strict VBox status code.
    13748  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    13749  * @param   bRm                 The ModRM byte.
    13750  * @param   cbImm               The size of any immediate following the
    13751  *                              effective address opcode bytes. Important for
    13752  *                              RIP relative addressing.
    13753  * @param   pGCPtrEff           Where to return the effective address.
    13754  * @param   offRsp              RSP displacement.
    13755  */
    13756 IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
    13757 {
    13758     Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
    13759 # define SET_SS_DEF() \
    13760     do \
    13761     { \
    13762         if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
    13763             pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
    13764     } while (0)
    13765 
    13766     if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
    13767     {
    13768 /** @todo Check the effective address size crap! */
    13769         if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
    13770         {
    13771             uint16_t u16EffAddr;
    13772 
    13773             /* Handle the disp16 form with no registers first. */
    13774             if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
    13775                 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
    13776             else
    13777             {
    13778                 /* Get the displacement. */
    13779                 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    13780                 {
    13781                     case 0:  u16EffAddr = 0;                             break;
    13782                     case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
    13783                     case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
    13784                     default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
    13785                 }
    13786 
    13787                 /* Add the base and index registers to the disp. */
    13788                 switch (bRm & X86_MODRM_RM_MASK)
    13789                 {
    13790                     case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
    13791                     case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
    13792                     case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
    13793                     case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
    13794                     case 4: u16EffAddr += pVCpu->cpum.GstCtx.si;            break;
    13795                     case 5: u16EffAddr += pVCpu->cpum.GstCtx.di;            break;
    13796                     case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp;            SET_SS_DEF(); break;
    13797                     case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx;            break;
    13798                 }
    13799             }
    13800 
    13801             *pGCPtrEff = u16EffAddr;
    13802         }
    13803         else
    13804         {
    13805             Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    13806             uint32_t u32EffAddr;
    13807 
    13808             /* Handle the disp32 form with no registers first. */
    13809             if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    13810                 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
    13811             else
    13812             {
    13813                 /* Get the register (or SIB) value. */
    13814                 switch ((bRm & X86_MODRM_RM_MASK))
    13815                 {
    13816                     case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
    13817                     case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
    13818                     case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
    13819                     case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
    13820                     case 4: /* SIB */
    13821                     {
    13822                         uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
    13823 
    13824                         /* Get the index and scale it. */
    13825                         switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
    13826                         {
    13827                             case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
    13828                             case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
    13829                             case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
    13830                             case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
    13831                             case 4: u32EffAddr = 0; /*none */ break;
    13832                             case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
    13833                             case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
    13834                             case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    13835                             IEM_NOT_REACHED_DEFAULT_CASE_RET();
    13836                         }
    13837                         u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    13838 
    13839                         /* add base */
    13840                         switch (bSib & X86_SIB_BASE_MASK)
    13841                         {
    13842                             case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
    13843                             case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
    13844                             case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
    13845                             case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
    13846                             case 4:
    13847                                 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
    13848                                 SET_SS_DEF();
    13849                                 break;
    13850                             case 5:
    13851                                 if ((bRm & X86_MODRM_MOD_MASK) != 0)
    13852                                 {
    13853                                     u32EffAddr += pVCpu->cpum.GstCtx.ebp;
    13854                                     SET_SS_DEF();
    13855                                 }
    13856                                 else
    13857                                 {
    13858                                     uint32_t u32Disp;
    13859                                     IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    13860                                     u32EffAddr += u32Disp;
    13861                                 }
    13862                                 break;
    13863                             case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
    13864                             case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
    13865                             IEM_NOT_REACHED_DEFAULT_CASE_RET();
    13866                         }
    13867                         break;
    13868                     }
    13869                     case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
    13870                     case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
    13871                     case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    13872                     IEM_NOT_REACHED_DEFAULT_CASE_RET();
    13873                 }
    13874 
    13875                 /* Get and add the displacement. */
    13876                 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    13877                 {
    13878                     case 0:
    13879                         break;
    13880                     case 1:
    13881                     {
    13882                         int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
    13883                         u32EffAddr += i8Disp;
    13884                         break;
    13885                     }
    13886                     case 2:
    13887                     {
    13888                         uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    13889                         u32EffAddr += u32Disp;
    13890                         break;
    13891                     }
    13892                     default:
    13893                         AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
    13894                 }
    13895 
    13896             }
    13897             if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
    13898                 *pGCPtrEff = u32EffAddr;
    13899             else
    13900             {
    13901                 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
    13902                 *pGCPtrEff = u32EffAddr & UINT16_MAX;
    13903             }
    13904         }
    13905     }
    13906     else
    13907     {
    13908         uint64_t u64EffAddr;
    13909 
    13910         /* Handle the rip+disp32 form with no registers first. */
    13911         if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    13912         {
    13913             IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
    13914             u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
    13915         }
    13916         else
    13917         {
    13918             /* Get the register (or SIB) value. */
    13919             switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
    13920             {
    13921                 case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
    13922                 case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
    13923                 case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
    13924                 case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
    13925                 case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
    13926                 case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
    13927                 case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
    13928                 case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
    13929                 case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
    13930                 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
    13931                 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
    13932                 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
    13933                 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
    13934                 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
    13935                 /* SIB */
    13936                 case 4:
    13937                 case 12:
    13938                 {
    13939                     uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
    13940 
    13941                     /* Get the index and scale it. */
    13942                     switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
    13943                     {
    13944                         case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
    13945                         case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
    13946                         case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
    13947                         case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
    13948                         case  4: u64EffAddr = 0; /*none */ break;
    13949                         case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
    13950                         case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
    13951                         case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
    13952                         case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
    13953                         case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
    13954                         case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
    13955                         case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
    13956                         case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
    13957                         case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
    13958                         case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
    13959                         case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
    13960                         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    13961                     }
    13962                     u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    13963 
    13964                     /* add base */
    13965                     switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
    13966                     {
    13967                         case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
    13968                         case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
    13969                         case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
    13970                         case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
    13971                         case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
    13972                         case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
    13973                         case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
    13974                         case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
    13975                         case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
    13976                         case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
    13977                         case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
    13978                         case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
    13979                         case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
    13980                         case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
    13981                         /* complicated encodings */
    13982                         case 5:
    13983                         case 13:
    13984                             if ((bRm & X86_MODRM_MOD_MASK) != 0)
    13985                             {
    13986                                 if (!pVCpu->iem.s.uRexB)
    13987                                 {
    13988                                     u64EffAddr += pVCpu->cpum.GstCtx.rbp;
    13989                                     SET_SS_DEF();
    13990                                 }
    13991                                 else
    13992                                     u64EffAddr += pVCpu->cpum.GstCtx.r13;
    13993                             }
    13994                             else
    13995                             {
    13996                                 uint32_t u32Disp;
    13997                                 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    13998                                 u64EffAddr += (int32_t)u32Disp;
    13999                             }
    14000                             break;
    14001                         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    14002                     }
    14003                     break;
    14004                 }
    14005                 IEM_NOT_REACHED_DEFAULT_CASE_RET();
    14006             }
    14007 
    14008             /* Get and add the displacement. */
    14009             switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    14010             {
    14011                 case 0:
    14012                     break;
    14013                 case 1:
    14014                 {
    14015                     int8_t i8Disp;
    14016                     IEM_OPCODE_GET_NEXT_S8(&i8Disp);
    14017                     u64EffAddr += i8Disp;
    14018                     break;
    14019                 }
    14020                 case 2:
    14021                 {
    14022                     uint32_t u32Disp;
    14023                     IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    14024                     u64EffAddr += (int32_t)u32Disp;
    14025                     break;
    14026                 }
    14027                 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
    14028             }
    14029 
    14030         }
    14031 
    14032         if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
    14033             *pGCPtrEff = u64EffAddr;
    14034         else
    14035         {
    14036             Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    14037             *pGCPtrEff = u64EffAddr & UINT32_MAX;
    14038         }
    14039     }
    14040 
    14041     Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
    14042     return VINF_SUCCESS;
    14043 }
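/* The only difference from iemOpHlpCalcRmEffAddr above is that ESP/RSP-based
   addressing adds @a offRsp first.  This matches the architectural rule for
   POP-style instructions with a memory destination, where the stack pointer
   is incremented before the effective address is evaluated (the exact set of
   callers isn't visible here, so that use is an assumption). */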
    14044 
    14045 
    14046 #ifdef IEM_WITH_SETJMP
    14047 /**
    14048  * Calculates the effective address of a ModR/M memory operand.
    14049  *
    14050  * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
    14051  *
    14052  * May longjmp on internal error.
    14053  *
    14054  * @return  The effective address.
    14055  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    14056  * @param   bRm                 The ModRM byte.
    14057  * @param   cbImm               The size of any immediate following the
    14058  *                              effective address opcode bytes. Important for
    14059  *                              RIP relative addressing.
    14060  */
    14061 IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
    14062 {
    14063     Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
    14064 # define SET_SS_DEF() \
    14065     do \
    14066     { \
    14067         if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
    14068             pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
    14069     } while (0)
    14070 
    14071     if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
    14072     {
    14073 /** @todo Check the effective address size crap! */
    14074         if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
    14075         {
    14076             uint16_t u16EffAddr;
    14077 
    14078             /* Handle the disp16 form with no registers first. */
    14079             if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
    14080                 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
    14081             else
    14082             {
    14083                 /* Get the displacement. */
    14084                 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    14085                 {
    14086                     case 0:  u16EffAddr = 0;                             break;
    14087                     case 1:  IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
    14088                     case 2:  IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);       break;
    14089                     default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
    14090                 }
    14091 
    14092                 /* Add the base and index registers to the disp. */
    14093                 switch (bRm & X86_MODRM_RM_MASK)
    14094                 {
    14095                     case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
    14096                     case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
    14097                     case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
    14098                     case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
    14099                     case 4: u16EffAddr += pVCpu->cpum.GstCtx.si;            break;
    14100                     case 5: u16EffAddr += pVCpu->cpum.GstCtx.di;            break;
    14101                     case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp;            SET_SS_DEF(); break;
    14102                     case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx;            break;
    14103                 }
    14104             }
    14105 
    14106             Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
    14107             return u16EffAddr;
    14108         }
    14109 
    14110         Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    14111         uint32_t u32EffAddr;
    14112 
    14113         /* Handle the disp32 form with no registers first. */
    14114         if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    14115             IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
    14116         else
    14117         {
    14118             /* Get the register (or SIB) value. */
    14119             switch ((bRm & X86_MODRM_RM_MASK))
    14120             {
    14121                 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
    14122                 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
    14123                 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
    14124                 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
    14125                 case 4: /* SIB */
    14126                 {
    14127                     uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
    14128 
    14129                     /* Get the index and scale it. */
    14130                     switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
    14131                     {
    14132                         case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
    14133                         case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
    14134                         case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
    14135                         case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
    14136                         case 4: u32EffAddr = 0; /*none */ break;
    14137                         case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
    14138                         case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
    14139                         case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    14140                         IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
    14141                     }
    14142                     u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    14143 
    14144                     /* add base */
    14145                     switch (bSib & X86_SIB_BASE_MASK)
    14146                     {
    14147                         case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
    14148                         case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
    14149                         case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
    14150                         case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
    14151                         case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
    14152                         case 5:
    14153                             if ((bRm & X86_MODRM_MOD_MASK) != 0)
    14154                             {
    14155                                 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
    14156                                 SET_SS_DEF();
    14157                             }
    14158                             else
    14159                             {
    14160                                 uint32_t u32Disp;
    14161                                 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    14162                                 u32EffAddr += u32Disp;
    14163                             }
    14164                             break;
    14165                         case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
    14166                         case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
    14167                         IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
    14168                     }
    14169                     break;
    14170                 }
    14171                 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
    14172                 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
    14173                 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    14174                 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
    14175             }
    14176 
    14177             /* Get and add the displacement. */
    14178             switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    14179             {
    14180                 case 0:
    14181                     break;
    14182                 case 1:
    14183                 {
    14184                     int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
    14185                     u32EffAddr += i8Disp;
    14186                     break;
    14187                 }
    14188                 case 2:
    14189                 {
    14190                     uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    14191                     u32EffAddr += u32Disp;
    14192                     break;
    14193                 }
    14194                 default:
    14195                     AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
    14196             }
    14197         }
    14198 
    14199         if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
    14200         {
    14201             Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
    14202             return u32EffAddr;
    14203         }
    14204         Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
    14205         Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
    14206         return u32EffAddr & UINT16_MAX;
    14207     }
    14208 
    14209     uint64_t u64EffAddr;
    14210 
    14211     /* Handle the rip+disp32 form with no registers first. */
    14212     if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    14213     {
    14214         IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
    14215         u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
    14216     }
    14217     else
    14218     {
    14219         /* Get the register (or SIB) value. */
    14220         switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
    14221         {
    14222             case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
    14223             case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
    14224             case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
    14225             case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
    14226             case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
    14227             case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
    14228             case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
    14229             case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
    14230             case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
    14231             case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
    14232             case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
    14233             case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
    14234             case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
    14235             case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
    14236             /* SIB */
    14237             case 4:
    14238             case 12:
    14239             {
    14240                 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
    14241 
    14242                 /* Get the index and scale it. */
    14243                 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
    14244                 {
    14245                     case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
    14246                     case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
    14247                     case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
    14248                     case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
    14249                     case  4: u64EffAddr = 0; /*none */ break;
    14250                     case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
    14251                     case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
    14252                     case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
    14253                     case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
    14254                     case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
    14255                     case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
    14256                     case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
    14257                     case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
    14258                     case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
    14259                     case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
    14260                     case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
    14261                     IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
    14262                 }
    14263                 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
    14264 
    14265                 /* add base */
    14266                 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
    14267                 {
    14268                     case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
    14269                     case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
    14270                     case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
    14271                     case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
    14272                     case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
    14273                     case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
    14274                     case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
    14275                     case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
    14276                     case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
    14277                     case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
    14278                     case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
    14279                     case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
    14280                     case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
    14281                     case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
    14282                     /* complicated encodings */
    14283                     case 5:
    14284                     case 13:
    14285                         if ((bRm & X86_MODRM_MOD_MASK) != 0)
    14286                         {
    14287                             if (!pVCpu->iem.s.uRexB)
    14288                             {
    14289                                 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
    14290                                 SET_SS_DEF();
    14291                             }
    14292                             else
    14293                                 u64EffAddr += pVCpu->cpum.GstCtx.r13;
    14294                         }
    14295                         else
    14296                         {
    14297                             uint32_t u32Disp;
    14298                             IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    14299                             u64EffAddr += (int32_t)u32Disp;
    14300                         }
    14301                         break;
    14302                     IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
    14303                 }
    14304                 break;
    14305             }
    14306             IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
    14307         }
    14308 
    14309         /* Get and add the displacement. */
    14310         switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    14311         {
    14312             case 0:
    14313                 break;
    14314             case 1:
    14315             {
    14316                 int8_t i8Disp;
    14317                 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
    14318                 u64EffAddr += i8Disp;
    14319                 break;
    14320             }
    14321             case 2:
    14322             {
    14323                 uint32_t u32Disp;
    14324                 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
    14325                 u64EffAddr += (int32_t)u32Disp;
    14326                 break;
    14327             }
    14328             IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
    14329         }
    14330 
    14331     }
    14332 
    14333     if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
    14334     {
    14335         Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
    14336         return u64EffAddr;
    14337     }
    14338     Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
    14339     Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
    14340     return u64EffAddr & UINT32_MAX;
    14341 }
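/* Usage sketch for the setjmp variant: it returns the effective address
   directly and longjmps to the jump buffer set up by the interpreter loop on
   internal decode errors, so callers need no VBOXSTRICTRC plumbing:

       RTGCPTR GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0); // cbImm=0
 */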
    14342 #endif /* IEM_WITH_SETJMP */
    14343 
    14344 /** @}  */
    14345 
    14346 
    14347 
    14348 /*
    14349  * Include the instructions
    14350  */
    14351 #include "IEMAllInstructions.cpp.h"
    14352 
    14353 
    14354 
    14355 #ifdef LOG_ENABLED
    14356 /**
    14357  * Logs the current instruction.
    14358  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    14359  * @param   fSameCtx    Set if we have the same context information as the VMM,
    14360  *                      clear if we may have already executed an instruction in
    14361  *                      our debug context.  When clear, we assume IEMCPU holds
    14362  *                      valid CPU mode info and disassemble accordingly.
    14363  * @param   pszFunction The IEM function doing the execution.
    14364  *
    14365  * @note    The @a fSameCtx parameter is by now misleading and obsolete.
    14366  */
    14367 IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
    14368 {
    14369 # ifdef IN_RING3
    14370     if (LogIs2Enabled())
    14371     {
    14372         char     szInstr[256];
    14373         uint32_t cbInstr = 0;
    14374         if (fSameCtx)
    14375             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
    14376                                DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
    14377                                szInstr, sizeof(szInstr), &cbInstr);
    14378         else
    14379         {
    14380             uint32_t fFlags = 0;
    14381             switch (pVCpu->iem.s.enmCpuMode)
    14382             {
    14383                 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
    14384                 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
    14385                 case IEMMODE_16BIT:
    14386                     if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
    14387                         fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
    14388                     else
    14389                         fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
    14390                     break;
    14391             }
    14392             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
    14393                                szInstr, sizeof(szInstr), &cbInstr);
    14394         }
    14395 
    14396         PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    14397         Log2(("**** %s\n"
    14398               " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
    14399               " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
    14400               " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
    14401               " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
    14402               " %s\n"
    14403               , pszFunction,
    14404               pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
    14405               pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
    14406               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
    14407               pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
    14408               pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
    14409               szInstr));
    14410 
    14411         if (LogIs3Enabled())
    14412             DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
    14413     }
    14414     else
    14415 # endif
    14416         LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
    14417                  pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
    14418     RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
    14419 }
    14420 #endif /* LOG_ENABLED */
    14421 
    14422 
    14423 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    14424 /**
    14425  * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
    14426  * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
    14427  *
    14428  * @returns Modified rcStrict.
    14429  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    14430  * @param   rcStrict    The instruction execution status.
    14431  */
    14432 static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
    14433 {
    14434     Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
    14435     if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
    14436     {
    14437         /* VMX preemption timer takes priority over NMI-window exits. */
    14438         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
    14439         {
    14440             rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
    14441             Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
    14442         }
    14443         /*
    14444          * Check remaining intercepts.
    14445          *
    14446          * NMI-window and Interrupt-window VM-exits.
    14447          * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
    14448          * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
    14449          *
    14450          * See Intel spec. 26.7.6 "NMI-Window Exiting".
    14451          * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
    14452          */
    14453         else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
    14454                  && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    14455                  && !TRPMHasTrap(pVCpu))
    14456         {
    14457             Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
    14458             if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
    14459                 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
    14460             {
    14461                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
    14462                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
    14463             }
    14464             else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
    14465                      && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
    14466             {
    14467                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
    14468                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
    14469             }
    14470         }
    14471     }
    14472     /* TPR-below-threshold/APIC write has the highest priority. */
    14473     else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
    14474     {
    14475         rcStrict = iemVmxApicWriteEmulation(pVCpu);
    14476         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    14477         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
    14478     }
    14479     /* MTF takes priority over VMX-preemption timer. */
    14480     else
    14481     {
    14482         rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
    14483         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
    14484         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
    14485     }
    14486     return rcStrict;
    14487 }
    14488 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    14489 
    14490 
    14491 /**
    14492  * Makes status code adjustments (pass up from I/O and access handlers)
    14493  * as well as maintaining statistics.
    14494  *
    14495  * @returns Strict VBox status code to pass up.
    14496  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    14497  * @param   rcStrict    The status from executing an instruction.
    14498  */
    14499 DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
    14500 {
    14501     if (rcStrict != VINF_SUCCESS)
    14502     {
    14503         if (RT_SUCCESS(rcStrict))
    14504         {
    14505             AssertMsg(   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
    14506                       || rcStrict == VINF_IOM_R3_IOPORT_READ
    14507                       || rcStrict == VINF_IOM_R3_IOPORT_WRITE
    14508                       || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
    14509                       || rcStrict == VINF_IOM_R3_MMIO_READ
    14510                       || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
    14511                       || rcStrict == VINF_IOM_R3_MMIO_WRITE
    14512                       || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
    14513                       || rcStrict == VINF_CPUM_R3_MSR_READ
    14514                       || rcStrict == VINF_CPUM_R3_MSR_WRITE
    14515                       || rcStrict == VINF_EM_RAW_EMULATE_INSTR
    14516                       || rcStrict == VINF_EM_RAW_TO_R3
    14517                       || rcStrict == VINF_EM_TRIPLE_FAULT
    14518                       || rcStrict == VINF_GIM_R3_HYPERCALL
    14519                       /* raw-mode / virt handlers only: */
    14520                       || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
    14521                       || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
    14522                       || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
    14523                       || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
    14524                       || rcStrict == VINF_SELM_SYNC_GDT
    14525                       || rcStrict == VINF_CSAM_PENDING_ACTION
    14526                       || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
    14527                       /* nested hw.virt codes: */
    14528                       || rcStrict == VINF_VMX_VMEXIT
    14529                       || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
    14530                       || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
    14531                       || rcStrict == VINF_SVM_VMEXIT
    14532                       , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    14533 /** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
    14534             int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
    14535 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    14536             if (   rcStrict == VINF_VMX_VMEXIT
    14537                 && rcPassUp == VINF_SUCCESS)
    14538                 rcStrict = VINF_SUCCESS;
    14539             else
    14540 #endif
    14541 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    14542             if (   rcStrict == VINF_SVM_VMEXIT
    14543                 && rcPassUp == VINF_SUCCESS)
    14544                 rcStrict = VINF_SUCCESS;
    14545             else
    14546 #endif
    14547             if (rcPassUp == VINF_SUCCESS)
    14548                 pVCpu->iem.s.cRetInfStatuses++;
    14549             else if (   rcPassUp < VINF_EM_FIRST
    14550                      || rcPassUp > VINF_EM_LAST
    14551                      || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
    14552             {
    14553                 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
    14554                 pVCpu->iem.s.cRetPassUpStatus++;
    14555                 rcStrict = rcPassUp;
    14556             }
    14557             else
    14558             {
    14559                 Log(("IEM: rcPassUp=%Rrc  rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
    14560                 pVCpu->iem.s.cRetInfStatuses++;
    14561             }
    14562         }
    14563         else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
    14564             pVCpu->iem.s.cRetAspectNotImplemented++;
    14565         else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    14566             pVCpu->iem.s.cRetInstrNotImplemented++;
    14567         else
    14568             pVCpu->iem.s.cRetErrStatuses++;
    14569     }
    14570     else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
    14571     {
    14572         pVCpu->iem.s.cRetPassUpStatus++;
    14573         rcStrict = pVCpu->iem.s.rcPassUp;
    14574     }
    14575 
    14576     return rcStrict;
    14577 }
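
/* A minimal illustrative sketch (never compiled) of the pass-up merging done
   by iemExecStatusCodeFiddling above; the scenario and values are
   hypothetical, only the function and member names come from this file. */
#if 0
/* An instruction completes with VINF_SUCCESS, but a memory commit inside it
   recorded VINF_IOM_R3_MMIO_WRITE as the pass-up status.  The fiddling
   routine then hands the I/O status up to the caller: */
pVCpu->iem.s.rcPassUp = VINF_IOM_R3_MMIO_WRITE;     /* set by a commit path */
VBOXSTRICTRC rcStrict = iemExecStatusCodeFiddling(pVCpu, VINF_SUCCESS);
Assert(rcStrict == VINF_IOM_R3_MMIO_WRITE);         /* passed up; cRetPassUpStatus was bumped */
#endif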
    14578 
    14579 
    14580 /**
    14581  * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
    14582  * IEMExecOneWithPrefetchedByPC.
    14583  *
    14584  * Similar code is found in IEMExecLots.
    14585  *
    14586  * @return  Strict VBox status code.
    14587  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    14588  * @param   fExecuteInhibit     If set, execute the instruction following CLI,
    14589  *                      POP SS and MOV SS,GR.
    14590  * @param   pszFunction The calling function name.
    14591  */
    14592 DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
    14593 {
    14594     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    14595     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    14596     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    14597     RT_NOREF_PV(pszFunction);
    14598 
    14599 #ifdef IEM_WITH_SETJMP
    14600     VBOXSTRICTRC rcStrict;
    14601     jmp_buf      JmpBuf;
    14602     jmp_buf     *pSavedJmpBuf  = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
    14603     pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
    14604     if ((rcStrict = setjmp(JmpBuf)) == 0)
    14605     {
    14606         uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    14607         rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
    14608     }
    14609     else
    14610         pVCpu->iem.s.cLongJumps++;
    14611     pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
    14612 #else
    14613     uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    14614     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
    14615 #endif
    14616     if (rcStrict == VINF_SUCCESS)
    14617         pVCpu->iem.s.cInstructions++;
    14618     if (pVCpu->iem.s.cActiveMappings > 0)
    14619     {
    14620         Assert(rcStrict != VINF_SUCCESS);
    14621         iemMemRollback(pVCpu);
    14622     }
    14623     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    14624     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    14625     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    14626 
    14627 //#ifdef DEBUG
    14628 //    AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
    14629 //#endif
    14630 
    14631 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    14632     /*
    14633      * Perform any VMX nested-guest instruction boundary actions.
    14634      *
    14635      * If any of these causes a VM-exit, we must skip executing the next
    14636      * instruction (would run into stale page tables). A VM-exit makes sure
    14637      * there is no interrupt-inhibition, which should ensure we don't try to
    14638      * execute the next instruction. Clearing fExecuteInhibit is
    14639      * problematic because of the setjmp/longjmp clobbering above.
    14640      */
    14641     if (   rcStrict == VINF_SUCCESS
    14642         && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    14643                                     | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
    14644         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    14645 #endif
    14646 
    14647     /* Execute the next instruction as well if a cli, pop ss or
    14648        mov ss, Gr has just completed successfully. */
    14649     if (   fExecuteInhibit
    14650         && rcStrict == VINF_SUCCESS
    14651         && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
    14652         && EMIsInhibitInterruptsActive(pVCpu))
    14653     {
    14654         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
    14655         if (rcStrict == VINF_SUCCESS)
    14656         {
    14657 #ifdef LOG_ENABLED
    14658             iemLogCurInstr(pVCpu, false, pszFunction);
    14659 #endif
    14660 #ifdef IEM_WITH_SETJMP
    14661             pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
    14662             if ((rcStrict = setjmp(JmpBuf)) == 0)
    14663             {
    14664                 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    14665                 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
    14666             }
    14667             else
    14668                 pVCpu->iem.s.cLongJumps++;
    14669             pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
    14670 #else
    14671             IEM_OPCODE_GET_NEXT_U8(&b);
    14672             rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
    14673 #endif
    14674             if (rcStrict == VINF_SUCCESS)
    14675                 pVCpu->iem.s.cInstructions++;
    14676             if (pVCpu->iem.s.cActiveMappings > 0)
    14677             {
    14678                 Assert(rcStrict != VINF_SUCCESS);
    14679                 iemMemRollback(pVCpu);
    14680             }
    14681             AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    14682             AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    14683             AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    14684         }
    14685         else if (pVCpu->iem.s.cActiveMappings > 0)
    14686             iemMemRollback(pVCpu);
    14687         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
    14688     }
    14689 
    14690     /*
    14691      * Return value fiddling, statistics and sanity assertions.
    14692      */
    14693     rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    14694 
    14695     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    14696     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    14697     return rcStrict;
    14698 }
    14699 
    14700 
    14701 /**
    14702  * Execute one instruction.
    14703  *
    14704  * @return  Strict VBox status code.
    14705  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    14706  */
    14707 VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
    14708 {
    14709     AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
    14710 #ifdef LOG_ENABLED
    14711     iemLogCurInstr(pVCpu, true, "IEMExecOne");
    14712 #endif
    14713 
    14714     /*
    14715      * Do the decoding and emulation.
    14716      */
    14717     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
    14718     if (rcStrict == VINF_SUCCESS)
    14719         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
    14720     else if (pVCpu->iem.s.cActiveMappings > 0)
    14721         iemMemRollback(pVCpu);
    14722 
    14723     if (rcStrict != VINF_SUCCESS)
    14724         LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    14725                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    14726     return rcStrict;
    14727 }
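
/* A minimal, hypothetical caller loop for IEMExecOne (never compiled); real
   callers such as EM do a lot more, this only shows the one-instruction
   contract and how non-VINF_SUCCESS statuses are handed back out. */
#if 0
for (;;)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);      /* decode + execute exactly one instruction */
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                            /* e.g. VINF_IOM_R3_MMIO_WRITE or VINF_EM_RAW_TO_R3 */
}
#endif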
    14728 
    14729 
    14730 VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
    14731 {
    14732     AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
    14733 
    14734     uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
    14735     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
    14736     if (rcStrict == VINF_SUCCESS)
    14737     {
    14738         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
    14739         if (pcbWritten)
    14740             *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
    14741     }
    14742     else if (pVCpu->iem.s.cActiveMappings > 0)
    14743         iemMemRollback(pVCpu);
    14744 
    14745     return rcStrict;
    14746 }
    14747 
    14748 
    14749 VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
    14750                                                    const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    14751 {
    14752     AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
    14753 
    14754     VBOXSTRICTRC rcStrict;
    14755     if (   cbOpcodeBytes
    14756         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    14757     {
    14758         iemInitDecoder(pVCpu, false, false);
    14759 #ifdef IEM_WITH_CODE_TLB
    14760         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    14761         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    14762         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    14763         pVCpu->iem.s.offCurInstrStart = 0;
    14764         pVCpu->iem.s.offInstrNextByte = 0;
    14765 #else
    14766         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    14767         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    14768 #endif
    14769         rcStrict = VINF_SUCCESS;
    14770     }
    14771     else
    14772         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
    14773     if (rcStrict == VINF_SUCCESS)
    14774         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
    14775     else if (pVCpu->iem.s.cActiveMappings > 0)
    14776         iemMemRollback(pVCpu);
    14777 
    14778     return rcStrict;
    14779 }
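
/* A hypothetical use of the prefetched-bytes variant above (never compiled):
   a caller that already has the opcode bytes avoids a second guest-memory
   fetch.  abBytes is made up; the bytes are only used when OpcodeBytesPC
   matches the current RIP. */
#if 0
uint8_t const abBytes[] = { 0x90 }; /* NOP, assumed to sit at the current RIP */
VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
                                                     pVCpu->cpum.GstCtx.rip, abBytes, sizeof(abBytes));
#endif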
    14780 
    14781 
    14782 VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
    14783 {
    14784     AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
    14785 
    14786     uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
    14787     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
    14788     if (rcStrict == VINF_SUCCESS)
    14789     {
    14790         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
    14791         if (pcbWritten)
    14792             *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
    14793     }
    14794     else if (pVCpu->iem.s.cActiveMappings > 0)
    14795         iemMemRollback(pVCpu);
    14796 
    14797     return rcStrict;
    14798 }
    14799 
    14800 
    14801 VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
    14802                                                          const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    14803 {
    14804     AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
    14805 
    14806     VBOXSTRICTRC rcStrict;
    14807     if (   cbOpcodeBytes
    14808         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    14809     {
    14810         iemInitDecoder(pVCpu, true, false);
    14811 #ifdef IEM_WITH_CODE_TLB
    14812         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    14813         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    14814         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    14815         pVCpu->iem.s.offCurInstrStart = 0;
    14816         pVCpu->iem.s.offInstrNextByte = 0;
    14817 #else
    14818         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    14819         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    14820 #endif
    14821         rcStrict = VINF_SUCCESS;
    14822     }
    14823     else
    14824         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
    14825     if (rcStrict == VINF_SUCCESS)
    14826         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
    14827     else if (pVCpu->iem.s.cActiveMappings > 0)
    14828         iemMemRollback(pVCpu);
    14829 
    14830     return rcStrict;
    14831 }
    14832 
    14833 
    14834 /**
    14835  * For debugging DISGetParamSize; may come in handy.
    14836  *
    14837  * @returns Strict VBox status code.
    14838  * @param   pVCpu           The cross context virtual CPU structure of the
    14839  *                          calling EMT.
    14840  * @param   pCtxCore        The context core structure.
    14841  * @param   OpcodeBytesPC   The PC of the opcode bytes.
    14842  * @param   pvOpcodeBytes   Prefetched opcode bytes.
    14843  * @param   cbOpcodeBytes   Number of prefetched bytes.
    14844  * @param   pcbWritten      Where to return the number of bytes written.
    14845  *                          Optional.
    14846  */
    14847 VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
    14848                                                                 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
    14849                                                                 uint32_t *pcbWritten)
    14850 {
    14851     AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
    14852 
    14853     uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
    14854     VBOXSTRICTRC rcStrict;
    14855     if (   cbOpcodeBytes
    14856         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    14857     {
    14858         iemInitDecoder(pVCpu, true, false);
    14859 #ifdef IEM_WITH_CODE_TLB
    14860         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    14861         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    14862         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    14863         pVCpu->iem.s.offCurInstrStart = 0;
    14864         pVCpu->iem.s.offInstrNextByte = 0;
    14865 #else
    14866         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    14867         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    14868 #endif
    14869         rcStrict = VINF_SUCCESS;
    14870     }
    14871     else
    14872         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
    14873     if (rcStrict == VINF_SUCCESS)
    14874     {
    14875         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
    14876         if (pcbWritten)
    14877             *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
    14878     }
    14879     else if (pVCpu->iem.s.cActiveMappings > 0)
    14880         iemMemRollback(pVCpu);
    14881 
    14882     return rcStrict;
    14883 }
    14884 
    14885 
    14886 /**
    14887  * For handling split cacheline lock operations when the host has split-lock
    14888  * detection enabled.
    14889  *
    14890  * This will cause the interpreter to disregard the lock prefix and implicit
    14891  * locking (xchg).
    14892  *
    14893  * @returns Strict VBox status code.
    14894  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    14895  */
    14896 VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
    14897 {
    14898     /*
    14899      * Do the decoding and emulation.
    14900      */
    14901     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
    14902     if (rcStrict == VINF_SUCCESS)
    14903         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
    14904     else if (pVCpu->iem.s.cActiveMappings > 0)
    14905         iemMemRollback(pVCpu);
    14906 
    14907     if (rcStrict != VINF_SUCCESS)
    14908         LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    14909                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    14910     return rcStrict;
    14911 }
    14912 
    14913 
    14914 VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
    14915 {
    14916     uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
    14917     AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
    14918 
    14919     /*
    14920      * See if there is an interrupt pending in TRPM, inject it if we can.
    14921      */
    14922     /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
    14923 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    14924     bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
    14925     if (fIntrEnabled)
    14926     {
    14927         if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
    14928             fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    14929         else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    14930             fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
    14931         else
    14932         {
    14933             Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
    14934             fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
    14935         }
    14936     }
    14937 #else
    14938     bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    14939 #endif
    14940 
    14941     /** @todo What if we are injecting an exception and not an interrupt? Is that
    14942      *        possible here? For now we assert it is indeed only an interrupt. */
    14943     if (   fIntrEnabled
    14944         && TRPMHasTrap(pVCpu)
    14945         && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
    14946     {
    14947         uint8_t     u8TrapNo;
    14948         TRPMEVENT   enmType;
    14949         uint32_t    uErrCode;
    14950         RTGCPTR     uCr2;
    14951         int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
    14952         AssertRC(rc2);
    14953         Assert(enmType == TRPM_HARDWARE_INT);
    14954         VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
    14955         TRPMResetTrap(pVCpu);
    14956 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    14957         /* Injecting an event may cause a VM-exit. */
    14958         if (   rcStrict != VINF_SUCCESS
    14959             && rcStrict != VINF_IEM_RAISED_XCPT)
    14960             return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    14961 #else
    14962         NOREF(rcStrict);
    14963 #endif
    14964     }
    14965 
    14966     /*
    14967      * Initial decoder init w/ prefetch, then setup setjmp.
    14968      */
    14969     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
    14970     if (rcStrict == VINF_SUCCESS)
    14971     {
    14972 #ifdef IEM_WITH_SETJMP
    14973         jmp_buf         JmpBuf;
    14974         jmp_buf        *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
    14975         pVCpu->iem.s.CTX_SUFF(pJmpBuf)   = &JmpBuf;
    14976         pVCpu->iem.s.cActiveMappings     = 0;
    14977         if ((rcStrict = setjmp(JmpBuf)) == 0)
    14978 #endif
    14979         {
    14980             /*
    14981              * The run loop.  We limit ourselves to the caller's cMaxInstructions.
    14982              */
    14983             uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
    14984             PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    14985             for (;;)
    14986             {
    14987                 /*
    14988                  * Log the state.
    14989                  */
    14990 #ifdef LOG_ENABLED
    14991                 iemLogCurInstr(pVCpu, true, "IEMExecLots");
    14992 #endif
    14993 
    14994                 /*
    14995                  * Do the decoding and emulation.
    14996                  */
    14997                 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    14998                 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
    14999                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    15000                 {
    15001                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    15002                     pVCpu->iem.s.cInstructions++;
    15003                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    15004                     {
    15005                         uint64_t fCpu = pVCpu->fLocalForcedActions
    15006                                       & ( VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    15007                                                                 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    15008                                                                 | VMCPU_FF_TLB_FLUSH
    15009                                                                 | VMCPU_FF_INHIBIT_INTERRUPTS
    15010                                                                 | VMCPU_FF_BLOCK_NMIS
    15011                                                                 | VMCPU_FF_UNHALT ));
    15012 
    15013                         if (RT_LIKELY(   (   !fCpu
    15014                                           || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    15015                                               && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
    15016                                       && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
    15017                         {
    15018                             if (cMaxInstructionsGccStupidity-- > 0)
    15019                             {
    15020                 /* Poll timers every now and then according to the caller's specs. */
    15021                                 if (   (cMaxInstructionsGccStupidity & cPollRate) != 0
    15022                                     || !TMTimerPollBool(pVM, pVCpu))
    15023                                 {
    15024                                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    15025                                     iemReInitDecoder(pVCpu);
    15026                                     continue;
    15027                                 }
    15028                             }
    15029                         }
    15030                     }
    15031                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    15032                 }
    15033                 else if (pVCpu->iem.s.cActiveMappings > 0)
    15034                     iemMemRollback(pVCpu);
    15035                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    15036                 break;
    15037             }
    15038         }
    15039 #ifdef IEM_WITH_SETJMP
    15040         else
    15041         {
    15042             if (pVCpu->iem.s.cActiveMappings > 0)
    15043                 iemMemRollback(pVCpu);
    15044 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    15045             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    15046 # endif
    15047             pVCpu->iem.s.cLongJumps++;
    15048         }
    15049         pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
    15050 #endif
    15051 
    15052         /*
    15053          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    15054          */
    15055         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    15056         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    15057     }
    15058     else
    15059     {
    15060         if (pVCpu->iem.s.cActiveMappings > 0)
    15061             iemMemRollback(pVCpu);
    15062 
    15063 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    15064         /*
    15065          * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
    15066          * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
    15067          */
    15068         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    15069 #endif
    15070     }
    15071 
    15072     /*
    15073      * Log the status code on failure and return.
    15074      */
    15075     if (rcStrict != VINF_SUCCESS)
    15076         LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    15077                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    15078     if (pcInstructions)
    15079         *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
    15080     return rcStrict;
    15081 }
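
/* A hypothetical IEMExecLots invocation (never compiled).  cPollRate is used
   as a mask, so it must be a power of two minus one; with 511 the timers get
   polled roughly every 512 instructions.  All values here are made up. */
#if 0
uint32_t cInstructions = 0;
VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
#endif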
    15082 
    15083 
    15084 /**
    15085  * Interface used by EMExecuteExec, does exit statistics and limits.
    15086  *
    15087  * @returns Strict VBox status code.
    15088  * @param   pVCpu               The cross context virtual CPU structure.
    15089  * @param   fWillExit           To be defined.
    15090  * @param   cMinInstructions    Minimum number of instructions to execute before checking for FFs.
    15091  * @param   cMaxInstructions    Maximum number of instructions to execute.
    15092  * @param   cMaxInstructionsWithoutExits
    15093  *                              The max number of instructions without exits.
    15094  * @param   pStats              Where to return statistics.
    15095  */
    15096 VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
    15097                                       uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
    15098 {
    15099     NOREF(fWillExit); /** @todo define flexible exit crits */
    15100 
    15101     /*
    15102      * Initialize return stats.
    15103      */
    15104     pStats->cInstructions    = 0;
    15105     pStats->cExits           = 0;
    15106     pStats->cMaxExitDistance = 0;
    15107     pStats->cReserved        = 0;
    15108 
    15109     /*
    15110      * Initial decoder init w/ prefetch, then setup setjmp.
    15111      */
    15112     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
    15113     if (rcStrict == VINF_SUCCESS)
    15114     {
    15115 #ifdef IEM_WITH_SETJMP
    15116         jmp_buf         JmpBuf;
    15117         jmp_buf        *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
    15118         pVCpu->iem.s.CTX_SUFF(pJmpBuf)   = &JmpBuf;
    15119         pVCpu->iem.s.cActiveMappings     = 0;
    15120         if ((rcStrict = setjmp(JmpBuf)) == 0)
    15121 #endif
    15122         {
    15123 #ifdef IN_RING0
    15124             bool const fCheckPreemptionPending   = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    15125 #endif
    15126             uint32_t   cInstructionSinceLastExit = 0;
    15127 
    15128             /*
    15129              * The run loop.  We limit ourselves to the caller's instruction and exit limits.
    15130              */
    15131             PVM pVM = pVCpu->CTX_SUFF(pVM);
    15132             for (;;)
    15133             {
    15134                 /*
    15135                  * Log the state.
    15136                  */
    15137 #ifdef LOG_ENABLED
    15138                 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
    15139 #endif
    15140 
    15141                 /*
    15142                  * Do the decoding and emulation.
    15143                  */
    15144                 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
    15145 
    15146                 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    15147                 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
    15148 
    15149                 if (   cPotentialExits != pVCpu->iem.s.cPotentialExits
    15150                     && cInstructionSinceLastExit > 0 /* don't count the first */ )
    15151                 {
    15152                     pStats->cExits += 1;
    15153                     if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
    15154                         pStats->cMaxExitDistance = cInstructionSinceLastExit;
    15155                     cInstructionSinceLastExit = 0;
    15156                 }
    15157 
    15158                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    15159                 {
    15160                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    15161                     pVCpu->iem.s.cInstructions++;
    15162                     pStats->cInstructions++;
    15163                     cInstructionSinceLastExit++;
    15164                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    15165                     {
    15166                         uint64_t fCpu = pVCpu->fLocalForcedActions
    15167                                       & ( VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    15168                                                                 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    15169                                                                 | VMCPU_FF_TLB_FLUSH
    15170                                                                 | VMCPU_FF_INHIBIT_INTERRUPTS
    15171                                                                 | VMCPU_FF_BLOCK_NMIS
    15172                                                                 | VMCPU_FF_UNHALT ));
    15173 
    15174                         if (RT_LIKELY(   (   (   !fCpu
    15175                                               || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    15176                                                   && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
    15177                                           && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
    15178                                       || pStats->cInstructions < cMinInstructions))
    15179                         {
    15180                             if (pStats->cInstructions < cMaxInstructions)
    15181                             {
    15182                                 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
    15183                                 {
    15184 #ifdef IN_RING0
    15185                                     if (   !fCheckPreemptionPending
    15186                                         || !RTThreadPreemptIsPending(NIL_RTTHREAD))
    15187 #endif
    15188                                     {
    15189                                         Assert(pVCpu->iem.s.cActiveMappings == 0);
    15190                                         iemReInitDecoder(pVCpu);
    15191                                         continue;
    15192                                     }
    15193 #ifdef IN_RING0
    15194                                     rcStrict = VINF_EM_RAW_INTERRUPT;
    15195                                     break;
    15196 #endif
    15197                                 }
    15198                             }
    15199                         }
    15200                         Assert(!(fCpu & VMCPU_FF_IEM));
    15201                     }
    15202                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    15203                 }
    15204                 else if (pVCpu->iem.s.cActiveMappings > 0)
    15205                     iemMemRollback(pVCpu);
    15206                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    15207                 break;
    15208             }
    15209         }
    15210 #ifdef IEM_WITH_SETJMP
    15211         else
    15212         {
    15213             if (pVCpu->iem.s.cActiveMappings > 0)
    15214                 iemMemRollback(pVCpu);
    15215             pVCpu->iem.s.cLongJumps++;
    15216         }
    15217         pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
    15218 #endif
    15219 
    15220         /*
    15221          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    15222          */
    15223         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    15224         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    15225     }
    15226     else
    15227     {
    15228         if (pVCpu->iem.s.cActiveMappings > 0)
    15229             iemMemRollback(pVCpu);
    15230 
    15231 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    15232         /*
    15233          * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
    15234          * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
    15235          */
    15236         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    15237 #endif
    15238     }
    15239 
    15240     /*
    15241      * Log the status code on failure and return.
    15242      */
    15243     if (rcStrict != VINF_SUCCESS)
    15244         LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
    15245                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
    15246                  pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
    15247     return rcStrict;
    15248 }
    15249 
    15250 
    15251 /**
    15252  * Injects a trap, fault, abort, software interrupt or external interrupt.
    15253  *
    15254  * The parameter list matches TRPMQueryTrapAll pretty closely.
    15255  *
    15256  * @returns Strict VBox status code.
    15257  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    15258  * @param   u8TrapNo            The trap number.
    15259  * @param   enmType             What type is it (trap/fault/abort), software
    15260  *                              interrupt or hardware interrupt.
    15261  * @param   uErrCode            The error code if applicable.
    15262  * @param   uCr2                The CR2 value if applicable.
    15263  * @param   cbInstr             The instruction length (only relevant for
    15264  *                              software interrupts).
    15265  */
    15266 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
    15267                                          uint8_t cbInstr)
    15268 {
    15269     iemInitDecoder(pVCpu, false, false);
    15270 #ifdef DBGFTRACE_ENABLED
    15271     RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
    15272                       u8TrapNo, enmType, uErrCode, uCr2);
    15273 #endif
    15274 
    15275     uint32_t fFlags;
    15276     switch (enmType)
    15277     {
    15278         case TRPM_HARDWARE_INT:
    15279             Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
    15280             fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
    15281             uErrCode = uCr2 = 0;
    15282             break;
    15283 
    15284         case TRPM_SOFTWARE_INT:
    15285             Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
    15286             fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
    15287             uErrCode = uCr2 = 0;
    15288             break;
    15289 
    15290         case TRPM_TRAP:
    15291             Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
    15292             fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
    15293             if (u8TrapNo == X86_XCPT_PF)
    15294                 fFlags |= IEM_XCPT_FLAGS_CR2;
    15295             switch (u8TrapNo)
    15296             {
    15297                 case X86_XCPT_DF:
    15298                 case X86_XCPT_TS:
    15299                 case X86_XCPT_NP:
    15300                 case X86_XCPT_SS:
    15301                 case X86_XCPT_PF:
    15302                 case X86_XCPT_AC:
    15303                 case X86_XCPT_GP:
    15304                     fFlags |= IEM_XCPT_FLAGS_ERR;
    15305                     break;
    15306             }
    15307             break;
    15308 
    15309         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    15310     }
    15311 
    15312     VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
    15313 
    15314     if (pVCpu->iem.s.cActiveMappings > 0)
    15315         iemMemRollback(pVCpu);
    15316 
    15317     return rcStrict;
    15318 }
    15319 
    15320 
    15321 /**
    15322  * Injects the active TRPM event.
    15323  *
    15324  * @returns Strict VBox status code.
    15325  * @param   pVCpu               The cross context virtual CPU structure.
    15326  */
    15327 VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
    15328 {
    15329 #ifndef IEM_IMPLEMENTS_TASKSWITCH
    15330     IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
    15331 #else
    15332     uint8_t     u8TrapNo;
    15333     TRPMEVENT   enmType;
    15334     uint32_t    uErrCode;
    15335     RTGCUINTPTR uCr2;
    15336     uint8_t     cbInstr;
    15337     int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
    15338     if (RT_FAILURE(rc))
    15339         return rc;
    15340 
    15341     /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
    15342      *        ICEBP \#DB injection as a special case. */
    15343     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
    15344 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    15345     if (rcStrict == VINF_SVM_VMEXIT)
    15346         rcStrict = VINF_SUCCESS;
    15347 #endif
    15348 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    15349     if (rcStrict == VINF_VMX_VMEXIT)
    15350         rcStrict = VINF_SUCCESS;
    15351 #endif
    15352     /** @todo Are there any other codes that imply the event was successfully
    15353      *        delivered to the guest? See @bugref{6607}.  */
    15354     if (   rcStrict == VINF_SUCCESS
    15355         || rcStrict == VINF_IEM_RAISED_XCPT)
    15356         TRPMResetTrap(pVCpu);
    15357 
    15358     return rcStrict;
    15359 #endif
    15360 }
    15361 
    15362 
    15363 VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
    15364 {
    15365     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    15366     return VERR_NOT_IMPLEMENTED;
    15367 }
    15368 
    15369 
    15370 VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
    15371 {
    15372     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    15373     return VERR_NOT_IMPLEMENTED;
    15374 }
    15375 
    15376 
    15377 #if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
    15378 /**
    15379  * Executes an IRET instruction with default operand size.
    15380  *
    15381  * This is for PATM.
    15382  *
    15383  * @returns VBox status code.
    15384  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    15385  * @param   pCtxCore            The register frame.
    15386  */
    15387 VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
    15388 {
    15389     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    15390 
    15391     iemCtxCoreToCtx(pCtx, pCtxCore);
    15392     iemInitDecoder(pVCpu);
    15393     VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
    15394     if (rcStrict == VINF_SUCCESS)
    15395         iemCtxToCtxCore(pCtxCore, pCtx);
    15396     else
    15397         LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    15398                  pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    15399     return rcStrict;
    15400 }
    15401 #endif
    15402 
    15403 
    15404 /**
    15405  * Macro used by the IEMExec* method to check the given instruction length.
    15406  *
    15407  * Will return on failure!
    15408  *
    15409  * @param   a_cbInstr   The given instruction length.
    15410  * @param   a_cbMin     The minimum length.
    15411  */
    15412 #define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
    15413     AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
    15414                     ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
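
/* The macro folds the two checks a_cbMin <= a_cbInstr && a_cbInstr <= 15 into
   a single unsigned comparison: subtracting a_cbMin makes any too-small value
   wrap around to a huge number.  A hypothetical probe of the idiom (never
   compiled): */
#if 0
unsigned const cbBogus = 0;                      /* below the minimum length of 1 */
Assert(!((unsigned)(cbBogus - 1u) <= 15u - 1u)); /* 0 - 1 wraps, so the range check fails */
#endif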
    15415 
    15416 
    15417 /**
    15418  * Calls iemUninitExec and iemExecStatusCodeFiddling.
    15419  *
    15420  * (Raw-mode builds also used to call iemRCRawMaybeReenter here.)
    15421  *
    15422  * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
    15423  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    15424  * @param   rcStrict    The status code to fiddle.
    15425  */
    15426 DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
    15427 {
    15428     iemUninitExec(pVCpu);
    15429     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    15430 }
    15431 
    15432 
    15433 /**
    15434  * Interface for HM and EM for executing string I/O OUT (write) instructions.
    15435  *
    15436  * This API ASSUMES that the caller has already verified that the guest code is
    15437  * allowed to access the I/O port.  (The I/O port is in the DX register in the
    15438  * guest state.)
    15439  *
    15440  * @returns Strict VBox status code.
    15441  * @param   pVCpu               The cross context virtual CPU structure.
    15442  * @param   cbValue             The size of the I/O port access (1, 2, or 4).
    15443  * @param   enmAddrMode         The addressing mode.
    15444  * @param   fRepPrefix          Indicates whether a repeat prefix is used
    15445  *                              (doesn't matter which for this instruction).
    15446  * @param   cbInstr             The instruction length in bytes.
    15447  * @param   iEffSeg             The effective segment register (index).
    15448  * @param   fIoChecked          Whether the access to the I/O port has been
    15449  *                              checked or not.  It's typically checked in the
    15450  *                              HM scenario.
    15451  */
    15452 VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
    15453                                                 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
    15454 {
    15455     AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
    15456     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
    15457 
    15458     /*
    15459      * State init.
    15460      */
    15461     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15462 
    15463     /*
    15464      * Switch orgy for getting to the right handler.
    15465      */
    15466     VBOXSTRICTRC rcStrict;
    15467     if (fRepPrefix)
    15468     {
    15469         switch (enmAddrMode)
    15470         {
    15471             case IEMMODE_16BIT:
    15472                 switch (cbValue)
    15473                 {
    15474                     case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15475                     case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15476                     case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15477                     default:
    15478                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15479                 }
    15480                 break;
    15481 
    15482             case IEMMODE_32BIT:
    15483                 switch (cbValue)
    15484                 {
    15485                     case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15486                     case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15487                     case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15488                     default:
    15489                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15490                 }
    15491                 break;
    15492 
    15493             case IEMMODE_64BIT:
    15494                 switch (cbValue)
    15495                 {
    15496                     case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15497                     case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15498                     case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15499                     default:
    15500                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15501                 }
    15502                 break;
    15503 
    15504             default:
    15505                 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
    15506         }
    15507     }
    15508     else
    15509     {
    15510         switch (enmAddrMode)
    15511         {
    15512             case IEMMODE_16BIT:
    15513                 switch (cbValue)
    15514                 {
    15515                     case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15516                     case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15517                     case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15518                     default:
    15519                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15520                 }
    15521                 break;
    15522 
    15523             case IEMMODE_32BIT:
    15524                 switch (cbValue)
    15525                 {
    15526                     case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15527                     case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15528                     case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15529                     default:
    15530                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15531                 }
    15532                 break;
    15533 
    15534             case IEMMODE_64BIT:
    15535                 switch (cbValue)
    15536                 {
    15537                     case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15538                     case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15539                     case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
    15540                     default:
    15541                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15542                 }
    15543                 break;
    15544 
    15545             default:
    15546                 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
    15547         }
    15548     }
    15549 
    15550     if (pVCpu->iem.s.cActiveMappings)
    15551         iemMemRollback(pVCpu);
    15552 
    15553     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15554 }
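
/* Usage sketch (illustrative only, not part of the original source): a VT-x or
 * AMD-V string I/O exit handler might forward a "rep outsb" to the write API
 * completed above (IEMExecStringIoWrite in the full source).  The handler name
 * is hypothetical; cbInstr and iEffSeg come from the caller's own exit
 * decoding, and HM is assumed to have already done the I/O permission check. */
#if 0
static VBOXSTRICTRC hmSketchExitRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg)
{
    /* One byte accesses, 16-bit addressing, with a REP prefix. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                cbInstr, iEffSeg, true /*fIoChecked*/);
}
#endif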
    15555 
    15556 
    15557 /**
     15558  * Interface for HM and EM to execute string I/O IN (read) instructions.
    15559  *
    15560  * This API ASSUMES that the caller has already verified that the guest code is
    15561  * allowed to access the I/O port.  (The I/O port is in the DX register in the
    15562  * guest state.)
    15563  *
    15564  * @returns Strict VBox status code.
    15565  * @param   pVCpu               The cross context virtual CPU structure.
    15566  * @param   cbValue             The size of the I/O port access (1, 2, or 4).
    15567  * @param   enmAddrMode         The addressing mode.
    15568  * @param   fRepPrefix          Indicates whether a repeat prefix is used
    15569  *                              (doesn't matter which for this instruction).
    15570  * @param   cbInstr             The instruction length in bytes.
    15571  * @param   fIoChecked          Whether the access to the I/O port has been
    15572  *                              checked or not.  It's typically checked in the
    15573  *                              HM scenario.
    15574  */
    15575 VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
    15576                                                bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
    15577 {
    15578     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
    15579 
    15580     /*
    15581      * State init.
    15582      */
    15583     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15584 
    15585     /*
    15586      * Switch orgy for getting to the right handler.
    15587      */
    15588     VBOXSTRICTRC rcStrict;
    15589     if (fRepPrefix)
    15590     {
    15591         switch (enmAddrMode)
    15592         {
    15593             case IEMMODE_16BIT:
    15594                 switch (cbValue)
    15595                 {
    15596                     case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
    15597                     case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
    15598                     case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
    15599                     default:
    15600                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15601                 }
    15602                 break;
    15603 
    15604             case IEMMODE_32BIT:
    15605                 switch (cbValue)
    15606                 {
    15607                     case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
    15608                     case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
    15609                     case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
    15610                     default:
    15611                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15612                 }
    15613                 break;
    15614 
    15615             case IEMMODE_64BIT:
    15616                 switch (cbValue)
    15617                 {
    15618                     case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
    15619                     case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
    15620                     case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
    15621                     default:
    15622                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15623                 }
    15624                 break;
    15625 
    15626             default:
    15627                 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
    15628         }
    15629     }
    15630     else
    15631     {
    15632         switch (enmAddrMode)
    15633         {
    15634             case IEMMODE_16BIT:
    15635                 switch (cbValue)
    15636                 {
    15637                     case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
    15638                     case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
    15639                     case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
    15640                     default:
    15641                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15642                 }
    15643                 break;
    15644 
    15645             case IEMMODE_32BIT:
    15646                 switch (cbValue)
    15647                 {
    15648                     case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
    15649                     case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
    15650                     case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
    15651                     default:
    15652                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15653                 }
    15654                 break;
    15655 
    15656             case IEMMODE_64BIT:
    15657                 switch (cbValue)
    15658                 {
    15659                     case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
    15660                     case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
    15661                     case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
    15662                     default:
    15663                         AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
    15664                 }
    15665                 break;
    15666 
    15667             default:
    15668                 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
    15669         }
    15670     }
    15671 
    15672     if (   pVCpu->iem.s.cActiveMappings == 0
    15673         || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
    15674     { /* likely */ }
    15675     else
    15676     {
    15677         AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
    15678         iemMemRollback(pVCpu);
    15679     }
    15680     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15681 }
    15682 
    15683 
    15684 /**
     15685  * Interface for rawmode to execute an OUT instruction.
    15686  *
    15687  * @returns Strict VBox status code.
    15688  * @param   pVCpu       The cross context virtual CPU structure.
    15689  * @param   cbInstr     The instruction length in bytes.
     15690  * @param   u16Port     The port to write to.
    15691  * @param   fImm        Whether the port is specified using an immediate operand or
    15692  *                      using the implicit DX register.
    15693  * @param   cbReg       The register size.
    15694  *
    15695  * @remarks In ring-0 not all of the state needs to be synced in.
    15696  */
    15697 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
    15698 {
    15699     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
    15700     Assert(cbReg <= 4 && cbReg != 3);
    15701 
    15702     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15703     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
    15704     Assert(!pVCpu->iem.s.cActiveMappings);
    15705     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15706 }
    15707 
    15708 
    15709 /**
     15710  * Interface for rawmode to execute an IN instruction.
    15711  *
    15712  * @returns Strict VBox status code.
    15713  * @param   pVCpu       The cross context virtual CPU structure.
    15714  * @param   cbInstr     The instruction length in bytes.
    15715  * @param   u16Port     The port to read.
    15716  * @param   fImm        Whether the port is specified using an immediate operand or
     15717  *                      using the implicit DX register.
    15718  * @param   cbReg       The register size.
    15719  */
    15720 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
    15721 {
    15722     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
    15723     Assert(cbReg <= 4 && cbReg != 3);
    15724 
    15725     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15726     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
    15727     Assert(!pVCpu->iem.s.cActiveMappings);
    15728     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15729 }
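
/* Usage sketch (illustrative only, not part of the original source): forwarding
 * a decoded "out 80h, al" style intercept via IEMExecDecodedOut.  The port
 * number, the immediate-operand flag and the one byte register size are
 * assumptions standing in for the caller's own decoding. */
#if 0
static VBOXSTRICTRC hmSketchExitPortWrite(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedOut(pVCpu, cbInstr, 0x80 /*u16Port*/, true /*fImm*/, 1 /*cbReg*/);
}
#endif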
    15730 
    15731 
    15732 /**
    15733  * Interface for HM and EM to write to a CRx register.
    15734  *
    15735  * @returns Strict VBox status code.
    15736  * @param   pVCpu       The cross context virtual CPU structure.
    15737  * @param   cbInstr     The instruction length in bytes.
    15738  * @param   iCrReg      The control register number (destination).
    15739  * @param   iGReg       The general purpose register number (source).
    15740  *
    15741  * @remarks In ring-0 not all of the state needs to be synced in.
    15742  */
    15743 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
    15744 {
    15745     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    15746     Assert(iCrReg < 16);
    15747     Assert(iGReg < 16);
    15748 
    15749     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15750     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
    15751     Assert(!pVCpu->iem.s.cActiveMappings);
    15752     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15753 }
    15754 
    15755 
    15756 /**
    15757  * Interface for HM and EM to read from a CRx register.
    15758  *
    15759  * @returns Strict VBox status code.
    15760  * @param   pVCpu       The cross context virtual CPU structure.
    15761  * @param   cbInstr     The instruction length in bytes.
    15762  * @param   iGReg       The general purpose register number (destination).
    15763  * @param   iCrReg      The control register number (source).
    15764  *
    15765  * @remarks In ring-0 not all of the state needs to be synced in.
    15766  */
    15767 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
    15768 {
    15769     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    15770     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
    15771                         | CPUMCTX_EXTRN_APIC_TPR);
    15772     Assert(iCrReg < 16);
    15773     Assert(iGReg < 16);
    15774 
    15775     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15776     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
    15777     Assert(!pVCpu->iem.s.cActiveMappings);
    15778     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15779 }
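
/* Usage sketch (illustrative only, not part of the original source): emulating
 * a "mov cr3, rax" intercept with the CRx write interface above.  The register
 * indices follow the usual x86 encoding (RAX = 0); picking CR3 and RAX is just
 * an example scenario. */
#if 0
static VBOXSTRICTRC hmSketchExitMovToCr3(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg*/, 0 /*iGReg*/);
}
#endif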
    15780 
    15781 
    15782 /**
    15783  * Interface for HM and EM to clear the CR0[TS] bit.
    15784  *
    15785  * @returns Strict VBox status code.
    15786  * @param   pVCpu       The cross context virtual CPU structure.
    15787  * @param   cbInstr     The instruction length in bytes.
    15788  *
    15789  * @remarks In ring-0 not all of the state needs to be synced in.
    15790  */
    15791 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
    15792 {
    15793     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    15794 
    15795     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15796     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
    15797     Assert(!pVCpu->iem.s.cActiveMappings);
    15798     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15799 }
    15800 
    15801 
    15802 /**
    15803  * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
    15804  *
    15805  * @returns Strict VBox status code.
    15806  * @param   pVCpu           The cross context virtual CPU structure.
    15807  * @param   cbInstr         The instruction length in bytes.
    15808  * @param   uValue          The value to load into CR0.
    15809  * @param   GCPtrEffDst     The guest-linear address if the LMSW instruction has a
    15810  *                          memory operand. Otherwise pass NIL_RTGCPTR.
    15811  *
    15812  * @remarks In ring-0 not all of the state needs to be synced in.
    15813  */
    15814 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
    15815 {
    15816     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    15817 
    15818     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15819     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
    15820     Assert(!pVCpu->iem.s.cActiveMappings);
    15821     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15822 }
    15823 
    15824 
    15825 /**
    15826  * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
    15827  *
    15828  * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
    15829  *
    15830  * @returns Strict VBox status code.
    15831  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    15832  * @param   cbInstr     The instruction length in bytes.
    15833  * @remarks In ring-0 not all of the state needs to be synced in.
    15834  * @thread  EMT(pVCpu)
    15835  */
    15836 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
    15837 {
    15838     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    15839 
    15840     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15841     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
    15842     Assert(!pVCpu->iem.s.cActiveMappings);
    15843     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15844 }
    15845 
    15846 
    15847 /**
    15848  * Interface for HM and EM to emulate the WBINVD instruction.
    15849  *
    15850  * @returns Strict VBox status code.
    15851  * @param   pVCpu       The cross context virtual CPU structure.
    15852  * @param   cbInstr     The instruction length in bytes.
    15853  *
    15854  * @remarks In ring-0 not all of the state needs to be synced in.
    15855  */
    15856 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
    15857 {
    15858     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    15859 
    15860     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15861     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
    15862     Assert(!pVCpu->iem.s.cActiveMappings);
    15863     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15864 }
    15865 
    15866 
    15867 /**
    15868  * Interface for HM and EM to emulate the INVD instruction.
    15869  *
    15870  * @returns Strict VBox status code.
    15871  * @param   pVCpu       The cross context virtual CPU structure.
    15872  * @param   cbInstr     The instruction length in bytes.
    15873  *
    15874  * @remarks In ring-0 not all of the state needs to be synced in.
    15875  */
    15876 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
    15877 {
    15878     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    15879 
    15880     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15881     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
    15882     Assert(!pVCpu->iem.s.cActiveMappings);
    15883     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15884 }
    15885 
    15886 
    15887 /**
    15888  * Interface for HM and EM to emulate the INVLPG instruction.
    15889  *
    15890  * @returns Strict VBox status code.
    15891  * @retval  VINF_PGM_SYNC_CR3
    15892  *
    15893  * @param   pVCpu       The cross context virtual CPU structure.
    15894  * @param   cbInstr     The instruction length in bytes.
    15895  * @param   GCPtrPage   The effective address of the page to invalidate.
    15896  *
    15897  * @remarks In ring-0 not all of the state needs to be synced in.
    15898  */
    15899 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
    15900 {
    15901     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    15902 
    15903     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15904     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
    15905     Assert(!pVCpu->iem.s.cActiveMappings);
    15906     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15907 }
    15908 
    15909 
    15910 /**
    15911  * Interface for HM and EM to emulate the INVPCID instruction.
    15912  *
    15913  * @returns Strict VBox status code.
    15914  * @retval  VINF_PGM_SYNC_CR3
    15915  *
    15916  * @param   pVCpu       The cross context virtual CPU structure.
    15917  * @param   cbInstr     The instruction length in bytes.
    15918  * @param   iEffSeg     The effective segment register.
    15919  * @param   GCPtrDesc   The effective address of the INVPCID descriptor.
    15920  * @param   uType       The invalidation type.
    15921  *
    15922  * @remarks In ring-0 not all of the state needs to be synced in.
    15923  */
    15924 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
    15925                                                  uint64_t uType)
    15926 {
    15927     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
    15928 
    15929     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15930     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
    15931     Assert(!pVCpu->iem.s.cActiveMappings);
    15932     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15933 }
    15934 
    15935 
    15936 /**
    15937  * Interface for HM and EM to emulate the CPUID instruction.
    15938  *
    15939  * @returns Strict VBox status code.
    15940  *
    15941  * @param   pVCpu               The cross context virtual CPU structure.
    15942  * @param   cbInstr             The instruction length in bytes.
    15943  *
     15944  * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
    15945  */
    15946 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
    15947 {
    15948     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    15949     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
    15950 
    15951     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15952     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
    15953     Assert(!pVCpu->iem.s.cActiveMappings);
    15954     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15955 }
    15956 
    15957 
    15958 /**
    15959  * Interface for HM and EM to emulate the RDPMC instruction.
    15960  *
    15961  * @returns Strict VBox status code.
    15962  *
    15963  * @param   pVCpu               The cross context virtual CPU structure.
    15964  * @param   cbInstr             The instruction length in bytes.
    15965  *
    15966  * @remarks Not all of the state needs to be synced in.
    15967  */
    15968 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
    15969 {
    15970     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    15971     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
    15972 
    15973     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15974     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
    15975     Assert(!pVCpu->iem.s.cActiveMappings);
    15976     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    15977 }
    15978 
    15979 
    15980 /**
    15981  * Interface for HM and EM to emulate the RDTSC instruction.
    15982  *
    15983  * @returns Strict VBox status code.
     15984  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
    15985  *
    15986  * @param   pVCpu               The cross context virtual CPU structure.
    15987  * @param   cbInstr             The instruction length in bytes.
    15988  *
    15989  * @remarks Not all of the state needs to be synced in.
    15990  */
    15991 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
    15992 {
    15993     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    15994     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
    15995 
    15996     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    15997     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
    15998     Assert(!pVCpu->iem.s.cActiveMappings);
    15999     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16000 }
    16001 
    16002 
    16003 /**
    16004  * Interface for HM and EM to emulate the RDTSCP instruction.
    16005  *
    16006  * @returns Strict VBox status code.
     16007  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
    16008  *
    16009  * @param   pVCpu               The cross context virtual CPU structure.
    16010  * @param   cbInstr             The instruction length in bytes.
    16011  *
     16012  * @remarks Not all of the state needs to be synced in.  It is recommended
     16013  *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
    16014  */
    16015 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
    16016 {
    16017     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    16018     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
    16019 
    16020     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16021     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
    16022     Assert(!pVCpu->iem.s.cActiveMappings);
    16023     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16024 }
    16025 
    16026 
    16027 /**
    16028  * Interface for HM and EM to emulate the RDMSR instruction.
    16029  *
    16030  * @returns Strict VBox status code.
     16031  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
    16032  *
    16033  * @param   pVCpu               The cross context virtual CPU structure.
    16034  * @param   cbInstr             The instruction length in bytes.
    16035  *
    16036  * @remarks Not all of the state needs to be synced in.  Requires RCX and
    16037  *          (currently) all MSRs.
    16038  */
    16039 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
    16040 {
    16041     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    16042     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
    16043 
    16044     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16045     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
    16046     Assert(!pVCpu->iem.s.cActiveMappings);
    16047     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16048 }
    16049 
    16050 
    16051 /**
    16052  * Interface for HM and EM to emulate the WRMSR instruction.
    16053  *
    16054  * @returns Strict VBox status code.
     16055  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
    16056  *
    16057  * @param   pVCpu               The cross context virtual CPU structure.
    16058  * @param   cbInstr             The instruction length in bytes.
    16059  *
    16060  * @remarks Not all of the state needs to be synced in.  Requires RCX, RAX, RDX,
    16061  *          and (currently) all MSRs.
    16062  */
    16063 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
    16064 {
    16065     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
    16066     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
    16067                         | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
    16068 
    16069     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16070     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
    16071     Assert(!pVCpu->iem.s.cActiveMappings);
    16072     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16073 }
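
/* Usage sketch (illustrative only, not part of the original source): a typical
 * MSR intercept handler forwards to the two helpers above once the register
 * state named in their IEM_CTX_ASSERT masks has been imported.  Selecting the
 * direction via an fWrite parameter is an assumption. */
#if 0
static VBOXSTRICTRC hmSketchExitMsrAccess(PVMCPUCC pVCpu, uint8_t cbInstr, bool fWrite)
{
    return fWrite ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
                  : IEMExecDecodedRdmsr(pVCpu, cbInstr);
}
#endif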
    16074 
    16075 
    16076 /**
    16077  * Interface for HM and EM to emulate the MONITOR instruction.
    16078  *
    16079  * @returns Strict VBox status code.
     16080  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
    16081  *
    16082  * @param   pVCpu               The cross context virtual CPU structure.
    16083  * @param   cbInstr             The instruction length in bytes.
    16084  *
    16085  * @remarks Not all of the state needs to be synced in.
    16086  * @remarks ASSUMES the default segment of DS and no segment override prefixes
    16087  *          are used.
    16088  */
    16089 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
    16090 {
    16091     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    16092     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
    16093 
    16094     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16095     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
    16096     Assert(!pVCpu->iem.s.cActiveMappings);
    16097     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16098 }
    16099 
    16100 
    16101 /**
    16102  * Interface for HM and EM to emulate the MWAIT instruction.
    16103  *
    16104  * @returns Strict VBox status code.
     16105  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
    16106  *
    16107  * @param   pVCpu               The cross context virtual CPU structure.
    16108  * @param   cbInstr             The instruction length in bytes.
    16109  *
    16110  * @remarks Not all of the state needs to be synced in.
    16111  */
    16112 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
    16113 {
    16114     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    16115     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
    16116 
    16117     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16118     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
    16119     Assert(!pVCpu->iem.s.cActiveMappings);
    16120     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16121 }
    16122 
    16123 
    16124 /**
    16125  * Interface for HM and EM to emulate the HLT instruction.
    16126  *
    16127  * @returns Strict VBox status code.
     16128  * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
    16129  *
    16130  * @param   pVCpu               The cross context virtual CPU structure.
    16131  * @param   cbInstr             The instruction length in bytes.
    16132  *
    16133  * @remarks Not all of the state needs to be synced in.
    16134  */
    16135 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
    16136 {
    16137     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
    16138 
    16139     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16140     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
    16141     Assert(!pVCpu->iem.s.cActiveMappings);
    16142     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16143 }
    16144 
    16145 
    16146 /**
    16147  * Checks if IEM is in the process of delivering an event (interrupt or
    16148  * exception).
    16149  *
    16150  * @returns true if we're in the process of raising an interrupt or exception,
    16151  *          false otherwise.
    16152  * @param   pVCpu           The cross context virtual CPU structure.
    16153  * @param   puVector        Where to store the vector associated with the
    16154  *                          currently delivered event, optional.
     16155  * @param   pfFlags         Where to store the event delivery flags (see
    16156  *                          IEM_XCPT_FLAGS_XXX), optional.
    16157  * @param   puErr           Where to store the error code associated with the
    16158  *                          event, optional.
    16159  * @param   puCr2           Where to store the CR2 associated with the event,
    16160  *                          optional.
    16161  * @remarks The caller should check the flags to determine if the error code and
    16162  *          CR2 are valid for the event.
    16163  */
    16164 VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
    16165 {
    16166     bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
    16167     if (fRaisingXcpt)
    16168     {
    16169         if (puVector)
    16170             *puVector = pVCpu->iem.s.uCurXcpt;
    16171         if (pfFlags)
    16172             *pfFlags = pVCpu->iem.s.fCurXcpt;
    16173         if (puErr)
    16174             *puErr = pVCpu->iem.s.uCurXcptErr;
    16175         if (puCr2)
    16176             *puCr2 = pVCpu->iem.s.uCurXcptCr2;
    16177     }
    16178     return fRaisingXcpt;
    16179 }
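
/* Usage sketch (illustrative only, not part of the original source): querying
 * the event IEM is currently delivering, e.g. when deciding how to reflect an
 * exception to a nested guest.  The log statement is a stand-in for real use;
 * remember to check the flags before trusting the error code and CR2. */
#if 0
static void hmSketchLogCurrentEvent(PVMCPUCC pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("IEM delivering vector %#x, flags %#x, err %#x, cr2 %#RX64\n", uVector, fFlags, uErr, uCr2));
}
#endif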
    16180 
    16181 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    16182 
    16183 /**
    16184  * Interface for HM and EM to emulate the CLGI instruction.
    16185  *
    16186  * @returns Strict VBox status code.
    16187  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    16188  * @param   cbInstr     The instruction length in bytes.
    16189  * @thread  EMT(pVCpu)
    16190  */
    16191 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
    16192 {
    16193     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    16194 
    16195     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16196     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
    16197     Assert(!pVCpu->iem.s.cActiveMappings);
    16198     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16199 }
    16200 
    16201 
    16202 /**
    16203  * Interface for HM and EM to emulate the STGI instruction.
    16204  *
    16205  * @returns Strict VBox status code.
    16206  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    16207  * @param   cbInstr     The instruction length in bytes.
    16208  * @thread  EMT(pVCpu)
    16209  */
    16210 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
    16211 {
    16212     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    16213 
    16214     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16215     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
    16216     Assert(!pVCpu->iem.s.cActiveMappings);
    16217     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16218 }
    16219 
    16220 
    16221 /**
    16222  * Interface for HM and EM to emulate the VMLOAD instruction.
    16223  *
    16224  * @returns Strict VBox status code.
    16225  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    16226  * @param   cbInstr     The instruction length in bytes.
    16227  * @thread  EMT(pVCpu)
    16228  */
    16229 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
    16230 {
    16231     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    16232 
    16233     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16234     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
    16235     Assert(!pVCpu->iem.s.cActiveMappings);
    16236     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16237 }
    16238 
    16239 
    16240 /**
    16241  * Interface for HM and EM to emulate the VMSAVE instruction.
    16242  *
    16243  * @returns Strict VBox status code.
    16244  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    16245  * @param   cbInstr     The instruction length in bytes.
    16246  * @thread  EMT(pVCpu)
    16247  */
    16248 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
    16249 {
    16250     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    16251 
    16252     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16253     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
    16254     Assert(!pVCpu->iem.s.cActiveMappings);
    16255     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16256 }
    16257 
    16258 
    16259 /**
    16260  * Interface for HM and EM to emulate the INVLPGA instruction.
    16261  *
    16262  * @returns Strict VBox status code.
    16263  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    16264  * @param   cbInstr     The instruction length in bytes.
    16265  * @thread  EMT(pVCpu)
    16266  */
    16267 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
    16268 {
    16269     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    16270 
    16271     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16272     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
    16273     Assert(!pVCpu->iem.s.cActiveMappings);
    16274     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16275 }
    16276 
    16277 
    16278 /**
    16279  * Interface for HM and EM to emulate the VMRUN instruction.
    16280  *
    16281  * @returns Strict VBox status code.
    16282  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    16283  * @param   cbInstr     The instruction length in bytes.
    16284  * @thread  EMT(pVCpu)
    16285  */
    16286 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
    16287 {
    16288     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    16289     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
    16290 
    16291     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16292     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
    16293     Assert(!pVCpu->iem.s.cActiveMappings);
    16294     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16295 }
    16296 
    16297 
    16298 /**
    16299  * Interface for HM and EM to emulate \#VMEXIT.
    16300  *
    16301  * @returns Strict VBox status code.
    16302  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    16303  * @param   uExitCode   The exit code.
    16304  * @param   uExitInfo1  The exit info. 1 field.
    16305  * @param   uExitInfo2  The exit info. 2 field.
    16306  * @thread  EMT(pVCpu)
    16307  */
    16308 VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
    16309 {
    16310     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
    16311     VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
    16312     if (pVCpu->iem.s.cActiveMappings)
    16313         iemMemRollback(pVCpu);
    16314     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16315 }
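
/* Usage sketch (illustrative only, not part of the original source): forcing a
 * nested-guest \#VMEXIT for an I/O intercept.  SVM_EXIT_IOIO is the real SVM
 * exit code; the exit info values (the I/O info word and the next RIP, per the
 * AMD spec) are assumed to have been assembled by the caller. */
#if 0
static VBOXSTRICTRC hmSketchNestedIoVmexit(PVMCPUCC pVCpu, uint64_t uIoExitInfo, uint64_t uNextRip)
{
    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_IOIO, uIoExitInfo, uNextRip);
}
#endif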
    16316 
    16317 #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
    16318 
    16319 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    16320 
    16321 /**
    16322  * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
    16323  *
    16324  * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
     16325  * are performed. Bounds checks are done in strict builds only.
    16326  *
    16327  * @param   pVmcs           Pointer to the virtual VMCS.
    16328  * @param   u64VmcsField    The VMCS field.
    16329  * @param   pu64Dst         Where to store the VMCS value.
    16330  *
    16331  * @remarks May be called with interrupts disabled.
    16332  * @todo    This should probably be moved to CPUM someday.
    16333  */
    16334 VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
    16335 {
    16336     AssertPtr(pVmcs);
    16337     AssertPtr(pu64Dst);
    16338     iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
    16339 }
    16340 
    16341 
    16342 /**
    16343  * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
    16344  *
    16345  * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
     16346  * are performed. Bounds checks are done in strict builds only.
    16347  *
    16348  * @param   pVmcs           Pointer to the virtual VMCS.
    16349  * @param   u64VmcsField    The VMCS field.
    16350  * @param   u64Val          The value to write.
    16351  *
    16352  * @remarks May be called with interrupts disabled.
    16353  * @todo    This should probably be moved to CPUM someday.
    16354  */
    16355 VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
    16356 {
    16357     AssertPtr(pVmcs);
    16358     iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
    16359 }
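
/* Usage sketch (illustrative only, not part of the original source): a
 * read-modify-write of a virtual VMCS field using the two accessors above.
 * VMX_VMCS64_GUEST_DEBUGCTL_FULL is a real field encoding; clearing bit 0
 * (LBR) is just an example. */
#if 0
static void hmSketchPatchVmcsField(PVMXVVMCS pVmcs)
{
    uint64_t u64Val;
    IEMReadVmxVmcsField(pVmcs, VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
    IEMWriteVmxVmcsField(pVmcs, VMX_VMCS64_GUEST_DEBUGCTL_FULL, u64Val & ~RT_BIT_64(0));
}
#endif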
    16360 
    16361 
    16362 /**
    16363  * Interface for HM and EM to virtualize x2APIC MSR accesses.
    16364  *
    16365  * @returns Strict VBox status code.
    16366  * @retval  VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
    16367  * @retval  VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
    16368  *          the x2APIC device.
     16369  * @retval  VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
    16370  *
    16371  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
     16372  * @param   idMsr       The MSR being read or written.
    16373  * @param   pu64Value   Pointer to the value being written or where to store the
    16374  *                      value being read.
    16375  * @param   fWrite      Whether this is an MSR write or read access.
    16376  * @thread  EMT(pVCpu)
    16377  */
    16378 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
    16379 {
    16380     Assert(pu64Value);
    16381 
    16382     VBOXSTRICTRC rcStrict;
    16383     if (fWrite)
    16384         rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
    16385     else
    16386         rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
    16387     Assert(!pVCpu->iem.s.cActiveMappings);
    16388     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16390 }
    16391 
    16392 
    16393 /**
    16394  * Interface for HM and EM to virtualize memory-mapped APIC accesses.
    16395  *
    16396  * @returns Strict VBox status code.
    16397  * @retval  VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
    16398  * @retval  VINF_VMX_VMEXIT if the access causes a VM-exit.
    16399  *
    16400  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16401  * @param   pExitInfo       Pointer to the VM-exit information.
    16402  * @param   pExitEventInfo  Pointer to the VM-exit event information.
    16403  * @thread  EMT(pVCpu)
    16404  */
    16405 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
    16406 {
    16407     Assert(pExitInfo);
    16408     Assert(pExitEventInfo);
    16409     VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
    16410     Assert(!pVCpu->iem.s.cActiveMappings);
    16411     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16413 }
    16414 
    16415 
    16416 /**
    16417  * Interface for HM and EM to perform an APIC-write emulation which may cause a
    16418  * VM-exit.
    16419  *
    16420  * @returns Strict VBox status code.
    16421  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    16422  * @thread  EMT(pVCpu)
    16423  */
    16424 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
    16425 {
    16426     VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
    16427     Assert(!pVCpu->iem.s.cActiveMappings);
    16428     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16429 }
    16430 
    16431 
    16432 /**
    16433  * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
    16434  *
    16435  * @returns Strict VBox status code.
    16436  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16437  * @thread  EMT(pVCpu)
    16438  */
    16439 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
    16440 {
    16441     VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
    16442     Assert(!pVCpu->iem.s.cActiveMappings);
    16443     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16444 }
    16445 
    16446 
    16447 /**
    16448  * Interface for HM and EM to emulate VM-exit due to external interrupts.
    16449  *
    16450  * @returns Strict VBox status code.
    16451  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16452  * @param   uVector         The external interrupt vector (pass 0 if the external
    16453  *                          interrupt is still pending).
    16454  * @param   fIntPending     Whether the external interrupt is pending or
     16455  *                          acknowledged in the interrupt controller.
    16456  * @thread  EMT(pVCpu)
    16457  */
    16458 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
    16459 {
    16460     VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
    16461     Assert(!pVCpu->iem.s.cActiveMappings);
    16462     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16463 }
    16464 
    16465 
    16466 /**
    16467  * Interface for HM and EM to emulate VM-exit due to exceptions.
    16468  *
     16469  * Exceptions include NMIs, software exceptions (those generated by INT3 or
    16470  * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
    16471  *
    16472  * @returns Strict VBox status code.
    16473  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16474  * @param   pExitInfo       Pointer to the VM-exit information.
    16475  * @param   pExitEventInfo  Pointer to the VM-exit event information.
    16476  * @thread  EMT(pVCpu)
    16477  */
    16478 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
    16479 {
    16480     Assert(pExitInfo);
    16481     Assert(pExitEventInfo);
    16482     VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
    16483     Assert(!pVCpu->iem.s.cActiveMappings);
    16484     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16485 }
    16486 
    16487 
    16488 /**
    16489  * Interface for HM and EM to emulate VM-exit due to NMIs.
    16490  *
    16491  * @returns Strict VBox status code.
    16492  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16493  * @thread  EMT(pVCpu)
    16494  */
    16495 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
    16496 {
    16497     VMXVEXITINFO ExitInfo;
    16498     RT_ZERO(ExitInfo);
    16499     ExitInfo.uReason = VMX_EXIT_XCPT_OR_NMI;
    16500 
    16501     VMXVEXITEVENTINFO ExitEventInfo;
    16502     RT_ZERO(ExitEventInfo);
    16503     ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID,  1)
    16504                                | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE,   VMX_EXIT_INT_INFO_TYPE_NMI)
    16505                                | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
    16506 
    16507     VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
    16508     Assert(!pVCpu->iem.s.cActiveMappings);
    16509     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16510 }
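
/* Illustrative sketch (not part of the original source): the same RT_BF_MAKE
 * composition as above, but for a hardware \#PF exception with an error code.
 * The VMX_BF_EXIT_INT_INFO_XXX fields are the real ones; the scenario itself
 * is an assumption. */
#if 0
    uint32_t const uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID,          1)
                                | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE,           VMX_EXIT_INT_INFO_TYPE_HW_XCPT)
                                | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR,         X86_XCPT_PF)
                                | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_ERR_CODE_VALID, 1);
#endif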
    16511 
    16512 
    16513 /**
    16514  * Interface for HM and EM to emulate VM-exit due to a triple-fault.
    16515  *
    16516  * @returns Strict VBox status code.
    16517  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16518  * @thread  EMT(pVCpu)
    16519  */
    16520 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
    16521 {
    16522     VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
    16523     Assert(!pVCpu->iem.s.cActiveMappings);
    16524     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16525 }
    16526 
    16527 
    16528 /**
    16529  * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
    16530  *
    16531  * @returns Strict VBox status code.
    16532  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16533  * @param   uVector         The SIPI vector.
    16534  * @thread  EMT(pVCpu)
    16535  */
    16536 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
    16537 {
    16538     VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
    16539     Assert(!pVCpu->iem.s.cActiveMappings);
    16540     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16541 }
    16542 
    16543 
    16544 /**
    16545  * Interface for HM and EM to emulate a VM-exit.
    16546  *
    16547  * If a specialized version of a VM-exit handler exists, that must be used instead.
    16548  *
    16549  * @returns Strict VBox status code.
    16550  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16551  * @param   uExitReason     The VM-exit reason.
    16552  * @param   u64ExitQual     The Exit qualification.
    16553  * @thread  EMT(pVCpu)
    16554  */
    16555 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
    16556 {
    16557     VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
    16558     Assert(!pVCpu->iem.s.cActiveMappings);
    16559     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16560 }
    16561 
    16562 
    16563 /**
    16564  * Interface for HM and EM to emulate a VM-exit due to an instruction.
    16565  *
     16566  * This is meant to be used for those instructions for which VMX provides
     16567  * additional decoding information beyond just the instruction length!
    16568  *
    16569  * @returns Strict VBox status code.
    16570  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    16571  * @param   pExitInfo   Pointer to the VM-exit information.
    16572  * @thread  EMT(pVCpu)
    16573  */
    16574 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
    16575 {
    16576     VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
    16577     Assert(!pVCpu->iem.s.cActiveMappings);
    16578     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16579 }
    16580 
    16581 
    16582 /**
    16583  * Interface for HM and EM to emulate a VM-exit due to an instruction.
    16584  *
     16585  * This is meant to be used for those instructions for which VMX provides only
     16586  * the instruction length.
    16587  *
    16588  * @returns Strict VBox status code.
    16589  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
     16590  * @param   uExitReason The VM-exit reason.
    16591  * @param   cbInstr     The instruction length in bytes.
    16592  * @thread  EMT(pVCpu)
    16593  */
    16594 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
    16595 {
    16596     VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
    16597     Assert(!pVCpu->iem.s.cActiveMappings);
    16598     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16599 }
    16600 
    16601 
    16602 /**
    16603  * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
     16604  * Virtualized-EOI, TPR-below-threshold).
    16605  *
    16606  * @returns Strict VBox status code.
    16607  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16608  * @param   pExitInfo       Pointer to the VM-exit information.
    16609  * @thread  EMT(pVCpu)
    16610  */
    16611 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
    16612 {
    16613     Assert(pExitInfo);
    16614     VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
    16615     Assert(!pVCpu->iem.s.cActiveMappings);
    16616     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16617 }
    16618 
    16619 
    16620 /**
    16621  * Interface for HM and EM to emulate a VM-exit due to a task switch.
    16622  *
    16623  * @returns Strict VBox status code.
    16624  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16625  * @param   pExitInfo       Pointer to the VM-exit information.
    16626  * @param   pExitEventInfo  Pointer to the VM-exit event information.
    16627  * @thread  EMT(pVCpu)
    16628  */
    16629 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
    16630 {
    16631     Assert(pExitInfo);
    16632     Assert(pExitEventInfo);
    16633     Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
    16634     VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
    16635     Assert(!pVCpu->iem.s.cActiveMappings);
    16636     return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    16637 }
    16638 
    16639 
    16640 /**
    16641  * Interface for HM and EM to emulate the VMREAD instruction.
    16642  *
    16643  * @returns Strict VBox status code.
    16644  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16645  * @param   pExitInfo       Pointer to the VM-exit information.
    16646  * @thread  EMT(pVCpu)
    16647  */
    16648 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
    16649 {
     16650     Assert(pExitInfo);
     16651     IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
     16652     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    16653 
    16654     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16655 
    16656     VBOXSTRICTRC   rcStrict;
    16657     uint8_t const  cbInstr       = pExitInfo->cbInstr;
    16658     bool const     fIs64BitMode  = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
    16659     uint64_t const u64FieldEnc   = fIs64BitMode
    16660                                  ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
    16661                                  : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
    16662     if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
    16663     {
    16664         if (fIs64BitMode)
    16665         {
    16666             uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
    16667             rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
    16668         }
    16669         else
    16670         {
    16671             uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
    16672             rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
    16673         }
    16674     }
    16675     else
    16676     {
    16677         RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
    16678         uint8_t const iEffSeg  = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
    16679         rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
    16680     }
    16681     Assert(!pVCpu->iem.s.cActiveMappings);
    16682     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16683 }
    16684 
    16685 
    16686 /**
    16687  * Interface for HM and EM to emulate the VMWRITE instruction.
    16688  *
    16689  * @returns Strict VBox status code.
    16690  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16691  * @param   pExitInfo       Pointer to the VM-exit information.
    16692  * @thread  EMT(pVCpu)
    16693  */
    16694 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
    16695 {
     16696     Assert(pExitInfo);
     16697     IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
     16698     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    16699 
    16700     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16701 
    16702     uint64_t u64Val;
    16703     uint8_t  iEffSeg;
    16704     if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
    16705     {
    16706         u64Val  = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
    16707         iEffSeg = UINT8_MAX;
    16708     }
    16709     else
    16710     {
    16711         u64Val  = pExitInfo->GCPtrEffAddr;
    16712         iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
    16713     }
    16714     uint8_t const  cbInstr     = pExitInfo->cbInstr;
    16715     uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
    16716                                ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
    16717                                : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
    16718     VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
    16719     Assert(!pVCpu->iem.s.cActiveMappings);
    16720     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16721 }
    16722 
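/*
 * Illustrative only: the VMWRITE case is symmetric to the VMREAD sketch above;
 * a hypothetical caller fills the same VMXVEXITINFO fields and calls
 * IEMExecDecodedVmwrite instead (GCPtrEffAddr is only used for memory sources).
 * cbInstr, uInstrInfo and GCPtrEffAddr are assumed to be available to the caller.
 */
#if 0 /* example sketch, not built */
    VMXVEXITINFO ExitInfo;
    RT_ZERO(ExitInfo);
    ExitInfo.uReason      = VMX_EXIT_VMWRITE; /* VM-exit reason for VMWRITE */
    ExitInfo.cbInstr      = cbInstr;
    ExitInfo.InstrInfo.u  = uInstrInfo;
    ExitInfo.GCPtrEffAddr = GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
#endif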
    16723 
    16724 /**
    16725  * Interface for HM and EM to emulate the VMPTRLD instruction.
    16726  *
    16727  * @returns Strict VBox status code.
    16728  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16729  * @param   pExitInfo       Pointer to the VM-exit information.
    16730  * @thread  EMT(pVCpu)
    16731  */
    16732 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
    16733 {
    16734     Assert(pExitInfo);
    16735     IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    16736     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    16737 
    16738     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16739 
    16740     uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    16741     uint8_t const cbInstr   = pExitInfo->cbInstr;
    16742     RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    16743     VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    16744     Assert(!pVCpu->iem.s.cActiveMappings);
    16745     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16746 }
    16747 
    16748 
    16749 /**
    16750  * Interface for HM and EM to emulate the VMPTRST instruction.
    16751  *
    16752  * @returns Strict VBox status code.
    16753  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16754  * @param   pExitInfo       Pointer to the VM-exit information.
    16755  * @thread  EMT(pVCpu)
    16756  */
    16757 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
    16758 {
    16759     Assert(pExitInfo);
    16760     IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    16761     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    16762 
    16763     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16764 
    16765     uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    16766     uint8_t const cbInstr   = pExitInfo->cbInstr;
    16767     RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    16768     VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    16769     Assert(!pVCpu->iem.s.cActiveMappings);
    16770     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16771 }
    16772 
    16773 
    16774 /**
    16775  * Interface for HM and EM to emulate the VMCLEAR instruction.
    16776  *
    16777  * @returns Strict VBox status code.
    16778  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16779  * @param   pExitInfo       Pointer to the VM-exit information.
    16780  * @thread  EMT(pVCpu)
    16781  */
    16782 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
    16783 {
    16784     Assert(pExitInfo);
    16785     IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    16786     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    16787 
    16788     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16789 
    16790     uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    16791     uint8_t const cbInstr   = pExitInfo->cbInstr;
    16792     RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    16793     VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    16794     Assert(!pVCpu->iem.s.cActiveMappings);
    16795     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16796 }
    16797 
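/*
 * Illustrative only: VMPTRLD, VMPTRST and VMCLEAR all take a single memory
 * operand decoded into InstrInfo.VmxXsave.iSegReg and GCPtrEffAddr, so a
 * hypothetical caller builds the same VMXVEXITINFO for all three and merely
 * picks the matching entry point (inputs assumed available to the caller):
 */
#if 0 /* example sketch, not built */
    VMXVEXITINFO ExitInfo;
    RT_ZERO(ExitInfo);
    ExitInfo.uReason      = VMX_EXIT_VMCLEAR; /* or VMX_EXIT_VMPTRLD / VMX_EXIT_VMPTRST */
    ExitInfo.cbInstr      = cbInstr;
    ExitInfo.InstrInfo.u  = uInstrInfo;       /* segment register in VmxXsave.iSegReg */
    ExitInfo.GCPtrEffAddr = GCPtrEffAddr;     /* guest-linear address of the VMCS pointer */
    VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
#endif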
    16798 
    16799 /**
    16800  * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instructions.
    16801  *
    16802  * @returns Strict VBox status code.
    16803  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16804  * @param   cbInstr         The instruction length in bytes.
    16805  * @param   uInstrId        The instruction ID (VMXINSTRID_VMLAUNCH or
    16806  *                          VMXINSTRID_VMRESUME).
    16807  * @thread  EMT(pVCpu)
    16808  */
    16809 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
    16810 {
    16811     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    16812     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
    16813 
    16814     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16815     VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
    16816     Assert(!pVCpu->iem.s.cActiveMappings);
    16817     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16818 }
    16819 
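/*
 * Illustrative only: VMLAUNCH and VMRESUME share the entry point above and are
 * distinguished by uInstrId; both encodings are 3 bytes long, so a hypothetical
 * caller (fResume is an assumed flag) would do:
 */
#if 0 /* example sketch, not built */
    VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, 3 /*cbInstr*/,
                                                           fResume ? VMXINSTRID_VMRESUME : VMXINSTRID_VMLAUNCH);
#endif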
    16820 
    16821 /**
    16822  * Interface for HM and EM to emulate the VMXON instruction.
    16823  *
    16824  * @returns Strict VBox status code.
    16825  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16826  * @param   pExitInfo       Pointer to the VM-exit information.
    16827  * @thread  EMT(pVCpu)
    16828  */
    16829 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
    16830 {
    16831     Assert(pExitInfo);
    16832     IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    16833     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    16834 
    16835     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16836 
    16837     uint8_t const iEffSeg    = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    16838     uint8_t const cbInstr    = pExitInfo->cbInstr;
    16839     RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
    16840     VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
    16841     Assert(!pVCpu->iem.s.cActiveMappings);
    16842     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16843 }
    16844 
    16845 
    16846 /**
    16847  * Interface for HM and EM to emulate the VMXOFF instruction.
    16848  *
    16849  * @returns Strict VBox status code.
    16850  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    16851  * @param   cbInstr     The instruction length in bytes.
    16852  * @thread  EMT(pVCpu)
    16853  */
    16854 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
    16855 {
    16856     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    16857     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    16858 
    16859     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16860     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
    16861     Assert(!pVCpu->iem.s.cActiveMappings);
    16862     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16863 }
    16864 
    16865 
    16866 /**
    16867  * Interface for HM and EM to emulate the INVVPID instruction.
    16868  *
    16869  * @returns Strict VBox status code.
    16870  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16871  * @param   pExitInfo       Pointer to the VM-exit information.
    16872  * @thread  EMT(pVCpu)
    16873  */
    16874 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
    16875 {
    16876     Assert(pExitInfo);
    16877     IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
    16878     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    16879 
    16880     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16881 
    16882     uint8_t const  iEffSeg          = pExitInfo->InstrInfo.Inv.iSegReg;
    16883     uint8_t const  cbInstr          = pExitInfo->cbInstr;
    16884     RTGCPTR const  GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
    16885     uint64_t const u64InvvpidType   = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
    16886                                     ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
    16887                                     : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
    16888     VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
    16889     Assert(!pVCpu->iem.s.cActiveMappings);
    16890     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16891 }
    16892 
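/*
 * Illustrative only: the INVVPID descriptor is always a memory operand and the
 * invalidation type always comes from the register in InstrInfo.Inv.iReg2, so
 * a hypothetical caller supplies both GCPtrEffAddr and the instruction info
 * (inputs assumed available to the caller):
 */
#if 0 /* example sketch, not built */
    VMXVEXITINFO ExitInfo;
    RT_ZERO(ExitInfo);
    ExitInfo.uReason      = VMX_EXIT_INVVPID; /* VM-exit reason for INVVPID */
    ExitInfo.cbInstr      = cbInstr;
    ExitInfo.InstrInfo.u  = uInstrInfo;       /* Inv.iSegReg and Inv.iReg2 live here */
    ExitInfo.GCPtrEffAddr = GCPtrEffAddr;     /* address of the 128-bit INVVPID descriptor */
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
#endif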
    16893 
    16894 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    16895 /**
    16896  * Interface for HM and EM to emulate the INVEPT instruction.
    16897  *
    16898  * @returns Strict VBox status code.
    16899  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16900  * @param   pExitInfo       Pointer to the VM-exit information.
    16901  * @thread  EMT(pVCpu)
    16902  */
    16903 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvept(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
    16904 {
    16905     Assert(pExitInfo);
    16906     IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
    16907     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    16908 
    16909     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16910 
    16911     uint8_t const  iEffSeg          = pExitInfo->InstrInfo.Inv.iSegReg;
    16912     uint8_t const  cbInstr          = pExitInfo->cbInstr;
    16913     RTGCPTR const  GCPtrInveptDesc  = pExitInfo->GCPtrEffAddr;
    16914     uint64_t const u64InveptType    = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
    16915                                     ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
    16916                                     : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
    16917     VBOXSTRICTRC rcStrict = iemVmxInvept(pVCpu, cbInstr, iEffSeg, GCPtrInveptDesc, u64InveptType, pExitInfo);
    16918     Assert(!pVCpu->iem.s.cActiveMappings);
    16919     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16920 }
    16921 
    16922 
    16923 /**
    16924  * Interface for HM and EM to emulate a VM-exit due to an EPT violation.
    16925  *
    16926  * @returns Strict VBox status code.
    16927  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16928  * @param   pExitInfo       Pointer to the VM-exit information.
    16929  * @param   pExitEventInfo  Pointer to the VM-exit event information.
    16930  * @thread  EMT(pVCpu)
    16931  */
    16932 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo,
    16933                                                         PCVMXVEXITEVENTINFO pExitEventInfo)
    16934 {
    16935     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    16936 
    16937     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16938     VBOXSTRICTRC rcStrict = iemVmxVmexitEptViolationWithInfo(pVCpu, pExitInfo, pExitEventInfo);
    16939     Assert(!pVCpu->iem.s.cActiveMappings);
    16940     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16941 }
    16942 
    16943 
    16944 /**
    16945  * Interface for HM and EM to emulate a VM-exit due to an EPT misconfiguration.
    16946  *
    16947  * @returns Strict VBox status code.
    16948  * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
    16949  * @param   GCPhysAddr      The nested-guest physical address causing the EPT
    16950  *                          misconfiguration.
    16951  * @param   pExitEventInfo  Pointer to the VM-exit event information.
    16952  * @thread  EMT(pVCpu)
    16953  */
    16954 VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo)
    16955 {
    16956     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
    16957 
    16958     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    16959     VBOXSTRICTRC rcStrict = iemVmxVmexitEptMisconfigWithInfo(pVCpu, GCPhysAddr, pExitEventInfo);
    16960     Assert(!pVCpu->iem.s.cActiveMappings);
    16961     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    16962 }
    16963 
    16964 # endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
    16965 
    16966 
    16967 /**
    16968  * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
    16969  *
    16970  * @remarks The @a uUser argument is currently unused.
    16971  */
    16972 PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
    16973                                                            void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
    16974                                                            PGMACCESSORIGIN enmOrigin, uint64_t uUser)
    16975 {
    16976     RT_NOREF3(pvPhys, enmOrigin, uUser);
    16977 
    16978     RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    16979     if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    16980     {
    16981         Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
    16982         Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
    16983 
    16984         uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_DATA_W : IEM_ACCESS_DATA_R;
    16985         uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
    16986         VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
    16987         if (RT_FAILURE(rcStrict))
    16988             return rcStrict;
    16989 
    16990         /* Any access on this APIC-access page has been handled; the caller should not carry out the access. */
    16991         return VINF_SUCCESS;
    16992     }
    16993 
    16994     LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase));
    16995     int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    16996     if (RT_FAILURE(rc))
    16997         return rc;
    16998 
    16999     /* Instruct the caller of this handler to perform the read/write as normal memory. */
    17000     return VINF_PGM_HANDLER_DO_DEFAULT;
    17001 }
    17002 
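/*
 * Illustrative only: the contract of the handler above as seen from a PGM-side
 * caller (variable names are assumptions).  VINF_SUCCESS means the access was
 * fully virtualized; VINF_PGM_HANDLER_DO_DEFAULT means the handler removed
 * itself and the access should hit normal RAM.
 */
#if 0 /* example sketch, not built */
    VBOXSTRICTRC rcStrict = iemVmxApicAccessPageHandler(pVM, pVCpu, GCPhysFault, NULL /*pvPhys*/,
                                                        pvBuf, cbBuf, enmAccessType, enmOrigin, 0 /*uUser*/);
    if (rcStrict == VINF_SUCCESS)
    { /* Access fully handled by the virtual-APIC logic; do not touch the page. */ }
    else if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
    { /* Handler deregistered; perform the read/write as normal memory. */ }
#endif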
    17003 
    17004 # ifndef IN_RING3
    17005 /**
    17006  * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
    17007  *      \#PF access handler callback for guest VMX APIC-access page.}
    17008  */
    17009 DECLCALLBACK(VBOXSTRICTRC) iemVmxApicAccessPagePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame,
    17010                                                          RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
    17012 {
    17013     RT_NOREF4(pVM, pRegFrame, pvFault, uUser);
    17014 
    17015     /*
    17016      * Handle the VMX APIC-access page only when the guest is in VMX non-root mode.
    17017      * Otherwise we must deregister the page and allow regular RAM access.
    17018      * Failing to do so lands us with endless EPT misconfiguration VM-exits.
    17019      */
    17020     RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    17021     if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    17022     {
    17023         Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
    17024         Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
    17025 
    17026         /*
    17027          * Check if the access causes an APIC-access VM-exit.
    17028          */
    17029         uint32_t fAccess;
    17030         if (uErr & X86_TRAP_PF_ID)
    17031             fAccess = IEM_ACCESS_INSTRUCTION;
    17032         else if (uErr & X86_TRAP_PF_RW)
    17033             fAccess = IEM_ACCESS_DATA_W;
    17034         else
    17035             fAccess = IEM_ACCESS_DATA_R;
    17036 
    17037         uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
    17038         bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, 0 /* cbAccess */, fAccess);
    17039         if (fIntercept)
    17040         {
    17041             /*
    17042              * Query the source VM-exit (from the execution engine) that caused this access
    17043              * within the APIC-access page. Currently only HM is supported.
    17044              */
    17045             AssertMsgReturn(VM_IS_HM_ENABLED(pVM),
    17046                             ("VM-exit auxiliary info fetching not supported for execution engine %d\n",
    17047                              pVM->bMainExecutionEngine), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
    17048             HMEXITAUX HmExitAux;
    17049             RT_ZERO(HmExitAux);
    17050             int const rc = HMR0GetExitAuxInfo(pVCpu, &HmExitAux, HMVMX_READ_EXIT_INSTR_LEN
    17051                                                                | HMVMX_READ_EXIT_QUALIFICATION
    17052                                                                | HMVMX_READ_IDT_VECTORING_INFO
    17053                                                                | HMVMX_READ_IDT_VECTORING_ERROR_CODE);
    17054             AssertRCReturn(rc, rc);
    17055 
    17056             /*
    17057              * Verify that the VM-exit reason is an EPT violation.  All other accesses
    17058              * should go through the regular handler (iemVmxApicAccessPageHandler).
    17059              */
    17060             AssertLogRelMsgReturn(HmExitAux.Vmx.uReason == VMX_EXIT_EPT_VIOLATION,
    17061                                   ("Unexpected call to the VMX APIC-access page #PF handler for %#RGp (off=%u) uReason=%#RX32\n",
    17062                                    GCPhysAccessBase, offAccess, HmExitAux.Vmx.uReason), VERR_IEM_IPE_9);
    17063 
    17064             /*
    17065              * Construct the virtual APIC-access VM-exit.
    17066              */
    17067             VMXAPICACCESS enmAccess;
    17068             if (HmExitAux.Vmx.u64Qual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID)
    17069             {
    17070                 if (VMX_IDT_VECTORING_INFO_IS_VALID(HmExitAux.Vmx.uIdtVectoringInfo))
    17071                     enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
    17072                 else if (fAccess == IEM_ACCESS_INSTRUCTION)
    17073                     enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
    17074                 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
    17075                     enmAccess = VMXAPICACCESS_LINEAR_WRITE;
    17076                 else
    17077                     enmAccess = VMXAPICACCESS_LINEAR_READ;
    17078             }
    17079             else
    17080             {
    17081                 if (VMX_IDT_VECTORING_INFO_IS_VALID(HmExitAux.Vmx.uIdtVectoringInfo))
    17082                     enmAccess = VMXAPICACCESS_PHYSICAL_EVENT_DELIVERY;
    17083                 else
    17084                 {
    17085                     /** @todo How to distinguish between monitoring/trace vs other instructions
    17086                      *        here? */
    17087                     enmAccess = VMXAPICACCESS_PHYSICAL_INSTR;
    17088                 }
    17089             }
    17090 
    17091             VMXVEXITINFO ExitInfo;
    17092             RT_ZERO(ExitInfo);
    17093             ExitInfo.uReason = VMX_EXIT_APIC_ACCESS;
    17094             ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
    17095                              | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE,   enmAccess);
    17096             ExitInfo.cbInstr = HmExitAux.Vmx.cbInstr;
    17097 
    17098             VMXVEXITEVENTINFO ExitEventInfo;
    17099             RT_ZERO(ExitEventInfo);
    17100             ExitEventInfo.uIdtVectoringInfo    = HmExitAux.Vmx.uIdtVectoringInfo;
    17101             ExitEventInfo.uIdtVectoringErrCode = HmExitAux.Vmx.uIdtVectoringErrCode;
    17102 
    17103             /*
    17104              * Raise the APIC-access VM-exit.
    17105              */
    17106             VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
    17107             return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    17108         }
    17109 
    17110         /*
    17111          * The access isn't intercepted, which means it needs to be virtualized.
    17112          *
    17113          * This requires emulating the instruction because we need the bytes being
    17114          * read/written by the instruction, not just the offset being accessed within
    17115          * the APIC-access page (which we derive from the faulting address).
    17116          */
    17117         return VINF_EM_RAW_EMULATE_INSTR;
    17118     }
    17119 
    17120     LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase));
    17121     int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    17122     if (RT_FAILURE(rc))
    17123         return rc;
    17124 
    17125     return VINF_SUCCESS;
    17126 }
    17127 # endif /* !IN_RING3 */
    17128 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    17129 
    17130 
    17131 #ifdef IN_RING3
    17132 
    17133 /**
    17134  * Handles the unlikely and probably fatal merge cases.
    17135  *
    17136  * @returns Merged status code.
    17137  * @param   rcStrict        Current EM status code.
    17138  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    17139  *                          with @a rcStrict.
    17140  * @param   iMemMap         The memory mapping index. For error reporting only.
    17141  * @param   pVCpu           The cross context virtual CPU structure of the calling
    17142  *                          thread, for error reporting only.
    17143  */
    17144 DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
    17145                                                           unsigned iMemMap, PVMCPUCC pVCpu)
    17146 {
    17147     if (RT_FAILURE_NP(rcStrict))
    17148         return rcStrict;
    17149 
    17150     if (RT_FAILURE_NP(rcStrictCommit))
    17151         return rcStrictCommit;
    17152 
    17153     if (rcStrict == rcStrictCommit)
    17154         return rcStrictCommit;
    17155 
    17156     AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
    17157                            VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
    17158                            pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
    17159                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
    17160                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    17161     return VERR_IOM_FF_STATUS_IPE;
    17162 }
    17163 
    17164 
    17165 /**
    17166  * Helper for IEMR3ProcessForceFlag.
    17167  *
    17168  * @returns Merged status code.
    17169  * @param   rcStrict        Current EM status code.
    17170  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    17171  *                          with @a rcStrict.
    17172  * @param   iMemMap         The memory mapping index. For error reporting only.
    17173  * @param   pVCpu           The cross context virtual CPU structure of the calling
    17174  *                          thread, for error reporting only.
    17175  */
    17176 DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
    17177 {
    17178     /* Simple. */
    17179     if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
    17180         return rcStrictCommit;
    17181 
    17182     if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
    17183         return rcStrict;
    17184 
    17185     /* EM scheduling status codes. */
    17186     if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
    17187                   && rcStrict <= VINF_EM_LAST))
    17188     {
    17189         if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
    17190                       && rcStrictCommit <= VINF_EM_LAST))
    17191             return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    17192     }
    17193 
    17194     /* Unlikely */
    17195     return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
    17196 }
    17197 
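/*
 * Illustrative only: within the EM status code range the merge picks the
 * numerically smaller value, since higher-priority scheduling codes have lower
 * values.  A sketch with two assumed inputs:
 */
#if 0 /* example sketch, not built */
    VBOXSTRICTRC const rcCurrent = VINF_EM_HALT;       /* hypothetical current status */
    VBOXSTRICTRC const rcCommit  = VINF_EM_RESCHEDULE; /* hypothetical commit status */
    /* Both lie in [VINF_EM_FIRST, VINF_EM_LAST], so the smaller value wins: */
    VBOXSTRICTRC const rcMerged  = rcCurrent < rcCommit ? rcCurrent : rcCommit;
#endif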
    17198 
    17199 /**
    17200  * Called by force-flag handling code when VMCPU_FF_IEM is set.
    17201  *
    17202  * @returns Merge between @a rcStrict and what the commit operation returned.
    17203  * @param   pVM         The cross context VM structure.
    17204  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    17205  * @param   rcStrict    The status code returned by ring-0 or raw-mode.
    17206  */
    17207 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
    17208 {
    17209     /*
    17210      * Reset the pending commit.
    17211      */
    17212     AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
    17213               & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
    17214               ("%#x %#x %#x\n",
    17215                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    17216     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
    17217 
    17218     /*
    17219      * Commit the pending bounce buffers (usually just one).
    17220      */
    17221     unsigned cBufs = 0;
    17222     unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    17223     while (iMemMap-- > 0)
    17224         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
    17225         {
    17226             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    17227             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    17228             Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
    17229 
    17230             uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    17231             uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    17232             uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    17233 
    17234             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
    17235             {
    17236                 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
    17237                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    17238                                                             pbBuf,
    17239                                                             cbFirst,
    17240                                                             PGMACCESSORIGIN_IEM);
    17241                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
    17242                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
    17243                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    17244                      VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
    17245             }
    17246 
    17247             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
    17248             {
    17249                 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
    17250                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    17251                                                             pbBuf + cbFirst,
    17252                                                             cbSecond,
    17253                                                             PGMACCESSORIGIN_IEM);
    17254                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
    17255                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
    17256                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
    17257                      VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
    17258             }
    17259             cBufs++;
    17260             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    17261         }
    17262 
    17263     AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
    17264               ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
    17265                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    17266     pVCpu->iem.s.cActiveMappings = 0;
    17267     return rcStrict;
    17268 }
    17269 
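/*
 * Illustrative only: a hypothetical ring-3 EMT loop fragment showing when the
 * pending bounce-buffer commits above get flushed after a return from ring-0:
 */
#if 0 /* example sketch, not built */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif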
    17270 #endif /* IN_RING3 */
    17271 
     1370 #endif /* !VMM_INCLUDED_SRC_include_IEMMc_h */
     1371