VirtualBox

Changeset 108245 in vbox for trunk/src/VBox/VMM


Timestamp:
Feb 17, 2025 12:13:38 AM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
167566
Message:

VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531

Location:
trunk/src/VBox/VMM
Files:
2 edited
1 copied

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/Makefile.kmk

    r108244 r108245  
    195195        VMMAll/HMVMXAll.cpp \
    196196        VMMAll/IEMAll.cpp \
     197        VMMAll/target-x86/IEMAll-x86.cpp \
    197198        VMMAll/target-x86/IEMAllExec-x86.cpp \
    198199        VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp \
     
    943944        VMMAll/HMVMXAll.cpp \
    944945        VMMAll/IEMAll.cpp \
     946        VMMAll/target-x86/IEMAll-x86.cpp \
    945947        VMMAll/target-x86/IEMAllExec-x86.cpp \
    946948        VMMAll/target-x86/IEMAllOpcodeFetch-x86.cpp \
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r108244 r108245  
    177177size_t g_cbIemWrote;
    178178#endif
    179 
    180 
    181 /**
    182  * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
    183  * path.
    184  *
    185  * This will also invalidate TLB entries for any pages with active data
    186  * breakpoints on them.
    187  *
    188  * @returns IEM_F_BRK_PENDING_XXX or zero.
    189  * @param   pVCpu               The cross context virtual CPU structure of the
    190  *                              calling thread.
    191  *
    192  * @note    Don't call directly, use iemCalcExecDbgFlags instead.
    193  */
    194 uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
    195 {
    196     uint32_t fExec = 0;
    197 
    198     /*
    199      * Helper for invalidating the data TLB for breakpoint addresses.
    200      *
    201      * This is to make sure any access to the page will always trigger a TLB
    202      * load for as long as the breakpoint is enabled.
    203      */
    204 #ifdef IEM_WITH_DATA_TLB
    205 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
    206         RTGCPTR uTagNoRev = (a_uValue); \
    207         uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
    208         /** @todo do large page accounting */ \
    209         uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
    210         if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
    211             pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
    212         if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
    213             pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
    214     } while (0)
    215 #else
    216 # define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
    217 #endif
    218 
    219     /*
    220      * Process guest breakpoints.
    221      */
    222 #define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
    223         if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
    224         { \
    225             switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
    226             { \
    227                 case X86_DR7_RW_EO: \
    228                     fExec |= IEM_F_PENDING_BRK_INSTR; \
    229                     break; \
    230                 case X86_DR7_RW_WO: \
    231                 case X86_DR7_RW_RW: \
    232                     fExec |= IEM_F_PENDING_BRK_DATA; \
    233                     INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
    234                     break; \
    235                 case X86_DR7_RW_IO: \
    236                     fExec |= IEM_F_PENDING_BRK_X86_IO; \
    237                     break; \
    238             } \
    239         } \
    240     } while (0)
    241 
    242     uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
    243     if (fGstDr7 & X86_DR7_ENABLED_MASK)
    244     {
    245 /** @todo extract more details here to simplify matching later. */
    246 #ifdef IEM_WITH_DATA_TLB
    247         IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
    248 #endif
    249         PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
    250         PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
    251         PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
    252         PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
    253     }
    254 
    255     /*
    256      * Process hypervisor breakpoints.
    257      */
    258     PVMCC const    pVM       = pVCpu->CTX_SUFF(pVM);
    259     uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
    260     if (fHyperDr7 & X86_DR7_ENABLED_MASK)
    261     {
    262 /** @todo extract more details here to simplify matching later. */
    263         PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
    264         PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
    265         PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
    266         PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
    267     }
    268 
    269     return fExec;
    270 }
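/* For context, a hedged sketch (an assumption, not code from this changeset) of the
 * fast-path wrapper pattern the doc comment above refers to: iemCalcExecDbgFlags()
 * would only fall back to iemCalcExecDbgFlagsSlow() when breakpoints might actually
 * be armed.  The name and exact guard conditions below are illustrative only. */
DECL_FORCE_INLINE(uint32_t) iemCalcExecDbgFlagsSketch(PVMCPUCC pVCpu) /* hypothetical name */
{
    /* Cheap guard: if neither guest DR7 nor the hypervisor DR7 has any breakpoint
       enabled, there is nothing for the slow path above to compute. */
    if (RT_LIKELY(   !((uint32_t)pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
                  && !(DBGFBpGetDR7(pVCpu->CTX_SUFF(pVM)) & X86_DR7_ENABLED_MASK)))
        return 0;
    return iemCalcExecDbgFlagsSlow(pVCpu);
}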
    271179
    272180
     
    471379
    472380
    473 
    474381/**
    475382 * Prefetch opcodes the first time when starting execution.
     
    760667#endif
    761668}
    762 
    763 
    764 /** @name   Register Access.
    765  * @{
    766  */
    767 
    768 /**
    769  * Adds an 8-bit signed jump offset to RIP/EIP/IP.
    770  *
    771  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    772  * segment limit.
    773  *
    774  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    775  * @param   cbInstr             Instruction size.
    776  * @param   offNextInstr        The offset of the next instruction.
    777  * @param   enmEffOpSize        Effective operand size.
    778  */
    779 VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
    780                                                         IEMMODE enmEffOpSize) RT_NOEXCEPT
    781 {
    782     switch (enmEffOpSize)
    783     {
    784         case IEMMODE_16BIT:
    785         {
    786             uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
    787             if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
    788                           || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
    789                 pVCpu->cpum.GstCtx.rip = uNewIp;
    790             else
    791                 return iemRaiseGeneralProtectionFault0(pVCpu);
    792             break;
    793         }
    794 
    795         case IEMMODE_32BIT:
    796         {
    797             Assert(!IEM_IS_64BIT_CODE(pVCpu));
    798             Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
    799 
    800             uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
    801             if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
    802                 pVCpu->cpum.GstCtx.rip = uNewEip;
    803             else
    804                 return iemRaiseGeneralProtectionFault0(pVCpu);
    805             break;
    806         }
    807 
    808         case IEMMODE_64BIT:
    809         {
    810             Assert(IEM_IS_64BIT_CODE(pVCpu));
    811 
    812             uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    813             if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
    814                 pVCpu->cpum.GstCtx.rip = uNewRip;
    815             else
    816                 return iemRaiseGeneralProtectionFault0(pVCpu);
    817             break;
    818         }
    819 
    820         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    821     }
    822 
    823 #ifndef IEM_WITH_CODE_TLB
    824     /* Flush the prefetch buffer. */
    825     pVCpu->iem.s.cbOpcode = cbInstr;
    826 #endif
    827 
    828     /*
    829      * Clear RF and finish the instruction (maybe raise #DB).
    830      */
    831     return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
    832 }
    833 
    834 
    835 /**
    836  * Adds a 16-bit signed jump offset to RIP/EIP/IP.
    837  *
    838  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    839  * segment limit.
    840  *
    841  * @returns Strict VBox status code.
    842  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    843  * @param   cbInstr             Instruction size.
    844  * @param   offNextInstr        The offset of the next instruction.
    845  */
    846 VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
    847 {
    848     Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
    849 
    850     uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    851     if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
    852                   || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
    853         pVCpu->cpum.GstCtx.rip = uNewIp;
    854     else
    855         return iemRaiseGeneralProtectionFault0(pVCpu);
    856 
    857 #ifndef IEM_WITH_CODE_TLB
    858     /* Flush the prefetch buffer. */
    859     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    860 #endif
    861 
    862     /*
    863      * Clear RF and finish the instruction (maybe raise #DB).
    864      */
    865     return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
    866 }
    867 
    868 
    869 /**
    870  * Adds a 32-bit signed jump offset to RIP/EIP/IP.
    871  *
    872  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
    873  * segment limit.
    874  *
    875  * @returns Strict VBox status code.
    876  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    877  * @param   cbInstr             Instruction size.
    878  * @param   offNextInstr        The offset of the next instruction.
    879  * @param   enmEffOpSize        Effective operand size.
    880  */
    881 VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
    882                                                          IEMMODE enmEffOpSize) RT_NOEXCEPT
    883 {
    884     if (enmEffOpSize == IEMMODE_32BIT)
    885     {
    886         Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
    887 
    888         uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
    889         if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
    890             pVCpu->cpum.GstCtx.rip = uNewEip;
    891         else
    892             return iemRaiseGeneralProtectionFault0(pVCpu);
    893     }
    894     else
    895     {
    896         Assert(enmEffOpSize == IEMMODE_64BIT);
    897 
    898         uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    899         if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
    900             pVCpu->cpum.GstCtx.rip = uNewRip;
    901         else
    902             return iemRaiseGeneralProtectionFault0(pVCpu);
    903     }
    904 
    905 #ifndef IEM_WITH_CODE_TLB
    906     /* Flush the prefetch buffer. */
    907     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
    908 #endif
    909 
    910     /*
    911      * Clear RF and finish the instruction (maybe raise #DB).
    912      */
    913     return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
    914 }
    915 
    916 /** @}  */
    917669
    918670
  • trunk/src/VBox/VMM/VMMAll/target-x86/IEMAll-x86.cpp

    r108244 r108245  
    11/* $Id$ */
    22/** @file
    3  * IEM - Interpreted Execution Manager - All Contexts.
     3 * IEM - Interpreted Execution Manager - x86 target, miscellaneous.
    44 */
    55
     
    2525 * SPDX-License-Identifier: GPL-3.0-only
    2626 */
    27 
    28 
    29 /** @page pg_iem    IEM - Interpreted Execution Manager
    30  *
     31  * The interpreted execution manager (IEM) is for executing short guest code
    32  * sequences that are causing too many exits / virtualization traps.  It will
    33  * also be used to interpret single instructions, thus replacing the selective
    34  * interpreters in EM and IOM.
    35  *
    36  * Design goals:
    37  *      - Relatively small footprint, although we favour speed and correctness
    38  *        over size.
    39  *      - Reasonably fast.
    40  *      - Correctly handle lock prefixed instructions.
    41  *      - Complete instruction set - eventually.
    42  *      - Refactorable into a recompiler, maybe.
    43  *      - Replace EMInterpret*.
    44  *
     45  * Using the existing disassembler has been considered; however, this is thought
    46  * to conflict with speed as the disassembler chews things a bit too much while
    47  * leaving us with a somewhat complicated state to interpret afterwards.
    48  *
    49  *
     50  * The current code is very much a work in progress. You've been warned!
    51  *
    52  *
    53  * @section sec_iem_fpu_instr   FPU Instructions
    54  *
    55  * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
    56  * same or equivalent instructions on the host FPU.  To make life easy, we also
     57  * let the FPU prioritize the unmasked exceptions for us.  This, however, only
     58  * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
    59  * for FPU exception delivery, because with CR0.NE=0 there is a window where we
    60  * can trigger spurious FPU exceptions.
    61  *
    62  * The guest FPU state is not loaded into the host CPU and kept there till we
    63  * leave IEM because the calling conventions have declared an all year open
    64  * season on much of the FPU state.  For instance an innocent looking call to
    65  * memcpy might end up using a whole bunch of XMM or MM registers if the
    66  * particular implementation finds it worthwhile.
    67  *
    68  *
    69  * @section sec_iem_logging     Logging
    70  *
     71  * The IEM code uses the "IEM" log group for the main logging. The different
    72  * logging levels/flags are generally used for the following purposes:
    73  *      - Level 1  (Log)  : Errors, exceptions, interrupts and such major events.
    74  *      - Flow  (LogFlow) : Basic enter/exit IEM state info.
    75  *      - Level 2  (Log2) : ?
    76  *      - Level 3  (Log3) : More detailed enter/exit IEM state info.
    77  *      - Level 4  (Log4) : Decoding mnemonics w/ EIP.
    78  *      - Level 5  (Log5) : Decoding details.
    79  *      - Level 6  (Log6) : Enables/disables the lockstep comparison with REM.
    80  *      - Level 7  (Log7) : iret++ execution logging.
    81  *      - Level 8  (Log8) :
    82  *      - Level 9  (Log9) :
    83  *      - Level 10 (Log10): TLBs.
    84  *      - Level 11 (Log11): Unmasked FPU exceptions.
    85  *
     86  * The "IEM_MEM" log group covers most of the memory-related detail logging,
    87  * except for errors and exceptions:
    88  *      - Level 1  (Log)  : Reads.
    89  *      - Level 2  (Log2) : Read fallbacks.
    90  *      - Level 3  (Log3) : MemMap read.
    91  *      - Level 4  (Log4) : MemMap read fallbacks.
    92  *      - Level 5  (Log5) : Writes
    93  *      - Level 6  (Log6) : Write fallbacks.
    94  *      - Level 7  (Log7) : MemMap writes and read-writes.
    95  *      - Level 8  (Log8) : MemMap write and read-write fallbacks.
    96  *      - Level 9  (Log9) : Stack reads.
    97  *      - Level 10 (Log10): Stack read fallbacks.
    98  *      - Level 11 (Log11): Stack writes.
    99  *      - Level 12 (Log12): Stack write fallbacks.
    100  *      - Flow  (LogFlow) :
    101  *
    102  * The SVM (AMD-V) and VMX (VT-x) code has the following assignments:
    103  *      - Level 1  (Log)  : Errors and other major events.
    104  *      - Flow (LogFlow)  : Misc flow stuff (cleanup?)
    105  *      - Level 2  (Log2) : VM exits.
    106  *
    107  * The syscall logging level assignments:
    108  *      - Level 1: DOS and BIOS.
    109  *      - Level 2: Windows 3.x
    110  *      - Level 3: Linux.
    111  */
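/* Illustrative sketch only, not part of this changeset: how the level and group
 * assignments above translate into log statements, assuming the standard VBox
 * logging macros from VBox/log.h (LOG_GROUP must be defined before the header is
 * included; LogEx() names a log group explicitly, as seen elsewhere in this file). */
#define LOG_GROUP LOG_GROUP_IEM
#include <iprt/types.h>
#include <VBox/log.h>

static void iemLogConventionSketch(uint64_t uRip) /* hypothetical helper, for illustration only */
{
    LogFlow(("iemLogConventionSketch: enter\n"));                           /* Flow:    basic enter/exit state info. */
    Log(("iemLogConventionSketch: raising #GP(0)\n"));                      /* Level 1: errors, exceptions, major events. */
    Log4(("iemLogConventionSketch: decoding at %RX64\n", uRip));            /* Level 4: decoding mnemonics w/ EIP. */
    LogEx(LOG_GROUP_IEM_MEM, ("iemLogConventionSketch: read fallback\n"));  /* Explicitly target the IEM_MEM group. */
}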
    112 
    113 /* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
    114 #ifdef _MSC_VER
    115 # pragma warning(disable:4505)
    116 #endif
    11727
    11828
     
    12737#include <VBox/vmm/iem.h>
    12838#include <VBox/vmm/cpum.h>
    129 #include <VBox/vmm/pdmapic.h>
    130 #include <VBox/vmm/pdm.h>
    131 #include <VBox/vmm/pgm.h>
    132 #include <VBox/vmm/iom.h>
    133 #include <VBox/vmm/em.h>
    134 #include <VBox/vmm/hm.h>
    135 #include <VBox/vmm/nem.h>
    136 #include <VBox/vmm/gcm.h>
    137 #include <VBox/vmm/gim.h>
    138 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    139 # include <VBox/vmm/em.h>
    140 # include <VBox/vmm/hm_svm.h>
    141 #endif
    142 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    143 # include <VBox/vmm/hmvmxinline.h>
    144 #endif
    145 #include <VBox/vmm/tm.h>
    14639#include <VBox/vmm/dbgf.h>
    147 #include <VBox/vmm/dbgftrace.h>
    14840#include "IEMInternal.h"
    14941#include <VBox/vmm/vmcc.h>
     
    15143#include <VBox/err.h>
    15244#include <VBox/param.h>
    153 #include <VBox/dis.h>
    154 #include <iprt/asm-math.h>
    155 #if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    156 # include <iprt/asm-amd64-x86.h>
    157 #elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
    158 # include <iprt/asm-arm.h>
    159 #endif
    16045#include <iprt/assert.h>
    16146#include <iprt/string.h>
     
    16348
    16449#include "IEMInline.h"
    165 #ifdef VBOX_VMM_TARGET_X86
    166 # include "target-x86/IEMAllTlbInline-x86.h"
    167 #endif
    168 
    169 
    170 /*********************************************************************************************************************************
    171 *   Global Variables                                                                                                             *
    172 *********************************************************************************************************************************/
    173 #if defined(IEM_LOG_MEMORY_WRITES)
    174 /** What IEM just wrote. */
    175 uint8_t g_abIemWrote[256];
    176 /** How much IEM just wrote. */
    177 size_t g_cbIemWrote;
    178 #endif
    17950
    18051
     
    271142
    272143
    273 /**
    274  * Initializes the decoder state.
    275  *
    276  * iemReInitDecoder is mostly a copy of this function.
    277  *
    278  * @param   pVCpu               The cross context virtual CPU structure of the
    279  *                              calling thread.
    280  * @param   fExecOpts           Optional execution flags:
    281  *                                  - IEM_F_BYPASS_HANDLERS
    282  *                                  - IEM_F_X86_DISREGARD_LOCK
    283  */
    284 DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, uint32_t fExecOpts)
    285 {
    286     IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    287     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    288     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    289     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    290     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    291     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    292     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    293     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    294     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    295     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    296 
    297     /* Execution state: */
    298     uint32_t fExec;
    299     pVCpu->iem.s.fExec = fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
    300 
    301     /* Decoder state: */
    302     pVCpu->iem.s.enmDefAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    303     pVCpu->iem.s.enmEffAddrMode     = fExec & IEM_F_MODE_CPUMODE_MASK;
    304     if ((fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
    305     {
    306         pVCpu->iem.s.enmDefOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;  /** @todo check if this is correct... */
    307         pVCpu->iem.s.enmEffOpSize   = fExec & IEM_F_MODE_CPUMODE_MASK;
    308     }
    309     else
    310     {
    311         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    312         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    313     }
    314     pVCpu->iem.s.fPrefixes          = 0;
    315     pVCpu->iem.s.uRexReg            = 0;
    316     pVCpu->iem.s.uRexB              = 0;
    317     pVCpu->iem.s.uRexIndex          = 0;
    318     pVCpu->iem.s.idxPrefix          = 0;
    319     pVCpu->iem.s.uVex3rdReg         = 0;
    320     pVCpu->iem.s.uVexLength         = 0;
    321     pVCpu->iem.s.fEvexStuff         = 0;
    322     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
    323 #ifdef IEM_WITH_CODE_TLB
    324     pVCpu->iem.s.pbInstrBuf         = NULL;
    325     pVCpu->iem.s.offInstrNextByte   = 0;
    326     pVCpu->iem.s.offCurInstrStart   = 0;
    327 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    328     pVCpu->iem.s.offOpcode          = 0;
    329 # endif
    330 # ifdef VBOX_STRICT
    331     pVCpu->iem.s.GCPhysInstrBuf     = NIL_RTGCPHYS;
    332     pVCpu->iem.s.cbInstrBuf         = UINT16_MAX;
    333     pVCpu->iem.s.cbInstrBufTotal    = UINT16_MAX;
    334     pVCpu->iem.s.uInstrBufPc        = UINT64_C(0xc0ffc0ffcff0c0ff);
    335 # endif
    336 #else
    337     pVCpu->iem.s.offOpcode          = 0;
    338     pVCpu->iem.s.cbOpcode           = 0;
    339 #endif
    340     pVCpu->iem.s.offModRm           = 0;
    341     pVCpu->iem.s.cActiveMappings    = 0;
    342     pVCpu->iem.s.iNextMapping       = 0;
    343     pVCpu->iem.s.rcPassUp           = VINF_SUCCESS;
    344 
    345 #ifdef DBGFTRACE_ENABLED
    346     switch (IEM_GET_CPU_MODE(pVCpu))
    347     {
    348         case IEMMODE_64BIT:
    349             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
    350             break;
    351         case IEMMODE_32BIT:
    352             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    353             break;
    354         case IEMMODE_16BIT:
    355             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    356             break;
    357     }
    358 #endif
    359 }
    360 
    361 
    362 /**
    363  * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
    364  *
    365  * This is mostly a copy of iemInitDecoder.
    366  *
    367  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    368  */
    369 DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
    370 {
    371     Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    372     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    373     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    374     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    375     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    376     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    377     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    378     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    379     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
    380 
    381     /* ASSUMES: Anyone changing CPU state affecting the fExec bits will update them! */
    382     AssertMsg((pVCpu->iem.s.fExec & ~IEM_F_USER_OPTS) == iemCalcExecFlags(pVCpu),
    383               ("fExec=%#x iemCalcExecModeFlags=%#x\n", pVCpu->iem.s.fExec, iemCalcExecFlags(pVCpu)));
    384 
    385     IEMMODE const enmMode = IEM_GET_CPU_MODE(pVCpu);
    386     pVCpu->iem.s.enmDefAddrMode     = enmMode;  /** @todo check if this is correct... */
    387     pVCpu->iem.s.enmEffAddrMode     = enmMode;
    388     if (enmMode != IEMMODE_64BIT)
    389     {
    390         pVCpu->iem.s.enmDefOpSize   = enmMode;  /** @todo check if this is correct... */
    391         pVCpu->iem.s.enmEffOpSize   = enmMode;
    392     }
    393     else
    394     {
    395         pVCpu->iem.s.enmDefOpSize   = IEMMODE_32BIT;
    396         pVCpu->iem.s.enmEffOpSize   = IEMMODE_32BIT;
    397     }
    398     pVCpu->iem.s.fPrefixes          = 0;
    399     pVCpu->iem.s.uRexReg            = 0;
    400     pVCpu->iem.s.uRexB              = 0;
    401     pVCpu->iem.s.uRexIndex          = 0;
    402     pVCpu->iem.s.idxPrefix          = 0;
    403     pVCpu->iem.s.uVex3rdReg         = 0;
    404     pVCpu->iem.s.uVexLength         = 0;
    405     pVCpu->iem.s.fEvexStuff         = 0;
    406     pVCpu->iem.s.iEffSeg            = X86_SREG_DS;
    407 #ifdef IEM_WITH_CODE_TLB
    408     if (pVCpu->iem.s.pbInstrBuf)
    409     {
    410         uint64_t off = (enmMode == IEMMODE_64BIT
    411                         ? pVCpu->cpum.GstCtx.rip
    412                         : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
    413                      - pVCpu->iem.s.uInstrBufPc;
    414         if (off < pVCpu->iem.s.cbInstrBufTotal)
    415         {
    416             pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
    417             pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
    418             if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
    419                 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
    420             else
    421                 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
    422         }
    423         else
    424         {
    425             pVCpu->iem.s.pbInstrBuf       = NULL;
    426             pVCpu->iem.s.offInstrNextByte = 0;
    427             pVCpu->iem.s.offCurInstrStart = 0;
    428             pVCpu->iem.s.cbInstrBuf       = 0;
    429             pVCpu->iem.s.cbInstrBufTotal  = 0;
    430             pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    431         }
    432     }
    433     else
    434     {
    435         pVCpu->iem.s.offInstrNextByte = 0;
    436         pVCpu->iem.s.offCurInstrStart = 0;
    437         pVCpu->iem.s.cbInstrBuf       = 0;
    438         pVCpu->iem.s.cbInstrBufTotal  = 0;
    439 # ifdef VBOX_STRICT
    440         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    441 # endif
    442     }
    443 # ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    444     pVCpu->iem.s.offOpcode          = 0;
    445 # endif
    446 #else  /* !IEM_WITH_CODE_TLB */
    447     pVCpu->iem.s.cbOpcode           = 0;
    448     pVCpu->iem.s.offOpcode          = 0;
    449 #endif /* !IEM_WITH_CODE_TLB */
    450     pVCpu->iem.s.offModRm           = 0;
    451     Assert(pVCpu->iem.s.cActiveMappings == 0);
    452     pVCpu->iem.s.iNextMapping       = 0;
    453     Assert(pVCpu->iem.s.rcPassUp   == VINF_SUCCESS);
    454     Assert(!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS));
    455 
    456 #ifdef DBGFTRACE_ENABLED
    457     switch (enmMode)
    458     {
    459         case IEMMODE_64BIT:
    460             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.rip);
    461             break;
    462         case IEMMODE_32BIT:
    463             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    464             break;
    465         case IEMMODE_16BIT:
    466             RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", IEM_GET_CPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
    467             break;
    468     }
    469 #endif
    470 }
    471 
    472 
    473 
    474 /**
     475  * Prefetch opcodes the first time when starting execution.
    476  *
    477  * @returns Strict VBox status code.
    478  * @param   pVCpu               The cross context virtual CPU structure of the
    479  *                              calling thread.
    480  * @param   fExecOpts           Optional execution flags:
    481  *                                  - IEM_F_BYPASS_HANDLERS
    482  *                                  - IEM_F_X86_DISREGARD_LOCK
    483  */
    484 DECLINLINE(VBOXSTRICTRC) iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
    485 {
    486     iemInitDecoder(pVCpu, fExecOpts);
    487 
    488 #ifndef IEM_WITH_CODE_TLB
    489     return iemOpcodeFetchPrefetch(pVCpu);
    490 #else
    491     return VINF_SUCCESS;
    492 #endif
    493 }
    494 
    495 
    496 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    497 /**
    498  * Worker for iemTlbInvalidateAll.
    499  */
    500 template<bool a_fGlobal>
    501 DECL_FORCE_INLINE(void) iemTlbInvalidateOne(IEMTLB *pTlb)
    502 {
    503     if (!a_fGlobal)
    504         pTlb->cTlsFlushes++;
    505     else
    506         pTlb->cTlsGlobalFlushes++;
    507 
    508     pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
    509     if (RT_LIKELY(pTlb->uTlbRevision != 0))
    510     { /* very likely */ }
    511     else
    512     {
    513         pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
    514         pTlb->cTlbRevisionRollovers++;
    515         unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
    516         while (i-- > 0)
    517             pTlb->aEntries[i * 2].uTag = 0;
    518     }
    519 
    520     pTlb->cTlbNonGlobalLargePageCurLoads    = 0;
    521     pTlb->NonGlobalLargePageRange.uLastTag  = 0;
    522     pTlb->NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
    523 
    524     if (a_fGlobal)
    525     {
    526         pTlb->uTlbRevisionGlobal += IEMTLB_REVISION_INCR;
    527         if (RT_LIKELY(pTlb->uTlbRevisionGlobal != 0))
    528         { /* very likely */ }
    529         else
    530         {
    531             pTlb->uTlbRevisionGlobal = IEMTLB_REVISION_INCR;
    532             pTlb->cTlbRevisionRollovers++;
    533             unsigned i = RT_ELEMENTS(pTlb->aEntries) / 2;
    534             while (i-- > 0)
    535                 pTlb->aEntries[i * 2 + 1].uTag = 0;
    536         }
    537 
    538         pTlb->cTlbGlobalLargePageCurLoads    = 0;
    539         pTlb->GlobalLargePageRange.uLastTag  = 0;
    540         pTlb->GlobalLargePageRange.uFirstTag = UINT64_MAX;
    541     }
    542 }
    543 #endif
    544 
    545 
    546 /**
    547  * Worker for IEMTlbInvalidateAll and IEMTlbInvalidateAllGlobal.
    548  */
    549 template<bool a_fGlobal>
    550 DECL_FORCE_INLINE(void) iemTlbInvalidateAll(PVMCPUCC pVCpu)
    551 {
    552 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    553     Log10(("IEMTlbInvalidateAll\n"));
    554 
    555 # ifdef IEM_WITH_CODE_TLB
    556     pVCpu->iem.s.cbInstrBufTotal = 0;
    557     iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.CodeTlb);
    558     if (a_fGlobal)
    559         IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, false);
    560     else
    561         IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.CodeTlb.uTlbRevision, false);
    562 # endif
    563 
    564 # ifdef IEM_WITH_DATA_TLB
    565     iemTlbInvalidateOne<a_fGlobal>(&pVCpu->iem.s.DataTlb);
    566     if (a_fGlobal)
    567         IEMTLBTRACE_FLUSH_GLOBAL(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, true);
    568     else
    569         IEMTLBTRACE_FLUSH(pVCpu, pVCpu->iem.s.DataTlb.uTlbRevision, true);
    570 # endif
    571 #else
    572     RT_NOREF(pVCpu);
    573 #endif
    574 }
    575 
    576 
    577 /**
     578  * Invalidates the non-global IEM TLB entries.
    579  *
    580  * This is called internally as well as by PGM when moving GC mappings.
    581  *
    582  * @param   pVCpu       The cross context virtual CPU structure of the calling
    583  *                      thread.
    584  */
    585 VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
    586 {
    587     iemTlbInvalidateAll<false>(pVCpu);
    588 }
    589 
    590 
    591 /**
    592  * Invalidates all the IEM TLB entries.
    593  *
    594  * This is called internally as well as by PGM when moving GC mappings.
    595  *
    596  * @param   pVCpu       The cross context virtual CPU structure of the calling
    597  *                      thread.
    598  */
    599 VMM_INT_DECL(void) IEMTlbInvalidateAllGlobal(PVMCPUCC pVCpu)
    600 {
    601     iemTlbInvalidateAll<true>(pVCpu);
    602 }
    603 
    604 
    605 /**
    606  * Invalidates a page in the TLBs.
    607  *
    608  * @param   pVCpu       The cross context virtual CPU structure of the calling
    609  *                      thread.
    610  * @param   GCPtr       The address of the page to invalidate
    611  * @thread EMT(pVCpu)
    612  */
    613 VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
    614 {
    615     IEMTLBTRACE_INVLPG(pVCpu, GCPtr);
    616 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    617     Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
    618     GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
    619     Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
    620     uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(GCPtr);
    621 
    622 # ifdef IEM_WITH_CODE_TLB
    623     iemTlbInvalidatePageWorker<false>(pVCpu, &pVCpu->iem.s.CodeTlb, GCPtr, idxEven);
    624 # endif
    625 # ifdef IEM_WITH_DATA_TLB
    626     iemTlbInvalidatePageWorker<true>(pVCpu, &pVCpu->iem.s.DataTlb, GCPtr, idxEven);
    627 # endif
    628 #else
    629     NOREF(pVCpu); NOREF(GCPtr);
    630 #endif
    631 }
    632 
    633 
    634 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    635 /**
     636  * Invalidates both TLBs in a slow fashion following a rollover.
    637  *
    638  * Worker for IEMTlbInvalidateAllPhysical,
    639  * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
    640  * iemMemMapJmp and others.
    641  *
    642  * @thread EMT(pVCpu)
    643  */
    644 void iemTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu) RT_NOEXCEPT
    645 {
    646     Log10(("iemTlbInvalidateAllPhysicalSlow\n"));
    647     ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
    648     ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
    649 
    650     unsigned i;
    651 # ifdef IEM_WITH_CODE_TLB
    652     i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
    653     while (i-- > 0)
    654     {
    655         pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3       = NULL;
    656         pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
    657                                                                | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    658     }
    659     pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers++;
    660     pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    661 # endif
    662 # ifdef IEM_WITH_DATA_TLB
    663     i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
    664     while (i-- > 0)
    665     {
    666         pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3       = NULL;
    667         pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(  IEMTLBE_F_PG_NO_WRITE   | IEMTLBE_F_PG_NO_READ
    668                                                                | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
    669     }
    670     pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers++;
    671     pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    672 # endif
    673 
    674 }
    675 #endif
    676 
    677 
    678 /**
    679  * Invalidates the host physical aspects of the IEM TLBs.
    680  *
    681  * This is called internally as well as by PGM when moving GC mappings.
    682  *
    683  * @param   pVCpu       The cross context virtual CPU structure of the calling
    684  *                      thread.
    685  * @note    Currently not used.
    686  */
    687 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
    688 {
    689 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
     690     /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
    691     Log10(("IEMTlbInvalidateAllPhysical\n"));
    692 
    693 # ifdef IEM_WITH_CODE_TLB
    694     pVCpu->iem.s.cbInstrBufTotal = 0;
    695 # endif
    696     uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
    697     if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
    698     {
    699         pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
    700         pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    701         pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
    702         pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    703     }
    704     else
    705         iemTlbInvalidateAllPhysicalSlow(pVCpu);
    706 #else
    707     NOREF(pVCpu);
    708 #endif
    709 }
    710 
    711 
    712 /**
    713  * Invalidates the host physical aspects of the IEM TLBs.
    714  *
    715  * This is called internally as well as by PGM when moving GC mappings.
    716  *
    717  * @param   pVM         The cross context VM structure.
    718  * @param   idCpuCaller The ID of the calling EMT if available to the caller,
    719  *                      otherwise NIL_VMCPUID.
    720  * @param   enmReason   The reason we're called.
    721  *
    722  * @remarks Caller holds the PGM lock.
    723  */
    724 VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller, IEMTLBPHYSFLUSHREASON enmReason)
    725 {
    726 #if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
    727     PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
    728     if (pVCpuCaller)
    729         VMCPU_ASSERT_EMT(pVCpuCaller);
    730     Log10(("IEMTlbInvalidateAllPhysicalAllCpus: %d\n", enmReason)); RT_NOREF(enmReason);
    731 
    732     VMCC_FOR_EACH_VMCPU(pVM)
    733     {
    734 # ifdef IEM_WITH_CODE_TLB
    735         if (pVCpuCaller == pVCpu)
    736             pVCpu->iem.s.cbInstrBufTotal = 0;
    737 # endif
    738 
    739         uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
    740         uint64_t       uTlbPhysRevNew  = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
    741         if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
    742         { /* likely */}
    743         else if (pVCpuCaller != pVCpu)
    744             uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
    745         else
    746         {
    747             iemTlbInvalidateAllPhysicalSlow(pVCpu);
    748             continue;
    749         }
    750         if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
    751             pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes++;
    752 
    753         if (ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev))
    754             pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes++;
    755     }
    756     VMCC_FOR_EACH_VMCPU_END(pVM);
    757 
    758 #else
    759     RT_NOREF(pVM, idCpuCaller, enmReason);
    760 #endif
    761 }
    762 
    763 
    764144/** @name   Register Access.
    765145 * @{
     
    916296/** @}  */
    917297
    918 
    919 /** @name   Memory access.
    920  *
    921  * @{
    922  */
    923 
    924 #undef  LOG_GROUP
    925 #define LOG_GROUP LOG_GROUP_IEM_MEM
    926 
    927 #if 0 /*unused*/
    928 /**
    929  * Looks up a memory mapping entry.
    930  *
    931  * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
    932  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    933  * @param   pvMem           The memory address.
    934  * @param   fAccess         The access to.
    935  */
    936 DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
    937 {
    938     Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
    939     fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
    940     if (   pVCpu->iem.s.aMemMappings[0].pv == pvMem
    941         && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    942         return 0;
    943     if (   pVCpu->iem.s.aMemMappings[1].pv == pvMem
    944         && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    945         return 1;
    946     if (   pVCpu->iem.s.aMemMappings[2].pv == pvMem
    947         && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
    948         return 2;
    949     return VERR_NOT_FOUND;
    950 }
    951 #endif
    952 
    953 /**
    954  * Finds a free memmap entry when using iNextMapping doesn't work.
    955  *
    956  * @returns Memory mapping index, 1024 on failure.
    957  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    958  */
    959 static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
    960 {
    961     /*
    962      * The easy case.
    963      */
    964     if (pVCpu->iem.s.cActiveMappings == 0)
    965     {
    966         pVCpu->iem.s.iNextMapping = 1;
    967         return 0;
    968     }
    969 
    970     /* There should be enough mappings for all instructions. */
    971     AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
    972 
    973     for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
    974         if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
    975             return i;
    976 
    977     AssertFailedReturn(1024);
    978 }
    979 
    980 
    981 /**
    982  * Commits a bounce buffer that needs writing back and unmaps it.
    983  *
    984  * @returns Strict VBox status code.
    985  * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
    986  * @param   iMemMap         The index of the buffer to commit.
    987  * @param   fPostponeFail   Whether we can postpone writer failures to ring-3.
    988  *                          Always false in ring-3, obviously.
    989  */
    990 static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
    991 {
    992     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    993     Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    994 #ifdef IN_RING3
    995     Assert(!fPostponeFail);
    996     RT_NOREF_PV(fPostponeFail);
    997 #endif
    998 
    999     /*
    1000      * Do the writing.
    1001      */
    1002     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    1003     if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
    1004     {
    1005         uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    1006         uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    1007         uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    1008         if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    1009         {
    1010             /*
    1011              * Carefully and efficiently dealing with access handler return
    1012              * codes make this a little bloated.
     1013              * codes makes this a little bloated.
    1014             VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
    1015                                                  pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    1016                                                  pbBuf,
    1017                                                  cbFirst,
    1018                                                  PGMACCESSORIGIN_IEM);
    1019             if (rcStrict == VINF_SUCCESS)
    1020             {
    1021                 if (cbSecond)
    1022                 {
    1023                     rcStrict = PGMPhysWrite(pVM,
    1024                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    1025                                             pbBuf + cbFirst,
    1026                                             cbSecond,
    1027                                             PGMACCESSORIGIN_IEM);
    1028                     if (rcStrict == VINF_SUCCESS)
    1029                     { /* nothing */ }
    1030                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1031                     {
    1032                         LogEx(LOG_GROUP_IEM,
    1033                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
    1034                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    1035                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    1036                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1037                     }
    1038 #ifndef IN_RING3
    1039                     else if (fPostponeFail)
    1040                     {
    1041                         LogEx(LOG_GROUP_IEM,
    1042                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    1043                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    1044                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    1045                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    1046                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    1047                         return iemSetPassUpStatus(pVCpu, rcStrict);
    1048                     }
    1049 #endif
    1050                     else
    1051                     {
    1052                         LogEx(LOG_GROUP_IEM,
    1053                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    1054                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    1055                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    1056                         return rcStrict;
    1057                     }
    1058                 }
    1059             }
    1060             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1061             {
    1062                 if (!cbSecond)
    1063                 {
    1064                     LogEx(LOG_GROUP_IEM,
    1065                           ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
    1066                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    1067                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1068                 }
    1069                 else
    1070                 {
    1071                     VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
    1072                                                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    1073                                                           pbBuf + cbFirst,
    1074                                                           cbSecond,
    1075                                                           PGMACCESSORIGIN_IEM);
    1076                     if (rcStrict2 == VINF_SUCCESS)
    1077                     {
    1078                         LogEx(LOG_GROUP_IEM,
    1079                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
    1080                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    1081                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    1082                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1083                     }
    1084                     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    1085                     {
    1086                         LogEx(LOG_GROUP_IEM,
    1087                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
    1088                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    1089                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    1090                         PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    1091                         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1092                     }
    1093 #ifndef IN_RING3
    1094                     else if (fPostponeFail)
    1095                     {
    1096                         LogEx(LOG_GROUP_IEM,
    1097                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    1098                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    1099                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    1100                         pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
    1101                         VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    1102                         return iemSetPassUpStatus(pVCpu, rcStrict);
    1103                     }
    1104 #endif
    1105                     else
    1106                     {
    1107                         LogEx(LOG_GROUP_IEM,
    1108                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    1109                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    1110                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
    1111                         return rcStrict2;
    1112                     }
    1113                 }
    1114             }
    1115 #ifndef IN_RING3
    1116             else if (fPostponeFail)
    1117             {
    1118                 LogEx(LOG_GROUP_IEM,
    1119                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
    1120                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    1121                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    1122                 if (!cbSecond)
    1123                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
    1124                 else
    1125                     pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
    1126                 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
    1127                 return iemSetPassUpStatus(pVCpu, rcStrict);
    1128             }
    1129 #endif
    1130             else
    1131             {
    1132                 LogEx(LOG_GROUP_IEM,
    1133                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    1134                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
    1135                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    1136                 return rcStrict;
    1137             }
    1138         }
    1139         else
    1140         {
    1141             /*
    1142              * No access handlers, much simpler.
    1143              */
    1144             int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
    1145             if (RT_SUCCESS(rc))
    1146             {
    1147                 if (cbSecond)
    1148                 {
    1149                     rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
    1150                     if (RT_SUCCESS(rc))
    1151                     { /* likely */ }
    1152                     else
    1153                     {
    1154                         LogEx(LOG_GROUP_IEM,
    1155                               ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
    1156                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    1157                                pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
    1158                         return rc;
    1159                     }
    1160                 }
    1161             }
    1162             else
    1163             {
    1164                 LogEx(LOG_GROUP_IEM,
    1165                       ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
    1166                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
    1167                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
    1168                 return rc;
    1169             }
    1170         }
    1171     }
    1172 
    1173 #if defined(IEM_LOG_MEMORY_WRITES)
    1174     Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    1175           RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
    1176     if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
    1177         Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    1178               RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
    1179               &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
    1180 
    1181     size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    1182     g_cbIemWrote = cbWrote;
    1183     memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
    1184 #endif
    1185 
    1186     /*
    1187      * Free the mapping entry.
    1188      */
    1189     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1190     Assert(pVCpu->iem.s.cActiveMappings != 0);
    1191     pVCpu->iem.s.cActiveMappings--;
    1192     return VINF_SUCCESS;
    1193 }
    1194 
    1195 
    1196 /**
    1197  * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
    1198  * @todo duplicated
    1199  */
    1200 DECL_FORCE_INLINE(uint32_t)
    1201 iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
    1202 {
    1203     bool const  fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
    1204     if (fAccess & IEM_ACCESS_TYPE_WRITE)
    1205         return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    1206     return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    1207 }
    1208 
    1209 
    1210 /**
    1211  * iemMemMap worker that deals with a request crossing pages.
    1212  */
    1213 VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
    1214                                             size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) RT_NOEXCEPT
    1215 {
    1216     STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
    1217     Assert(cbMem <= GUEST_PAGE_SIZE);
    1218 
    1219     /*
    1220      * Do the address translations.
    1221      */
    1222     uint32_t const cbFirstPage  = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    1223     RTGCPHYS GCPhysFirst;
    1224     VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
    1225     if (rcStrict != VINF_SUCCESS)
    1226         return rcStrict;
    1227     Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
    1228 
    1229     uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
    1230     RTGCPHYS GCPhysSecond;
    1231     rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
    1232                                                  cbSecondPage, fAccess, &GCPhysSecond);
    1233     if (rcStrict != VINF_SUCCESS)
    1234         return rcStrict;
    1235     Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
    1236     GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
    1237 
    1238     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    1239 
    1240     /*
    1241      * Check for data breakpoints.
    1242      */
    1243     if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
    1244     { /* likely */ }
    1245     else
    1246     {
    1247         uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
    1248         fDataBps         |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
    1249                                                       cbSecondPage, fAccess);
    1250         pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
    1251         if (fDataBps > 1)
    1252             LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
    1253                                   fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    1254     }
    1255 
    1256     /*
    1257      * Read in the current memory content if it's a read, execute or partial
    1258      * write access.
    1259      */
    1260     uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    1261 
    1262     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    1263     {
    1264         if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    1265         {
    1266             /*
    1267              * Must carefully deal with access handler status codes here,
    1268              * makes the code a bit bloated.
    1269              */
    1270             rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
    1271             if (rcStrict == VINF_SUCCESS)
    1272             {
    1273                 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    1274                 if (rcStrict == VINF_SUCCESS)
    1275                 { /*likely */ }
    1276                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1277                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1278                 else
    1279                 {
    1280                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
    1281                                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
    1282                     return rcStrict;
    1283                 }
    1284             }
    1285             else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1286             {
    1287                 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
    1288                 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
    1289                 {
    1290                     PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
    1291                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1292                 }
    1293                 else
    1294                 {
    1295                     LogEx(LOG_GROUP_IEM,
    1296                           ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
     1297                            GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
    1298                     return rcStrict2;
    1299                 }
    1300             }
    1301             else
    1302             {
    1303                 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    1304                                       GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    1305                 return rcStrict;
    1306             }
    1307         }
    1308         else
    1309         {
    1310             /*
     1311              * No informational status codes here, much more straightforward.
    1312              */
    1313             int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
    1314             if (RT_SUCCESS(rc))
    1315             {
    1316                 Assert(rc == VINF_SUCCESS);
    1317                 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
    1318                 if (RT_SUCCESS(rc))
    1319                     Assert(rc == VINF_SUCCESS);
    1320                 else
    1321                 {
    1322                     LogEx(LOG_GROUP_IEM,
    1323                           ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
    1324                     return rc;
    1325                 }
    1326             }
    1327             else
    1328             {
    1329                 LogEx(LOG_GROUP_IEM,
    1330                       ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
    1331                 return rc;
    1332             }
    1333         }
    1334     }
    1335 #ifdef VBOX_STRICT
    1336     else
    1337         memset(pbBuf, 0xcc, cbMem);
    1338     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    1339         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    1340 #endif
    1341     AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
    1342 
    1343     /*
    1344      * Commit the bounce buffer entry.
    1345      */
    1346     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    1347     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = GCPhysSecond;
    1348     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbFirstPage;
    1349     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = (uint16_t)cbSecondPage;
    1350     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = false;
    1351     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    1352     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    1353     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    1354     pVCpu->iem.s.cActiveMappings++;
    1355 
    1356     *ppvMem = pbBuf;
    1357     *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    1358     return VINF_SUCCESS;
    1359 }
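/* Illustrative sketch (not part of IEM; the helper names below are invented here) of the
   *pbUnmapInfo encoding assembled just above: bits 0-2 hold the mapping index (iMemMap),
   bit 3 (0x08) marks the value as referring to a live mapping, and bits 4-7 carry the
   IEM_ACCESS_TYPE_MASK portion of fAccess. */
static inline uint8_t sketchPackUnmapInfo(unsigned iMemMap, uint8_t fAccessType)
{
    /* Mirrors: iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4). */
    return (uint8_t)((iMemMap & 0x7) | 0x08 | ((unsigned)fAccessType << 4));
}

static inline void sketchUnpackUnmapInfo(uint8_t bUnmapInfo, unsigned *piMemMap, uint8_t *pfAccessType)
{
    /* Mirrors the validation done by iemMemCommitAndUnmap / iemMemRollbackAndUnmap below. */
    *piMemMap     = bUnmapInfo & 0x7;           /* index into aMemMappings / aMemBbMappings */
    *pfAccessType = (uint8_t)(bUnmapInfo >> 4); /* compared against the entry's fAccess     */
}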
    1360 
    1361 
    1362 /**
     1363  * iemMemMap worker that deals with iemMemPageMap failures.
    1364  */
    1365 VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
    1366                                        RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) RT_NOEXCEPT
    1367 {
    1368     STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
    1369 
    1370     /*
    1371      * Filter out conditions we can handle and the ones which shouldn't happen.
    1372      */
    1373     if (   rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
    1374         && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
    1375         && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
    1376     {
    1377         AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
    1378         return rcMap;
    1379     }
    1380     pVCpu->iem.s.cPotentialExits++;
    1381 
    1382     /*
    1383      * Read in the current memory content if it's a read, execute or partial
    1384      * write access.
    1385      */
    1386     uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    1387     if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    1388     {
    1389         if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
    1390             memset(pbBuf, 0xff, cbMem);
    1391         else
    1392         {
    1393             int rc;
    1394             if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
    1395             {
    1396                 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
    1397                 if (rcStrict == VINF_SUCCESS)
    1398                 { /* nothing */ }
    1399                 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    1400                     rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
    1401                 else
    1402                 {
    1403                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    1404                                           GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
    1405                     return rcStrict;
    1406                 }
    1407             }
    1408             else
    1409             {
    1410                 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
    1411                 if (RT_SUCCESS(rc))
    1412                 { /* likely */ }
    1413                 else
    1414                 {
    1415                     LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
    1416                                           GCPhysFirst, rc));
    1417                     return rc;
    1418                 }
    1419             }
    1420         }
    1421     }
    1422 #ifdef VBOX_STRICT
    1423     else
    1424         memset(pbBuf, 0xcc, cbMem);
    1425 #endif
    1426 #ifdef VBOX_STRICT
    1427     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
    1428         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
    1429 #endif
    1430 
    1431     /*
    1432      * Commit the bounce buffer entry.
    1433      */
    1434     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst    = GCPhysFirst;
    1435     pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond   = NIL_RTGCPHYS;
    1436     pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst        = (uint16_t)cbMem;
    1437     pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond       = 0;
    1438     pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned    = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
    1439     pVCpu->iem.s.aMemMappings[iMemMap].pv               = pbBuf;
    1440     pVCpu->iem.s.aMemMappings[iMemMap].fAccess          = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    1441     pVCpu->iem.s.iNextMapping = iMemMap + 1;
    1442     pVCpu->iem.s.cActiveMappings++;
    1443 
    1444     *ppvMem = pbBuf;
    1445     *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    1446     return VINF_SUCCESS;
    1447 }
    1448 
    1449 
    1450 
    1451 /**
    1452  * Commits the guest memory if bounce buffered and unmaps it.
    1453  *
    1454  * @returns Strict VBox status code.
    1455  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1456  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    1457  */
    1458 VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    1459 {
    1460     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    1461     AssertMsgReturn(   (bUnmapInfo & 0x08)
    1462                     && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    1463                     && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
    1464                     ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
    1465                     VERR_NOT_FOUND);
    1466 
    1467     /* If it's bounce buffered, we may need to write back the buffer. */
    1468     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    1469     {
    1470         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    1471             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    1472     }
    1473     /* Otherwise unlock it. */
    1474     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    1475         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1476 
    1477     /* Free the entry. */
    1478     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1479     Assert(pVCpu->iem.s.cActiveMappings != 0);
    1480     pVCpu->iem.s.cActiveMappings--;
    1481     return VINF_SUCCESS;
    1482 }
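/* Illustrative caller-side pairing of the commit and rollback unmap paths (this function
   and iemMemRollbackAndUnmap below); a sketch only, assuming pvMem and bUnmapInfo were
   produced earlier by one of the iemMemMap* / iemMemBounceBufferMap* workers.
   fEverythingWentFine is a placeholder, not an IEM variable. */
#if 0
    void        *pvMem      = NULL;
    uint8_t      bUnmapInfo = 0;
    /* ... a mapping worker fills pvMem + bUnmapInfo and bumps cActiveMappings ... */
    if (fEverythingWentFine)
        rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);   /* write back any bounce buffer, unlock, free the entry */
    else
        iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);            /* discard: unlock and free the entry without committing */
#endif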
    1483 
    1484 
    1485 /**
    1486  * Rolls back the guest memory (conceptually only) and unmaps it.
    1487  *
    1488  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    1489  * @param   bUnmapInfo          Unmap info set by iemMemMap.
    1490  */
    1491 void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    1492 {
    1493     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    1494     AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
    1495                         && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    1496                         &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    1497                            == ((unsigned)bUnmapInfo >> 4),
    1498                         ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    1499 
    1500     /* Unlock it if necessary. */
    1501     if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    1502         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1503 
    1504     /* Free the entry. */
    1505     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1506     Assert(pVCpu->iem.s.cActiveMappings != 0);
    1507     pVCpu->iem.s.cActiveMappings--;
    1508 }
    1509 
    1510 #ifdef IEM_WITH_SETJMP
    1511 
    1512 /**
    1513  * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
    1514  *
    1515  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     1516  * @param   bUnmapInfo          Unmap info set by iemMemMap, identifying the
     1517  *                              mapping to commit and unmap.
    1518  */
    1519 void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1520 {
    1521     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    1522     AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
    1523                         && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    1524                         &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    1525                            == ((unsigned)bUnmapInfo >> 4),
    1526                         ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
    1527 
    1528     /* If it's bounce buffered, we may need to write back the buffer. */
    1529     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    1530     {
    1531         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    1532         {
    1533             VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    1534             if (rcStrict == VINF_SUCCESS)
    1535                 return;
    1536             IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
    1537         }
    1538     }
    1539     /* Otherwise unlock it. */
    1540     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    1541         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1542 
    1543     /* Free the entry. */
    1544     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1545     Assert(pVCpu->iem.s.cActiveMappings != 0);
    1546     pVCpu->iem.s.cActiveMappings--;
    1547 }
    1548 
    1549 
    1550 /** Fallback for iemMemCommitAndUnmapRwJmp.  */
    1551 void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1552 {
    1553     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    1554     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1555 }
    1556 
    1557 
    1558 /** Fallback for iemMemCommitAndUnmapAtJmp.  */
    1559 void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1560 {
    1561     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    1562     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1563 }
    1564 
    1565 
    1566 /** Fallback for iemMemCommitAndUnmapWoJmp.  */
    1567 void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1568 {
    1569     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    1570     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1571 }
    1572 
    1573 
    1574 /** Fallback for iemMemCommitAndUnmapRoJmp.  */
    1575 void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
    1576 {
    1577     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
    1578     iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    1579 }
    1580 
    1581 
    1582 /** Fallback for iemMemRollbackAndUnmapWo.  */
    1583 void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    1584 {
    1585     Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    1586     iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
    1587 }
    1588 
    1589 #endif /* IEM_WITH_SETJMP */
    1590 
    1591 #ifndef IN_RING3
    1592 /**
    1593  * Commits the guest memory if bounce buffered and unmaps it, if any bounce
     1594  * Commits the guest memory if bounce buffered and unmaps it.  If any bounce
     1595  * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
    1596  * Allows the instruction to be completed and retired, while the IEM user will
    1597  * return to ring-3 immediately afterwards and do the postponed writes there.
    1598  *
    1599  * @returns VBox status code (no strict statuses).  Caller must check
    1600  *          VMCPU_FF_IEM before repeating string instructions and similar stuff.
    1601  * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
     1602  * @param   bUnmapInfo          Unmap info set by iemMemMap, identifying the
     1603  *                              mapping to commit and unmap.
    1604  */
    1605 VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
    1606 {
    1607     uintptr_t const iMemMap = bUnmapInfo & 0x7;
    1608     AssertMsgReturn(   (bUnmapInfo & 0x08)
    1609                     && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
    1610                     &&    (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
    1611                        == ((unsigned)bUnmapInfo >> 4),
    1612                     ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
    1613                     VERR_NOT_FOUND);
    1614 
    1615     /* If it's bounce buffered, we may need to write back the buffer. */
    1616     if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    1617     {
    1618         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
    1619             return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
    1620     }
    1621     /* Otherwise unlock it. */
    1622     else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
    1623         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1624 
    1625     /* Free the entry. */
    1626     pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1627     Assert(pVCpu->iem.s.cActiveMappings != 0);
    1628     pVCpu->iem.s.cActiveMappings--;
    1629     return VINF_SUCCESS;
    1630 }
    1631 #endif
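/* Sketch of the contract described above (not actual VBox code; fStopRepeating is a
   placeholder): the caller may retire the current iteration but must re-check VMCPU_FF_IEM
   before starting the next repetition of e.g. a string instruction. */
#if 0
    rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, bUnmapInfo);
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        fStopRepeating = true;  /* ring-3 must flush the postponed write before we continue */
#endif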
    1632 
    1633 
    1634 /**
     1635  * Rolls back mappings, releasing page locks and such.
    1636  *
    1637  * The caller shall only call this after checking cActiveMappings.
    1638  *
    1639  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    1640  */
    1641 void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
    1642 {
    1643     Assert(pVCpu->iem.s.cActiveMappings > 0);
    1644 
    1645     uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    1646     while (iMemMap-- > 0)
    1647     {
    1648         uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
    1649         if (fAccess != IEM_ACCESS_INVALID)
    1650         {
    1651             AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
    1652             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    1653             if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
    1654                 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
    1655             AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
    1656                       ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
    1657                        iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
    1658                        pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
    1659             pVCpu->iem.s.cActiveMappings--;
    1660         }
    1661     }
    1662 }
    1663 
    1664 #undef  LOG_GROUP
    1665 #define LOG_GROUP LOG_GROUP_IEM
    1666 
    1667 /** @} */
    1668 
    1669 
    1670 #ifdef LOG_ENABLED
    1671 /**
    1672  * Logs the current instruction.
    1673  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    1674  * @param   fSameCtx    Set if we have the same context information as the VMM,
    1675  *                      clear if we may have already executed an instruction in
    1676  *                      our debug context. When clear, we assume IEMCPU holds
    1677  *                      valid CPU mode info.
    1678  *
    1679  *                      The @a fSameCtx parameter is now misleading and obsolete.
    1680  * @param   pszFunction The IEM function doing the execution.
    1681  */
    1682 static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
    1683 {
    1684 # ifdef IN_RING3
    1685     if (LogIs2Enabled())
    1686     {
    1687         char     szInstr[256];
    1688         uint32_t cbInstr = 0;
    1689         if (fSameCtx)
    1690             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
    1691                                DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
    1692                                szInstr, sizeof(szInstr), &cbInstr);
    1693         else
    1694         {
    1695             uint32_t fFlags = 0;
    1696             switch (IEM_GET_CPU_MODE(pVCpu))
    1697             {
    1698                 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
    1699                 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
    1700                 case IEMMODE_16BIT:
    1701                     if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
    1702                         fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
    1703                     else
    1704                         fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
    1705                     break;
    1706             }
    1707             DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
    1708                                szInstr, sizeof(szInstr), &cbInstr);
    1709         }
    1710 
    1711         PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
    1712         Log2(("**** %s fExec=%x\n"
    1713               " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
    1714               " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
    1715               " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
    1716               " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
    1717               " %s\n"
    1718               , pszFunction, pVCpu->iem.s.fExec,
    1719               pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
    1720               pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
    1721               pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
    1722               pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
    1723               pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
    1724               szInstr));
    1725 
    1726         /* This stuff sucks atm. as it fills the log with MSRs. */
    1727         //if (LogIs3Enabled())
    1728         //    DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
    1729     }
    1730     else
    1731 # endif
    1732         LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
    1733                  pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
    1734     RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
    1735 }
    1736 #endif /* LOG_ENABLED */
    1737 
    1738 
    1739 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1740 /**
    1741  * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
    1742  * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
    1743  *
    1744  * @returns Modified rcStrict.
    1745  * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
    1746  * @param   rcStrict    The instruction execution status.
    1747  */
    1748 static VBOXSTRICTRC iemHandleNestedInstructionBoundaryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
    1749 {
    1750     Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
    1751     if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
    1752     {
    1753         /* VMX preemption timer takes priority over NMI-window exits. */
    1754         if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
    1755         {
    1756             rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
    1757             Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
    1758         }
    1759         /*
    1760          * Check remaining intercepts.
    1761          *
    1762          * NMI-window and Interrupt-window VM-exits.
    1763          * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
    1764          * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
    1765          *
    1766          * See Intel spec. 26.7.6 "NMI-Window Exiting".
    1767          * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
    1768          */
    1769         else if (   VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
    1770                  && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    1771                  && !TRPMHasTrap(pVCpu))
    1772         {
    1773             Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
    1774             if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
    1775                 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
    1776             {
    1777                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
    1778                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
    1779             }
    1780             else if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
    1781                      && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
    1782             {
    1783                 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
    1784                 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
    1785             }
    1786         }
    1787     }
    1788     /* TPR-below threshold/APIC write has the highest priority. */
    1789     else  if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
    1790     {
    1791         rcStrict = iemVmxApicWriteEmulation(pVCpu);
    1792         Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    1793         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
    1794     }
    1795     /* MTF takes priority over VMX-preemption timer. */
    1796     else
    1797     {
    1798         rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
    1799         Assert(!CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx));
    1800         Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
    1801     }
    1802     return rcStrict;
    1803 }
    1804 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
    1805 
    1806 
    1807 /**
    1808  * The actual code execution bits of IEMExecOne, IEMExecOneWithPrefetchedByPC,
    1809  * IEMExecOneBypass and friends.
    1810  *
    1811  * Similar code is found in IEMExecLots.
    1812  *
    1813  * @return  Strict VBox status code.
    1814  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    1815  * @param   fExecuteInhibit     If set, execute the instruction following CLI,
    1816  *                      POP SS and MOV SS,GR.
    1817  * @param   pszFunction The calling function name.
    1818  */
    1819 DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
    1820 {
    1821     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    1822     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    1823     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    1824     RT_NOREF_PV(pszFunction);
    1825 
    1826 #ifdef IEM_WITH_SETJMP
    1827     VBOXSTRICTRC rcStrict;
    1828     IEM_TRY_SETJMP(pVCpu, rcStrict)
    1829     {
    1830         uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1831         rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1832     }
    1833     IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    1834     {
    1835         pVCpu->iem.s.cLongJumps++;
    1836     }
    1837     IEM_CATCH_LONGJMP_END(pVCpu);
    1838 #else
    1839     uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1840     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1841 #endif
    1842     if (rcStrict == VINF_SUCCESS)
    1843         pVCpu->iem.s.cInstructions++;
    1844     if (pVCpu->iem.s.cActiveMappings > 0)
    1845     {
    1846         Assert(rcStrict != VINF_SUCCESS);
    1847         iemMemRollback(pVCpu);
    1848     }
    1849     AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    1850     AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    1851     AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    1852 
    1853 //#ifdef DEBUG
    1854 //    AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
    1855 //#endif
    1856 
    1857 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1858     /*
    1859      * Perform any VMX nested-guest instruction boundary actions.
    1860      *
    1861      * If any of these causes a VM-exit, we must skip executing the next
    1862      * instruction (would run into stale page tables). A VM-exit makes sure
    1863      * there is no interrupt-inhibition, so that should ensure we don't go
    1864      * to try execute the next instruction. Clearing fExecuteInhibit is
    1865      * problematic because of the setjmp/longjmp clobbering above.
    1866      */
    1867     if (   !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    1868                                      | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)
    1869         || rcStrict != VINF_SUCCESS)
    1870     { /* likely */ }
    1871     else
    1872         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    1873 #endif
    1874 
    1875     /* Execute the next instruction as well if a cli, pop ss or
    1876        mov ss, Gr has just completed successfully. */
    1877     if (   fExecuteInhibit
    1878         && rcStrict == VINF_SUCCESS
    1879         && CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
    1880     {
    1881         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fExec & (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK));
    1882         if (rcStrict == VINF_SUCCESS)
    1883         {
    1884 #ifdef LOG_ENABLED
    1885             iemLogCurInstr(pVCpu, false, pszFunction);
    1886 #endif
    1887 #ifdef IEM_WITH_SETJMP
    1888             IEM_TRY_SETJMP_AGAIN(pVCpu, rcStrict)
    1889             {
    1890                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    1891                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1892             }
    1893             IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    1894             {
    1895                 pVCpu->iem.s.cLongJumps++;
    1896             }
    1897             IEM_CATCH_LONGJMP_END(pVCpu);
    1898 #else
    1899             IEM_OPCODE_GET_FIRST_U8(&b);
    1900             rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    1901 #endif
    1902             if (rcStrict == VINF_SUCCESS)
    1903             {
    1904                 pVCpu->iem.s.cInstructions++;
    1905 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    1906                 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    1907                                               | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
    1908                 { /* likely */ }
    1909                 else
    1910                     rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    1911 #endif
    1912             }
    1913             if (pVCpu->iem.s.cActiveMappings > 0)
    1914             {
    1915                 Assert(rcStrict != VINF_SUCCESS);
    1916                 iemMemRollback(pVCpu);
    1917             }
    1918             AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
    1919             AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
    1920             AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
    1921         }
    1922         else if (pVCpu->iem.s.cActiveMappings > 0)
    1923             iemMemRollback(pVCpu);
    1924         /** @todo drop this after we bake this change into RIP advancing. */
    1925         CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx); /* hope this is correct for all exceptional cases... */
    1926     }
    1927 
    1928     /*
    1929      * Return value fiddling, statistics and sanity assertions.
    1930      */
    1931     rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    1932 
    1933     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    1934     Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    1935     return rcStrict;
    1936 }
    1937 
    1938 
    1939 /**
    1940  * Execute one instruction.
    1941  *
    1942  * @return  Strict VBox status code.
    1943  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    1944  */
    1945 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
    1946 {
     1947     AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
    1948 #ifdef LOG_ENABLED
    1949     iemLogCurInstr(pVCpu, true, "IEMExecOne");
    1950 #endif
    1951 
    1952     /*
    1953      * Do the decoding and emulation.
    1954      */
    1955     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    1956     if (rcStrict == VINF_SUCCESS)
    1957         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
    1958     else if (pVCpu->iem.s.cActiveMappings > 0)
    1959         iemMemRollback(pVCpu);
    1960 
    1961     if (rcStrict != VINF_SUCCESS)
    1962         LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    1963                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    1964     return rcStrict;
    1965 }
    1966 
    1967 
    1968 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    1969                                                         const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    1970 {
    1971     VBOXSTRICTRC rcStrict;
    1972     if (   cbOpcodeBytes
    1973         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    1974     {
    1975         iemInitDecoder(pVCpu, 0 /*fExecOpts*/);
    1976 #ifdef IEM_WITH_CODE_TLB
    1977         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    1978         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    1979         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    1980         pVCpu->iem.s.offCurInstrStart = 0;
    1981         pVCpu->iem.s.offInstrNextByte = 0;
    1982         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    1983 #else
    1984         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    1985         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    1986 #endif
    1987         rcStrict = VINF_SUCCESS;
    1988     }
    1989     else
    1990         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    1991     if (rcStrict == VINF_SUCCESS)
    1992         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
    1993     else if (pVCpu->iem.s.cActiveMappings > 0)
    1994         iemMemRollback(pVCpu);
    1995 
    1996     return rcStrict;
    1997 }
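/* Hedged usage sketch: the prefetched bytes are only used when OpcodeBytesPC matches the
   current RIP (see the check at the top of the function), otherwise the call falls back to
   normal opcode prefetching.  abOpcode/cbOpcode stand in for bytes the caller already fetched. */
#if 0
    uint8_t abOpcode[16];
    size_t  cbOpcode = 0;   /* caller fills abOpcode and sets this to the number of valid bytes at the current RIP */
    VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, pVCpu->cpum.GstCtx.rip, abOpcode, cbOpcode);
#endif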
    1998 
    1999 
    2000 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypass(PVMCPUCC pVCpu)
    2001 {
    2002     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    2003     if (rcStrict == VINF_SUCCESS)
    2004         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypass");
    2005     else if (pVCpu->iem.s.cActiveMappings > 0)
    2006         iemMemRollback(pVCpu);
    2007 
    2008     return rcStrict;
    2009 }
    2010 
    2011 
    2012 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, uint64_t OpcodeBytesPC,
    2013                                                               const void *pvOpcodeBytes, size_t cbOpcodeBytes)
    2014 {
    2015     VBOXSTRICTRC rcStrict;
    2016     if (   cbOpcodeBytes
    2017         && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
    2018     {
    2019         iemInitDecoder(pVCpu, IEM_F_BYPASS_HANDLERS);
    2020 #ifdef IEM_WITH_CODE_TLB
    2021         pVCpu->iem.s.uInstrBufPc      = OpcodeBytesPC;
    2022         pVCpu->iem.s.pbInstrBuf       = (uint8_t const *)pvOpcodeBytes;
    2023         pVCpu->iem.s.cbInstrBufTotal  = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
    2024         pVCpu->iem.s.offCurInstrStart = 0;
    2025         pVCpu->iem.s.offInstrNextByte = 0;
    2026         pVCpu->iem.s.GCPhysInstrBuf   = NIL_RTGCPHYS;
    2027 #else
    2028         pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
    2029         memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
    2030 #endif
    2031         rcStrict = VINF_SUCCESS;
    2032     }
    2033     else
    2034         rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_BYPASS_HANDLERS);
    2035     if (rcStrict == VINF_SUCCESS)
    2036         rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
    2037     else if (pVCpu->iem.s.cActiveMappings > 0)
    2038         iemMemRollback(pVCpu);
    2039 
    2040     return rcStrict;
    2041 }
    2042 
    2043 
    2044 /**
    2045  * For handling split cacheline lock operations when the host has split-lock
    2046  * detection enabled.
    2047  *
    2048  * This will cause the interpreter to disregard the lock prefix and implicit
    2049  * locking (xchg).
    2050  *
    2051  * @returns Strict VBox status code.
    2052  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    2053  */
    2054 VMM_INT_DECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
    2055 {
    2056     /*
    2057      * Do the decoding and emulation.
    2058      */
    2059     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, IEM_F_X86_DISREGARD_LOCK);
    2060     if (rcStrict == VINF_SUCCESS)
    2061         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
    2062     else if (pVCpu->iem.s.cActiveMappings > 0)
    2063         iemMemRollback(pVCpu);
    2064 
    2065     if (rcStrict != VINF_SUCCESS)
    2066         LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    2067                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    2068     return rcStrict;
    2069 }
    2070 
    2071 
    2072 /**
    2073  * Code common to IEMExecLots and IEMExecRecompilerThreaded that attempts to
    2074  * inject a pending TRPM trap.
    2075  */
    2076 VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu)
    2077 {
    2078     Assert(TRPMHasTrap(pVCpu));
    2079 
    2080     if (   !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
    2081         && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
    2082     {
    2083         /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
    2084 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    2085         bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
    2086         if (fIntrEnabled)
    2087         {
    2088             if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
    2089                 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    2090             else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    2091                 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
    2092             else
    2093             {
    2094                 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
    2095                 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
    2096             }
    2097         }
    2098 #else
    2099         bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
    2100 #endif
    2101         if (fIntrEnabled)
    2102         {
    2103             uint8_t     u8TrapNo;
    2104             TRPMEVENT   enmType;
    2105             uint32_t    uErrCode;
    2106             RTGCPTR     uCr2;
    2107             int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /*pu8InstLen*/, NULL /*fIcebp*/);
    2108             AssertRC(rc2);
    2109             Assert(enmType == TRPM_HARDWARE_INT);
    2110             VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /*cbInstr*/);
    2111 
    2112             TRPMResetTrap(pVCpu);
    2113 
    2114 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    2115             /* Injecting an event may cause a VM-exit. */
    2116             if (   rcStrict != VINF_SUCCESS
    2117                 && rcStrict != VINF_IEM_RAISED_XCPT)
    2118                 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2119 #else
    2120             NOREF(rcStrict);
    2121 #endif
    2122         }
    2123     }
    2124 
    2125     return VINF_SUCCESS;
    2126 }
    2127 
    2128 
    2129 VMM_INT_DECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
    2130 {
    2131     uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
    2132     AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
    2133     Assert(cMaxInstructions > 0);
    2134 
    2135     /*
    2136      * See if there is an interrupt pending in TRPM, inject it if we can.
    2137      */
    2138     /** @todo What if we are injecting an exception and not an interrupt? Is that
    2139      *        possible here? For now we assert it is indeed only an interrupt. */
    2140     if (!TRPMHasTrap(pVCpu))
    2141     { /* likely */ }
    2142     else
    2143     {
    2144         VBOXSTRICTRC rcStrict = iemExecInjectPendingTrap(pVCpu);
    2145         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    2146         { /*likely */ }
    2147         else
    2148             return rcStrict;
    2149     }
    2150 
    2151     /*
    2152      * Initial decoder init w/ prefetch, then setup setjmp.
    2153      */
    2154     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    2155     if (rcStrict == VINF_SUCCESS)
    2156     {
    2157 #ifdef IEM_WITH_SETJMP
    2158         pVCpu->iem.s.cActiveMappings = 0; /** @todo wtf? */
    2159         IEM_TRY_SETJMP(pVCpu, rcStrict)
    2160 #endif
    2161         {
    2162             /*
    2163              * The run loop.  We limit ourselves to 4096 instructions right now.
    2164              */
    2165             uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
    2166             PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    2167             for (;;)
    2168             {
    2169                 /*
    2170                  * Log the state.
    2171                  */
    2172 #ifdef LOG_ENABLED
    2173                 iemLogCurInstr(pVCpu, true, "IEMExecLots");
    2174 #endif
    2175 
    2176                 /*
    2177                  * Do the decoding and emulation.
    2178                  */
    2179                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    2180                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    2181 #ifdef VBOX_STRICT
    2182                 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
    2183 #endif
    2184                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    2185                 {
    2186                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    2187                     pVCpu->iem.s.cInstructions++;
    2188 
    2189 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2190                     /* Perform any VMX nested-guest instruction boundary actions. */
    2191                     uint64_t fCpu = pVCpu->fLocalForcedActions;
    2192                     if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    2193                                   | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
    2194                     { /* likely */ }
    2195                     else
    2196                     {
    2197                         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    2198                         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    2199                             fCpu = pVCpu->fLocalForcedActions;
    2200                         else
    2201                         {
    2202                             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2203                             break;
    2204                         }
    2205                     }
    2206 #endif
    2207                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    2208                     {
    2209 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX
    2210                         uint64_t fCpu = pVCpu->fLocalForcedActions;
    2211 #endif
    2212                         fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    2213                                                       | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    2214                                                       | VMCPU_FF_TLB_FLUSH
    2215                                                       | VMCPU_FF_UNHALT );
    2216 
    2217                         if (RT_LIKELY(   (   !fCpu
    2218                                           || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    2219                                               && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
    2220                                       && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
    2221                         {
    2222                             if (--cMaxInstructionsGccStupidity > 0)
    2223                             {
     2224                                 /* Poll timers every now and then according to the caller's specs. */
    2225                                 if (   (cMaxInstructionsGccStupidity & cPollRate) != 0
    2226                                     || !TMTimerPollBool(pVM, pVCpu))
    2227                                 {
    2228                                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    2229                                     iemReInitDecoder(pVCpu);
    2230                                     continue;
    2231                                 }
    2232                             }
    2233                         }
    2234                     }
    2235                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    2236                 }
    2237                 else if (pVCpu->iem.s.cActiveMappings > 0)
    2238                     iemMemRollback(pVCpu);
    2239                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2240                 break;
    2241             }
    2242         }
    2243 #ifdef IEM_WITH_SETJMP
    2244         IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    2245         {
    2246             if (pVCpu->iem.s.cActiveMappings > 0)
    2247                 iemMemRollback(pVCpu);
    2248 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    2249             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2250 # endif
    2251             pVCpu->iem.s.cLongJumps++;
    2252         }
    2253         IEM_CATCH_LONGJMP_END(pVCpu);
    2254 #endif
    2255 
    2256         /*
    2257          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    2258          */
    2259         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    2260         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    2261     }
    2262     else
    2263     {
    2264         if (pVCpu->iem.s.cActiveMappings > 0)
    2265             iemMemRollback(pVCpu);
    2266 
    2267 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    2268         /*
    2269          * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
    2270          * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
    2271          */
    2272         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2273 #endif
    2274     }
    2275 
    2276     /*
    2277      * Maybe re-enter raw-mode and log.
    2278      */
    2279     if (rcStrict != VINF_SUCCESS)
    2280         LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
    2281                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
    2282     if (pcInstructions)
    2283         *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
    2284     return rcStrict;
    2285 }
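/* Hedged call sketch: cPollRate + 1 must be a power of two (see the assertion at the top of
   IEMExecLots) because the timer-poll decision above is a simple mask test on the remaining
   instruction budget.  The numbers below are merely examples, not recommendations. */
#if 0
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
#endif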
    2286 
    2287 
    2288 /**
    2289  * Interface used by EMExecuteExec, does exit statistics and limits.
    2290  *
    2291  * @returns Strict VBox status code.
    2292  * @param   pVCpu               The cross context virtual CPU structure.
    2293  * @param   fWillExit           To be defined.
    2294  * @param   cMinInstructions    Minimum number of instructions to execute before checking for FFs.
    2295  * @param   cMaxInstructions    Maximum number of instructions to execute.
    2296  * @param   cMaxInstructionsWithoutExits
    2297  *                              The max number of instructions without exits.
    2298  * @param   pStats              Where to return statistics.
    2299  */
    2300 VMM_INT_DECL(VBOXSTRICTRC)
    2301 IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
    2302                 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
    2303 {
    2304     NOREF(fWillExit); /** @todo define flexible exit crits */
    2305 
    2306     /*
    2307      * Initialize return stats.
    2308      */
    2309     pStats->cInstructions    = 0;
    2310     pStats->cExits           = 0;
    2311     pStats->cMaxExitDistance = 0;
    2312     pStats->cReserved        = 0;
    2313 
    2314     /*
    2315      * Initial decoder init w/ prefetch, then setup setjmp.
    2316      */
    2317     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, 0 /*fExecOpts*/);
    2318     if (rcStrict == VINF_SUCCESS)
    2319     {
    2320 #ifdef IEM_WITH_SETJMP
    2321         pVCpu->iem.s.cActiveMappings     = 0; /** @todo wtf?!? */
    2322         IEM_TRY_SETJMP(pVCpu, rcStrict)
    2323 #endif
    2324         {
    2325 #ifdef IN_RING0
    2326             bool const fCheckPreemptionPending   = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    2327 #endif
    2328             uint32_t   cInstructionSinceLastExit = 0;
    2329 
    2330             /*
    2331              * The run loop.  We limit ourselves to 4096 instructions right now.
    2332              */
    2333             PVM pVM = pVCpu->CTX_SUFF(pVM);
    2334             for (;;)
    2335             {
    2336                 /*
    2337                  * Log the state.
    2338                  */
    2339 #ifdef LOG_ENABLED
    2340                 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
    2341 #endif
    2342 
    2343                 /*
    2344                  * Do the decoding and emulation.
    2345                  */
    2346                 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
    2347 
    2348                 uint8_t b; IEM_OPCODE_GET_FIRST_U8(&b);
    2349                 rcStrict = FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[b]);
    2350 
    2351                 if (   cPotentialExits != pVCpu->iem.s.cPotentialExits
    2352                     && cInstructionSinceLastExit > 0 /* don't count the first */ )
    2353                 {
    2354                     pStats->cExits += 1;
    2355                     if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
    2356                         pStats->cMaxExitDistance = cInstructionSinceLastExit;
    2357                     cInstructionSinceLastExit = 0;
    2358                 }
    2359 
    2360                 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    2361                 {
    2362                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    2363                     pVCpu->iem.s.cInstructions++;
    2364                     pStats->cInstructions++;
    2365                     cInstructionSinceLastExit++;
    2366 
    2367 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2368                     /* Perform any VMX nested-guest instruction boundary actions. */
    2369                     uint64_t fCpu = pVCpu->fLocalForcedActions;
    2370                     if (!(fCpu & (  VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
    2371                                   | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW)))
    2372                     { /* likely */ }
    2373                     else
    2374                     {
    2375                         rcStrict = iemHandleNestedInstructionBoundaryFFs(pVCpu, rcStrict);
    2376                         if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    2377                             fCpu = pVCpu->fLocalForcedActions;
    2378                         else
    2379                         {
    2380                             rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2381                             break;
    2382                         }
    2383                     }
    2384 #endif
    2385                     if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
    2386                     {
    2387 #ifndef VBOX_WITH_NESTED_HWVIRT_VMX
    2388                         uint64_t fCpu = pVCpu->fLocalForcedActions;
    2389 #endif
    2390                         fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
    2391                                                       | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
    2392                                                       | VMCPU_FF_TLB_FLUSH
    2393                                                       | VMCPU_FF_UNHALT );
    2394                         if (RT_LIKELY(   (   (   !fCpu
    2395                                               || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
    2396                                                   && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
    2397                                           && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
    2398                                       || pStats->cInstructions < cMinInstructions))
    2399                         {
    2400                             if (pStats->cInstructions < cMaxInstructions)
    2401                             {
    2402                                 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
    2403                                 {
    2404 #ifdef IN_RING0
    2405                                     if (   !fCheckPreemptionPending
    2406                                         || !RTThreadPreemptIsPending(NIL_RTTHREAD))
    2407 #endif
    2408                                     {
    2409                                         Assert(pVCpu->iem.s.cActiveMappings == 0);
    2410                                         iemReInitDecoder(pVCpu);
    2411                                         continue;
    2412                                     }
    2413 #ifdef IN_RING0
    2414                                     rcStrict = VINF_EM_RAW_INTERRUPT;
    2415                                     break;
    2416 #endif
    2417                                 }
    2418                             }
    2419                         }
    2420                         Assert(!(fCpu & VMCPU_FF_IEM));
    2421                     }
    2422                     Assert(pVCpu->iem.s.cActiveMappings == 0);
    2423                 }
    2424                 else if (pVCpu->iem.s.cActiveMappings > 0)
    2425                     iemMemRollback(pVCpu);
    2426                 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2427                 break;
    2428             }
    2429         }
    2430 #ifdef IEM_WITH_SETJMP
    2431         IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    2432         {
    2433             if (pVCpu->iem.s.cActiveMappings > 0)
    2434                 iemMemRollback(pVCpu);
    2435             pVCpu->iem.s.cLongJumps++;
    2436         }
    2437         IEM_CATCH_LONGJMP_END(pVCpu);
    2438 #endif
    2439 
    2440         /*
    2441          * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
    2442          */
    2443         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    2444         Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    2445     }
    2446     else
    2447     {
    2448         if (pVCpu->iem.s.cActiveMappings > 0)
    2449             iemMemRollback(pVCpu);
    2450 
    2451 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
    2452         /*
    2453          * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
    2454          * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
    2455          */
    2456         rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
    2457 #endif
    2458     }
    2459 
    2460     /*
    2461      * Maybe re-enter raw-mode and log.
    2462      */
    2463     if (rcStrict != VINF_SUCCESS)
    2464         LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
    2465                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
    2466                  pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
    2467     return rcStrict;
    2468 }
    2469 
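As a rough illustration of the interface above, an EM-side caller could drive IEMExecForExits along the following lines. This is a hedged sketch rather than code from the tree: the statistics structure name IEMEXECFOREXITSTATS is inferred from the PIEMEXECFOREXITSTATS parameter type, and the instruction limits are arbitrary example values.

            /* Hypothetical caller sketch (not part of this changeset). */
            IEMEXECFOREXITSTATS ExitStats;
            VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu,
                                                    0    /*fWillExit - not yet defined, see @todo above*/,
                                                    32   /*cMinInstructions - example value*/,
                                                    4096 /*cMaxInstructions - example value*/,
                                                    512  /*cMaxInstructionsWithoutExits - example value*/,
                                                    &ExitStats);
            if (rcStrict == VINF_SUCCESS)
                Log2(("Executed %u instructions, %u exits, max exit distance %u\n",
                      ExitStats.cInstructions, ExitStats.cExits, ExitStats.cMaxExitDistance));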
    2470 
    2471 /**
    2472  * Injects a trap, fault, abort, software interrupt or external interrupt.
    2473  *
    2474  * The parameter list matches TRPMQueryTrapAll pretty closely.
    2475  *
    2476  * @returns Strict VBox status code.
    2477  * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
    2478  * @param   u8TrapNo            The trap number.
    2479  * @param   enmType             What type is it (trap/fault/abort), software
    2480  *                              interrupt or hardware interrupt.
    2481  * @param   uErrCode            The error code if applicable.
    2482  * @param   uCr2                The CR2 value if applicable.
    2483  * @param   cbInstr             The instruction length (only relevant for
    2484  *                              software interrupts).
    2485  * @note    x86 specific, but difficult to move due to iemInitDecoder dep.
    2486  */
    2487 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
    2488                                          uint8_t cbInstr)
    2489 {
    2490     iemInitDecoder(pVCpu, 0 /*fExecOpts*/); /** @todo wrong init function! */
    2491 #ifdef DBGFTRACE_ENABLED
    2492     RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
    2493                       u8TrapNo, enmType, uErrCode, uCr2);
    2494 #endif
    2495 
    2496     uint32_t fFlags;
    2497     switch (enmType)
    2498     {
    2499         case TRPM_HARDWARE_INT:
    2500             Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
    2501             fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
    2502             uErrCode = uCr2 = 0;
    2503             break;
    2504 
    2505         case TRPM_SOFTWARE_INT:
    2506             Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
    2507             fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
    2508             uErrCode = uCr2 = 0;
    2509             break;
    2510 
    2511         case TRPM_TRAP:
    2512         case TRPM_NMI: /** @todo Distinguish NMI from exception 2. */
    2513             Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
    2514             fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
    2515             if (u8TrapNo == X86_XCPT_PF)
    2516                 fFlags |= IEM_XCPT_FLAGS_CR2;
    2517             switch (u8TrapNo)
    2518             {
    2519                 case X86_XCPT_DF:
    2520                 case X86_XCPT_TS:
    2521                 case X86_XCPT_NP:
    2522                 case X86_XCPT_SS:
    2523                 case X86_XCPT_PF:
    2524                 case X86_XCPT_AC:
    2525                 case X86_XCPT_GP:
    2526                     fFlags |= IEM_XCPT_FLAGS_ERR;
    2527                     break;
    2528             }
    2529             break;
    2530 
    2531         IEM_NOT_REACHED_DEFAULT_CASE_RET();
    2532     }
    2533 
    2534     VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
    2535 
    2536     if (pVCpu->iem.s.cActiveMappings > 0)
    2537         iemMemRollback(pVCpu);
    2538 
    2539     return rcStrict;
    2540 }
    2541 
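A minimal, hypothetical use of IEMInjectTrap, here delivering a page fault: for TRPM_TRAP with X86_XCPT_PF the function itself adds IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2, so the caller only supplies the error code and the fault address. The fault address below is an arbitrary example value.

            /* Hypothetical caller sketch (not part of this changeset). */
            RTGCPTR const GCPtrFault = UINT64_C(0x7fff00001000);        /* example fault address */
            VBOXSTRICTRC  rcStrict   = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                                                     X86_TRAP_PF_P | X86_TRAP_PF_RW /*uErrCode*/,
                                                     GCPtrFault /*uCr2*/,
                                                     0 /*cbInstr - only relevant for software interrupts*/);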
    2542 
    2543 /**
    2544  * Injects the active TRPM event.
    2545  *
    2546  * @returns Strict VBox status code.
    2547  * @param   pVCpu               The cross context virtual CPU structure.
    2548  */
    2549 VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
    2550 {
    2551 #ifndef IEM_IMPLEMENTS_TASKSWITCH
    2552     IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
    2553 #else
    2554     uint8_t     u8TrapNo;
    2555     TRPMEVENT   enmType;
    2556     uint32_t    uErrCode;
    2557     RTGCUINTPTR uCr2;
    2558     uint8_t     cbInstr;
    2559     int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
    2560     if (RT_FAILURE(rc))
    2561         return rc;
    2562 
    2563     /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
    2564      *        ICEBP \#DB injection as a special case. */
    2565     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
    2566 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    2567     if (rcStrict == VINF_SVM_VMEXIT)
    2568         rcStrict = VINF_SUCCESS;
    2569 #endif
    2570 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    2571     if (rcStrict == VINF_VMX_VMEXIT)
    2572         rcStrict = VINF_SUCCESS;
    2573 #endif
    2574     /** @todo Are there any other codes that imply the event was successfully
    2575      *        delivered to the guest? See @bugref{6607}.  */
    2576     if (   rcStrict == VINF_SUCCESS
    2577         || rcStrict == VINF_IEM_RAISED_XCPT)
    2578         TRPMResetTrap(pVCpu);
    2579 
    2580     return rcStrict;
    2581 #endif
    2582 }
    2583 
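Correspondingly, a caller would typically check TRPM for a pending event before invoking the function above. A hedged sketch, assuming the usual TRPMHasTrap query and that rcStrict is the caller's existing status variable:

            /* Hypothetical caller sketch (not part of this changeset). */
            if (TRPMHasTrap(pVCpu))
                rcStrict = IEMInjectTrpmEvent(pVCpu);   /* resets the TRPM trap on successful delivery */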
    2584 
    2585 VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
    2586 {
    2587     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    2588     return VERR_NOT_IMPLEMENTED;
    2589 }
    2590 
    2591 
    2592 VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
    2593 {
    2594     RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
    2595     return VERR_NOT_IMPLEMENTED;
    2596 }
    2597 
    2598 #ifdef IN_RING3
    2599 
    2600 /**
    2601  * Handles the unlikely and probably fatal merge cases.
    2602  *
    2603  * @returns Merged status code.
    2604  * @param   rcStrict        Current EM status code.
    2605  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    2606  *                          with @a rcStrict.
    2607  * @param   iMemMap         The memory mapping index. For error reporting only.
    2608  * @param   pVCpu           The cross context virtual CPU structure of the calling
    2609  *                          thread, for error reporting only.
    2610  */
    2611 DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
    2612                                                           unsigned iMemMap, PVMCPUCC pVCpu)
    2613 {
    2614     if (RT_FAILURE_NP(rcStrict))
    2615         return rcStrict;
    2616 
    2617     if (RT_FAILURE_NP(rcStrictCommit))
    2618         return rcStrictCommit;
    2619 
    2620     if (rcStrict == rcStrictCommit)
    2621         return rcStrictCommit;
    2622 
    2623     AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
    2624                            VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
    2625                            pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
    2626                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
    2627                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    2628     return VERR_IOM_FF_STATUS_IPE;
    2629 }
    2630 
    2631 
    2632 /**
    2633  * Helper for IOMR3ProcessForceFlag.
    2634  *
    2635  * @returns Merged status code.
    2636  * @param   rcStrict        Current EM status code.
    2637  * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
    2638  *                          with @a rcStrict.
    2639  * @param   iMemMap         The memory mapping index. For error reporting only.
    2640  * @param   pVCpu           The cross context virtual CPU structure of the calling
    2641  *                          thread, for error reporting only.
    2642  */
    2643 DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
    2644 {
    2645     /* Simple. */
    2646     if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
    2647         return rcStrictCommit;
    2648 
    2649     if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
    2650         return rcStrict;
    2651 
    2652     /* EM scheduling status codes. */
    2653     if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
    2654                   && rcStrict <= VINF_EM_LAST))
    2655     {
    2656         if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
    2657                       && rcStrictCommit <= VINF_EM_LAST))
    2658             return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    2659     }
    2660 
    2661     /* Unlikely */
    2662     return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
    2663 }
    2664 
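To make the merge rules above concrete, here are two combinations handled by the fast paths. This is an informal illustration under the stated assumptions, not code from the tree:

    /* Hypothetical illustration (not part of this changeset). */
    Assert(iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RESCHEDULE, 0 /*iMemMap*/, pVCpu) == VINF_EM_RESCHEDULE);
    Assert(iemR3MergeStatus(VINF_EM_HALT, VINF_SUCCESS,       0 /*iMemMap*/, pVCpu) == VINF_EM_HALT);
    /* When both are EM scheduling codes, the numerically lower (higher priority) one wins. */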
    2665 
    2666 /**
    2667  * Called by force-flag handling code when VMCPU_FF_IEM is set.
    2668  *
    2669  * @returns Merge between @a rcStrict and what the commit operation returned.
    2670  * @param   pVM         The cross context VM structure.
    2671  * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
    2672  * @param   rcStrict    The status code returned by ring-0 or raw-mode.
    2673  */
    2674 VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
    2675 {
    2676     /*
    2677      * Reset the pending commit.
    2678      */
    2679     AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
    2680               & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
    2681               ("%#x %#x %#x\n",
    2682                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    2683     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
    2684 
    2685     /*
    2686      * Commit the pending bounce buffers (usually just one).
    2687      */
    2688     unsigned cBufs = 0;
    2689     unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    2690     while (iMemMap-- > 0)
    2691         if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
    2692         {
    2693             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
    2694             Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    2695             Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
    2696 
    2697             uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
    2698             uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    2699             uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    2700 
    2701             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
    2702             {
    2703                 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
    2704                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
    2705                                                             pbBuf,
    2706                                                             cbFirst,
    2707                                                             PGMACCESSORIGIN_IEM);
    2708                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
    2709                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
    2710                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
    2711                      VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
    2712             }
    2713 
    2714             if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
    2715             {
    2716                 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
    2717                                                             pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
    2718                                                             pbBuf + cbFirst,
    2719                                                             cbSecond,
    2720                                                             PGMACCESSORIGIN_IEM);
    2721                 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
    2722                 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
    2723                      iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
    2724                      VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
    2725             }
    2726             cBufs++;
    2727             pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    2728         }
    2729 
    2730     AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
    2731               ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
    2732                pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    2733     pVCpu->iem.s.cActiveMappings = 0;
    2734     return rcStrict;
    2735 }
    2736 
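For context, the ring-3 force-flag handling code would hand its current status code over roughly like this (a hedged sketch of the caller, not part of this changeset):

    /* Hypothetical ring-3 caller sketch. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);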
    2737 #endif /* IN_RING3 */
    2738 